https://github.com/vitessio/vitess/releases
https://github.com/planetscale/vitess-releases
# 本地安装Vitess
https://abc.htmltoo.com/thread-46450.htm
10.216.91.116 | 3308 | 3313 | 3314 | vttablet_100,200,300,400 | vtgate:15306 | |
10.216.91.117 | 3308 | 3313 | 3314 | vttablet_101,201,301,401 | vtctld | vtgate:15306 |
10.216.91.118 | 3308 | 3313 | 3314 | vttablet_102,202,302,402 | vtgate:15306 |
10.216.91.117 | zk1 |
10.216.91.118 | zk2 |
10.216.91.119 | zk3 |
---
# Unpack the Vitess release tarball into /data/service/vitess and hand
# ownership to the mysql user (all daemons run as mysql, not root).
mkdir /data/service/vitess
tar -xvzf vitess-6.0.20-20200818-90741b8.tar.gz -C /data/service/vitess --strip-components 1
chown -R mysql.mysql /data/service/vitess
# The export lines below are added to the mysql user's ~/.bashrc so every
# shell picks up the Vitess environment.
vim /home/mysql/.bashrc
# TOPO selects the topology backend used by env.sh (zk2 / k8s / etcd2).
export TOPO=zk2
export VTROOT=/data/service/vitess
export VTTOP=$VTROOT
#export MYSQL_FLAVOR=MySQL57
# VTDATAROOT holds all runtime state: tablet dirs, logs, pid files, backups.
export VTDATAROOT=/data/service_data/vitess/vtdataroot
export PATH=${VTROOT}/bin:${PATH}
source /home/mysql/.bashrc
# Create the data root on a separate data volume, owned by mysql.
mkdir /data/service_data/vitess
chown -R mysql.mysql /data/service_data/vitess
-vttablet节点配置hosts文件
10.216.91.117 zk_server1
10.216.91.118 zk_server2
10.216.91.119 zk_server3
-zoo.cfg
server.1=10.216.91.117:2882:3882 server.2=10.216.91.118:2882:3882 server.3=10.216.91.119:2882:3882
-编辑env.sh脚本,配置zk信息
# env.sh — shared environment sourced by every *-up.sh script.
# Defines host/port settings, validates prerequisites, and builds
# TOPOLOGY_FLAGS for the topology backend selected by $TOPO.
hostname=$(hostname -f)
# ZooKeeper ensemble members; these names resolve via /etc/hosts entries
# configured on every vttablet node.
hostname1=zk_server1
hostname2=zk_server2
hostname3=zk_server3
# vtctld admin web UI endpoint.
vtctld_web_port=15000
vtctld_host="10.216.91.117"
export VTDATAROOT="${VTDATAROOT:-${PWD}/vtdataroot}"
# Print an error to stdout and abort the calling script.
function fail() {
echo "ERROR: $1"
exit 1
}
# Refuse to run as root; all Vitess processes run as a regular user.
if [[ $EUID -eq 0 ]]; then
fail "This script refuses to be run as root. Please switch to a regular user."
fi
# mysqld might be in /usr/sbin which will not be in the default PATH
PATH="/usr/sbin:$PATH"
# Verify every required binary is reachable before any script proceeds.
for binary in mysqld curl vtctlclient vttablet vtgate vtctld; do
command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/docs/get-started/local/ for install instructions."
done;
# Select the topology backend from $TOPO: zk2, k8s, or etcd2 (default).
if [ "${TOPO}" = "zk2" ]; then
# Set topology environment parameters.
# ZooKeeper client port is 2881 on all three ensemble members.
ZK_SERVER="${hostname1}:2881,${hostname2}:2881,${hostname3}:2881"
# shellcheck disable=SC2034
TOPOLOGY_FLAGS="-topo_implementation zk2 -topo_global_server_address ${ZK_SERVER} -topo_global_root /vitess/global"
mkdir -p "${VTDATAROOT}/tmp"
elif [ "${TOPO}" = "k8s" ]; then
# Set topology environment parameters.
K8S_ADDR="localhost"
K8S_PORT="8443"
K8S_KUBECONFIG=$VTDATAROOT/tmp/k8s.kubeconfig
# shellcheck disable=SC2034
TOPOLOGY_FLAGS="-topo_implementation k8s -topo_k8s_kubeconfig ${K8S_KUBECONFIG} -topo_global_server_address ${K8S_ADDR}:${K8S_PORT} -topo_global_root /vitess/global"
else
# Default: etcd2 topology served from localhost.
ETCD_SERVER="localhost:2379"
TOPOLOGY_FLAGS="-topo_implementation etcd2 -topo_global_server_address $ETCD_SERVER -topo_global_root /vitess/global"
mkdir -p "${VTDATAROOT}/etcd"
fi
mkdir -p "${VTDATAROOT}/tmp"
# Set aliases to simplify instructions.
# In your own environment you may prefer to use config files,
# such as ~/.my.cnf
alias mysql="command mysql -h 127.0.0.1 -P 15306"
alias vtctlclient="command vtctlclient -server localhost:15999 -log_dir ${VTDATAROOT}/tmp -alsologtostderr"
# Make sure aliases are expanded in non-interactive shell
shopt -s expand_aliases
---启动zk,初始化topo服务
-编辑zk-up.sh脚本
#!/bin/bash
# zk-up.sh — register this deployment's cell with the topology service.
# Pulls TOPO, TOPOLOGY_FLAGS and ZK_SERVER from the shared environment.
source ./env.sh

# On the zk2 topology this deployment uses the "zone1" cell.
[[ "${TOPO}" = "zk2" ]] && CELL=zone1
cell=${CELL:-'test'}

# Show what we are about to register against.
echo $TOPOLOGY_FLAGS
echo $ZK_SERVER

# Add the CellInfo record for $cell. AddCellInfo fails when the node
# already exists, which simply means we are reusing existing data, so
# errors are tolerated for this one command.
set +e
# shellcheck disable=SC2086  # TOPOLOGY_FLAGS must word-split.
vtctl $TOPOLOGY_FLAGS AddCellInfo \
  -root /vitess/$cell \
  -server_address $ZK_SERVER \
  $cell
set -e
echo "Configured zk servers."
---初始化topo服务
sh scripts/zk-up.sh
如果多个vitess集群使用同一个topology server,需要配置不同的-topo_global_root 目录作为区分。
-登陆zkserver查看topo信息是否完成
sh zkCli.sh -server 10.216.91.118:2881
---启动vtctld
-vtctld-up.sh
#!/bin/bash
# vtctld-up.sh — start the vtctld daemon (cluster admin API + web UI).
# Relies on env.sh for TOPOLOGY_FLAGS, VTDATAROOT and vtctld_web_port.
source ./env.sh

CELL="zone1"
cell=${CELL:-'test'}
grpc_port=15999

echo "Starting vtctld..."
# TOPOLOGY_FLAGS is intentionally unquoted: it carries several flags
# that must undergo word splitting.
# shellcheck disable=SC2086
vtctld \
  $TOPOLOGY_FLAGS \
  -cell $cell \
  -workflow_manager_init \
  -workflow_manager_use_election \
  -service_map 'grpc-vtctl' \
  -backup_storage_implementation file \
  -file_backup_storage_root "$VTDATAROOT/backups" \
  -log_dir "$VTDATAROOT/tmp" \
  -port $vtctld_web_port \
  -grpc_port $grpc_port \
  -pid_file "$VTDATAROOT/tmp/vtctld.pid" \
  > "$VTDATAROOT/tmp/vtctld.out" 2>&1 &
-启动脚本
sh scripts/vtctld-up.sh
-登陆vtctld web服务
http://10.216.91.117:15000/
---启动vttablet
-创建vttablet配置文件
mkdir /data/service/vitess/config/tablet
cd /data/service/vitess/config/tablet
vim tablet_100_commerce.yaml

tabletID: zone1-100
init:
  dbName: db1                       # init_db_name_override
  keyspace: commerce                # init_keyspace
  shard: 0                          # init_shard
  tabletType: replica               # init_tablet_type
  timeoutSeconds: 60                # init_timeout
db:
  socket:                           # db_socket
  host: 10.216.91.117               # db_host
  port: 3308                        # db_port
  charSet: utf8                     # db_charset
  flags: 0                          # db_flags
  flavor: MySQL8                    # db_flavor
  sslCa:                            # db_ssl_ca
  sslCaPath:                        # db_ssl_ca_path
  sslCert:                          # db_ssl_cert
  sslKey:                           # db_ssl_key
  serverName:                       # db_server_name
  connectTimeoutMilliseconds: 0     # db_connect_timeout_ms
  app:
    user: msandbox                  # db_app_user
    password: msandbox              # db_app_password
    useSsl: true                    # db_app_use_ssl
    preferTcp: false
  dba:
    user: dba                       # db_dba_user
    password: doumi1.q              # db_dba_password
    useSsl: true                    # db_dba_use_ssl
    preferTcp: false
  filtered:
    user: msandbox                  # db_filtered_user
    password: msandbox              # db_filtered_password
    useSsl: true                    # db_filtered_use_ssl
    preferTcp: false
  repl:
    user: dmrepl                    # db_repl_user
    password: doumi1.q              # db_repl_password
    useSsl: true                    # db_repl_use_ssl
    preferTcp: false
  appdebug:
    user: msandbox                  # db_appdebug_user
    password: msandbox              # db_appdebug_password
    useSsl: true                    # db_appdebug_use_ssl
    preferTcp: false
  allprivs:
    user: dba                       # db_allprivs_user
    password: doumi1.q              # db_allprivs_password
    useSsl: true                    # db_allprivs_use_ssl
    preferTcp: false
oltpReadPool:
  size: 16                          # queryserver-config-pool-size
  timeoutSeconds: 0                 # queryserver-config-query-pool-timeout
  idleTimeoutSeconds: 1800          # queryserver-config-idle-timeout
  prefillParallelism: 0             # queryserver-config-pool-prefill-parallelism
  maxWaiters: 50000                 # queryserver-config-query-pool-waiter-cap
olapReadPool:
  size: 200                         # queryserver-config-stream-pool-size
  timeoutSeconds: 0                 # queryserver-config-query-pool-timeout
  idleTimeoutSeconds: 1800          # queryserver-config-idle-timeout
  prefillParallelism: 0             # queryserver-config-stream-pool-prefill-parallelism
  maxWaiters: 0
txPool:
  size: 20                          # queryserver-config-transaction-cap
  timeoutSeconds: 1                 # queryserver-config-txpool-timeout
  idleTimeoutSeconds: 1800          # queryserver-config-idle-timeout
  prefillParallelism: 0             # queryserver-config-transaction-prefill-parallelism
  maxWaiters: 50000                 # queryserver-config-txpool-waiter-cap
oltp:
  queryTimeoutSeconds: 30           # queryserver-config-query-timeout
  txTimeoutSeconds: 30              # queryserver-config-transaction-timeout
  maxRows: 10000                    # queryserver-config-max-result-size
  warnRows: 0                       # queryserver-config-warn-result-size
healthcheck:
  intervalSeconds: 20               # health_check_interval
  degradedThresholdSeconds: 30      # degraded_threshold
  unhealthyThresholdSeconds: 7200   # unhealthy_threshold
gracePeriods:
  transactionShutdownSeconds: 0     # transaction_shutdown_grace_period
  transitionSeconds: 0              # serving_state_grace_period
replicationTracker:
  mode: disable                     # enable_replication_reporter
  heartbeatIntervalMilliseconds: 0  # heartbeat_enable, heartbeat_interval
hotRowProtection:
  mode: disable|dryRun|enable       # enable_hot_row_protection, enable_hot_row_protection_dry_run
  # Recommended value: same as txPool.size.
  maxQueueSize: 20                  # hot_row_protection_max_queue_size
  maxGlobalQueueSize: 1000          # hot_row_protection_max_global_queue_size
  maxConcurrency: 5                 # hot_row_protection_concurrent_transactions
consolidator: enable|disable|notOnMaster # enable-consolidator, enable-consolidator-replicas
passthroughDML: false               # queryserver-config-passthrough-dmls
streamBufferSize: 32768             # queryserver-config-stream-buffer-size
queryCacheSize: 5000                # queryserver-config-query-cache-size
schemaReloadIntervalSeconds: 1800   # queryserver-config-schema-reload-time
watchReplication: false             # watch_replication_stream
terseErrors: false                  # queryserver-config-terse-errors
messagePostponeParallelism: 4       # queryserver-config-message-postpone-cap
cacheResultFields: true             # enable-query-plan-field-caching
# The following flags are currently not supported.
# enforce_strict_trans_tables
# queryserver-config-strict-table-acl
# queryserver-config-enable-table-acl-dry-run
# queryserver-config-acl-exempt-acl
# enable-tx-throttler
# tx-throttler-config
# tx-throttler-healthcheck-cells
# enable_transaction_limit
# enable_transaction_limit_dry_run
# transaction_limit_per_user
# transaction_limit_by_username
# transaction_limit_by_principal
# transaction_limit_by_component
# transaction_limit_by_subcomponent
---创建后端数据库账号
-- Application user referenced by the tablet yaml (db_app_user / db_filtered_user / db_appdebug_user).
create user 'msandbox'@'%' identified by 'msandbox';
grant all privileges on *.* to 'msandbox'@'%';
-- Administrative user referenced as db_dba_user and db_allprivs_user.
create user 'dba'@'%' identified by 'doumi1.q';
grant all privileges on *.* to 'dba'@'%';
-- Replication user referenced as db_repl_user; only needs REPLICATION SLAVE.
create user 'dmrepl'@'%' identified by 'doumi1.q';
GRANT REPLICATION SLAVE ON *.* TO 'dmrepl'@'%';
---编辑vttablet启动脚本
#!/bin/bash
# vttablet-up.sh — start one vttablet instance for a given keyspace/shard.
#
# Usage: sh scripts/vttablet-up.sh <keyspace> <tablet_uid> <shard>
# Relies on env.sh for TOPOLOGY_FLAGS, VTDATAROOT, hostname,
# vtctld_host/vtctld_web_port and the fail() helper.
source ./env.sh

CELL="zone1"
KEYSPACE=$1
# tablet_uid in (100,200)
TABLET_UID=$2
#DB_NAME=$3
#mysql_port=$4
SHARD=$3

# Validate the argument count before doing any work.
if [[ $# != 3 ]]; then
  #echo "usage: sh script/vttablet-up.sh keyspace tablet_uid db_name mysql_port SHARD"
  echo "usage: sh script/vttablet-up.sh keyspace tablet_uid shard"
  exit 1
fi

# Per-tablet yaml config, e.g. tablet_100_commerce.yaml.
tablet_config="/data/service/vitess/config/tablet/tablet_${TABLET_UID}_${KEYSPACE}.yaml"
echo "$tablet_config"

cell=${CELL:-'test'}
keyspace=${KEYSPACE:-'test_keyspace'}
shard=${SHARD:-'0'}
uid=$TABLET_UID
# Port convention: HTTP status on 15xxx, gRPC on 16xxx (xxx = uid).
port=$((15000 + uid))
grpc_port=$((16000 + uid))
# Tablet alias is cell + zero-padded uid, e.g. zone1-0000000100.
printf -v alias '%s-%010d' "$cell" "$uid"
printf -v tablet_dir 'vt_%010d' "$uid"
tablet_hostname=''
printf -v tablet_logfile 'vttablet_%010d_querylog.txt' "$uid"
tablet_type=replica

# Ensure the tablet's data directory exists (mkdir -p is a no-op if it does).
mkdir -p "$VTDATAROOT/$tablet_dir"

echo "Starting vttablet for $alias..."
echo $TOPOLOGY_FLAGS
echo $vtctld_host

# shellcheck disable=SC2086  # TOPOLOGY_FLAGS must word-split.
vttablet \
  $TOPOLOGY_FLAGS \
  -logtostderr \
  -log_queries_to_file "$VTDATAROOT/tmp/$tablet_logfile" \
  -tablet-path "$alias" \
  -port "$port" \
  -init_keyspace "$keyspace" \
  -init_shard "$shard" \
  -init_tablet_type "$tablet_type" \
  -grpc_port "$grpc_port" \
  -service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
  -pid_file "$VTDATAROOT/$tablet_dir/vttablet.pid" \
  -vtctld_addr "http://$vtctld_host:$vtctld_web_port/" \
  -init_populate_metadata \
  -enable_semi_sync \
  -enable_replication_reporter \
  -tablet_config "$tablet_config" \
  > "$VTDATAROOT/$tablet_dir/vttablet.out" 2>&1 &

# Block waiting for the tablet to be listening
# Not the same as healthy
for i in $(seq 0 300); do
  curl -I "http://$hostname:$port/debug/status" >/dev/null 2>&1 && break
  sleep 0.1
done

# check one last time
curl -I "http://$hostname:$port/debug/status" || fail "tablet could not be started!"
---启动vttablet
# 10.216.91.116
sh scripts/vttablet-up-config.sh commerce 100 0
# 10.216.91.117
sh scripts/vttablet-up-config.sh commerce 101 0
# 10.216.91.118
sh scripts/vttablet-up-config.sh commerce 102 0
此处会创建_vt库,创建过程中会使用SET @@session.sql_log_bin = 0。
---初始化master节点
# Elect tablet zone1-101 as the master of shard commerce/0 via the vtctld
# gRPC endpoint; -force proceeds even if the shard state looks unexpected.
vtctlclient \
-server 10.216.91.117:15999 \
-log_dir /data/service_data/vitess/vtdataroot/tmp/ \
-alsologtostderr \
InitShardMaster -force commerce/0 zone1-101
-此处默认会创建vt_commerce库,如果需要使用自定义库,启动vttablet时,使用参数 -init_db_name_override $DB_NAME
-在已经有复制结构的分片上使用InitShardMaster有可能造成数据丢失,可以使用PlannedReparentShard or EmergencyReparentShard命令。
---测试数据
-创建schema
vtctlclient \
  -server 10.216.91.117:15999 \
  -log_dir /data/service_data/vitess/vtdataroot/tmp/ \
  -alsologtostderr \
  ApplySchema -sql-file create_commerce_schema.sql commerce
-创建vschema
vtctlclient \
  -server 10.216.91.117:15999 \
  -log_dir /data/service_data/vitess/vtdataroot/tmp/ \
  -alsologtostderr \
  ApplyVSchema -vschema_file vschema_commerce_initial.json commerce
-初始化数据
mysql -h 127.0.0.1 -P 15306 < ../common/insert_commerce_data.sql
-查看导入的数据
mysql -h 127.0.0.1 -P 15306 < ../common/select_commerce_data.sql
---启动vtgate
-编辑vtgate-up.sh
#!/bin/bash
# vtgate-up.sh — start a single vtgate (the MySQL-protocol query router).
# Relies on env.sh for TOPOLOGY_FLAGS, VTDATAROOT and hostname.
source ./env.sh

CELL="zone1"
cell=${CELL:-'test'}
web_port=15001
grpc_port=15991
mysql_server_port=15306
mysql_server_socket_path="/tmp/mysql_${mysql_server_port}.sock"

# Start vtgate.
# shellcheck disable=SC2086  # TOPOLOGY_FLAGS must word-split.
vtgate \
  $TOPOLOGY_FLAGS \
  -log_dir "$VTDATAROOT/tmp" \
  -log_queries_to_file "$VTDATAROOT/tmp/vtgate_querylog.txt" \
  -port $web_port \
  -grpc_port $grpc_port \
  -mysql_server_port $mysql_server_port \
  -mysql_server_socket_path "$mysql_server_socket_path" \
  -cell $cell \
  -cells_to_watch $cell \
  -tablet_types_to_wait MASTER,REPLICA \
  -gateway_implementation discoverygateway \
  -service_map 'grpc-vtgateservice' \
  -pid_file "$VTDATAROOT/tmp/vtgate.pid" \
  -mysql_auth_server_impl none \
  > "$VTDATAROOT/tmp/vtgate.out" 2>&1 &

# Block waiting for vtgate to be listening
# Not the same as healthy
echo "Waiting for vtgate to be up..."
while true; do
  curl -I "http://$hostname:$web_port/debug/status" >/dev/null 2>&1 && break
  sleep 0.1
done

echo "vtgate is up!"
echo "Access vtgate at http://$hostname:$web_port/debug/status"

# Detach the background vtgate from this shell so it keeps running
# after the script exits.
disown -a
-启动vtgate
sh scripts/vtgate-up.sh
-查看vtgate web:
http://10.216.91.118:15001/debug/status
---读写分离
使用语法:@[master|replica|rdonly] use commerce@replica; 或者 select * from commerce@replica.corder;
---Vitess 云原生数据库系统是用于部署、扩展和管理大型 MySQL 实例集群的数据库解决方案。它是一个分布式 MySQL 工具集,它可以自动分片存储 MySQL 数据表,将单个 SQL 查询改写为分布式发送到多个 MySQL Server 上,支持行缓存(比 MySQL 本身缓存效率高)与复制容错等。
https://blog.csdn.net/wl21787/article/details/108468521