Skip to main content

How to upgrade Vitess inside an RKE2 cluster

· 6 min read
Hreniuc Cristian-Alexandru

Check when a new version of Vitess is released on the Vitess GitHub releases page.

Development env - contabo

Update the vitess dev cluster first and see if everything works, check the logs of the vitess pods and also check if the backend can connect to the db.

cd vitess/contabo/

kubectl apply -f vitess-cluster.yaml

# Watch the pods being created
kubectl get pod

# Check logs
# Make sure the version is printed correctly: Version: 14.0.0
I0630 16:12:57.015279 1 servenv.go:100] Version: 14.0.0 (Git revision 9665c1850cf3e155667178104891f0fc41b71a9d branch 'heads/v14.0.0') built on Tue Jun 28 17:34:59 UTC 2022 by vitess@buildkitsandbox using go1.18.3 linux/amd64


# Vtgate - the last part of the name will be different
kubectl logs -f pod/vt-decontabodusseldorf-vtgate-f81fd0bc-5b7bfffb96-jxcjj

# vttablet
kubectl logs -f pod/vt-vttablet-decontabodusseldorf-2620423388-0c5af156

# vtctld
kubectl logs pod/vt-decontabodusseldorf-vtctld-55130465-65cd85fcc-n9ljn

Connect to the app and check the logs for the backend:

kubectl logs -f domain-com-backend-64d86787c5-g4vkv

Production env - hetzner

Afterwards, do the same for the hetzner cluster:

cd vitess/hetzner/

kubectl apply -f vitess-cluster.yaml

# Same as above

Also, make sure you upgrade the Vitess version used by the backup cronjob from the Rancher interface, for both environments.

Sometimes you might need to restart the backends: the upgrade triggers a restart of the Vitess pods, and the backend sometimes doesn't refresh its connection to the database afterwards. Check the backend logs to confirm.

An example of vitess-cluster.yaml (the file applied with kubectl above):

# The following "vt" VitessCluster is minimalist. The security policies
# and resource specifications are not meant to be used in production.
# Please refer to the operator documentation for recommendations on
# production settings.
apiVersion: planetscale.com/v2
kind: VitessCluster
metadata:
  name: vt
spec:
  # Pull policy for each component image (IfNotPresent/Always).
  imagePullPolicies:
    vtctld: IfNotPresent
    vtgate: IfNotPresent
    vttablet: IfNotPresent
    vtbackup: IfNotPresent
    mysqld: IfNotPresent
    mysqldExporter: IfNotPresent
  # Bump these tags to upgrade Vitess; keep every component on the same version.
  images:
    vtctld: vitess/lite:v14.0.0-mysql80
    vtgate: vitess/lite:v14.0.0-mysql80
    vttablet: vitess/lite:v14.0.0-mysql80
    vtbackup: vitess/lite:v14.0.0-mysql80
    mysqld:
      mysql80Compatible: vitess/lite:v14.0.0-mysql80
    mysqldExporter: prom/mysqld-exporter:v0.11.0
  backup:
    locations:
      - s3:
          bucket: "domain-com-hetzner-vitess"
          region: "eu-central-003"
          endpoint: "s3.eu-central-003.backblazeb2.com"
          authSecret:
            name: backblaze-domain-com-hetzner-vitess
            key: domain-com-hetzner-vitess-key
    engine: xtrabackup
  cells:
    - name: dehetznernuremberg
      gateway:
        # vtgate static authentication; users are defined in the Secret below.
        authentication:
          static:
            secret:
              name: vt-config-secret
              key: users.json
        # Intentionally empty (null); uncomment entries below to enable them.
        extraFlags:
          # planner_version: "Gen4Fallback"
          # schema_change_signal: "true"
          # schema_change_signal_user: "vschema_watcher"
        replicas: 1
        resources:
          requests:
            cpu: 100m
            memory: 256Mi
          limits:
            memory: 256Mi
  vitessDashboard:
    cells:
      - dehetznernuremberg
    extraFlags:
      security_policy: read-only
    replicas: 1
    resources:
      limits:
        memory: 128Mi
      requests:
        cpu: 100m
        memory: 128Mi

  keyspaces:
    - name: domain-com
      turndownPolicy: Immediate
      partitionings:
        - equal:
            parts: 1
            shardTemplate:
              databaseInitScriptSecret:
                name: vt-config-secret
                key: init_db.sql
              replication:
                enforceSemiSync: false
                initializeBackup: true
              tabletPools:
                - cell: dehetznernuremberg
                  type: replica
                  replicas: 1
                  # Mount the table-ACL config from the ConfigMap below at /mnt.
                  extraVolumes:
                    - name: acl-domain-com
                      configMap:
                        name: vt-config-configmap
                  extraVolumeMounts:
                    - name: acl-domain-com
                      mountPath: /mnt
                  vttablet:
                    extraFlags:
                      db_charset: utf8mb4
                      enforce-tableacl-config: "true"
                      queryserver-config-strict-table-acl: "true"
                      table-acl-config: /mnt/acl_users_rights.json
                      table-acl-config-reload-interval: 5m
                      # queryserver-config-schema-change-signal: "true"
                    resources:
                      limits:
                        memory: 1Gi
                      requests:
                        cpu: 200m
                        memory: 1Gi
                  mysqld:
                    resources:
                      requests:
                        cpu: 300m
                        memory: 1Gi
                  dataVolumeClaimTemplate:
                    accessModes: ["ReadWriteOnce"]
                    resources:
                      requests:
                        storage: 5Gi
  updateStrategy:
    type: Immediate
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: vt-config-configmap
data:
  # Table ACLs consumed by vttablet via table-acl-config (mounted at /mnt).
  acl_users_rights.json: |
    {
      "table_groups": [
        {
          "name": "domain-com",
          "table_names_or_prefixes": ["%"],
          "readers": ["domain-com_backend", "domain-com_admin", "domain-com_read", "vschema_watcher"],
          "writers": ["domain-com_backend", "domain-com_admin", "domain-com_write", "vschema_watcher"],
          "admins": ["domain-com_admin", "vschema_watcher"]
        }
      ]
    }
---
apiVersion: v1
kind: Secret
metadata:
  name: vt-config-secret
type: Opaque
stringData:
  # vtgate static-auth users (referenced by the gateway authentication above).
  # NOTE(review): these look like placeholder passwords — do not commit real
  # credentials to VCS; reference a secret store instead.
  users.json: |
    {
      "domain-com_backend": [{
        "UserData": "domain-com_backend",
        "Password": "domain-com_backend_"
      },
      {
        "UserData": "domain-com_backend",
        "Password": "domain-com_backend_"
      }],
      "domain-com_admin": [{
        "UserData": "domain-com_admin",
        "Password": "domain-com_admin_"
      }],
      "domain-com_read": [{
        "UserData": "domain-com_read",
        "Password": "domain-com_read_"
      }],
      "domain-com_write": [{
        "UserData": "domain-com_write",
        "Password": "domain-com_write_"
      }],
      "vschema_watcher": [{
        "UserData": "vschema_watcher",
        "Password": "vschema_watcher12!?"
      }]
    }
  # Database init script referenced by databaseInitScriptSecret above.
  init_db.sql: |
    # This file is executed immediately after mysql_install_db,
    # to initialize a fresh data directory.

    ###############################################################################
    # Equivalent of mysql_secure_installation
    ###############################################################################

    # Changes during the init db should not make it to the binlog.
    # They could potentially create errant transactions on replicas.
    SET sql_log_bin = 0;
    # Remove anonymous users.
    DELETE FROM mysql.user WHERE User = '';

    # Disable remote root access (only allow UNIX socket).
    DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost';

    # Remove test database.
    DROP DATABASE IF EXISTS test;

    ###############################################################################
    # Vitess defaults
    ###############################################################################

    # Vitess-internal database.
    CREATE DATABASE IF NOT EXISTS _vt;
    # Note that definitions of local_metadata and shard_metadata should be the same
    # as in production which is defined in go/vt/mysqlctl/metadata_tables.go.
    CREATE TABLE IF NOT EXISTS _vt.local_metadata (
      name VARCHAR(255) NOT NULL,
      value VARCHAR(255) NOT NULL,
      db_name VARBINARY(255) NOT NULL,
      PRIMARY KEY (db_name, name)
    ) ENGINE=InnoDB;
    CREATE TABLE IF NOT EXISTS _vt.shard_metadata (
      name VARCHAR(255) NOT NULL,
      value MEDIUMBLOB NOT NULL,
      db_name VARBINARY(255) NOT NULL,
      PRIMARY KEY (db_name, name)
    ) ENGINE=InnoDB;

    # Admin user with all privileges.
    CREATE USER 'vt_dba'@'localhost';
    GRANT ALL ON *.* TO 'vt_dba'@'localhost';
    GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';

    # User for app traffic, with global read-write access.
    CREATE USER 'vt_app'@'localhost';
    GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
      REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES,
      LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW,
      SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
      ON *.* TO 'vt_app'@'localhost';

    # User for app debug traffic, with global read access.
    CREATE USER 'vt_appdebug'@'localhost';
    GRANT SELECT, SHOW DATABASES, PROCESS ON *.* TO 'vt_appdebug'@'localhost';

    # User for administrative operations that need to be executed as non-SUPER.
    # Same permissions as vt_app here.
    CREATE USER 'vt_allprivs'@'localhost';
    GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
      REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES,
      LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW,
      SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
      ON *.* TO 'vt_allprivs'@'localhost';

    # User for slave replication connections.
    # TODO: Should we set a password on this since it allows remote connections?
    CREATE USER 'vt_repl'@'%';
    GRANT REPLICATION SLAVE ON *.* TO 'vt_repl'@'%';

    # User for Vitess filtered replication (binlog player).
    # Same permissions as vt_app.
    CREATE USER 'vt_filtered'@'localhost';
    GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
      REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES,
      LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW,
      SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
      ON *.* TO 'vt_filtered'@'localhost';

    # User for Orchestrator (https://github.com/openark/orchestrator).
    # TODO: Reenable when the password is randomly generated.
    #CREATE USER 'orc_client_user'@'%' IDENTIFIED BY 'orc_client_user_password';
    #GRANT SUPER, PROCESS, REPLICATION SLAVE, RELOAD
    #  ON *.* TO 'orc_client_user'@'%';
    #GRANT SELECT
    #  ON _vt.* TO 'orc_client_user'@'%';

    FLUSH PRIVILEGES;

    RESET SLAVE ALL;
    RESET MASTER;