feat: Velero integration; ZFS and PV manipulation scripts still to finish...
parent 6a6eae367b, commit 78e81aff94

admin.sh (104 changed lines)
@@ -286,6 +286,7 @@ function help()
    show "sync - sync, check data to cloud COMPLEX"
    show "backup - backup data to cloud COMPLEX"
    show "config - configure Restic and Rclone NOARG"
    show "velero - backup objects on kubernetes COMPLEX"
    show ""
    show "-< Tang"
    show "tangcrypt - encrypt /dev/stdin to /dev/stdout NOARG"
@@ -294,6 +295,21 @@ function help()
    exit 2
}

function help_velero()
{
    show "Usage: admin velero [init|uninstall|backup|restore|info] [dataname]" 1
    show ""
    show "Use the velero backend to perform kubernetes backup operations"
    show ""
    show "init - initialize the data storage on kubernetes"
    show "uninstall - uninstall velero on kubernetes"
    show "backup - backup the kubernetes objects"
    show "restore - restore [dataname] backup on kubernetes"
    show "info - get information about [dataname]"
    show ""
    exit 2
}

function help_restic()
{
    show "Usage: admin backup ${ZFS}dataset [ init|view|ls|check|now|prune|remove|config ]" 1
@@ -645,6 +661,83 @@ function rcloneapp()
    nfcunmount
}

function veleroapp()
{
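    # Velero helper: "init" installs the velero CLI if missing and deploys Velero
    # into the cluster against the MinIO S3 endpoint; "uninstall", "backup",
    # "restore" and "info" wrap the corresponding velero commands.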
    if [ -z "$1" ]; then
        help_velero
    fi
    case "$1" in
        init)
            nfcmountonly
            read -p "🔑 MinIO Access Key: " ACCESS_KEY
            read -sp "🔒 MinIO Secret Key: " SECRET_KEY
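            # Write the MinIO keys in AWS shared-credentials format; velero install
            # consumes this file through --secret-file below.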
            cat <<EOF > /mnt/usb/credentials-vel
[default]
aws_access_key_id=${ACCESS_KEY}
aws_secret_access_key=${SECRET_KEY}
EOF

            if ! command -v velero >/dev/null 2>&1; then
                echo "🔍 Velero not found, downloading and installing v1.16.0…"
                wget -q https://github.com/vmware-tanzu/velero/releases/download/v1.16.0/velero-v1.16.0-linux-amd64.tar.gz
                tar -xzf velero-v1.16.0-linux-amd64.tar.gz
                chmod +x velero-v1.16.0-linux-amd64/velero
                chown root:root velero-v1.16.0-linux-amd64/velero
                mv velero-v1.16.0-linux-amd64/velero /usr/local/bin/velero
                rm -rf velero-v1.16.0-linux-amd64 velero-v1.16.0-linux-amd64.tar.gz
                echo "✅ Velero installed."
            else
                echo "✅ Velero already present, version: $(velero version --client-only | head -n1)"
            fi
            echo "▶️ Deploying Velero into the cluster…"
            velero install \
                --provider aws \
                --plugins velero/velero-plugin-for-aws:v1.10.0 \
                --bucket velero \
                --backup-location-config region=us-east-1,s3ForcePathStyle="true",s3Url=https://minio-api.ia86.cc,checksumAlgorithm="" \
                --snapshot-location-config region=us-east-1 \
                --secret-file /mnt/usb/credentials-vel \
                --uploader-type kopia \
                --kubeconfig /home/user/.kube/config \
                || echo "ℹ️ Velero may already be installed in the cluster."
            echo "🎉 Script finished."
            ;;
        uninstall)
            velero uninstall --kubeconfig /home/user/.kube/config
            ;;
        backup)
            NOW=$(date +"%Y%m%d-%H%M")
            velero backup create backup-complet-cluster2-${NOW} \
                --include-namespaces '*' \
                --include-resources '*' \
                --include-cluster-resources=true \
                --kubeconfig /home/user/.kube/config
                # --default-volumes-to-fs-backup \
            ;;
        info)
            velero backup describe "$2" --kubeconfig /home/user/.kube/config
            ;;
        restore)
            BACKUP_NAME="$2"
            read -p "Are you sure you want to restore from backup '$BACKUP_NAME'? [y/N] " confirm
            case "$confirm" in
                [Yy]* )
                    echo "▶️ Restoring from backup '$BACKUP_NAME'…"
                    #velero restore create --from-backup "$BACKUP_NAME" --kubeconfig /home/user/.kube/config
                    ;;
                * )
                    echo "❌ Restore aborted."
                    ;;
            esac
            ;;
        *)
            help_velero
            ;;
    esac
    nfcunmount
}

function resticapp()
{
    if [ -z "$1" ]; then
@@ -727,6 +820,11 @@ function resticapp()
    nfcunmount
}

function install()
{
    apt install rclone restic ncdu jose clevis curl sudo zfsutils-linux -y
}

if [[ ! -f "${CONFIGURATION}" ]]; then
    show "Creating configuration file..."
    extract 4
@@ -738,9 +836,15 @@ nfcunmount
COMMAND="$1"
shift
case "${COMMAND}" in
    install)
        install
        ;;
    config)
        configure
        ;;
    velero)
        veleroapp "$1" "$2" "$3"
        ;;
    backup)
        resticapp "$1" "$2" "$3"
        ;;

@@ -39,7 +39,8 @@ summarize_backup_log() {
        return
    fi

    local date=$(grep -oP '^\s*-1\s+\K[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}' "$file" | tail -n1)
    local snapshot=$(grep '^snapshot ' "$file" | awk '{print $2}' | tail -n1)
    local date=$(awk -v snap="$snapshot" '$1 == snap {print $2 " " $3}' "$file" | tail -n1)
    local source=$(grep ' => ' "$file" | awk '{print $1}' | tail -n1)
    local files=$(awk '/^processed / {print $2}' "$file" | tail -n1)
    local snapshot=$(grep '^snapshot ' "$file" | awk '{print $2}' | tail -n1)

cleanattr_zfs.sh (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
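# Remove every locally-set user property (name containing ":") from the listed
# ZFS datasets by reverting it to the inherited value with `zfs inherit`.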

# Datasets to clean
LOCAL_DATASETS=("data/database" "data/minio" "data/standard")

echo "🧹 Starting cleanup of *:* properties..."

for dataset in "${LOCAL_DATASETS[@]}"; do
    echo "🔍 Cleaning properties of $dataset..."

    # Skip snapshots
    if [[ "$dataset" == *"@"* ]]; then
        echo "⚠️ Snapshot skipped: $dataset"
        continue
    fi

    # List and remove local properties matching *:*
    while IFS=$'\t' read -r property value source; do
        if [[ "$source" == "local" && "$property" == *:* ]]; then
            echo "❌ Removing $property on $dataset"
            sudo zfs inherit "$property" "$dataset" || echo "⚠️ Failed to remove $property on $dataset"
        fi
    done < <(zfs get -H -o property,value,source all "$dataset")

    echo "✅ *:* properties removed for $dataset."
done

echo "🎉 Cleanup finished."

copyattr_zfs.sh (new executable file, 37 lines)
@@ -0,0 +1,37 @@
#!/bin/bash
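# Copy all user properties ("*:*") of the listed local ZFS datasets to the
# matching datasets on the remote host over SSH.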

# Variables
REMOTE_USER="root"
REMOTE_HOST="195.201.173.157"
REMOTE_PORT=21253
REMOTE_ZFS_POOL="data"
LOCAL_DATASETS=("data/database" "data/minio" "data/standard")

for dataset in "${LOCAL_DATASETS[@]}"; do
    echo "🔍 Copying custom properties of dataset $dataset..."

    if [[ "$dataset" == *"@"* ]]; then
        echo "⚠️ Skipped (snapshot detected): $dataset"
        continue
    fi

    REMOTE_DATASET="$REMOTE_ZFS_POOL/$(basename "$dataset")"

    # Read the property list cleanly
    mapfile -t props < <(zfs get -H -o property,value all "$dataset")

    for line in "${props[@]}"; do
        prop=$(echo "$line" | awk '{print $1}')
        val=$(echo "$line" | cut -f2- | sed -E 's/^[^[:space:]]+[[:space:]]+//')

        if [[ "$prop" == *:* && -n "$val" ]]; then
            echo "🔧 $prop=$val ➔ $REMOTE_DATASET on $REMOTE_HOST"
            ssh -p "$REMOTE_PORT" -o StrictHostKeyChecking=no "$REMOTE_USER@$REMOTE_HOST" \
                "zfs set \"$prop=$val\" \"$REMOTE_DATASET\"" || echo "⚠️ Failed to set $prop on $REMOTE_DATASET"
        fi
    done

    echo "✅ Custom properties copied for $dataset ➔ $REMOTE_DATASET."
done

echo "🎉 All *:* ZFS user properties have been copied to $REMOTE_HOST."

restore_pv.sh (new executable file, 70 lines)
@@ -0,0 +1,70 @@
#!/bin/bash
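# Recreate all PersistentVolumes on the new cluster: delete the existing PVs,
# wait until they are fully gone, re-apply the saved PV manifests from the
# current directory with the node name rewritten, then patch the phase and
# drop stale claimRefs so the volumes become usable again.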

set -e

OLD_NODE="kube.ia86.cc"
NEW_NODE="newkube.ia86.cc"

echo "🛑 Deleting all existing PVs..."
for pv in $(kubectl get pv --no-headers -o custom-columns=":metadata.name"); do
    echo "🗑️ Deleting PV: $pv"
    kubectl delete pv "$pv" --wait=false
done

# Strict active wait
echo "⏳ Waiting until every PV is fully deleted..."
while true; do
    nb=$(kubectl get pv --no-headers 2>/dev/null | wc -l)
    if [ "$nb" -eq 0 ]; then
        echo "✅ All PVs are deleted."
        break
    else
        echo "⌛ $nb PVs still present... waiting 5 seconds..."
        sleep 5
    fi
done

echo "📦 Starting recreation from $(pwd)..."

for file in *.yaml; do
    echo "🔧 Processing file $file"

    cp "$file" "${file}.tmp"

    # Drop everything from the status: block onwards with awk (safer than sed)
    awk '
        BEGIN {skip=0}
        /^status:/ {skip=1}
        skip==0 {print}
    ' "${file}.tmp" > "${file}.fixed"

    # Cleanly replace the node name
    sed -i "s/$OLD_NODE/$NEW_NODE/g" "${file}.fixed"

    echo "🚀 Creating the fixed PV from $file"
    kubectl apply -f "${file}.fixed"

    rm -f "${file}.tmp" "${file}.fixed"
done

echo "✅ Full recreation without YAML errors."

echo "🔧 Forcing PVs to Bound (only those not already Bound)..."

for pv in $(kubectl get pv --no-headers | grep -v Bound | awk '{print $1}'); do
    echo "➡️ Forcing PV $pv"

    kubectl patch pv "$pv" --type='merge' -p '{"status":{"phase":"Bound"}}'
done

echo "✅ Only non-Bound PVs were patched."

echo "🔧 Removing claimRefs to release the PVs..."

for pv in $(kubectl get pv --no-headers | grep Released | awk '{print $1}'); do
    echo "➡️ Removing claimRef on $pv"
    kubectl patch pv "$pv" --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]'
done

echo "✅ All PVs are now Available."

send_zfs.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
#!/bin/bash
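# Snapshot each local ZFS dataset and send it to the remote pool over SSH,
# creating the destination dataset first if it does not exist yet.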

# Variables
REMOTE_USER="root"
REMOTE_HOST="newkube.ia86.cc"
REMOTE_PORT=21253
REMOTE_ZFS_POOL="data"
LOCAL_DATASETS=("data/database" "data/minio" "data/standard")
SNAP_NAME="transfer_$(date +%Y%m%d%H%M%S)"

# Transfer
for dataset in "${LOCAL_DATASETS[@]}"; do
    echo "Creating local snapshot $dataset@$SNAP_NAME..."
    zfs snapshot "${dataset}@${SNAP_NAME}"

    echo "Sending $dataset@$SNAP_NAME..."
    ssh -p $REMOTE_PORT -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST "zfs list $REMOTE_ZFS_POOL/$(basename $dataset)" >/dev/null 2>&1
    if [ $? -ne 0 ]; then
        ssh -p $REMOTE_PORT -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST "zfs create $REMOTE_ZFS_POOL/$(basename $dataset)"
    fi

    zfs send "${dataset}@${SNAP_NAME}" | ssh -p $REMOTE_PORT -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST "zfs receive -F $REMOTE_ZFS_POOL/$(basename $dataset)"

    if [ $? -eq 0 ]; then
        echo "✅ $dataset transferred successfully."
    else
        echo "❌ Transfer of $dataset failed."
    fi
done

echo "All datasets have been transferred."

unblock_velero.sh (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
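# Find pods stuck in Pending or blocked on an init container (such as Velero's
# restore-wait) and force-delete them so they get rescheduled.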

echo "🚀 Looking for stuck or Pending pods..."

# Collect all pods in Pending status or with a blocked init container
pods=$(kubectl get pods --all-namespaces --field-selector=status.phase=Pending -o jsonpath='{range .items[*]}{.metadata.namespace}{";"}{.metadata.name}{"\n"}{end}')

# Also add pods blocked on init containers (such as restore-wait)
pods+="
$(kubectl get pods --all-namespaces -o jsonpath='{range .items[*]}{.metadata.namespace}{";"}{.metadata.name}{";"}{.status.initContainerStatuses[*].state.running.startTime}{"\n"}{end}' | grep -E ';[^;]*;[^;]*' | cut -d';' -f1,2 --output-delimiter=";")"

# Remove duplicates
pods=$(echo "$pods" | sort -u)

if [[ -z "$pods" ]]; then
    echo "✅ No stuck or Pending pod detected."
    exit 0
fi

# Delete the pods that were found
echo "$pods" | while IFS=";" read -r namespace pod; do
    if [[ -n "$namespace" && -n "$pod" ]]; then
        echo "🧹 Deleting pod $pod in namespace $namespace..."
        kubectl delete pod "$pod" -n "$namespace" --grace-period=0 --force
    fi
done

echo "✅ All stuck/Pending pods have been killed."