Skip to content

Commit 4fd220f

Browse files
authored
Merge pull request #255 from KosukeOkamoto/update-discovery-scripts-for-4.8.2
Update script for Watson Discovery 4.8.2 on CP4D
2 parents a55030a + 653e4c5 commit 4fd220f

File tree

3 files changed

+31
-31
lines changed

3 files changed

+31
-31
lines changed

discovery-data/latest/all-backup-restore.sh

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,7 @@ set -e
44

55
BACKUP_DIR="tmp"
66
TMP_WORK_DIR="tmp/all_backup"
7-
SPLITE_DIR=./tmp_split_bakcup
7+
SPLIT_DIR=./tmp_split_backup
88
EXTRA_OC_ARGS="${EXTRA_OC_ARGS:-}"
99

1010
SCRIPT_DIR=$(dirname $0)
@@ -23,12 +23,12 @@ Usage:
2323
2424
Options:
2525
--help, -h Show help
26-
--file, -f Speccify backup file
26+
--file, -f Specify backup file
2727
--mapping, -m <mapping_file> Specify mapping file for restore to multi tenant clusters
2828
--instance-name, -i <instance_name> Instance name for a new Discovery instance. This name will be used if there is no Discovery instance when restore backup of Discovery 4.0.5 or older
2929
--cp4d-user-id <user_id> User ID to create Discovery instance. Default: admin user ID.
3030
--cp4d-user-name <user_name> User name to create Discovery instance. Default: admin.
31-
--log-output-dir <directory_path> Specify outout direcotry of detailed component logs
31+
--log-output-dir <directory_path> Specify output directory of detailed component logs
3232
--continue-from <component_name> Resume backup or restore from specified component. Values: wddata, etcd, postgresql, elastic, minio, archive, migration, post-restore
3333
--quiesce-on-error=[true|false] If true, not unquiesce on error during backup or restore. Default false on backup, true on restore.
3434
--clean Remove existing tmp directory before start backup or restore.
@@ -38,8 +38,8 @@ Basically, you don't need these advanced options.
3838
3939
--archive-on-local Archive the backup files of etcd and postgresql on local machine. Use this flag to reduce the disk usage on their pod or compress the files with specified option, but it might take much time.
4040
--backup-archive-option="<tar_option>" Tar options for compression used on archiving the backup file. Default none.
41-
--datastore-archive-option="<tar_option>" Tar options for comporession used on archiving the backup files of ElasticSearch, MinIO and internal configuration. Default "-z".
42-
--postgresql-archive-option="<tar_option>" Tar options for comporession used on archiving the backup files of postgres. Note that the backup files of postgresql are archived on its pod by default. Default "-z".
41+
--datastore-archive-option="<tar_option>" Tar options for compression used on archiving the backup files of ElasticSearch, MinIO and internal configuration. Default "-z".
42+
--postgresql-archive-option="<tar_option>" Tar options for compression used on archiving the backup files of postgres. Note that the backup files of postgresql are archived on its pod by default. Default "-z".
4343
--etcd-archive-option="<tar_option>" Tar options used on archiving the backup files of etcd. Note that the backup files of etcd are archived on its pod by default. Default "-z".
4444
--skip-verify-archive Skip the all verifying process of the archive.
4545
--skip-verify-backup Skip verifying the backup file.
@@ -335,11 +335,11 @@ if [ -z "${CONTINUE_FROM_COMPONENT+UNDEF}" ] && [ -d "${BACKUP_DIR}" ] ; then
335335
fi
336336
fi
337337

338-
if [ -d "${SPLITE_DIR}" ] ; then
338+
if [ -d "${SPLIT_DIR}" ] ; then
339339
if "${CLEAN}" ; then
340-
rm -rf "${SPLITE_DIR}"
340+
rm -rf "${SPLIT_DIR}"
341341
else
342-
brlog "ERROR" "Please remove ${SPLITE_DIR}"
342+
brlog "ERROR" "Please remove ${SPLIT_DIR}"
343343
exit 1
344344
fi
345345
fi

discovery-data/latest/lib/function.bash

Lines changed: 22 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -99,7 +99,7 @@ validate_version(){
9999
VERSIONS=(${SCRIPT_VERSION//./ })
100100
VERSION="${VERSIONS[0]}.${VERSIONS[1]}.${VERSIONS[2]}"
101101
if [ $(compare_version "${VERSION}" "${WD_VERSION}") -lt 0 ] ; then
102-
brlog "ERROR" "Invalid script version. The version of scripts '${SCRIPT_VERSION}' is not valid for the version of Watson Doscovery '${WD_VERSION}' "
102+
brlog "ERROR" "Invalid script version. The version of scripts '${SCRIPT_VERSION}' is not valid for the version of Watson Discovery '${WD_VERSION}' "
103103
exit 1
104104
fi
105105
}
@@ -223,8 +223,8 @@ kube_cp_from_local(){
223223
shift
224224
POD_BACKUP=$1
225225
shift
226-
SPLITE_DIR=./tmp_split_bakcup
227-
SPLITE_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
226+
SPLIT_DIR=./tmp_split_backup
227+
SPLIT_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
228228
LOCAL_BASE_NAME=$(basename "${LOCAL_BACKUP}")
229229
POD_DIST_DIR=$(dirname "${POD_BACKUP}")
230230

@@ -252,15 +252,15 @@ kube_cp_from_local(){
252252

253253
STAT_CMD="$(get_stat_command) ${LOCAL_BACKUP}"
254254
LOCAL_SIZE=$(eval "${STAT_CMD}")
255-
if [ ${SPLITE_SIZE} -ne 0 -a ${LOCAL_SIZE} -gt ${SPLITE_SIZE} ] ; then
256-
rm -rf ${SPLITE_DIR}
257-
mkdir -p ${SPLITE_DIR}
258-
split -a 5 -b ${SPLITE_SIZE} ${LOCAL_BACKUP} ${SPLITE_DIR}/${LOCAL_BASE_NAME}.split.
259-
for splitfile in ${SPLITE_DIR}/*; do
255+
if [ ${SPLIT_SIZE} -ne 0 -a ${LOCAL_SIZE} -gt ${SPLIT_SIZE} ] ; then
256+
rm -rf ${SPLIT_DIR}
257+
mkdir -p ${SPLIT_DIR}
258+
split -a 5 -b ${SPLIT_SIZE} ${LOCAL_BACKUP} ${SPLIT_DIR}/${LOCAL_BASE_NAME}.split.
259+
for splitfile in ${SPLIT_DIR}/*; do
260260
FILE_BASE_NAME=$(basename "${splitfile}")
261261
_oc_cp "${splitfile}" "${POD}:${POD_DIST_DIR}/${FILE_BASE_NAME}" $@
262262
done
263-
rm -rf ${SPLITE_DIR}
263+
rm -rf ${SPLIT_DIR}
264264
run_cmd_in_pod ${POD} "cat ${POD_DIST_DIR}/${LOCAL_BASE_NAME}.split.* > ${POD_BACKUP} && rm -rf ${POD_DIST_DIR}/${LOCAL_BASE_NAME}.split.*" $@
265265
else
266266
_oc_cp "${LOCAL_BACKUP}" "${POD}:${POD_BACKUP}" $@
@@ -279,8 +279,8 @@ kube_cp_to_local(){
279279
shift
280280
POD_BACKUP=$1
281281
shift
282-
SPLITE_DIR=./tmp_split_bakcup
283-
SPLITE_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
282+
SPLIT_DIR=./tmp_split_backup
283+
SPLIT_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
284284
POD_DIST_DIR=$(dirname "${POD_BACKUP}")
285285

286286
if "${IS_RECURSIVE}" ; then
@@ -308,17 +308,17 @@ kube_cp_to_local(){
308308
fi
309309

310310
POD_SIZE=$(oc $@ exec ${POD} -- sh -c "stat -c "%s" ${POD_BACKUP}")
311-
if [ ${SPLITE_SIZE} -ne 0 -a ${POD_SIZE} -gt ${SPLITE_SIZE} ] ; then
312-
rm -rf ${SPLITE_DIR}
313-
mkdir -p ${SPLITE_DIR}
314-
run_cmd_in_pod ${POD} "split -d -a 5 -b ${SPLITE_SIZE} ${POD_BACKUP} ${POD_BACKUP}.split." $@
311+
if [ ${SPLIT_SIZE} -ne 0 -a ${POD_SIZE} -gt ${SPLIT_SIZE} ] ; then
312+
rm -rf ${SPLIT_DIR}
313+
mkdir -p ${SPLIT_DIR}
314+
run_cmd_in_pod ${POD} "split -d -a 5 -b ${SPLIT_SIZE} ${POD_BACKUP} ${POD_BACKUP}.split." $@
315315
FILE_LIST=$(oc exec $@ ${POD} -- sh -c "ls ${POD_BACKUP}.split.*")
316316
for splitfile in ${FILE_LIST} ; do
317317
FILE_BASE_NAME=$(basename "${splitfile}")
318-
_oc_cp "${POD}:${splitfile}" "${SPLITE_DIR}/${FILE_BASE_NAME}" $@
318+
_oc_cp "${POD}:${splitfile}" "${SPLIT_DIR}/${FILE_BASE_NAME}" $@
319319
done
320-
cat ${SPLITE_DIR}/* > ${LOCAL_BACKUP}
321-
rm -rf ${SPLITE_DIR}
320+
cat ${SPLIT_DIR}/* > ${LOCAL_BACKUP}
321+
rm -rf ${SPLIT_DIR}
322322
oc exec $@ ${POD} -- bash -c "rm -rf ${POD_BACKUP}.split.*"
323323
else
324324
_oc_cp "${POD}:${POD_BACKUP}" "${LOCAL_BACKUP}" $@
@@ -396,9 +396,9 @@ keep_minio_port_forward(){
396396
while [ -e ${TMP_WORK_DIR}/keep_minio_port_forward ]
397397
do
398398
if [ -n "${S3_NAMESPACE+UNDEF}" ] ; then
399-
oc ${OC_ARGS} -n "${S3_NAMESPACE}" port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-foward.log" &
399+
oc ${OC_ARGS} -n "${S3_NAMESPACE}" port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
400400
else
401-
oc ${OC_ARGS} port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-foward.log" &
401+
oc ${OC_ARGS} port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
402402
fi
403403
PORT_FORWARD_PID=$!
404404
while [ -e ${TMP_WORK_DIR}/keep_minio_port_forward ] && kill -0 ${PORT_FORWARD_PID} &> /dev/null
@@ -763,7 +763,7 @@ EOF
763763
files=$(fetch_cmd_result ${pod} "ls /tmp" $@)
764764
if echo "${files}" | grep "${WD_CMD_FAILED_TOKEN}" > /dev/null ; then
765765
oc exec $@ ${pod} -- bash -c "rm -f /tmp/${WD_CMD_FAILED_TOKEN}"
766-
brlog "ERROR" "Something error happned while running command in ${pod}. See ${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log for details."
766+
brlog "ERROR" "Something error happened while running command in ${pod}. See ${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log for details."
767767
exit 1
768768
fi
769769
}
@@ -1273,7 +1273,7 @@ create_restore_instance_mappings(){
12731273
brlog "ERROR" "Failed to create Discovery service instance for ${src_instances[$i]}"
12741274
return 1
12751275
else
1276-
brlog "INFO" "Created Disocvery service instance: ${instance_id}"
1276+
brlog "INFO" "Created Discovery service instance: ${instance_id}"
12771277
mapping=$(fetch_cmd_result ${ELASTIC_POD} "echo '${mapping}' | jq -r '.instance_mappings |= . + [{\"source_instance_id\": \"${src_instances[$i]}\", \"dest_instance_id\": \"${instance_id}\"}]'" -c elasticsearch)
12781278
fi
12791279
done

discovery-data/latest/version.txt

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,2 +1,2 @@
11
The Backup and Restore Scripts for the Watson Discovery on CP4D.
2-
Scripts Version: 4.8.0
2+
Scripts Version: 4.8.2

0 commit comments

Comments (0)