diff --git a/docs/commands/backup.md b/docs/commands/backup.md new file mode 100644 index 00000000000..21cba28762e --- /dev/null +++ b/docs/commands/backup.md @@ -0,0 +1,301 @@ +Backup +=============================================================================== + +Usage +------------------------------------------------------------------------------- +Usage information can be obtained using `mas backup --help` + +``` +usage: mas backup [-i MAS_INSTANCE_ID] [--backup-version BACKUP_VERSION] [--backup-storage-size BACKUP_STORAGE_SIZE] + [--clean-backup] [--no-clean-backup] [--upload-backup] [--aws-access-key-id AWS_ACCESS_KEY_ID] + [--aws-secret-access-key AWS_SECRET_ACCESS_KEY] [--s3-bucket-name S3_BUCKET_NAME] [--s3-region S3_REGION] + [--artifactory-url ARTIFACTORY_URL] [--artifactory-repository ARTIFACTORY_REPOSITORY] + [--backup-manage-app] [--manage-workspace-id MANAGE_WORKSPACE_ID] [--backup-manage-db] + [--manage-db2-namespace MANAGE_DB2_NAMESPACE] [--manage-db2-instance-name MANAGE_DB2_INSTANCE_NAME] + [--manage-db2-backup-type {offline,online}] [--include-sls] [--exclude-sls] + [--mongodb-namespace MONGODB_NAMESPACE] [--mongodb-instance-name MONGODB_INSTANCE_NAME] + [--mongodb-provider {community}] [--sls-namespace SLS_NAMESPACE] [--cert-manager-provider {redhat,ibm}] + [--artifactory-username ARTIFACTORY_USERNAME] [--artifactory-token ARTIFACTORY_TOKEN] [--dev-mode] [--no-confirm] + [--skip-pre-check] [-h] + +IBM Maximo Application Suite Admin CLI v18.10.0 +Backup a MAS instance by configuring and launching the MAS Backup Tekton Pipeline. 
+ +Interactive Mode: +Omitting the --instance-id option will trigger an interactive prompt + +MAS Instance: + -i, --instance-id MAS_INSTANCE_ID + MAS Instance ID to backup + +Backup Configuration: + --backup-version BACKUP_VERSION + Version/timestamp for the backup (auto-generated if not provided) + --backup-storage-size BACKUP_STORAGE_SIZE + Size of the backup PVC storage (default: 20Gi) + --clean-backup Clean backup and config workspaces after completion (default: true) + --no-clean-backup Do not clean backup and config workspaces after completion + +Upload Configuration: + --upload-backup Upload the backup archive after completion + --aws-access-key-id AWS_ACCESS_KEY_ID + AWS Access Key ID for S3 upload + --aws-secret-access-key AWS_SECRET_ACCESS_KEY + AWS Secret Access Key for S3 upload + --s3-bucket-name S3_BUCKET_NAME + S3 bucket name for backup upload + --s3-region S3_REGION + AWS region for S3 bucket + --artifactory-url ARTIFACTORY_URL + Artifactory URL for backup upload (dev-mode only) + --artifactory-repository ARTIFACTORY_REPOSITORY + Artifactory repository for backup upload (dev-mode only) + +Manage Application Backup: + --backup-manage-app Backup the Manage application + --manage-workspace-id MANAGE_WORKSPACE_ID + Manage workspace ID + --backup-manage-db Backup the Manage application database (Db2) + --manage-db2-namespace MANAGE_DB2_NAMESPACE + Manage Db2 namespace (default: db2u) + --manage-db2-instance-name MANAGE_DB2_INSTANCE_NAME + Manage Db2 instance name + --manage-db2-backup-type {offline,online} + Manage Db2 backup type: offline (database unavailable) or online (database remains available) + +Components: + --include-sls Include SLS in backup (default: true) + --exclude-sls Exclude SLS from backup (use if SLS is external) + +Dependencies Configuration: + --mongodb-namespace MONGODB_NAMESPACE + MongoDB namespace (default: mongoce) + --mongodb-instance-name MONGODB_INSTANCE_NAME + MongoDB instance name to backup (default: mas-mongo-ce) + 
--mongodb-provider {community} + MongoDB provider (only community is supported for backup) + --sls-namespace SLS_NAMESPACE + SLS namespace (default: ibm-sls) + --cert-manager-provider {redhat} + Certificate manager provider (default: redhat) + +More: + --artifactory-username ARTIFACTORY_USERNAME + Username for access to development builds on Artifactory (dev-mode only) + --artifactory-token ARTIFACTORY_TOKEN + API Token for access to development builds on Artifactory (dev-mode only) + --dev-mode Configure backup for development mode + --no-confirm Launch the backup without prompting for confirmation + --skip-pre-check Skips the 'pre-backup-check' task in the backup pipeline + -h, --help Show this help message and exit +``` + +Examples +------------------------------------------------------------------------------- + +### Interactive Backup +Launch an interactive backup session that will prompt you for all required configuration: + +```bash +mas backup +``` + +### Non-Interactive Backup with Minimal Configuration +Backup a specific MAS instance with default settings: + +```bash +mas backup --instance-id inst1 --no-confirm +``` + +### Backup with Custom Storage Size +Specify a custom storage size for the backup PVC: + +```bash +mas backup --instance-id inst1 --backup-storage-size 50Gi --no-confirm +``` + +### Backup with Custom Version +Set a custom backup version identifier: + +```bash +mas backup --instance-id inst1 --backup-version 1.0.0 --no-confirm +``` + +### Backup with S3 Upload +Create a backup and automatically upload it to an S3 bucket: + +```bash +mas backup \ + --instance-id inst1 \ + --upload-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name my-mas-backups \ + --s3-region us-east-1 \ + --no-confirm +``` + +### Backup Excluding SLS +Create a backup without including Suite License Service (useful when SLS is 
external): + +```bash +mas backup --instance-id inst1 --exclude-sls --no-confirm +``` + +### Backup with Custom MongoDB Configuration +Specify custom MongoDB settings: + +```bash +mas backup \ + --instance-id inst1 \ + --mongodb-namespace my-mongo \ + --mongodb-instance-name my-mongo-instance \ + --mongodb-provider community \ + --no-confirm +``` + +### Backup with Custom SLS Configuration +Specify a custom SLS namespace: + +```bash +mas backup \ + --instance-id inst1 \ + --sls-namespace my-sls \ + --no-confirm +``` + + +### Backup Skipping Pre-Check +Skip the pre-backup validation check (use with caution): + +```bash +mas backup --instance-id inst1 --skip-pre-check --no-confirm +``` + +### Backup Without Workspace Cleanup +Keep backup and config workspace contents after completion (useful for troubleshooting): + +```bash +mas backup --instance-id inst1 --no-clean-backup --no-confirm +``` + +!!! note + By default, workspaces are cleaned after backup completion to free up storage. Use `--no-clean-backup` only when you need to inspect the workspace contents for troubleshooting purposes. 
+ +### Complete Non-Interactive Backup Example +A comprehensive example with all major options configured: + +```bash +mas backup \ + --instance-id inst1 \ + --backup-version 20240315-prod \ + --backup-storage-size 100Gi \ + --include-sls \ + --mongodb-namespace mongoce \ + --mongodb-instance-name mas-mongo-ce \ + --mongodb-provider community \ + --sls-namespace ibm-sls \ + --cert-manager-provider redhat \ + --upload-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name my-mas-backups \ + --s3-region us-east-1 \ + --no-confirm +``` + +### Backup with Manage Application +Backup MAS instance including the Manage application and its database: + +```bash +mas backup \ + --instance-id inst1 \ + --backup-manage-app \ + --manage-workspace-id masdev \ + --backup-manage-db \ + --manage-db2-namespace db2u \ + --manage-db2-instance-name mas-inst1-masdev-manage \ + --manage-db2-backup-type offline \ + --no-confirm +``` + +### Backup with Manage Application Only (No Database) +Backup the Manage application without backing up its database: + +```bash +mas backup \ + --instance-id inst1 \ + --backup-manage-app \ + --manage-workspace-id masdev \ + --no-confirm +``` + +Notes +------------------------------------------------------------------------------- + +### Backup Process +The backup command performs the following operations: + +1. **Validates the target cluster** - Ensures OpenShift cluster connectivity and checks for MAS instances +2. **Prepares the pipeline namespace** - Creates or updates the `mas-{instance-id}-pipelines` namespace +3. **Installs OpenShift Pipelines** - Validates or installs the OpenShift Pipelines Operator +4. **Creates backup PVC** - Provisions persistent storage for the backup data +5. **Launches the backup pipeline** - Submits a Tekton PipelineRun to perform the backup +6. 
**Optionally uploads the backup** - If configured, uploads the backup archive to S3 or Artifactory + +### Default Values +If not specified, the following defaults are used: + +- **Backup Storage Size**: `20Gi` +- **Backup Version**: Auto-generated timestamp in format `YYYYMMDD-HHMMSS` +- **Clean Workspaces**: `true` (workspaces are cleaned after completion) +- **MongoDB Namespace**: `mongoce` +- **MongoDB Instance Name**: `mas-mongo-ce` +- **MongoDB Provider**: `community` +- **SLS Namespace**: `ibm-sls` +- **Certificate Manager Provider**: `redhat` +- **Include SLS**: `true` + +### Storage Requirements +Ensure sufficient storage is available for the backup PVC. The required size depends on: + +- Size of MAS configuration data +- Size of MongoDB database +- Size of SLS data (if included) +- Any additional application data + +### Upload Destinations +Two upload destinations are supported: + +- **S3**: Standard AWS S3 bucket upload (available in all modes) +- **Artifactory**: Artifactory repository upload (requires `--dev-mode`) + +### Manage Application Backup +The backup command can optionally include the Manage application and its Db2 database: + +- **Manage Application**: Backs up the Manage namespace resources and persistent volume data +- **Manage Database**: Backs up the Db2 database associated with the Manage workspace + - **Offline backup**: Database is unavailable during backup (required for circular logging) + - **Online backup**: Database remains available during backup (requires archive logging) + +!!! note + If your Db2 instance uses circular logging (default), you must use offline backup type. + +### Interactive Mode +When running without `--instance-id`, the command enters interactive mode and will prompt for: + +1. Target OpenShift cluster connection +2. MAS instance selection (if multiple instances exist) +3. Backup storage size +4. Backup version (or auto-generate) +5. Workspace cleanup preference +6. 
Manage application backup configuration (optional) +7. Upload configuration (optional) + +#### Example Interactive Mode Output + +![backup-cmd](../img/backup-cmd.png) + + +### Pipeline Monitoring +After launching the backup, a URL to the Tekton PipelineRun will be displayed. Use this URL to monitor the backup progress in the OpenShift Console. \ No newline at end of file diff --git a/docs/commands/restore.md b/docs/commands/restore.md new file mode 100644 index 00000000000..2115a86cdee --- /dev/null +++ b/docs/commands/restore.md @@ -0,0 +1,506 @@ +Restore +=============================================================================== + +Usage +------------------------------------------------------------------------------- +Usage information can be obtained using `mas restore --help` + +``` +usage: mas restore [-i MAS_INSTANCE_ID] [--restore-version RESTORE_VERSION] [--backup-storage-size BACKUP_STORAGE_SIZE] + [--mas-domain-restore MAS_DOMAIN_ON_RESTORE] [--sls-url-restore SLS_URL_ON_RESTORE] + [--dro-url-restore DRO_URL_ON_RESTORE] [--include-slscfg-from-backup] [--exclude-slscfg-from-backup] + [--sls-cfg-file SLS_CFG_FILE] [--dro-cfg-file DRO_CFG_FILE] [--include-drocfg-from-backup] + [--exclude-drocfg-from-backup] [--clean-backup] [--no-clean-backup] [--download-backup] + [--aws-access-key-id AWS_ACCESS_KEY_ID] [--aws-secret-access-key AWS_SECRET_ACCESS_KEY] + [--s3-bucket-name S3_BUCKET_NAME] [--s3-region S3_REGION] [--artifactory-url ARTIFACTORY_URL] + [--artifactory-repository ARTIFACTORY_REPOSITORY] [--custom-backup-archive-name BACKUP_ARCHIVE_NAME] + [--include-grafana] [--exclude-grafana] [--include-dro] [--exclude-dro] [--include-sls] [--exclude-sls] + [--sls-domain SLS_DOMAIN] [--ibm-entitlement-key IBM_ENTITLEMENT_KEY] [--contact-email DRO_CONTACT_EMAIL] + [--contact-firstname DRO_CONTACT_FIRSTNAME] [--contact-lastname DRO_CONTACT_LASTNAME] + [--dro-namespace DRO_NAMESPACE] [--override-mongodb-storageclass] [--mongodb-storageclass-name 
MONGODB_STORAGECLASS_NAME] + [--restore-manage-app] [--restore-manage-db] [--override-manage-app-storageclass] + [--manage-app-storage-class-rwx MANAGE_APP_STORAGE_CLASS_RWX] [--manage-app-storage-class-rwo MANAGE_APP_STORAGE_CLASS_RWO] + [--override-manage-db-storageclass] + [--manage-db-storage-class-rwx MANAGE_DB_STORAGE_CLASS_RWX] [--manage-db-storage-class-rwo MANAGE_DB_STORAGE_CLASS_RWO] + [--artifactory-username ARTIFACTORY_USERNAME] [--artifactory-token ARTIFACTORY_TOKEN] [--dev-mode] [--no-confirm] [--skip-pre-check] [-h] + +IBM Maximo Application Suite Admin CLI +Restore a MAS instance from backup by configuring and launching the MAS Restore Tekton Pipeline. + +Interactive Mode: +Omitting the --instance-id option will trigger an interactive prompt + +MAS Instance: + -i, --instance-id MAS_INSTANCE_ID + MAS Instance ID to restore, must match the instance ID of the backup. + --mas-domain-restore MAS_DOMAIN_ON_RESTORE + MAS Domain to restore. If not specified, the domain will be taken from the backup. + --sls-url-restore SLS_URL_ON_RESTORE + SLS URL to restore in Suite configuration. If not specified, the url will be taken from the backup. + --dro-url-restore DRO_URL_ON_RESTORE + DRO URL to restore in Suite configuration. If not specified, the url will be taken from the backup. + --include-slscfg-from-backup + Use SLS config from backup during Suite restore. + --exclude-slscfg-from-backup + Exclude SLS config from backup during Suite restore. + --sls-cfg-file SLS_CFG_FILE + SLS config file path to restore, must be provided if own SLS is used. + --dro-cfg-file DRO_CFG_FILE + DRO config file path to restore, must be provided if own DRO is used. + --include-drocfg-from-backup + Include DRO config from backup during Suite restore. + --exclude-drocfg-from-backup + Exclude DRO config from backup during Suite restore. + +Restore Configuration: + --restore-version RESTORE_VERSION + Version/timestamp used in backup. 
Example: YYYYMMDD-HHMMSS + --backup-storage-size BACKUP_STORAGE_SIZE + Size of the PVC storage, must be bigger than backup archive size. (default: 20Gi) + --clean-backup Clean backup and config workspaces after completion (default: true) + --no-clean-backup Do not clean backup and config workspaces after completion + +Download Configuration: + --download-backup Download the backup archive from S3 or Artifactory + --aws-access-key-id AWS_ACCESS_KEY_ID + AWS Access Key ID for S3 download + --aws-secret-access-key AWS_SECRET_ACCESS_KEY + AWS Secret Access Key for S3 download + --s3-bucket-name S3_BUCKET_NAME + S3 bucket name for backup download + --s3-region S3_REGION + AWS region for S3 bucket + --artifactory-url ARTIFACTORY_URL + Artifactory URL for backup download + --artifactory-repository ARTIFACTORY_REPOSITORY + Artifactory repository for backup download + --custom-backup-archive-name BACKUP_ARCHIVE_NAME + Custom backup archive name to download from S3 or Artifactory + +Components: + --include-grafana Include Grafana in restore (default: true) + --exclude-grafana Skip installing Grafana. + --include-dro Include DRO in restore, this will install new DRO instance (default: true) + --exclude-dro Skip installing DRO. + --include-sls Include SLS in restore (default: true) + --exclude-sls Exclude SLS from restore (use if SLS is external) + +IBM Suite License Service Operator: + --sls-domain SLS_DOMAIN + SLS domain to use during SLS instance restore (optional). 
+ +IBM Data Reporting Operator: + --ibm-entitlement-key IBM_ENTITLEMENT_KEY + IBM entitlement key + --contact-email DRO_CONTACT_EMAIL + Contact e-mail address + --contact-firstname DRO_CONTACT_FIRSTNAME + Contact first name + --contact-lastname DRO_CONTACT_LASTNAME + Contact last name + --dro-namespace DRO_NAMESPACE + Namespace for DRO + +MongoDB Storage Class Override: + --override-mongodb-storageclass + Override the storage class for MongoDB during restore + --mongodb-storageclass-name MONGODB_STORAGECLASS_NAME + MongoDB storage class name (ReadWriteOnce). If not specified, cluster default will be used. + +Manage Application Restore: + --restore-manage-app Restore the Manage application including namespace resources and persistent volume data + --restore-manage-db Restore the Manage incluster Db2 database + --override-manage-app-storageclass + Override storage class for Manage application persistent volumes + --manage-app-storage-class-rwx MANAGE_APP_STORAGE_CLASS_RWX + Manage Application ReadWriteMany storage class name + --manage-app-storage-class-rwo MANAGE_APP_STORAGE_CLASS_RWO + Manage Application ReadWriteOnce storage class name + --override-manage-db-storageclass + Override storage class for Manage Db2 database persistent volumes + --manage-db-storage-class-rwx MANAGE_DB_STORAGE_CLASS_RWX + Db2 ReadWriteMany storage class name + --manage-db-storage-class-rwo MANAGE_DB_STORAGE_CLASS_RWO + Db2 ReadWriteOnce storage class name + +More: + --artifactory-username ARTIFACTORY_USERNAME + Username for access to development builds on Artifactory + --artifactory-token ARTIFACTORY_TOKEN + API Token for access to development builds on Artifactory + --dev-mode Configure restore in development mode + --no-confirm Launch the restore without prompting for confirmation + --skip-pre-check Skips the 'pre-restore-check' task in the restore pipeline + -h, --help Show this help message and exit +``` + +Examples 
+------------------------------------------------------------------------------- + +### Interactive Restore +Launch an interactive restore session that will prompt you for all required configuration: + +```bash +mas restore +``` + +### Non-Interactive Restore with Minimal Configuration +Restore a specific MAS instance from a backup with default settings: + +```bash +mas restore --instance-id inst1 --restore-version 20260117-191701 --no-confirm +``` + +### Restore with Custom Storage Size +Specify a custom storage size for the restore PVC: + +```bash +mas restore --instance-id inst1 --restore-version 20260117-191701 --backup-storage-size 50Gi --no-confirm +``` + +### Restore with Changed MAS Domain +Restore a backup and change the MAS domain in the Suite CR: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --mas-domain-restore new.domain.com \ + --no-confirm +``` + +### Restore with S3 Download +Download a backup from S3 and restore it: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --download-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name my-mas-backups \ + --s3-region us-east-1 \ + --no-confirm +``` + +### Restore with Custom Backup Archive Name +Download and restore a backup with a custom archive name: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --download-backup \ + --custom-backup-archive-name custom-backup-name.tar.gz \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name my-mas-backups \ + --s3-region us-east-1 \ + --no-confirm +``` + +### Restore Excluding SLS +Restore a backup without including Suite License Service (useful when using external SLS): + 
+```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --exclude-sls \ + --no-confirm +``` + +### Restore with Custom SLS Configuration File +Restore using a custom SLS configuration file instead of the one from backup: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --exclude-slscfg-from-backup \ + --sls-cfg-file /path/to/sls-config.yaml \ + --no-confirm +``` + +### Restore with Changed SLS URL +Restore SLS configuration from backup but change the SLS URL: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --include-slscfg-from-backup \ + --sls-url-restore https://new-sls-url.com \ + --no-confirm +``` + +### Restore with Custom DRO Configuration File +Restore using a custom DRO/BAS configuration file instead of the one from backup: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --exclude-drocfg-from-backup \ + --dro-cfg-file /path/to/dro-config.yaml \ + --no-confirm +``` + +### Restore with DRO Installation +Restore and install a new DRO instance: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --include-dro \ + --ibm-entitlement-key YOUR_ENTITLEMENT_KEY \ #pragma: allowlist secret + --contact-email admin@example.com \ + --contact-firstname John \ + --contact-lastname Doe \ + --dro-namespace redhat-marketplace \ + --no-confirm +``` + +### Restore with Custom SLS Domain +Restore SLS instance with a custom domain: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --include-sls \ + --sls-domain custom-sls.domain.com \ + --no-confirm +``` + +### Restore Excluding Grafana +Restore without installing Grafana: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --exclude-grafana \ + --no-confirm +``` + +### Restore with MongoDB Storage Class Override +Override the storage class for MongoDB during 
restore (useful when restoring to a cluster with different storage classes): + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --override-mongodb-storageclass \ + --mongodb-storageclass-name custom-rwo-storage \ + --no-confirm +``` + +### Restore with Manage Application +Restore the Manage application including namespace resources and persistent volume data: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --restore-manage-app \ + --no-confirm +``` + +### Restore with Manage Application and Database +Restore both the Manage application and its incluster Db2 database: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --restore-manage-app \ + --restore-manage-db \ + --no-confirm +``` + +### Restore Manage with Custom Storage Classes +Restore Manage application and database with custom storage class overrides: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --restore-manage-app \ + --restore-manage-db \ + --override-manage-app-storageclass \ + --manage-app-storage-class-rwx custom-rwx-storage \ + --manage-app-storage-class-rwo custom-rwo-storage \ + --override-manage-db-storageclass \ + --manage-db-storage-class-rwx custom-rwx-storage \ + --manage-db-storage-class-rwo custom-rwo-storage \ + --no-confirm +``` + +### Restore Skipping Pre-Check +Skip the pre-restore validation check (use with caution): + +```bash +mas restore --instance-id inst1 --restore-version 20260117-191701 --skip-pre-check --no-confirm +``` + +### Restore Without Workspace Cleanup +Keep backup and config workspace contents after completion (useful for troubleshooting): + +```bash +mas restore --instance-id inst1 --restore-version 20260117-191701 --no-clean-backup --no-confirm +``` + +!!! note + By default, workspaces are cleaned after restore completion to free up storage. 
Use `--no-clean-backup` only when you need to inspect the workspace contents for troubleshooting purposes. + +### Complete Non-Interactive Restore Example +A comprehensive example with all major options configured: + +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --backup-storage-size 100Gi \ + --mas-domain-restore new.domain.com \ + --include-sls \ + --sls-domain custom-sls.domain.com \ + --include-slscfg-from-backup \ + --sls-url-restore https://new-sls-url.com \ + --include-grafana \ + --include-dro \ + --ibm-entitlement-key YOUR_ENTITLEMENT_KEY \ #pragma: allowlist secret + --contact-email admin@example.com \ + --contact-firstname John \ + --contact-lastname Doe \ + --dro-namespace redhat-marketplace \ + --download-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name my-mas-backups \ + --s3-region us-east-1 \ + --no-confirm +``` + +Notes +------------------------------------------------------------------------------- + +### Restore Process +The restore command performs the following operations: + +1. **Validates the target cluster** - Ensures OpenShift cluster connectivity +2. **Prepares the pipeline namespace** - Creates or updates the `mas-{instance-id}-pipelines` namespace +3. **Installs OpenShift Pipelines** - Validates or installs the OpenShift Pipelines Operator +4. **Creates backup PVC** - Provisions persistent storage for the backup data +5. **Downloads backup archive** - If configured, downloads the backup from S3 or Artifactory +6. **Launches the restore pipeline** - Submits a Tekton PipelineRun to perform the restore +7. 
**Restores MAS components** - Restores MAS instance, configurations, and selected components + +### Default Values +If not specified, the following defaults are used: + +- **Backup Storage Size**: `20Gi` +- **Clean Workspaces**: `true` (workspaces are cleaned after completion) +- **Include SLS**: `true` +- **Include Grafana**: `true` +- **Include DRO**: `true` +- **Include SLSCfg from Backup**: `true` +- **Include DROCfg from Backup**: `true` + +### Storage Requirements +Ensure sufficient storage is available for the restore PVC. The required size must be larger than the backup archive size and depends on: + +- Size of the backup archive +- Temporary extraction space needed during restore +- Any additional workspace requirements + +### Download Sources +Two download sources are supported: + +- **S3**: Standard AWS S3 bucket download (available in all modes) +- **Artifactory**: Artifactory repository download (requires `--dev-mode`) + +### Configuration Options +The restore command provides flexibility in how configurations are restored: + +#### SLS Configuration +- **From Backup**: Use `--include-slscfg-from-backup` (default) to restore SLS configuration from the backup +- **Custom File**: Use `--exclude-slscfg-from-backup` and provide `--sls-cfg-file` to use a custom configuration +- **Change URL**: Use `--sls-url-restore` to modify the SLS URL while keeping other configuration from backup + +#### DRO Configuration +- **From Backup**: Use `--include-drocfg-from-backup` (default) to restore DRO configuration from the backup +- **Custom File**: Use `--exclude-drocfg-from-backup` and provide `--dro-cfg-file` to use a custom configuration +- **Change URL**: Use `--dro-url-restore` to modify the DRO URL while keeping other configuration from backup +- **READ**: When `include-dro` is enabled to install a new DRO instance, the `--include-drocfg-from-backup` flag is automatically set to `false` to prevent conflicts. 
Configuration of the new DRO instance will be automatically applied to the Suite. + +#### MAS Domain +- By default, the MAS domain is restored from the backup +- Use `--mas-domain-restore` to change the domain during restore + +### Component Installation +The restore process can optionally install components that are not part of the backup: + +- **Grafana**: Monitoring and visualization (not backed up, can be installed during restore). Use `--include-grafana` to install grafana during restore or `--exclude-grafana` to skip grafana installation. +- **DRO**: Data Reporting Operator (not backed up, can be installed during restore). Use `--include-dro` to install DRO during restore or `--exclude-dro` to skip DRO installation. +- **SLS**: Suite License Service (backed up, can be restored or skipped if using external SLS). Use `--include-sls` to restore SLS from backup or `--exclude-sls` to skip SLS installation. + +### MongoDB Storage Class Override +When restoring to a cluster with different storage classes than the original backup, you can override the MongoDB storage class: + +- Use `--override-mongodb-storageclass` to enable storage class override +- Optionally specify `--mongodb-storageclass-name` for a custom ReadWriteOnce storage class +- If no custom storage class is specified, the cluster default will be used + +This is particularly useful for: +- Migrating between different cloud providers +- Restoring to clusters with different storage infrastructure +- Testing restores in different environments + +### Manage Application Restore +The restore process can now restore the Manage application in addition to the MAS Suite: + +- **Manage Application**: Use `--restore-manage-app` to restore Manage namespace resources and persistent volume data +- **Manage Database**: Use `--restore-manage-db` to restore the incluster Db2 database associated with the Manage workspace +- **Storage Class Overrides**: + - Use `--override-manage-app-storageclass` to override Manage 
application storage classes, then specify `--manage-app-storage-class-rwx` and `--manage-app-storage-class-rwo` + - Use `--override-manage-db-storageclass` to override Db2 database storage classes, then specify `--manage-db-storage-class-rwx` and `--manage-db-storage-class-rwo` + +!!! note + - Manage database restore is an offline operation - the Manage application will be unavailable during the restore + - The restore process handles both the application resources and the database data + - Storage class overrides are useful when restoring to clusters with different storage infrastructure + - A single RWX and RWO storage class is applied across all Db2 persistent volumes (meta, data, backup, logs, temp) + +### Interactive Mode +When running without `--instance-id`, the command enters interactive mode and will prompt for: + +1. Target OpenShift cluster connection +2. MAS instance ID (must match the backup) +3. Backup version to restore +4. MongoDB storage class override configuration +5. Grafana installation preference +6. SLS installation and configuration +7. DRO installation and configuration +8. MAS domain configuration +9. Manage application restore configuration +10. Backup storage size +11. Download configuration (optional) + +### Pipeline Monitoring +After launching the restore, a URL to the Tekton PipelineRun will be displayed. Use this URL to monitor the restore progress in the OpenShift Console. + +### Important Considerations + +!!! warning + - The MAS instance ID used for restore must match the instance ID from the backup + - Ensure the target cluster has sufficient resources for the restored instance + - Review and update configuration URLs (SLS, DRO) if the target environment differs from the backup source + - If using external SLS or DRO, provide appropriate configuration files + +!!! 
tip + - Use `--skip-pre-check` only if you're confident about the cluster state + - Keep `--no-clean-backup` disabled unless troubleshooting to save storage space + - When changing domains or URLs, ensure DNS and network configurations are updated accordingly \ No newline at end of file diff --git a/docs/guides/backup-restore.md b/docs/guides/backup-restore.md new file mode 100644 index 00000000000..9ec2bd7e5e1 --- /dev/null +++ b/docs/guides/backup-restore.md @@ -0,0 +1,1378 @@ +Backup and Restore +=============================================================================== + +This guide provides comprehensive information on backing up and restoring IBM Maximo Application Suite (MAS) instances. The backup process captures critical configuration data, MongoDB databases, Suite License Service (SLS) data, and certificate manager configurations to enable disaster recovery scenarios. + +!!! tip + This guide covers both **backup and restore operations** for IBM Maximo Application Suite instances. + +!!! warning + Before you begin + + Be aware of the following versioning considerations for the MAS CLI releases: + + The MAS backup and restore in CLI release v19.0.0 and later contains process, backup archive file and directory changes that are not backward compatible with earlier backup and restore versions. + + Run the backup processes using v19.0.0 or later to ensure that you can successfully run a restore. You cannot run a restore process using v19.0.0 or later from backups created on an older version. 
+ +**Supported MAS versions** + - MAS 9.1.x + - MAS 9.0.x (in testing) + +**User Permissions Required** + - `oc` CLI with cluster admin permissions + - `mas` CLI with appropriate permissions + - Access to Tekton pipeline resources + +**Quick Navigation:** + - [Backup Overview](#backup-overview) - Information about backing up MAS instances + - [Restore Overview](#restore-overview) - Information about restoring MAS instances + + +Backup Overview +------------------------------------------------------------------------------- + +The MAS backup process uses Tekton pipelines to orchestrate the backup of multiple components. The Tekton pipeline executes [Ansible DevOps Collection](https://ibm-mas.github.io/ansible-devops/) roles to perform the actual backup operations. + +### Backup Components + +- **IBM Operator Catalogs** - Catalog source definitions +- **Certificate Manager** - Certificate configurations (RedHat only) +- **MongoDB** - MAS configuration database (Community Edition only) +- **Suite License Service (SLS)** - License server data (optional) +- **MAS Suite Configuration** - Core MAS instance configuration and custom resources +- **MAS Applications** - Application-specific resources and persistent volume data (optional) +- **Db2 Database** - Db2 instance resources and database backups (optional) + +The backup creates a compressed archive for each supported component that can be stored locally or uploaded to cloud storage (S3 or Artifactory). + +### Backup Limitations + +!!! warning + Be aware of the following limitations before performing a backup: + +- **MongoDB Community Edition only** - The backup process supports only in-cluster MongoDB Community Edition. External or enterprise MongoDB deployments are not backed up. +- **Db2 standalone operator only** - The backup process supports only the in-cluster standalone Db2 operator. Other Db2 operator implementations are not included. 
+- **Certificate Manager (RedHat only)** - Certificate Manager backup is supported only for RedHat Certificate Manager. Other certificate manager implementations are not included. +- **No support for some apps** - Only Manage application is supported for now. Other MAS applications (Facilities, Monitor, IoT, Predict, etc.) are not supported, but will be added in later releases. +- **No OpenShift cluster state** - The backup does not capture the full OpenShift cluster state, node configurations, or cluster-level resources outside of MAS namespaces. +- **No IBM Cloud Pak for Data backups** - The backup process does not support backing up CP4D itself. +- **No incremental backups** - Each backup is a full backup; incremental or differential backups are not supported. +- **Single MAS instance per backup** - Each backup operation targets a single MAS instance. Multi-instance environments require separate backup runs per instance. +- **Tekton pipeline dependency** - The backup process requires Tekton pipelines to be available and functional on the cluster. +- **Storage class dependency** - Backup of Manage application's persistent volumes depends on the storage class supporting volume snapshots or the relevant backup mechanism. +- **S3/Artifactory upload is optional** - Without configuring cloud storage upload, backups are stored locally in the cluster and may be lost if the cluster is decommissioned. +- **Download backup archives to local machine manually** - The backup archives are stored in the cluster's PVC or uploaded to S3/Artifactory and must be downloaded to a local machine manually. + +!!! tip + We are working on reducing the limitations of the backup process and will be adding new capabilities and support for other MAS applications in future releases. 
+ +### Ansible DevOps Integration + +The `mas backup` command launches a Tekton pipeline that executes the following Ansible roles from the [IBM MAS DevOps Collection](https://ibm-mas.github.io/ansible-devops/): + +- [`ibm.mas_devops.ibm_catalogs`](https://ibm-mas.github.io/ansible-devops/roles/ibm_catalogs/) - Backs up IBM Operator Catalog definitions +- [`ibm.mas_devops.cert_manager`](https://ibm-mas.github.io/ansible-devops/roles/cert_manager/) - Backs up Certificate Manager configurations +- [`ibm.mas_devops.mongodb`](https://ibm-mas.github.io/ansible-devops/roles/mongodb/) - Backs up MongoDB Community Edition instance and database +- [`ibm.mas_devops.sls`](https://ibm-mas.github.io/ansible-devops/roles/sls/) - Backs up Suite License Service data +- [`ibm.mas_devops.suite_backup`](https://ibm-mas.github.io/ansible-devops/roles/suite_backup/) - Backs up MAS Core configuration +- [`ibm.mas_devops.db2`](https://ibm-mas.github.io/ansible-devops/roles/db2/) - Backs up Db2 resources and persistent volume data +- [`ibm.mas_devops.suite_app_backup`](https://ibm-mas.github.io/ansible-devops/roles/suite_app_backup/) - Backs up MAS application resources and persistent volume data + +For detailed information about the underlying Ansible automation, see the [Backup and Restore Playbook Documentation](https://ibm-mas.github.io/ansible-devops/playbooks/backup-restore/). + +!!! tip + Advanced users can use the Ansible roles directly for custom backup workflows. The CLI provides a managed, simplified interface to these roles with additional features like automatic pipeline setup and cloud upload capabilities. + +### Backup Artifacts + +Backups are stored in the pipeline namespace PVC at: + +- **Backup Directory**: `/workspace/backups` + +When S3/Artifactory upload is enabled, the backup archives will be uploaded to the bucket/Artifactory repo under the `mas-<instance-id>-backups` directory. 
+ +**S3 Backup Archive Directory Structure:** + +``` +s3://bucket-name/ (or Artifactory - https://na.artifactory.swg-devops.com/artifactory/repo-name/) +├── mas-<instance-id>-backups/ + ├── mas-<instance-id>-backup-<version>-catalog.tar.gz + ├── mas-<instance-id>-backup-<version>-certmanager.tar.gz + ├── mas-<instance-id>-backup-<version>-db2u-manage.tar.gz + ├── mas-<instance-id>-backup-<version>-mongoce.tar.gz + ├── mas-<instance-id>-backup-<version>-sls.tar.gz + ├── mas-<instance-id>-backup-<version>-suite.tar.gz + └── mas-<instance-id>-backup-<version>-app-manage.tar.gz +``` + +Each backup archive follows the naming convention: `mas-<instance-id>-backup-<version>-<component>.tar.gz` + +**Archive Components:** + +| Archive | Description | +|---------|-------------| +| `catalog.tar.gz` | IBM Operator Catalog configurations | +| `certmanager.tar.gz` | Certificate Manager configurations | +| `mongoce.tar.gz` | MongoDB Community Edition database backup | +| `sls.tar.gz` | Suite License Service data (if included) | +| `suite.tar.gz` | MAS Core configuration and data | +| `db2u-manage.tar.gz` | Manage Db2 database backup (if included) | +| `app-manage.tar.gz` | Manage application configuration (if included) | + +When to Backup +------------------------------------------------------------------------------- + +### Regular Backup Schedule +Establish a regular backup schedule based on your organization's requirements: + +- **Before major upgrades** - Always backup before upgrading MAS or its dependencies +- **After configuration changes** - Backup after significant configuration modifications +- **Regular intervals** - Weekly or monthly backups for disaster recovery +- **Before cluster maintenance** - Backup before OpenShift cluster maintenance windows + +### Migration Scenarios +Backups are essential for: + +- **Cluster migration** - Moving MAS from one OpenShift cluster to another +- **Disaster recovery** - Recovering from cluster failures or data corruption +- **Environment cloning** - Creating test/dev environments from production backups +- **Version rollback** - Reverting to a previous configuration state + + +Component Selection 
+------------------------------------------------------------------------------- + +### Including SLS in Backups + +**Include SLS (`--include-sls` or default behavior)** when: + +- SLS is deployed **in-cluster** in the same OpenShift environment as MAS +- You are using the standard MAS installation with bundled SLS +- The SLS namespace is accessible from your backup environment +- You want a complete, self-contained backup for disaster recovery + +**Exclude SLS (`--exclude-sls`)** when: + +- SLS is deployed **externally** in a separate cluster or environment +- You are using a shared SLS instance across multiple MAS installations +- SLS is managed by a different team or organization +- The SLS namespace is not accessible from your backup environment +- You only need to backup MAS-specific configuration + +!!! note + The default behavior is to **include SLS** in backups. You must explicitly use `--exclude-sls` to skip SLS backup. + +### Data Reporter Operator (DRO) + +The Data Reporter Operator (DRO) is **not included in backup operations** as it is typically configured during restore or installation. DRO configuration is handled separately and can be: + +- **Installed during restore** - DRO will be installed when restoring from a backup when `--include-dro` is specified +- **Configured externally** - If using an external DRO instance, it should be configured independently +- **Skipped** - DRO installation can be skipped during restore if not required, use `--exclude-dro` to skip DRO installation + +!!! info + DRO backup and restore behavior is managed by the underlying [Ansible DevOps roles](https://ibm-mas.github.io/ansible-devops/playbooks/backup-restore/). The CLI backup command focuses on capturing MAS configuration and data, while DRO is handled during the restore process. + +### MongoDB Configuration + +The backup process supports **MongoDB Community Edition only**. 
Ensure you specify the correct MongoDB configuration: + +- **Namespace** - Where MongoDB is deployed (default: `mongoce`) +- **Instance Name** - MongoDB instance identifier (default: `mas-mongo-ce`) +- **Provider** - Must be `community` (only supported provider for backup) + +!!! warning + IBM Cloud Databases for MongoDB and other external MongoDB providers are not supported by the backup process. You must use their native backup mechanisms. + +### Certificate Manager + +Specify the certificate manager provider used in your environment: + +- **Red Hat Certificate Manager** (`--cert-manager-provider redhat`) - Default option, and the only supported provider. + +The backup captures certificate configurations but not the actual certificates, which are regenerated during restore. + +### MAS Application Backup + +The backup process supports backing up MAS application resources and persistent volume data. Currently supported: + +- **Manage Application** - Backs up Manage namespace resources and persistent volume data + +When backing up a Manage application, the following resources are included: + +**Namespace Resources**: +- `ManageApp` custom resource +- `ManageWorkspace` custom resource +- Encryption secrets (dynamically determined from ManageWorkspace CR) +- Certificates with `mas.ibm.com/instanceId` label +- Subscription and OperatorGroup +- IBM entitlement secret +- All referenced secrets (auto-discovered) + +**Persistent Volume Data** (if configured in ManageWorkspace CR): +- All persistent volumes defined in `spec.settings.deployment.persistentVolumes` +- Data backed up as compressed tar.gz archives +- Each PVC's mount path archived separately +- Common PVCs include JMS server data, custom fonts, and attachments + +!!! note + Application backup is optional and configured during the interactive backup process or via command-line parameters (`--backup-manage-app`, `--manage-workspace-id`). 
+ +### Db2 Database Backup + +The backup process supports backing up Db2 databases used by MAS applications. When backing up a Db2 database, the following are included: + +**Db2 Instance Resources**: +- `Db2uCluster` custom resource +- Secrets (instance password, certificates, LDAP credentials) +- ConfigMaps +- Services and routes +- Operator subscription + +**Database Data**: +- Complete database backup (full backup) +- Stored in the backup archive alongside other components +- Supports both online and offline backup modes + +**Backup Types**: + +- **Online Backup** - Database remains accessible during backup; requires archive logging enabled +- **Offline Backup** - Database unavailable during backup; works with circular logging (default configuration) + +!!! warning + If your Db2 instance uses circular logging (the default configuration), you **must** use offline backup type. Online backups require archive logging to be enabled via `LOGARCHMETH1` and `LOGARCHMETH2` configuration. + +!!! note + Db2 backup is optional and configured during the interactive backup process or via command-line parameters (`--backup-manage-db`, `--manage-db2-namespace`, `--manage-db2-instance-name`, `--manage-db2-backup-type`). + + +Backup Modes +------------------------------------------------------------------------------- + +### Interactive Mode + +Interactive mode guides you through the backup process with prompts for all required configuration. This is the recommended approach for manual backups. + +```bash +docker run -ti --rm quay.io/ibmmas/cli mas backup +``` + +The interactive session will: + +1. Prompt for OpenShift cluster connection +2. Display detected MAS instances +3. Request backup storage size +4. Offer auto-generated or custom backup version +5. Configure optional upload to S3 or Artifactory + +### Non-Interactive Mode + +Non-interactive mode is ideal for automation, scheduled backups, and CI/CD pipelines. 
All required parameters must be provided via command-line arguments. + +```bash +docker run -ti --rm quay.io/ibmmas/cli mas backup \ + --instance-id inst1 \ + --no-confirm +``` + + +Backup Scenarios - Non-Interactive Mode +------------------------------------------------------------------------------- + +### Scenario 1: Standard In-Cluster Deployment + +**Environment:** +- MAS with all dependencies in a single OpenShift cluster +- MongoDB Community Edition +- In-cluster SLS +- Red Hat Certificate Manager + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-storage-size 50Gi \ + --no-confirm +``` + +This uses all default values and includes SLS in the backup. + +### Scenario 2: External SLS Deployment + +**Environment:** +- MAS in OpenShift cluster +- MongoDB Community Edition in-cluster +- SLS deployed in separate cluster or external environment +- Red Hat Certificate Manager + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-storage-size 30Gi \ + --exclude-sls \ + --no-confirm +``` + +Use `--exclude-sls` to skip backing up SLS when it's managed externally. 
+ +### Scenario 3: Custom MongoDB Configuration and backup version + +**Environment:** +- MAS with custom MongoDB namespace +- Custom backup version desired +- Custom MongoDB instance name +- In-cluster SLS +- Red Hat Certificate Manager + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-version prod-backup-$(date +%Y%m%d) \ + --backup-storage-size 50Gi \ + --mongodb-namespace my-mongodb \ + --mongodb-instance-name custom-mongo-instance \ + --mongodb-provider community \ + --no-confirm +``` + +### Scenario 4: Backup with S3 Upload + +**Environment:** +- Standard MAS deployment +- Custom backup version desired +- Automatic upload to AWS S3 for off-site storage + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-version prod-$(date +%Y%m%d-%H%M%S) \ + --backup-storage-size 50Gi \ + --upload-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name mas-backups-prod \ + --s3-region us-east-1 \ + --no-confirm +``` + +!!! tip + Store AWS credentials securely using environment variables or secrets management systems rather than hardcoding them in scripts. + +### Scenario 5: Backup with Manage Application and Db2 Database + +**Environment:** +- Standard MAS deployment with Manage application +- Manage workspace with persistent volumes configured +- In-cluster Db2 database for Manage +- Need to backup application resources, PV data, and database + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-storage-size 100Gi \ + --backup-manage-app \ + --manage-workspace-id masdev \ + --backup-manage-db \ + --manage-db2-namespace db2u \ + --manage-db2-instance-name mas-inst1-masdev-manage \ + --manage-db2-backup-type offline \ + --no-confirm +``` + +!!! 
tip + When backing up Manage with Db2, ensure sufficient backup storage (100Gi+ recommended) to accommodate application PV data and database backups. Use offline backup type if your Db2 instance uses the default circular logging configuration. + +### Scenario 6: Backup with Manage Application Only (External Db2) + +**Environment:** +- MAS deployment with Manage application +- External Db2 database (managed separately) +- Only need to backup application resources and PV data + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-storage-size 50Gi \ + --backup-manage-app \ + --manage-workspace-id masdev \ + --no-confirm +``` + +!!! note + When using an external Db2 database, omit the `--backup-manage-db` flag. The database should be backed up separately using your organization's database backup procedures. + +### Scenario 7: Backup for Troubleshooting (No Cleanup) + +**Environment:** +- Backup for troubleshooting purposes +- Custom backup version desired +- Need to inspect workspace contents after backup +- Workspace cleanup disabled + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-version debug-$(date +%Y%m%d-%H%M%S) \ + --backup-storage-size 50Gi \ + --no-clean-backup \ + --no-confirm +``` + +!!! note + Use `--no-clean-backup` when you need to inspect the backup workspace contents for troubleshooting. Remember to manually clean up the workspaces later to free up storage. + +### Scenario 8: Minimal Backup (Skip Pre-Check) + +**Environment:** +- Emergency backup scenario +- Custom backup version desired +- Skip pre-backup validation for speed, and when the cluster is not 100% healthy + +**Backup Command:** +```bash +mas backup \ + --instance-id inst1 \ + --backup-version emergency-$(date +%Y%m%d-%H%M%S) \ + --backup-storage-size 50Gi \ + --skip-pre-check \ + --no-confirm +``` + +!!! warning + Use `--skip-pre-check` only in emergency situations. 
Pre-backup checks validate cluster health and can prevent incomplete backups. + + +Storage Requirements +------------------------------------------------------------------------------- + +### Backup Storage Sizing + +The backup storage size depends on several factors: + +| Component | Typical Size | Notes | +|-----------|-------------|-------| +| MAS Configuration | < 1 MB | Core MAS custom resources and configurations | +| MongoDB Database | 0.05-20 GB | Varies based on MAS app count and data volume | +| SLS Data | < 1 MB | License server database and configuration | +| IBM Catalogs | < 1 MB | Operator catalog definitions | +| Certificate Manager | < 1 MB | Certificate configurations | +| Manage App Resources | < 10 MB | Manage namespace Kubernetes resources | +| Manage PV Data | 1-100 GB | JMS server, fonts, attachments (if configured) | +| Db2 Instance Resources | < 10 MB | Db2 Kubernetes resources and metadata | +| Db2 Database Backup | Varies | 0.5-2x database size when compressed; depends on data volume | + +!!! tip + Monitor your first backup to determine actual storage requirements, then adjust the `--backup-storage-size` parameter for future backups. When backing up Manage with Db2, plan for significantly larger storage requirements (100GB+ recommended). + +### Storage Class Considerations + +The backup process automatically selects appropriate storage: + +- **Single Node OpenShift (SNO)**: Uses ReadWriteOnce (RWO) storage +- **Multi-node clusters**: Prefers ReadWriteMany (RWX) storage when available +- Falls back to RWO if RWX is not available + +The storage class is determined from your cluster's default storage classes. + + +Backup Process Details +------------------------------------------------------------------------------- + +### Pipeline Execution + +When you run `mas backup`, the following occurs: + +1. **Validation** - Verifies cluster connectivity and MAS instance existence +2. 
**Namespace Preparation** - Creates/updates `mas-{instance-id}-pipelines` namespace +3. **OpenShift Pipelines** - Validates or installs OpenShift Pipelines Operator +4. **PVC Creation** - Provisions persistent volume for backup storage +5. **Tekton Pipeline Launch** - Submits PipelineRun with configured parameters +6. **Component Backup** - Executes backup tasks in parallel where possible: + - IBM Catalogs backup + - Certificate Manager backup + - MongoDB backup + - SLS backup (if included) +7. **Suite Backup** - Backs up MAS core configuration +8. **Database Backup** (optional) - Backs up Db2 instance and database: + - Db2 instance resources backup + - Db2 database backup (online or offline) +9. **Application Backup** (optional) - Backs up MAS application resources and persistent volumes: + - Manage namespace resources backup + - Manage persistent volume data backup +10. **Archive Creation** - Compresses backup into tar.gz archives for each component +11. **Upload** (optional) - Uploads archives to S3 or Artifactory +12. **Workspace Cleanup** (optional, default: enabled) - Cleans backup and config workspaces to free up storage + +### Monitoring Progress + +After launching the backup, a URL to the Tekton PipelineRun is displayed: + +``` +View progress: + https://console-openshift-console.apps.cluster.example.com/k8s/ns/mas-inst1-pipelines/tekton.dev~v1beta1~PipelineRun/mas-backup-20240315-120000 +``` + +Use this URL to: + +- Monitor real-time backup progress +- View logs from individual backup tasks +- Troubleshoot any failures +- Verify successful completion + +### Workspace Cleanup + +By default, the backup pipeline automatically cleans the workspace directories after backup completion to free up storage space. This cleanup occurs in the pipeline's `finally` block, ensuring it runs regardless of backup success or failure. 
+ +**To disable workspace cleanup:** + +- **Interactive mode**: Answer "No" when prompted about cleaning workspaces +- **Non-interactive mode**: Use the `--no-clean-backup` flag + +**When to disable cleanup:** + +- Troubleshooting backup issues and need to inspect workspace contents +- Running multiple backups in sequence and want to preserve intermediate files +- Custom post-backup processing that requires access to workspace files + +!!! tip + Workspace cleanup is recommended for production backups to prevent PVC storage exhaustion. Only disable it when you have a specific need to inspect or process the workspace contents. + + +Best Practices +------------------------------------------------------------------------------- + +### Backup Strategy + +1. **Regular Schedule** - Implement automated backups on a regular schedule +2. **Version Naming** - Use descriptive backup versions (e.g., `prod-20240315-pre-upgrade`) +3. **Retention Policy** - Define how long to keep backups based on compliance requirements +4. **Off-site Storage** - Upload backups to S3 or Artifactory for disaster recovery +5. **Test Restores** - Periodically test restore procedures in non-production environments +6. **Document Configuration** - Keep records of custom configurations and dependencies +7. **Application Backups** - Include Manage application and Db2 database in regular backup schedule +8. **Coordinate Backups** - When backing up Manage, always include the Db2 database for consistency +9. **Storage Planning** - Allocate sufficient backup storage when including applications and databases (100Gi+ recommended) + +### Security Considerations + +1. **Credentials** - Never hardcode credentials in scripts; use environment variables or secrets +2. **Access Control** - Restrict access to backup storage and archives +3. **Encryption** - Consider encrypting backup archives for sensitive environments +4. 
**Audit Trail** - Maintain logs of backup operations and access + +### Automation + +For automated backups, you have several options depending on your infrastructure and requirements: + +#### Option 1: Shell Script with MAS CLI + +Create a simple shell script or CI/CD pipeline using the MAS CLI: + +```bash +#!/bin/bash +# Automated MAS Backup Script + +INSTANCE_ID="inst1" +BACKUP_VERSION="auto-$(date +%Y%m%d-%H%M%S)" +S3_BUCKET="mas-backups-prod" + +# Login to OpenShift +oc login --token=${OCP_TOKEN} --server=${OCP_SERVER} + +# Run backup with S3 upload +docker run --rm \ + -v ~/.kube:/root/.kube:z \ + -v ~:/mnt/home \ + quay.io/ibmmas/cli mas backup \ + --instance-id ${INSTANCE_ID} \ + --backup-version ${BACKUP_VERSION} \ + --backup-storage-size 50Gi \ + --upload-backup \ + --aws-access-key-id ${AWS_ACCESS_KEY_ID} \ + --aws-secret-access-key ${AWS_SECRET_ACCESS_KEY} \ + --s3-bucket-name ${S3_BUCKET} \ + --s3-region us-east-1 \ + --no-confirm + +# Check exit code +if [ $? -eq 0 ]; then + echo "Backup completed successfully: ${BACKUP_VERSION}" +else + echo "Backup failed!" + exit 1 +fi +``` + +#### Option 2: Red Hat Ansible Automation Platform + +For enterprise-grade automation with advanced features, use **Red Hat Ansible Automation Platform (AAP)** to execute the backup playbooks and roles directly. The [MAS DevOps Execution Environment](https://ibm-mas.github.io/ansible-devops/execution-environment/) provides a pre-built container image (`quay.io/ibmmas/ansible-devops-ee`) that includes the `ibm.mas_devops` collection and all required dependencies. 
+ +**Benefits of using AAP:** + +- **Centralized Management** - Single control plane for all automation +- **Role-Based Access Control (RBAC)** - Fine-grained permissions for backup operations +- **Scheduling** - Built-in job scheduling for regular backups +- **Audit Logging** - Complete audit trail of all backup operations +- **Credential Management** - Secure storage and injection of credentials +- **Notifications** - Integration with email, Slack, PagerDuty, and other systems +- **Job Templates** - Reusable backup configurations +- **Workflow Automation** - Chain backup with other operations (e.g., validation, upload) + +**To use AAP for MAS backups:** + +1. **Configure the Execution Environment** - Set up AAP to use the `quay.io/ibmmas/ansible-devops-ee` image (see [Execution Environment setup guide](https://ibm-mas.github.io/ansible-devops/execution-environment/)) +2. **Create a Project** - Point to your playbook repository (or use the sample playbooks as a starting point) +3. **Create Job Templates** - Configure job templates for backup operations using the [`ibm.mas_devops.br_core`](https://ibm-mas.github.io/ansible-devops/playbooks/backup-restore/) playbook +4. **Configure Credentials** - Set up OpenShift credentials and any cloud storage credentials +5. **Schedule Backups** - Set up recurring schedules for automated backups +6. 
**Configure Notifications** - Set up alerts for backup success/failure + +**Example AAP Job Template Variables:** + +```yaml +mas_instance_id: inst1 +br_action: backup +mas_backup_dir: /backup/mas +backup_version: "{{ ansible_date_time.date }}-{{ ansible_date_time.hour }}{{ ansible_date_time.minute }}" +include_sls: true +mongodb_namespace: mongoce +``` + +For detailed information on setting up and using Ansible Automation Platform with MAS DevOps, see: +- [MAS DevOps Execution Environment](https://ibm-mas.github.io/ansible-devops/execution-environment/) - Complete AAP setup guide +- [Backup and Restore Playbook](https://ibm-mas.github.io/ansible-devops/playbooks/backup-restore/) - Playbook documentation and examples + +!!! tip + AAP is recommended for production environments where you need enterprise features like RBAC, audit logging, and centralized management. For simpler use cases, the MAS CLI with shell scripts may be sufficient. + + +Troubleshooting +------------------------------------------------------------------------------- + +### Common Issues + +**Issue: "No MAS instances were detected on the cluster"** + +- Verify you're connected to the correct OpenShift cluster +- Ensure MAS is installed and the Suite CR exists +- Check that you have permissions to view Suite resources + +**Issue: "OpenShift Pipelines Operator installation failed"** + +- Verify cluster admin permissions +- Check cluster connectivity and operator hub availability +- Review operator installation logs + +**Issue: "Insufficient storage for backup PVC"** + +- Increase `--backup-storage-size` parameter +- Verify storage class has available capacity +- Check cluster storage quotas + +**Issue: "MongoDB backup failed"** + +- Verify MongoDB namespace and instance name are correct +- Ensure MongoDB is running and accessible +- Check MongoDB provider is set to `community` + +**Issue: "SLS backup failed"** + +- Verify SLS namespace is correct +- Ensure SLS is running and accessible +- Consider 
using `--exclude-sls` if SLS is external + +**Issue: "Upload to S3 failed"** + +- Verify AWS credentials are correct +- Check S3 bucket exists and is accessible +- Verify network connectivity to AWS +- Ensure IAM permissions allow PutObject operations + +**Issue: "Manage application backup failed"** + +- Verify Manage workspace ID is correct +- Ensure ManageWorkspace CR exists in the cluster +- Check that Manage pods are running and healthy +- Verify persistent volumes are properly configured in ManageWorkspace CR +- Ensure sufficient storage space in backup PVC + +**Issue: "Db2 backup failed"** + +- Verify Db2 namespace and instance name are correct +- Ensure Db2 instance is running and accessible +- Check backup type matches Db2 logging configuration (use offline for circular logging) +- Verify sufficient storage space in Db2 backup PVC +- Review Db2 pod logs for database-specific errors + +**Issue: "Manage persistent volume backup is slow"** + +- PV backup duration depends on data volume +- Large JMS server or attachment PVCs can take significant time +- Monitor backup progress in Tekton pipeline logs +- Consider scheduling backups during maintenance windows +- Ensure network bandwidth is sufficient for data transfer + + +Restore Overview +------------------------------------------------------------------------------- + +The MAS restore process uses Tekton pipelines to orchestrate the restoration of MAS instances from backup archives. The restore operation can recover a complete MAS environment or selectively restore components based on your requirements. The restore process provides extensive configuration flexibility, allowing you to modify key settings during restoration such as domain names, SLS/DRO URLs, and storage classes. 
+
+### Restore Components
+
+The restore process handles the following components:
+
+- **IBM Operator Catalogs** - Restores catalog source definitions
+- **Certificate Manager** - Restores certificate configurations (RedHat only)
+- **MongoDB** - Restores MongoDB instance with SLS & MAS databases (Community Edition only)
+- **Suite License Service (SLS)** - Restores SLS instance with license server data (optional, can use external SLS)
+- **MAS Suite Configuration** - Restores core MAS instance configuration and custom resources
+- **Suite-level SLSCfg** - Restores or provides custom Suite-level SLS configuration with optional URL override
+- **Suite-level BASCfg/DROCfg** - Restores or provides custom Suite-level DRO/BAS configuration with optional URL override
+- **Manage Database** - Optionally restores the in-cluster Db2 database associated with the Manage workspace
+- **Manage Application** - Optionally restores Manage application namespace resources and persistent volume data
+- **Grafana** - Optionally installs Grafana for monitoring (not part of backup)
+- **Data Reporter Operator (DRO)** - Optionally installs DRO (not part of backup); when DRO is installed, an auto-generated Suite-level BASCfg CR will be applied automatically.
+
+### Restore Limitations
+
+!!! warning
+    Be aware of the following limitations before performing a restore:
+
+- **Restoring from S3 or Artifactory Only** - When using the pipeline, the restore process is limited to restoring from S3 or Artifactory. Restoring from a local backup file is not supported yet.
+- **MongoDB Community Edition only** - Restore supports only in-cluster MongoDB Community Edition. Restoring to an external or enterprise MongoDB deployment is not supported.
+- **Db2 standalone operator only** - The restore process supports only the in-cluster standalone Db2 operator. Other Db2 operator implementations are not included. 
+- **Db2uInstance not supported, only Db2uCluster** - The restore process does not currently support Db2uInstance; support will be added in a future release.
+- **Certificate Manager (RedHat only)** - Certificate Manager restore is supported only for RedHat Certificate Manager. Other implementations are not handled during restore.
+- **Same MAS version required** - Restoring a backup to a cluster running a different MAS version may result in incompatibilities. It is strongly recommended to restore to the same MAS version as the backup source.
+- **Same MAS Instance ID required** - It is strongly recommended to restore to the same MAS instance ID as the backup source.
+- **Manage application only for app restore** - Only the Manage application is supported. Other MAS applications will be supported in future releases.
+- **Tekton pipeline dependency** - The restore process requires Tekton pipelines to be available and functional on the target cluster.
+- **Target cluster must be pre-provisioned** - The restore process does not provision a new OpenShift cluster. A running, accessible cluster with sufficient resources must already exist.
+- **Storage class compatibility** - The target cluster must have compatible storage classes. If storage classes differ from the source cluster, overrides must be explicitly configured.
+- **No partial component restore** - Individual components cannot be selectively restored in isolation without running the full pipeline; component selection is configured at pipeline launch time.
+- **Manual Certificate Management Restriction** - Certificates and secrets from backups will be restored. However, changing the domain during the restoration process will cause issues with manual certificates/secrets, and manual updates of certificates and secrets are required.
+- **Domain changes require DNS updates** - If restoring with a domain change, DNS records and TLS certificates must be updated manually outside of the restore process. 
+- **Single MAS instance per restore** - Each restore operation targets a single MAS instance. Restoring multiple instances requires separate restore runs.
+- **Grafana and DRO are not restored from backup** - Grafana and DRO are optionally installed fresh during restore; their previous configurations are not recovered from the backup archive. However, the Suite-level BASCfg CR resource is backed up and can be restored.
+- **No support for CP4D** - The restore process does not support restoring CP4D environments.
+
+!!! tip
+    We are working on reducing the limitations of the restore process and will be adding new capabilities and support for other MAS applications in future releases.
+
+### Configuration Flexibility
+
+The restore process supports several configuration overrides to adapt the restored environment to new infrastructure:
+
+- **Domain Configuration** - Change the MAS domain in the Suite CR during restore
+- **SLS Configuration** - Restore Suite-level SLSCfg from backup or provide custom configuration file, with optional SLS URL override
+- **DRO/BAS Configuration** - Restore Suite-level BASCfg from backup or provide custom configuration file, with optional DRO URL override
+- **Storage Class Override** - Override storage classes for all components (MongoDB, Manage app, Manage DB) when restoring to clusters with different storage providers
+- **SLS Domain Override** - Change the SLS domain used in the License Service CR
+- **Backup Download** - Download backup archives from S3 or Artifactory before restore (useful for cross-cluster restores)
+
+### Backup Archive Management
+
+The restore process can work with backup archives in multiple ways:
+
+- **Local Backup** - Restore from backup archives already present in the cluster
+- **S3 Download** - Download backup archives from S3-compatible storage before restore
+- **Artifactory Download** - Download backup archives from Artifactory (development mode only)
+- **Custom Archive Names** - Support for custom 
backup archive naming conventions +- **Automatic Cleanup** - Optional cleanup of downloaded archives after successful restore + +When downloading from S3 or Artifactory, the `download_backup_archive` role selectively downloads only the archives required for the restore operation. The following archive selection parameters control which archives are downloaded: + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `include_sls_archive` | `false` | Download the SLS backup archive | +| `include_manage_db_archive` | `false` | Download the Manage Db2 database backup archive | +| `include_manage_app_archive` | `false` | Download the Manage application backup archive | + +These parameters are automatically set by the restore pipeline based on the restore configuration (e.g. `--restore-manage-app`, `--restore-manage-db`, `--include-sls`), so you do not need to set them manually when using the `mas restore` command. + +### Ansible DevOps Integration + +The `mas restore` command launches a Tekton pipeline that executes the following Ansible roles from the [IBM MAS DevOps Collection](https://ibm-mas.github.io/ansible-devops/): + +- [`ibm.mas_devops.ibm_catalogs`](https://ibm-mas.github.io/ansible-devops/roles/ibm_catalogs/) - Restores IBM Operator Catalog definitions +- [`ibm.mas_devops.cert_manager`](https://ibm-mas.github.io/ansible-devops/roles/cert_manager/) - Restores Certificate Manager configurations +- [`ibm.mas_devops.mongodb`](https://ibm-mas.github.io/ansible-devops/roles/mongodb/) - Restores MongoDB Community Edition instance and database +- [`ibm.mas_devops.sls`](https://ibm-mas.github.io/ansible-devops/roles/sls/) - Restores Suite License Service data +- [`ibm.mas_devops.suite_restore`](https://ibm-mas.github.io/ansible-devops/roles/suite_restore/) - Restores MAS Core configuration +- [`ibm.mas_devops.db2`](https://ibm-mas.github.io/ansible-devops/roles/db2/) - Restores Db2u instance and database +- 
[`ibm.mas_devops.suite_app_restore`](https://ibm-mas.github.io/ansible-devops/roles/suite_app_restore/) - Restores supported MAS Application configuration +- [`ibm.mas_devops.grafana`](https://ibm-mas.github.io/ansible-devops/roles/grafana/) - Installs Grafana (optional) +- [`ibm.mas_devops.dro`](https://ibm-mas.github.io/ansible-devops/roles/dro/) - Installs Data Reporter Operator (optional) + + +Restore Modes +------------------------------------------------------------------------------- + +### Interactive Mode + +Interactive mode guides you through the restore process with prompts for all required configuration. This is the recommended approach for manual restores. + +```bash +docker run -ti --rm quay.io/ibmmas/cli mas restore +``` + +The interactive session will: + +1. Prompt for OpenShift cluster connection +2. Request MAS instance ID (must match backup) +3. Request backup version to restore +4. Configure MongoDB storage class override +5. Configure Grafana installation +6. Configure SLS restoration +7. Configure DRO installation +8. Configure MAS domain settings +9. Configure SLS and DRO configuration options +10. Configure Manage application restore +11. Configure Manage Db2 restore +12. Request backup storage size +13. Offer optional download from S3 or Artifactory + +### Non-Interactive Mode + +Non-interactive mode is ideal for automation, scheduled restores, and CI/CD pipelines. All required parameters must be provided via command-line arguments. + +```bash +docker run -ti --rm quay.io/ibmmas/cli mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --no-confirm +``` + + +Restore Process Details +------------------------------------------------------------------------------- + +### Pipeline Execution + +When you run `mas restore`, the following occurs: + +1. **Validation** - Verifies cluster connectivity and prerequisites +2. **Namespace Preparation** - Creates/updates `mas-{instance-id}-pipelines` namespace +3. 
**OpenShift Pipelines** - Validates or installs OpenShift Pipelines Operator +4. **PVC Creation** - Provisions persistent volume for backup storage +5. **Tekton Pipeline Launch** - Submits PipelineRun with configured parameters +6. **Pre-Restore Check** - Validates cluster readiness +7. **Download** (optional) - Downloads backup archive from S3 or Artifactory +8. **Component Restore** - Executes restore tasks in sequence: + - IBM Catalogs restore + - Certificate Manager restore + - Grafana installation (if enabled) + - MongoDB restore (with optional storage class override) + - SLS restore (if included) + - DRO installation (if enabled) +9. **Suite Restore** - Restores MAS core configuration with optional domain/URL overrides +10. **Manage Application Restore** (if enabled) - Restores Manage application and database +11. **Post-Restore Verification** - Validates restored MAS instance +12. **Workspace Cleanup** (optional, default: enabled) - Cleans backup and config workspaces + +### Monitoring Progress + +After launching the restore, a URL to the Tekton PipelineRun is displayed: + +``` +View progress: + https://console-openshift-console.apps.cluster.example.com/k8s/ns/mas-inst1-pipelines/tekton.dev~v1beta1~PipelineRun/mas-restore-20260117-191701-YYMMDD-HHMM +``` + +Use this URL to: + +- Monitor real-time restore progress +- View logs from individual restore tasks +- Troubleshoot any failures +- Verify successful completion + +### Configuration Flexibility + +The restore process provides several options for handling configurations: + +#### MAS Domain Configuration +- **From Backup** (default) - Uses the domain stored in the Suite backup +- **Override** - Specify `--mas-domain-restore` to change the domain during restore + +#### SLS Configuration +- **From Backup** (default) - Restores SLSCfg from backup with `--include-slscfg-from-backup` +- **Custom File** - Use `--exclude-slscfg-from-backup` and provide `--sls-cfg-file` +- **Change URL** - Use `--sls-url-restore` 
to modify the SLS URL while keeping other configuration + +#### DRO Configuration +- **From Backup** (default) - Restores BASCfg from backup with `--include-drocfg-from-backup` +- **Custom File** - Use `--exclude-drocfg-from-backup` and provide `--dro-cfg-file` +- **Change URL** - Use `--dro-url-restore` to modify the DRO URL while keeping other configuration + + +Restore Scenarios - Non-Interactive Mode +------------------------------------------------------------------------------- + +### Scenario 1: Basic Restore from Local Backup + +**Environment:** +- Backup archive already present in the cluster PVC +- Standard restore with all defaults + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --no-confirm +``` + +### Scenario 2: Restore with S3 Download + +**Environment:** +- Backup stored in AWS S3 +- Need to download before restore + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --download-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name mas-backups-prod \ + --s3-region us-east-1 \ + --no-confirm +``` + +### Scenario 3: Restore with Domain Change + +**Environment:** +- Restoring to a different cluster with new domain +- Need to update MAS domain + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --mas-domain-restore new-cluster.example.com \ + --no-confirm +``` + +### Scenario 4: Restore with External SLS + +**Environment:** +- Using external SLS instance +- Skip SLS restore but provide custom SLS configuration + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --exclude-sls \ + --exclude-slscfg-from-backup \ + --sls-cfg-file /path/to/custom-sls-config.yaml \ + --no-confirm +``` 
+ +### Scenario 5: Restore with SLS URL Override + +**Environment:** +- Restore SLS from backup but change the URL +- SLS moved to different endpoint + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --include-sls \ + --include-slscfg-from-backup \ + --sls-url-restore https://new-sls.example.com \ + --no-confirm +``` + +### Scenario 6: Restore with DRO Installation + +**Environment:** +- Install new DRO instance during restore +- Provide DRO configuration details + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --include-dro \ + --ibm-entitlement-key YOUR_ENTITLEMENT_KEY \ #pragma: allowlist secret + --contact-email admin@example.com \ + --contact-firstname John \ + --contact-lastname Doe \ + --dro-namespace redhat-marketplace \ + --no-confirm +``` + +### Scenario 7: Restore Without Grafana + +**Environment:** +- Skip Grafana installation +- Monitoring not required + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --exclude-grafana \ + --no-confirm +``` + +### Scenario 8: Complete Restore with All Options + +**Environment:** +- Download from S3 +- Change domain and SLS URL +- Install DRO and Grafana +- Custom storage size + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --backup-storage-size 100Gi \ + --mas-domain-restore new-cluster.example.com \ + --include-sls \ + --include-slscfg-from-backup \ + --sls-url-restore https://new-sls.example.com \ + --include-drocfg-from-backup \ + --dro-url-restore https://new-dro.example.com \ + --include-grafana \ + --include-dro \ + --ibm-entitlement-key YOUR_ENTITLEMENT_KEY \ #pragma: allowlist secret + --contact-email admin@example.com \ + --contact-firstname John \ + --contact-lastname Doe \ + --dro-namespace redhat-marketplace \ + --download-backup \ + --aws-access-key-id 
AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name mas-backups-prod \ + --s3-region us-east-1 \ + --no-confirm +``` + +### Scenario 9: Restore for Troubleshooting (No Cleanup) + +**Environment:** +- Need to inspect workspace contents after restore +- Workspace cleanup disabled + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --no-clean-backup \ + --no-confirm +``` + +!!! note + Use `--no-clean-backup` when you need to inspect the restore workspace contents for troubleshooting. Remember to manually clean up the workspaces later to free up storage. + +### Scenario 10: Emergency Restore (Skip Pre-Check) + +**Environment:** +- Emergency restore scenario +- Skip pre-restore validation for speed + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --skip-pre-check \ + --no-confirm +``` + +!!! warning + Use `--skip-pre-check` only in emergency situations. Pre-restore checks validate cluster readiness and can prevent restore failures. 
+ +### Scenario 11: Restore with MongoDB Storage Class Override + +**Environment:** +- Restoring to a cluster with different storage classes +- Need to override MongoDB storage class + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --override-mongodb-storageclass \ + --mongodb-storageclass-name custom-rwo-storage \ + --no-confirm +``` + +### Scenario 12: Restore with Manage Application + +**Environment:** +- Need to restore Manage application in addition to MAS Suite +- Restore Manage namespace resources and persistent volume data + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --restore-manage-app \ + --no-confirm +``` + +### Scenario 13: Restore with Manage Application and Database + +**Environment:** +- Restore both Manage application and its incluster Db2 database +- Complete Manage workspace restoration + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --restore-manage-app \ + --restore-manage-db \ + --no-confirm +``` + +!!! warning + Manage database restore is an offline operation. The Manage application will be unavailable during the restore process. + +### Scenario 14: Restore Manage with Custom Storage Classes + +**Environment:** +- Restoring to a cluster with different storage infrastructure +- Need to override storage classes for both Manage app and Db2 + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --restore-manage-app \ + --restore-manage-db \ + --override-manage-app-storageclass \ + --manage-app-storage-class-rwx custom-rwx-storage \ + --manage-app-storage-class-rwo custom-rwo-storage \ + --override-manage-db-storageclass \ + --manage-db-storage-class-rwx custom-rwx-storage \ + --manage-db-storage-class-rwo custom-rwo-storage \ + --no-confirm +``` + +!!! 
note + The Manage Db2 storage class override now uses a single ReadWriteMany (`--manage-db-storage-class-rwx`) and ReadWriteOnce (`--manage-db-storage-class-rwo`) storage class, applied across all Db2 persistent volumes based on the access modes. The previous per-volume flags (`--manage-db-meta-storage-class`, `--manage-db-data-storage-class`, `--manage-db-backup-storage-class`, `--manage-db-logs-storage-class`, `--manage-db-temp-storage-class`) have been removed. + +### Scenario 15: Complete Restore with MongoDB Override and Manage + +**Environment:** +- Comprehensive restore with all new features +- Override MongoDB storage class +- Restore Manage application and database +- Download from S3 + +**Restore Command:** +```bash +mas restore \ + --instance-id inst1 \ + --restore-version 20260117-191701 \ + --backup-storage-size 100Gi \ + --override-mongodb-storageclass \ + --mongodb-storageclass-name custom-rwo-storage \ + --restore-manage-app \ + --restore-manage-db \ + --override-manage-db-storageclass \ + --manage-db-storage-class-rwx custom-rwx-storage \ + --manage-db-storage-class-rwo custom-rwo-storage \ + --download-backup \ + --aws-access-key-id AKIAIOSFODNN7EXAMPLE \ #pragma: allowlist secret + --aws-secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \ #pragma: allowlist secret + --s3-bucket-name mas-backups-prod \ + --s3-region us-east-1 \ + --no-confirm +``` + + +Restore Best Practices +------------------------------------------------------------------------------- + +### Pre-Restore Checklist + +1. **Verify Backup Integrity** - Ensure backup archives are complete and accessible +2. **Check Cluster Resources** - Verify sufficient CPU, memory, and storage +3. **Review Target Environment** - Confirm cluster version and configuration compatibility +4. **Plan Domain Changes** - Determine if domain or URL changes are needed +5. **Prepare External Services** - Ensure external SLS/DRO are accessible if used +6. 
**Review Storage Classes** - Identify if MongoDB or Manage storage class overrides are needed +7. **Plan Manage Restore** - Determine if Manage application and database should be restored +8. **Document Configuration** - Record any custom configurations or overrides + +### During Restore + +1. **Monitor Pipeline** - Watch the Tekton PipelineRun for any issues +2. **Check Logs** - Review task logs if any failures occur +3. **Verify Components** - Ensure each component restores successfully +4. **Note Timing** - Track restore duration for future planning + +### Post-Restore Verification + +1. **Validate Suite Status** - Confirm MAS Suite CR is ready +2. **Check Application Access** - Verify MAS applications are accessible +3. **Test Integrations** - Validate connections to databases and external services +4. **Verify MongoDB** - Confirm MongoDB is running with correct storage class if overridden +5. **Validate Manage Application** - If restored, verify Manage application is accessible and functional +6. **Check Manage Database** - If restored, confirm Db2 database is running with correct storage classes +7. **Review Configurations** - Confirm all configurations are correct +8. **Update DNS** - Update DNS records if domain changed +9. 
**Test Functionality** - Perform smoke tests on critical functions + +### Common Restore Scenarios + +#### Disaster Recovery +- Use latest backup from off-site storage +- May require domain and URL changes +- Verify all external dependencies are available +- Consider MongoDB storage class override if infrastructure changed +- Include Manage application and database restore if needed + +#### Cluster Migration +- Download backup from source cluster storage +- Change domain to match new cluster +- Update SLS and DRO URLs if needed +- Override MongoDB and Manage storage classes for different infrastructure +- Verify network connectivity and routes +- Plan for Manage database downtime during restore + +#### Environment Cloning +- Use production backup for dev/test +- Change domain to avoid conflicts +- Consider using external SLS to share licenses +- May exclude DRO for non-production environments +- Override storage classes to use lower-cost storage in non-production +- Optionally restore Manage application for testing + + +Restore Troubleshooting +------------------------------------------------------------------------------- + +### Common Restore Issues + +**Issue: "Backup archive not found"** + +- Verify backup archive exists in PVC or download location +- Check backup version matches the archive name +- Ensure download credentials are correct if downloading from S3/Artifactory + +**Issue: "Pre-restore check failed"** + +- Review cluster resource availability +- Check OpenShift version compatibility +- Verify required operators are available +- Use `--skip-pre-check` only if necessary + +**Issue: "MongoDB restore failed"** + +- Verify MongoDB namespace and instance name match backup +- Ensure sufficient storage for MongoDB data +- Check MongoDB operator is installed and ready +- If using storage class override, verify the storage class exists and is accessible +- Ensure the specified storage class supports ReadWriteOnce access mode + +**Issue: "SLS restore failed"** 
+
+- Verify SLS namespace is correct
+- Check if using `--include-sls` or `--exclude-sls` appropriately
+- Ensure SLS configuration file is valid if using custom config
+
+**Issue: "Suite restore failed with domain mismatch"**
+
+- Use `--mas-domain-restore` to override domain from backup
+- Verify DNS records are updated for new domain
+- Check certificate configurations match new domain
+
+**Issue: "DRO installation failed"**
+
+- Verify IBM entitlement key is valid
+- Check DRO namespace has sufficient permissions
+- Ensure contact information is provided correctly
+
+**Issue: "Download from S3 failed"**
+
+- Verify AWS credentials are correct
+- Check S3 bucket exists and is accessible
+- Verify network connectivity to AWS
+- Ensure IAM permissions allow GetObject operations
+
+**Issue: "Manage application restore failed"**
+
+- Verify Manage workspace exists in the backup
+- Ensure sufficient storage for Manage application persistent volumes
+- Check that storage class overrides (if specified) are valid and accessible
+- Verify both ReadWriteMany and ReadWriteOnce storage classes are available if using overrides
+- Review Manage namespace for any conflicting resources
+
+**Issue: "Manage Db2 database restore failed"**
+
+- Verify Db2 instance exists in the backup
+- Ensure sufficient storage for all Db2 persistent volumes (meta, data, backup, logs, temp)
+- Check that all specified storage classes exist and support required access modes
+- Verify Db2 operator is installed and ready
+- Review Db2 pod logs for specific error messages
+- Note: Db2 restore is an offline operation - ensure no active connections during restore
+
+**Issue: "Storage class not found during restore"**
+
+- Verify the specified storage class exists in the target cluster: `oc get storageclass`
+- Check storage class supports the required access mode (RWO or RWX)
+- If using cluster defaults, ensure default storage classes are configured
+- Review storage class provisioner compatibility with the cluster infrastructure
+
+**Issue: "Configuration file not found"**
+
+- Verify custom config file paths are correct
+- Ensure files are accessible from the CLI container
+- Check file format is valid YAML
+
+
+Additional Resources
+-------------------------------------------------------------------------------
+
+### MAS CLI Documentation
+- [Backup Command Reference](../commands/backup.md) - Complete backup command-line options and usage
+- [Restore Command Reference](../commands/restore.md) - Complete restore command-line options and usage
+
+### Ansible DevOps Collection
+- [Backup and Restore Playbook](https://ibm-mas.github.io/ansible-devops/playbooks/backup-restore/) - Detailed Ansible playbook documentation
+- [Execution Environment](https://ibm-mas.github.io/ansible-devops/execution-environment/) - Ansible Automation Platform setup guide
+- [IBM Catalogs Role](https://ibm-mas.github.io/ansible-devops/roles/ibm_catalogs/) - IBM Operator Catalog backup/restore
+- [Certificate Manager Role](https://ibm-mas.github.io/ansible-devops/roles/cert_manager/) - Certificate Manager backup/restore
+- [MongoDB Role](https://ibm-mas.github.io/ansible-devops/roles/mongodb/) - MongoDB backup/restore
+- [SLS Role](https://ibm-mas.github.io/ansible-devops/roles/sls/) - Suite License Service backup/restore
+- [Suite Backup Role](https://ibm-mas.github.io/ansible-devops/roles/suite_backup/) - MAS Core backup
+- [Suite Restore Role](https://ibm-mas.github.io/ansible-devops/roles/suite_restore/) - MAS Core restore
+- [Suite App Backup Role](https://ibm-mas.github.io/ansible-devops/roles/suite_app_backup/) - MAS application backup (generic)
+- [Db2 Role](https://ibm-mas.github.io/ansible-devops/roles/db2/) - Db2 database backup/restore
+- [Grafana Role](https://ibm-mas.github.io/ansible-devops/roles/grafana/) - Grafana installation
+- [DRO Role](https://ibm-mas.github.io/ansible-devops/roles/dro/) - Data Reporter Operator installation
+
+### External Documentation
+- [MAS 
Documentation](https://www.ibm.com/docs/en/mas) - Official IBM Maximo Application Suite documentation +- [OpenShift Pipelines](https://docs.openshift.com/container-platform/latest/cicd/pipelines/understanding-openshift-pipelines.html) - Tekton pipeline documentation +- [Ansible DevOps Collection](https://ibm-mas.github.io/ansible-devops/) - Complete Ansible automation documentation +- [Red Hat Ansible Automation Platform](https://www.redhat.com/en/technologies/management/ansible) - Enterprise automation platform \ No newline at end of file diff --git a/docs/img/backup-cmd.png b/docs/img/backup-cmd.png new file mode 100644 index 00000000000..195cb357d8a Binary files /dev/null and b/docs/img/backup-cmd.png differ diff --git a/docs/index.md b/docs/index.md index 6328730bd67..a507894fa30 100644 --- a/docs/index.md +++ b/docs/index.md @@ -61,3 +61,5 @@ Not all functions supported in the container image are available in the standalo | [provision-roks](commands/provision-roks.md) | ✓ | ✕ | | [provision-rosa](commands/provision-rosa.md) | ✓ | ✕ | | [configtool-oidc](commands/configtool-oidc.md) | ✓ | ✕ | +| [backup](commands/backup.md) | ✓ | ✓ | +| [restore](commands/restore.md) | ✓ | ✓ | diff --git a/image/cli/app-root/src/.bashrc b/image/cli/app-root/src/.bashrc index 92330cd017b..ffe46342941 100644 --- a/image/cli/app-root/src/.bashrc +++ b/image/cli/app-root/src/.bashrc @@ -27,6 +27,8 @@ echo " - ${TEXT_BOLD}${COLOR_GREEN}mas update${TEXT_RESET} to apply a new catal echo " - ${TEXT_BOLD}${COLOR_GREEN}mas upgrade${TEXT_RESET} to upgrade an existing MAS install to a new release" echo " - ${TEXT_BOLD}${COLOR_GREEN}mas must-gather${TEXT_RESET} to perform must-gather against the target cluster" echo " - ${TEXT_BOLD}${COLOR_GREEN}mas uninstall${TEXT_RESET} to uninstall a MAS instance" +echo " - ${TEXT_BOLD}${COLOR_GREEN}mas backup${TEXT_RESET} to backup a MAS instance" +echo " - ${TEXT_BOLD}${COLOR_GREEN}mas restore${TEXT_RESET} to restore a MAS instance" # None of these 
functions are tested/supported on s390x /ppc64le yet if [ $arch != "s390x" ] && [ $arch != "ppc64le" ]; then diff --git a/image/cli/mascli/mas b/image/cli/mascli/mas index d45337bb1cc..a8d60e9aaaa 100755 --- a/image/cli/mascli/mas +++ b/image/cli/mascli/mas @@ -330,6 +330,32 @@ case $1 in debug "$@" ;; + backup) + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" >> $LOGFILE + echo "!! backup !!" >> $LOGFILE + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" >> $LOGFILE + + echo "${TEXT_UNDERLINE}IBM Maximo Application Suite Update Manager (v${VERSION})${TEXT_RESET}" + echo "Powered by ${COLOR_CYAN}${TEXT_UNDERLINE}https://github.com/ibm-mas/ansible-devops/${TEXT_RESET} and ${COLOR_CYAN}${TEXT_UNDERLINE}https://tekton.dev/${TEXT_RESET}" + # Take the first parameter off (it will be "backup") + shift + # Run the new Python-based install + mas-cli backup "$@" + ;; + + restore) + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" >> $LOGFILE + echo "!! restore !!" >> $LOGFILE + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
>> $LOGFILE + + echo "${TEXT_UNDERLINE}IBM Maximo Application Suite Restore Manager (v${VERSION})${TEXT_RESET}" + echo "Powered by ${COLOR_CYAN}${TEXT_UNDERLINE}https://github.com/ibm-mas/ansible-devops/${TEXT_RESET} and ${COLOR_CYAN}${TEXT_UNDERLINE}https://tekton.dev/${TEXT_RESET}" + # Take the first parameter off (it will be "restore") + shift + # Run the new Python-based restore + mas-cli restore "$@" + ;; + gitops-bootstrap) echo "${TEXT_UNDERLINE}IBM Maximo Application Suite GitOps Manager (v${VERSION})${TEXT_RESET}" echo "Powered by ${COLOR_CYAN}${TEXT_UNDERLINE}https://github.com/ibm-mas/gitops/${TEXT_RESET}" diff --git a/mkdocs.yml b/mkdocs.yml index 39e408e3b8b..d58462096e8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -16,6 +16,7 @@ nav: - "Update": guides/update.md - "Upgrade": guides/upgrade.md - "Uninstall": guides/uninstall.md + - "Backup and Restore": guides/backup-restore.md - "Examples": - "EAM Migration": examples/eam-migration.md - "Mirror Db2 Images": examples/mirror-db2.md @@ -31,6 +32,8 @@ nav: - "provision-roks": commands/provision-roks.md - "provision-rosa": commands/provision-rosa.md - "configtool-oidc": commands/configtool-oidc.md + - "backup": commands/backup.md + - "restore": commands/restore.md - "Operator Catalogs": - "Overview": catalogs/index.md - "Mar 05 2026": catalogs/v9-260305-amd64.md diff --git a/python/src/mas-cli b/python/src/mas-cli index d0f6c6f3910..c06729c937a 100644 --- a/python/src/mas-cli +++ b/python/src/mas-cli @@ -20,6 +20,8 @@ from mas.cli.aiservice.upgrade.app import AiServiceUpgradeApp from mas.cli.update.app import UpdateApp from mas.cli.upgrade.app import UpgradeApp from mas.cli.uninstall.app import UninstallApp +from mas.cli.backup.app import BackupApp +from mas.cli.restore.app import RestoreApp from mas.cli.mirror.app import MirrorApp from prompt_toolkit import HTML, print_formatted_text @@ -44,6 +46,7 @@ def usage(): + " - mas-cli install      Install IBM Maximo Application Suite\n" # noqa: W503 + " - mas-cli
update Apply updates and security fixes\n" # noqa: W503 + " - mas-cli upgrade Upgrade to a new MAS release\n" # noqa: W503 + + " - mas-cli backup Backup a MAS instance\n" # noqa: W503 + " - mas-cli uninstall Remove MAS from the cluster\n" # noqa: W503 + " - mas-cli mirror Mirror container images \n" # noqa: W503 )) @@ -72,6 +75,12 @@ if __name__ == '__main__': elif function == "upgrade": app = UpgradeApp() app.upgrade(argv[2:]) + elif function == "backup": + app = BackupApp() + app.backup(argv[2:]) + elif function == "restore": + app = RestoreApp() + app.restore(argv[2:]) elif function == "mirror": app = MirrorApp() app.mirror(argv[2:]) diff --git a/python/src/mas/cli/backup/__init__.py b/python/src/mas/cli/backup/__init__.py new file mode 100644 index 00000000000..2ca23962a69 --- /dev/null +++ b/python/src/mas/cli/backup/__init__.py @@ -0,0 +1,11 @@ +# ***************************************************************************** +# Copyright (c) 2024 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +from ..cli import BaseApp # noqa: F401 diff --git a/python/src/mas/cli/backup/app.py b/python/src/mas/cli/backup/app.py new file mode 100644 index 00000000000..3b718583cdb --- /dev/null +++ b/python/src/mas/cli/backup/app.py @@ -0,0 +1,517 @@ +#!/usr/bin/env python +# ***************************************************************************** +# Copyright (c) 2024 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import logging +from datetime import datetime +from halo import Halo +from prompt_toolkit import print_formatted_text, HTML +from prompt_toolkit.completion import WordCompleter + +from openshift.dynamic.exceptions import ResourceNotFoundError + +from ..cli import BaseApp +from ..validators import InstanceIDValidator +from .argParser import backupArgParser +from mas.devops.ocp import createNamespace, getConsoleURL +from mas.devops.mas import listMasInstances, getDefaultStorageClasses, getWorkspaceId +from mas.devops.tekton import preparePipelinesNamespace, installOpenShiftPipelines, updateTektonDefinitions, launchBackupPipeline + + +logger = logging.getLogger(__name__) + + +class BackupApp(BaseApp): + + def backup(self, argv): + """ + Backup MAS instance + """ + self.args = backupArgParser.parse_args(args=argv) + self.noConfirm = self.args.no_confirm + self.devMode = self.args.dev_mode + self.interactive_mode = True + + if self.args.skip_pre_check: + self.setParam("skip_pre_check", "true") + + if self.args.mas_instance_id: + # Non-interactive mode + self.interactive_mode = False + logger.debug("MAS Instance ID is set, so we assume already connected to the desired OCP") + requiredParams = ["mas_instance_id"] + optionalParams = [ + "backup_version", + "backup_storage_size", + "clean_backup", + "include_sls", + "mongodb_namespace", + "mongodb_instance_name", + "mongodb_provider", + "sls_namespace", + "cert_manager_provider", + "skip_pre_check", + "dev_mode", + # Dev Mode + "artifactory_username", + "artifactory_token", + # Upload Configuration + "upload_backup", + "aws_access_key_id", + "aws_secret_access_key", + "s3_bucket_name", + "s3_region", + "s3_endpoint_url", 
+ "artifactory_url", + "artifactory_repository", + # Manage App Backup + "manage_workspace_id", + "backup_manage_app", + "backup_manage_db", + "manage_db2_namespace", + "manage_db2_instance_name", + "manage_db2_backup_type", + "manage_db2_backup_vendor" + ] + for key, value in vars(self.args).items(): + # These fields we just pass straight through to the parameters and fail if they are not set + if key in requiredParams: + if value is None: + self.fatalError(f"{key} must be set") + self.setParam(key, value) + + # These fields we just pass straight through to the parameters + elif key in optionalParams: + if value is not None: + self.setParam(key, value) + + # Arguments that we don't need to do anything with + elif key in ["no_confirm", "help", "upload_destination"]: + pass + + # Fail if there's any arguments we don't know how to handle + else: + print(f"Unknown option: {key} {value}") + self.fatalError(f"Unknown option: {key} {value}") + else: + # Interactive mode + self.interactive_mode = True + self.printH1("Set Target OpenShift Cluster") + # Connect to the target cluster + self.connect() + + if self.dynamicClient is None: + self.fatalError("The Kubernetes dynamic Client is not available. See log file for details") + + # Perform a check whether the cluster is set up for airgap install + self.isAirgap() + + # Review MAS instances + isMasInstalled = self.reviewMASInstance() + if not isMasInstalled: + self.fatalError("No MAS instances were detected on the cluster => nothing to backup! 
See log file for details") + + # If instance ID not provided, prompt for it + if self.interactive_mode: + + if self.args.mas_instance_id is None: + self.promptForInstanceId() + + # Prompt for backup storage size if not provided + if self.args.backup_storage_size is None: + self.promptForBackupStorageSize() + + # Prompt for backup version if not provided + if self.args.backup_version is None: + self.promptForBackupVersion() + + # Prompt for SLS configuration + self.promptForSLSConfiguration() + + # Prompt for MongoDB configuration + self.promptForMongoDBConfiguration() + + # Prompt for Manage app backup + self.promptForManageAppBackup() + + self.promptForUploadConfiguration() + # Prompt for clean backup option if not provided + self.promptForCleanBackup() + + # Set default values for optional parameters if not provided + self.setDefaultParams() + + print() + + self.printH1("Review Settings") + self.printDescription([ + "Connected to:", + f" - {getConsoleURL(self.dynamicClient)}" + ]) + + self.printH2("MAS Instance") + self.printSummary("Instance ID", self.getParam("mas_instance_id")) + + self.printH2("Backup Configuration") + self.printSummary("Backup Directory", "/workspace/backups (hardcoded)") + self.printSummary("Config Directory", "/workspace/backups/configs (hardcoded)") + self.printSummary("Backup Storage Size", self.getParam("backup_storage_size")) + self.printSummary("Backup Version", self.getParam("backup_version")) + self.printSummary("Clean Workspaces After Completion", self.getParam("clean_backup") if self.getParam("clean_backup") else "true") + + self.printH2("Components") + self.printSummary("Include SLS", self.getParam("include_sls") if self.getParam("include_sls") else "true") + self.printSummary("MongoDB Namespace", self.getParam("mongodb_namespace") if self.getParam("mongodb_namespace") else "mongoce") + self.printSummary("SLS Namespace", self.getParam("sls_namespace") if self.getParam("sls_namespace") else "ibm-sls") + + if 
self.getParam("backup_manage_app") == "true": + self.printH2("Manage Application Backup") + self.printSummary("Backup Manage App", "Yes") + self.printSummary("Workspace ID", self.getParam("manage_workspace_id")) + self.printSummary("Backup Manage incluster Db2 Database", "Yes" if self.getParam("backup_manage_db") == "true" else "No") + if self.getParam("backup_manage_db") == "true": + self.printSummary("Db2 Namespace", self.getParam("manage_db2_namespace")) + self.printSummary("Db2 Instance Name", self.getParam("manage_db2_instance_name")) + self.printSummary("Db2 Backup Type", self.getParam("manage_db2_backup_type")) + + continueWithBackup = True + if not self.noConfirm: + print() + self.printDescription([ + "Please carefully review your choices above, correcting mistakes now is much easier than after the backup has begun" + ]) + continueWithBackup = self.yesOrNo("Proceed with these settings") + + # Prepare the namespace and launch the backup pipeline + if self.noConfirm or continueWithBackup: + self.createTektonFileWithDigest() + + self.printH1("Launch Backup") + instanceId = self.getParam("mas_instance_id") + pipelinesNamespace = f"mas-{instanceId}-pipelines" + + # Determine storage class and access mode for pipeline PVCs + defaultStorageClasses = getDefaultStorageClasses(self.dynamicClient) + if self.isSNO() or defaultStorageClasses.rwx == "none": + self.pipelineStorageClass = defaultStorageClasses.rwo + self.pipelineStorageAccessMode = "ReadWriteOnce" + else: + self.pipelineStorageClass = defaultStorageClasses.rwx + self.pipelineStorageAccessMode = "ReadWriteMany" + + with Halo(text='Validating OpenShift Pipelines installation', spinner=self.spinner) as h: + if installOpenShiftPipelines(self.dynamicClient): + h.stop_and_persist(symbol=self.successIcon, text="OpenShift Pipelines Operator is installed and ready to use") + else: + h.stop_and_persist(symbol=self.failureIcon, text="OpenShift Pipelines Operator installation failed") + self.fatalError("Installation 
failed") + + with Halo(text=f'Preparing namespace ({pipelinesNamespace})', spinner=self.spinner) as h: + createNamespace(self.dynamicClient, pipelinesNamespace) + backupStorageSize = self.getParam("backup_storage_size") if self.getParam("backup_storage_size") else "20Gi" + preparePipelinesNamespace( + dynClient=self.dynamicClient, + instanceId=instanceId, + storageClass=self.pipelineStorageClass, + accessMode=self.pipelineStorageAccessMode, + createConfigPVC=False, + createBackupPVC=True, + backupStorageSize=backupStorageSize + ) + h.stop_and_persist(symbol=self.successIcon, text=f"Namespace is ready ({pipelinesNamespace})") + + with Halo(text=f'Installing latest Tekton definitions (v{self.version})', spinner=self.spinner) as h: + updateTektonDefinitions(pipelinesNamespace, self.tektonDefsPath) + h.stop_and_persist(symbol=self.successIcon, text=f"Latest Tekton definitions are installed (v{self.version})") + + with Halo(text="Submitting PipelineRun for MAS backup", spinner=self.spinner) as h: + pipelineURL = launchBackupPipeline(dynClient=self.dynamicClient, params=self.params) + if pipelineURL is not None: + h.stop_and_persist(symbol=self.successIcon, text="PipelineRun for MAS backup submitted") + print_formatted_text(HTML(f"\nView progress:\n {pipelineURL}\n")) + else: + h.stop_and_persist(symbol=self.failureIcon, text="Failed to submit PipelineRun for MAS backup, see log file for details") + print() + + def reviewMASInstance(self) -> bool: + self.printH1("Review MAS Instances") + try: + instances = listMasInstances(self.dynamicClient) + self.printDescription(["The following MAS instances are installed on the target cluster:"]) + for instance in instances: + self.printDescription([f"- {instance['metadata']['name']} v{instance['status']['versions']['reconciled']}"]) + return True + except ResourceNotFoundError: + self.printDescription(["No MAS instances were detected on the cluster (Suite.core.mas.ibm.com/v1 API is not available)"]) + return False + + def 
promptForInstanceId(self) -> None: + self.printH1("Select MAS Instance") + try: + instances = listMasInstances(self.dynamicClient) + if len(instances) == 0: + self.fatalError("No MAS instances found on the cluster") + elif len(instances) == 1: + instanceId = instances[0]['metadata']['name'] + self.setParam("mas_instance_id", instanceId) + self.printDescription([f"Using MAS instance: {instanceId}"]) + else: + instanceOptions = [] + for instance in instances: + self.printDescription([f"- {instance['metadata']['name']} v{instance['status']['versions']['reconciled']}"]) + instanceOptions.append(instance['metadata']['name']) + + instanceCompleter = WordCompleter(instanceOptions) + print() + instanceId = self.promptForString("MAS instance ID", completer=instanceCompleter, validator=InstanceIDValidator()) + self.setParam("mas_instance_id", instanceId) + + except ResourceNotFoundError: + self.fatalError("Unable to list MAS instances") + + def promptForBackupStorageSize(self) -> None: + self.printH1("Backup Storage Configuration") + storageSize = self.promptForString("Enter backup PVC storage size", default="20Gi") + self.setParam("backup_storage_size", storageSize) + + def promptForBackupVersion(self) -> None: + self.printH1("Backup Version Configuration") + self.printDescription([ + "Please Read:", + "When you use custom backup version, You are responsible to ensure you choose the right one", + "and any mistake may result in loss of prior backups with same versions because they will be overwritten.", + ]) + useAutoGenerated = self.yesOrNo("Use autogenerated backup_version based on timestamp") + + if useAutoGenerated: + # Auto-generate timestamp + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + self.setParam("backup_version", timestamp) + self.printDescription([f"Using autogenerated backup version: {timestamp}"]) + else: + # Prompt user to enter custom backup version + backupVersion = self.promptForString("Set the backup version to use for this backup") + 
self.setParam("backup_version", backupVersion) + + def promptForCleanBackup(self) -> None: + self.printH1("Backup Cleanup Configuration") + self.printDescription([ + "After the backup completes, the backup and config workspaces can be cleaned to free up space.", + "This is recommended unless you need to inspect the workspace contents for troubleshooting." + ]) + cleanBackup = self.yesOrNo("Clean backup and config workspaces after completion") + + if cleanBackup: + self.setParam("clean_backup", "true") + else: + self.setParam("clean_backup", "false") + + def promptForSLSConfiguration(self) -> None: + """Prompt user for SLS (Suite License Service) configuration""" + self.printH1("SLS Configuration") + self.printDescription([ + "Suite License Service (SLS) can be included in the backup.", + "If included, you will need to specify the SLS namespace." + ]) + + includeSLS = self.yesOrNo("Include SLS in backup") + + if includeSLS: + self.setParam("include_sls", "true") + + # Prompt for SLS namespace + slsNamespace = self.promptForString("SLS Namespace", default="ibm-sls") + self.setParam("sls_namespace", slsNamespace) + else: + self.setParam("include_sls", "false") + + def promptForMongoDBConfiguration(self) -> None: + """Prompt user for MongoDB configuration""" + self.printH1("MongoDB Configuration") + self.printDescription([ + "Configure MongoDB settings for the backup.", + "These settings specify where MongoDB is deployed and how to access it." 
+ ]) + + # Prompt for MongoDB namespace + mongoNamespace = self.promptForString("MongoDB Namespace", default="mongoce") + self.setParam("mongodb_namespace", mongoNamespace) + + # Prompt for MongoDB instance name + mongoInstanceName = self.promptForString("MongoDB Instance Name", default="mas-mongo-ce") + self.setParam("mongodb_instance_name", mongoInstanceName) + + def setDefaultParams(self) -> None: + """Set default values for optional parameters if not already set""" + if not self.getParam("mongodb_namespace"): + self.setParam("mongodb_namespace", "mongoce") + if not self.getParam("mongodb_instance_name"): + self.setParam("mongodb_instance_name", "mas-mongo-ce") + if not self.getParam("mongodb_provider"): + self.setParam("mongodb_provider", "community") + if not self.getParam("sls_namespace"): + self.setParam("sls_namespace", "ibm-sls") + if not self.getParam("cert_manager_provider"): + self.setParam("cert_manager_provider", "redhat") + if not self.getParam("include_sls"): + self.setParam("include_sls", "true") + if not self.getParam("backup_storage_size"): + self.setParam("backup_storage_size", "20Gi") + if not self.getParam("backup_version"): + # Auto-generate timestamp + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + self.setParam("backup_version", timestamp) + if not self.getParam("clean_backup"): + self.setParam("clean_backup", "true") + + def promptForUploadConfiguration(self) -> None: + """Prompt user for backup upload configuration""" + self.printH1("Backup Upload Configuration") + + # Ask if user wants to upload the backup + uploadBackup = self.yesOrNo("Do you want to upload the backup archive after completion") + + if uploadBackup: + self.setParam("upload_backup", "true") + + self.printDescription([ + "Development mode is enabled. Choose upload destination:", + " 1. S3", + " 2. 
Artifactory", + ]) + uploadDestination = self.promptForListSelect( + "Select upload destination", + ["S3", "Artifactory"], + "upload_destination", + default=1 + ) + + if uploadDestination == "S3": + # Prompt for S3 credentials + self.printH2("S3 Configuration") + awsAccessKeyId = self.promptForString("S3 Access Key ID") + self.setParam("aws_access_key_id", awsAccessKeyId) + + awsSecretAccessKey = self.promptForString("S3 Secret Access Key", isPassword=True) + self.setParam("aws_secret_access_key", awsSecretAccessKey) + + s3BucketName = self.promptForString("S3 Bucket Name") + self.setParam("s3_bucket_name", s3BucketName) + + s3Region = self.promptForString("S3 Region", default="us-east-1") + self.setParam("s3_region", s3Region) + + s3EndpointUrl = self.promptForString("S3 Endpoint URL(can be empty for AWS)", default="") + self.setParam("s3_endpoint_url", s3EndpointUrl) + else: + # Prompt for Artifactory credentials + self.printH2("Artifactory Configuration") + + # Check if artifactory credentials are already set from dev mode + if not self.getParam("artifactory_username"): + artifactoryUsername = self.promptForString("Artifactory Username") + self.setParam("artifactory_username", artifactoryUsername) + + if not self.getParam("artifactory_token"): + artifactoryToken = self.promptForString("Artifactory Token", isPassword=True) + self.setParam("artifactory_token", artifactoryToken) + + artifactoryUrl = self.promptForString("Artifactory URL") + self.setParam("artifactory_url", artifactoryUrl) + + artifactoryRepository = self.promptForString("Artifactory Repository") + self.setParam("artifactory_repository", artifactoryRepository) + else: + self.setParam("upload_backup", "false") + + def promptForManageAppBackup(self) -> None: + """Prompt user for Manage application backup configuration""" + self.printH1("Manage Application Backup") + self.printDescription([ + "In addition to backing up the MAS Suite, you can also backup the Manage application.", + "This includes the 
Manage namespace resources and persistent volume data." + ]) + + backupManageApp = self.yesOrNo("Do you want to backup the Manage application") + + if backupManageApp: + self.setParam("backup_manage_app", "true") + + # Get workspace ID - try to auto-detect first + try: + instanceId = self.getParam("mas_instance_id") + workspaceId = getWorkspaceId(self.dynamicClient, instanceId) + if workspaceId: + self.printDescription([f"Detected Manage workspace: {workspaceId}"]) + useDetected = self.yesOrNo("Use this workspace") + if useDetected: + self.setParam("manage_workspace_id", workspaceId) + else: + workspaceId = self.promptForString("Enter Manage workspace ID") + self.setParam("manage_workspace_id", workspaceId) + else: + workspaceId = self.promptForString("Enter Manage workspace ID") + self.setParam("manage_workspace_id", workspaceId) + except Exception: + workspaceId = self.promptForString("Enter Manage workspace ID") + self.setParam("manage_workspace_id", workspaceId) + + # Ask about DB2 backup + self.printH2("Manage Database Backup") + self.printDescription([ + "The Manage application uses a Db2 database that should also be backed up.", + "This will backup the incluster Db2 database associated with the Manage workspace." 
+ ]) + backupDb2 = self.yesOrNo("Do you want to backup the Manage database (Db2)") + + if backupDb2: + self.setParam("backup_manage_db", "true") + self.promptForDb2BackupConfiguration("manage") + else: + self.setParam("backup_manage_db", "false") + else: + self.setParam("backup_manage_app", "false") + self.setParam("backup_manage_db", "false") + + def promptForDb2BackupConfiguration(self, appId: str) -> None: + """Prompt user for Db2 backup configuration - reusable for any app that uses Db2 + + Args: + appId: The application ID (e.g., 'manage', 'facilities') used to prefix parameter names + """ + self.printH2("Db2 Configuration") + + # DB2 namespace + db2Namespace = self.promptForString("Enter Db2 namespace", default="db2u") + self.setParam(f"{appId}_db2_namespace", db2Namespace) + + # DB2 instance name + instanceId = self.getParam("mas_instance_id") + workspaceID = self.getParam(f"{appId}_workspace_id") + db2InstanceName = self.promptForString("Enter Db2 instance name", default=f"mas-{instanceId}-{workspaceID}-{appId}") + self.setParam(f"{appId}_db2_instance_name", db2InstanceName) + + # Backup type + self.printDescription([ + "Db2 backup can be performed online (database remains available) or offline (database unavailable during backup).", + "Note: If your Db2 instance uses circular logging (default), you must use offline backup.", + "Backup Types:", + " 1. offline", + " 2. 
online", + ]) + self.promptForListSelect( + message="Select backup type", + options=["offline", "online"], + param=f"{appId}_db2_backup_type", + default=1 + ) + + # Always set to disk for pipeline as s3 upload is handled for the whole pipeline + self.setParam(f"{appId}_db2_backup_vendor", "disk") diff --git a/python/src/mas/cli/backup/argParser.py b/python/src/mas/cli/backup/argParser.py new file mode 100644 index 00000000000..f1400608603 --- /dev/null +++ b/python/src/mas/cli/backup/argParser.py @@ -0,0 +1,260 @@ +# ***************************************************************************** +# Copyright (c) 2024 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import argparse + +from .. import __version__ as packageVersion +from ..cli import getHelpFormatter + +backupArgParser = argparse.ArgumentParser( + prog='mas backup', + description="\n".join([ + f"IBM Maximo Application Suite Admin CLI v{packageVersion}", + "Backup a MAS instance by configuring and launching the MAS Backup Tekton Pipeline.\n", + "Interactive Mode:", + "Omitting the --instance-id option will trigger an interactive prompt" + ]), + epilog="Refer to the online documentation for more information: https://ibm-mas.github.io/cli/", + formatter_class=getHelpFormatter(), + add_help=False +) + +masArgGroup = backupArgParser.add_argument_group( + 'MAS Instance', + 'Specify the MAS instance to backup.' +) +masArgGroup.add_argument( + '-i', '--instance-id', + dest='mas_instance_id', + required=False, + help="MAS Instance ID to backup" +) + +backupArgGroup = backupArgParser.add_argument_group( + 'Backup Configuration', + 'Configure backup version and storage size.' 
+) +backupArgGroup.add_argument( + '--backup-version', + required=False, + help="Version/timestamp for the backup (auto-generated if not provided)" +) +backupArgGroup.add_argument( + '--backup-storage-size', + required=False, + help="Size of the backup PVC storage (default: 20Gi)" +) +backupArgGroup.add_argument( + '--clean-backup', + dest='clean_backup', + required=False, + action="store_const", + const="true", + default="true", + help="Clean backup and config workspaces after completion (default: true)" +) +backupArgGroup.add_argument( + '--no-clean-backup', + dest='clean_backup', + required=False, + action="store_const", + const="false", + help="Do not clean backup and config workspaces after completion" +) + +uploadArgGroup = backupArgParser.add_argument_group( + 'Upload Configuration', + 'Configure backup archive upload to S3 or Artifactory.' +) +uploadArgGroup.add_argument( + '--upload-backup', + required=False, + action='store_true', + default=False, + help="Upload the backup archive after completion" +) +uploadArgGroup.add_argument( + '--aws-access-key-id', + required=False, + help="AWS Access Key ID for S3 upload" +) +uploadArgGroup.add_argument( + '--aws-secret-access-key', + required=False, + help="AWS Secret Access Key for S3 upload" +) +uploadArgGroup.add_argument( + '--s3-bucket-name', + required=False, + help="S3 bucket name for backup upload" +) +uploadArgGroup.add_argument( + '--s3-region', + required=False, + help="Region for S3 bucket" +) +uploadArgGroup.add_argument( + '--s3-endpoint-url', + required=False, + help="Endpoint url for S3 bucket" +) +uploadArgGroup.add_argument( + '--artifactory-url', + required=False, + help="Artifactory URL for backup upload" +) +uploadArgGroup.add_argument( + '--artifactory-repository', + required=False, + help="Artifactory repository for backup upload" +) + +manageAppArgGroup = backupArgParser.add_argument_group( + 'Manage Application Backup', + 'Configure backup of the Manage application and its database.' 
+) +manageAppArgGroup.add_argument( + '--backup-manage-app', + dest='backup_manage_app', + required=False, + action="store_const", + const="true", + help="Backup the Manage application" +) +manageAppArgGroup.add_argument( + '--manage-workspace-id', + dest='manage_workspace_id', + required=False, + help="Manage workspace ID" +) +manageAppArgGroup.add_argument( + '--backup-manage-db', + dest='backup_manage_db', + required=False, + action="store_const", + const="true", + help="Backup the Manage application database (Db2)" +) +manageAppArgGroup.add_argument( + '--manage-db2-namespace', + dest='manage_db2_namespace', + required=False, + help="Manage Db2 namespace (default: db2u)" +) +manageAppArgGroup.add_argument( + '--manage-db2-instance-name', + dest='manage_db2_instance_name', + required=False, + help="Manage Db2 instance name" +) +manageAppArgGroup.add_argument( + '--manage-db2-backup-type', + dest='manage_db2_backup_type', + required=False, + choices=["offline", "online"], + help="Manage Db2 backup type: offline (database unavailable) or online (database remains available)" +) + +componentsArgGroup = backupArgParser.add_argument_group( + 'Components', + 'Configure which components to include in the backup.' +) +componentsArgGroup.add_argument( + '--include-sls', + required=False, + action="store_const", + const="true", + default="true", + help="Include SLS in backup (default: true)" +) +componentsArgGroup.add_argument( + '--exclude-sls', + dest='include_sls', + required=False, + action="store_const", + const="false", + help="Exclude SLS from backup (use if SLS is external)" +) + +depsArgGroup = backupArgParser.add_argument_group( + 'Dependencies Configuration', + 'Configure MongoDB, SLS, and Certificate Manager settings.' 
+) +depsArgGroup.add_argument( + '--mongodb-namespace', + required=False, + help="MongoDB namespace (default: mongoce)" +) +depsArgGroup.add_argument( + '--mongodb-instance-name', + required=False, + help="MongoDB instance name (default: mas-mongo-ce)" +) +depsArgGroup.add_argument( + '--mongodb-provider', + required=False, + choices=["community"], + help="MongoDB provider (only community is supported for backup)" +) +depsArgGroup.add_argument( + '--sls-namespace', + required=False, + help="SLS namespace (default: ibm-sls)" +) +depsArgGroup.add_argument( + '--cert-manager-provider', + required=False, + choices=["redhat", "ibm"], + help="Certificate manager provider (default: redhat)" +) + +# More Options +# ----------------------------------------------------------------------------- +otherArgGroup = backupArgParser.add_argument_group( + 'More', + 'Additional options including development mode, Artifactory credentials, and confirmation prompts.' +) +otherArgGroup.add_argument( + "--artifactory-username", + required=False, + help="Username for access to development builds on Artifactory" +) +otherArgGroup.add_argument( + "--artifactory-token", + required=False, + help="API Token for access to development builds on Artifactory" +) +otherArgGroup.add_argument( + "--dev-mode", + required=False, + action="store_true", + default=False, + help="Configure backup for development mode" +) +otherArgGroup.add_argument( + '--no-confirm', + required=False, + action='store_true', + default=False, + help="Launch the backup without prompting for confirmation" +) +otherArgGroup.add_argument( + '--skip-pre-check', + required=False, + action='store_true', + default=False, + help="Skips the 'pre-backup-check' task in the backup pipeline" +) +otherArgGroup.add_argument( + '-h', "--help", + action='help', + default=False, + help="Show this help message and exit" +) diff --git a/python/src/mas/cli/restore/__init__.py b/python/src/mas/cli/restore/__init__.py new file mode 100644 index 
00000000000..85df29608e9 --- /dev/null +++ b/python/src/mas/cli/restore/__init__.py @@ -0,0 +1,11 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +from ..cli import BaseApp # noqa: F401 diff --git a/python/src/mas/cli/restore/app.py b/python/src/mas/cli/restore/app.py new file mode 100644 index 00000000000..fc2e6929559 --- /dev/null +++ b/python/src/mas/cli/restore/app.py @@ -0,0 +1,651 @@ +#!/usr/bin/env python +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import logging +from os import path +from base64 import b64encode +from glob import glob +from halo import Halo +from prompt_toolkit import prompt, print_formatted_text, HTML + +from ..cli import BaseApp +from ..validators import InstanceIDFormatValidator, FileExistsValidator, StorageClassValidator +from .argParser import restoreArgParser +from mas.devops.ocp import createNamespace, getConsoleURL, getStorageClasses +from mas.devops.mas import getDefaultStorageClasses +from mas.devops.tekton import preparePipelinesNamespace, installOpenShiftPipelines, updateTektonDefinitions, launchRestorePipeline, prepareRestoreSecrets + +logger = logging.getLogger(__name__) + + +class RestoreApp(BaseApp): + + def restore(self, argv): + """ + Restore MAS instance + """ + self.args = restoreArgParser.parse_args(args=argv) + self.noConfirm = self.args.no_confirm + self.devMode = self.args.dev_mode + self.interactive_mode = True + + if self.args.skip_pre_check: + self.setParam("skip_pre_check", "true") + + if self.args.mas_instance_id: + # Non-interactive mode + self.interactive_mode = False + logger.debug("MAS Instance ID is set, so we assume already connected to the desired OCP") + requiredParams = ["mas_instance_id", "restore_version"] + optionalParams = [ + "mas_domain_on_restore", + "include_slscfg_from_backup", + "include_drocfg_from_backup", + "sls_url_on_restore", + "sls_cfg_file", + "dro_url_on_restore", + "dro_cfg_file", + "backup_storage_size", + "clean_backup", + "include_sls", + "include_grafana", + "include_dro", + "skip_pre_check", + "dev_mode", + # SLS + "sls_domain", + # DRO Configuration + "dro_namespace", + "dro_contact_email", + "dro_contact_firstname", + 
"dro_contact_lastname", + "dro_contact_company_name", + "ibm_entitlement_key", + # Dev Mode + "artifactory_username", + "artifactory_token", + # Download Configuration + "download_backup", + "backup_archive_name", + "aws_access_key_id", + "aws_secret_access_key", + "s3_bucket_name", + "s3_region", + "s3_endpoint_url", + "artifactory_url", + "artifactory_repository", + # Manage App Restore + "restore_manage_app", + "restore_manage_db", + "manage_app_override_storageclass", + "manage_app_storage_class_rwx", + "manage_app_storage_class_rwo", + "manage_db_override_storageclass", + "manage_db_storage_class_rwx", + "manage_db_storage_class_rwo", + # MongoDB Storage Class Override + "override_mongodb_storageclass", + "mongodb_storageclass_name" + ] + for key, value in vars(self.args).items(): + # These fields we just pass straight through to the parameters and fail if they are not set + if key in requiredParams: + if value is None: + self.fatalError(f"{key} must be set") + self.setParam(key, value) + + # These fields we just pass straight through to the parameters + elif key in optionalParams: + if value is not None: + self.setParam(key, value) + + # Arguments that we don't need to do anything with + elif key in ["no_confirm", "help", "download_destination"]: + pass + + # Fail if there's any arguments we don't know how to handle + else: + print(f"Unknown option: {key} {value}") + self.fatalError(f"Unknown option: {key} {value}") + else: + # Interactive mode + self.interactive_mode = True + self.printH1("Set Target OpenShift Cluster") + # Connect to the target cluster + self.connect() + + if self.dynamicClient is None: + self.fatalError("The Kubernetes dynamic Client is not available. 
See log file for details") + + # Perform a check whether the cluster is set up for airgap install + self.isAirgap() + + # If instance ID not provided, prompt for it + if self.interactive_mode: + + if self.args.mas_instance_id is None: + self.promptForInstanceId() + + # Prompt for backup version if not provided + if self.args.restore_version is None: + self.promptForBackupVersion() + + # Prompt for backup class override + self.configStorageClasses() + + # Prompt for Grafana install + self.promptForIncludeGrafana() + + # Prompt for SLS install + self.promptForIncludeSLS() + + # Prompt for DRO install + self.promptForIncludeDRO() + + if self.args.mas_domain_on_restore is None: + self.promptForMASConfiguration() + + self.promptForSLSConfiguration() + + self.promptForDROConfiguration() + + # Prompt for Manage app restore + self.promptForManageAppRestore() + + # Prompt for backup storage size if not provided + if self.args.backup_storage_size is None: + self.promptForBackupStorageSize() + + self.promptForDownloadConfiguration() + + # Set default values for optional parameters if not provided + self.setDefaultParams() + self.setNonInteractiveDROParams() + + print() + + self.printH1("Review Settings") + self.printDescription([ + "Connected to:", + f" - {getConsoleURL(self.dynamicClient)}" + ]) + + self.printH2("MAS Instance Configuration") + self.printSummary("Instance ID", self.getParam("mas_instance_id")) + if self.getParam("mas_domain_on_restore") is not None and self.getParam("mas_domain_on_restore") != "": + self.printSummary("Suite Domain", self.getParam("mas_domain_on_restore")) + if self.getParam("sls_url_on_restore") is not None and self.getParam("sls_url_on_restore") != "": + self.printSummary("SLS URL", self.getParam("sls_url_on_restore")) + if self.getParam("sls_cfg_file") is not None and self.getParam("sls_cfg_file") != "": + self.printSummary("Custom SLS Config File", self.getParam("sls_cfg_file")) + if self.getParam("dro_url_on_restore") is not None and 
self.getParam("dro_url_on_restore") != "": + self.printSummary("DRO URL", self.getParam("dro_url_on_restore")) + if self.getParam("dro_cfg_file") is not None and self.getParam("dro_cfg_file") != "": + self.printSummary("Custom DRO Config File", self.getParam("dro_cfg_file")) + + self.printH2("Restore Configuration") + self.printSummary("Backup Directory", "/workspace/backups (hardcoded)") + self.printSummary("Config Directory", "/workspace/backups/configs (hardcoded)") + self.printSummary("Backup Storage Size", self.getParam("backup_storage_size")) + self.printSummary("Backup Version to restore", self.getParam("restore_version")) + if self.getParam("backup_archive_name") is not None and self.getParam("backup_archive_name") != "": + self.printSummary("Backup custom archive name", self.getParam("backup_archive_name")) + + if "storage_class_rwx" in self.params and self.params["storage_class_rwx"] != "": + self.printH2("Storage Class Configuration") + self.printSummary("Storage Class for RWO", self.getParam("storage_class_rwo")) + self.printSummary("Storage Class for RWX", self.getParam("storage_class_rwx")) + + self.printH2("Components") + self.printSummary("Include Grafana", self.getParam("include_grafana") if self.getParam("include_grafana") else "true") + self.printSummary("Include SLS", self.getParam("include_sls") if self.getParam("include_sls") else "true") + self.printSummary("Include DRO", self.getParam("include_dro") if self.getParam("include_dro") else "true") + + if self.getParam("restore_manage_app") == "true": + self.printH2("Manage Application Restore") + self.printSummary("Restore Manage App", "Yes") + self.printSummary("Restore Manage incluster Db2 Database", "Yes" if self.getParam("restore_manage_db") == "true" else "No") + + if self.getParam("sls_domain") is not None and self.getParam("sls_domain") != "": + self.printH2("SLS Configuration") + self.printSummary("SLS Domain", self.getParam("sls_domain")) + + if self.getParam("include_dro") is not
None and self.getParam("include_dro") == "true": + self.printH2("DRO Configuration") + self.printSummary("DRO Namespace", self.getParam("dro_namespace")) + self.printSummary("Contact Email", self.getParam("dro_contact_email")) + self.printSummary("Contact First Name", self.getParam("dro_contact_firstname")) + self.printSummary("Contact Last Name", self.getParam("dro_contact_lastname")) + + continueWithRestore = True + if not self.noConfirm: + print() + self.printDescription([ + "Please carefully review your choices above, correcting mistakes now is much easier than after the restore has begun" + ]) + continueWithRestore = self.yesOrNo("Proceed with these settings") + + # Prepare the namespace and launch the restore pipeline + if self.noConfirm or continueWithRestore: + self.createTektonFileWithDigest() + + # Create secrets for config files if provided + self.createConfigSecrets() + + self.printH1("Launch Restore") + instanceId = self.getParam("mas_instance_id") + pipelinesNamespace = f"mas-{instanceId}-pipelines" + + # Determine storage class and access mode for pipeline PVCs + defaultStorageClasses = getDefaultStorageClasses(self.dynamicClient) + if self.isSNO() or defaultStorageClasses.rwx == "none": + self.pipelineStorageClass = defaultStorageClasses.rwo + self.pipelineStorageAccessMode = "ReadWriteOnce" + else: + self.pipelineStorageClass = defaultStorageClasses.rwx + self.pipelineStorageAccessMode = "ReadWriteMany" + + with Halo(text='Validating OpenShift Pipelines installation', spinner=self.spinner) as h: + if installOpenShiftPipelines(self.dynamicClient): + h.stop_and_persist(symbol=self.successIcon, text="OpenShift Pipelines Operator is installed and ready to use") + else: + h.stop_and_persist(symbol=self.failureIcon, text="OpenShift Pipelines Operator installation failed") + self.fatalError("Installation failed") + + with Halo(text=f'Preparing namespace ({pipelinesNamespace})', spinner=self.spinner) as h: + createNamespace(self.dynamicClient, 
pipelinesNamespace) + backupStorageSize = self.getParam("backup_storage_size") if self.getParam("backup_storage_size") else "20Gi" + preparePipelinesNamespace( + dynClient=self.dynamicClient, + instanceId=instanceId, + storageClass=self.pipelineStorageClass, + accessMode=self.pipelineStorageAccessMode, + createConfigPVC=False, + createBackupPVC=True, + backupStorageSize=backupStorageSize + ) + + # Apply config file secrets to the namespace + prepareRestoreSecrets(dynClient=self.dynamicClient, namespace=pipelinesNamespace, restoreConfigs=self.configSecret) + + h.stop_and_persist(symbol=self.successIcon, text=f"Namespace is ready ({pipelinesNamespace})") + + with Halo(text=f'Installing latest Tekton definitions (v{self.version})', spinner=self.spinner) as h: + updateTektonDefinitions(pipelinesNamespace, self.tektonDefsPath) + h.stop_and_persist(symbol=self.successIcon, text=f"Latest Tekton definitions are installed (v{self.version})") + + with Halo(text="Submitting PipelineRun for MAS Restore", spinner=self.spinner) as h: + pipelineURL = launchRestorePipeline(dynClient=self.dynamicClient, params=self.params) + if pipelineURL is not None: + h.stop_and_persist(symbol=self.successIcon, text="PipelineRun for MAS restore submitted") + print_formatted_text(HTML(f"\nView progress:\n {pipelineURL}\n")) + else: + h.stop_and_persist(symbol=self.failureIcon, text="Failed to submit PipelineRun for MAS Restore, see log file for details") + print() + + def promptForInstanceId(self) -> None: + self.printH1("Enter the MAS instance ID to restore from the backup") + self.printDescription([" - Note: Use the same MAS instance ID as the backup you are restoring from."]) + self.promptForString(message="Instance ID", param="mas_instance_id", validator=InstanceIDFormatValidator()) + + def promptForMASConfiguration(self) -> None: + self.printH1("Maximo Application Suite Configuration") + changeDomain = self.yesOrNo("Would you like to change the MAS domain in the Suite CR") + if changeDomain: 
+ self.promptForString(message="MAS Domain", param="mas_domain_on_restore") + + # When include_dro is true, set the dro_cfg_file to the default mounted path and include_drocfg_from_backup to false + # this is because when DRO is installed, suite-restore will use the cfg generated by the dro install task. + def setNonInteractiveDROParams(self): + if self.getParam("include_dro") == "true": + self.setParam("dro_cfg_file", "/workspace/backups/configs/dro.yml") + self.setParam("include_drocfg_from_backup", "false") + + def promptForSLSConfiguration(self) -> None: + self.printH1("Suite-level SLS Configuration") + self.printDescription([ + "You can either choose to use SLSCfg from the backup or you can provide the path to the SLSCfg file." + ]) + # prompt user to include slscfg from backup. if yes, prompt for sls_url, if not prompt for sls_cfg_file. + includeSLSCfg = self.yesOrNo("Would you like to restore Suite-level SLSCfg from backup") + if includeSLSCfg: + self.setParam("include_slscfg_from_backup", "true") + changeSLSUrl = self.yesOrNo("Would you like to change the SLS URL in the Suite's SLSCfg CR") + if changeSLSUrl: + self.promptForString(message="SLS URL", param="sls_url_on_restore") + else: + self.setParam("sls_url_on_restore", "") + else: + self.setParam("include_slscfg_from_backup", "false") + self.promptForString(message="SLS Configuration File, must be provided when not restoring from backup", param="sls_cfg_file", validator=FileExistsValidator()) + + def promptForDROConfiguration(self) -> None: + if self.getParam("include_dro") != "true": + self.printH1("Suite-level DRO/BAS Configuration") + self.printDescription([ + "You can either choose to use BASCfg from the backup or you can provide the path to the BASCfg file." + ]) + # prompt user to include bascfg from backup. if yes, prompt for bas_url, if not prompt for dro_cfg_file. 
+ includeDROCfg = self.yesOrNo("Would you like to restore Suite-level BASCfg from backup") + if includeDROCfg: + self.setParam("include_drocfg_from_backup", "true") + changeDROUrl = self.yesOrNo("Would you like to change the DRO URL in the Suite's BASCfg CR") + if changeDROUrl: + self.promptForString(message="BAS URL", param="dro_url_on_restore") + else: + self.setParam("dro_url_on_restore", "") + else: + self.setParam("include_drocfg_from_backup", "false") + self.promptForString(message="DRO/BAS Configuration File, must be provided when not restoring from backup", param="dro_cfg_file", validator=FileExistsValidator()) + + def promptForIncludeSLS(self) -> None: + self.printH1("SLS Configuration") + self.printDescription([" - You can restore SLS instance or bring your own SLS."]) + includeSLS: bool = self.yesOrNo("Would you like to restore SLS instance from backup") + if includeSLS: + self.setParam("include_sls", "true") + # Prompt user to enter custom SLS Domain + customSLSDomain: bool = self.yesOrNo("Would you like to change SLS Domain to use in SLS instance") + if customSLSDomain: + slsDomain = self.promptForString("Enter the SLS Domain to use in License Service CR") + self.setParam("sls_domain", slsDomain) + else: + self.setParam("sls_domain", "") + else: + self.setParam("include_sls", "false") + + def promptForIncludeDRO(self) -> None: + self.printH1("IBM Data Reporting Operator Configuration") + self.printDescription([ + " - DRO is not part of backup/restore. You can install DRO instance or bring your own DRO.", + " - When you choose to install DRO, BASCfg will be autogenerated for the new DRO installation and will be automatically used in the Suite configuration." 
+ ]) + includeDRO: bool = self.yesOrNo("Would you like the pipeline to install DRO instance") + if includeDRO: + self.setParam("include_dro", "true") + self.setParam("dro_cfg_file", "/workspace/backups/configs/dro.yml") + self.setParam("include_drocfg_from_backup", "false") + self.promptForString("IBM entitlement key", "ibm_entitlement_key", isPassword=True) + self.promptForString("Contact e-mail address", "dro_contact_email") + self.promptForString("Contact first name", "dro_contact_firstname") + self.promptForString("Contact last name", "dro_contact_lastname") + self.promptForString("IBM Data Reporter Operator (DRO) Namespace", "dro_namespace", default="redhat-marketplace") + else: + self.setParam("include_dro", "false") + + def promptForIncludeGrafana(self) -> None: + self.printH1("Grafana Configuration") + self.printDescription([" - Grafana is not part of backup/restore. You can install Grafana instance or skip it."]) + includeGrafana: bool = self.yesOrNo("Would you like the pipeline to install Grafana instance") + if includeGrafana: + self.setParam("include_grafana", "true") + else: + self.setParam("include_grafana", "false") + + def promptForBackupStorageSize(self) -> None: + self.printH1("Backup Storage Configuration") + self.printDescription([ + " - Make sure to have enough storage to download the archive(s) and extract the contents.", + " - Example, if your accumulated size of backup archives is 8Gi, choose 20Gi.", + " - Note: The downloaded archive will be deleted after the contents are extracted." + ]) + storageSize = self.promptForString("Enter PVC storage size, must be bigger than backup archive size.", default="20Gi") + self.setParam("backup_storage_size", storageSize) + + def promptForBackupVersion(self) -> None: + self.printH1("Backup Version Configuration") + # Prompt user to enter custom backup version + restore_version = self.promptForString("Set the backup version to use for this restore operation. (e.g. 
20260117-191701)") + self.setParam("restore_version", restore_version) + + def setDefaultParams(self) -> None: + """Set default values for optional parameters if not already set""" + if not self.getParam("include_sls"): + self.setParam("include_sls", "true") + if not self.getParam("include_grafana"): + self.setParam("include_grafana", "true") + if not self.getParam("include_dro"): + self.setParam("include_dro", "true") + if not self.getParam("backup_storage_size"): + self.setParam("backup_storage_size", "20Gi") + if not self.getParam("include_slscfg_from_backup"): + self.setParam("include_slscfg_from_backup", "true") + if not self.getParam("include_drocfg_from_backup"): + self.setParam("include_drocfg_from_backup", "true") + if not self.getParam("clean_backup"): + self.setParam("clean_backup", "true") + + def promptForDownloadConfiguration(self) -> None: + """Prompt user for backup download configuration""" + self.printH1("Backup Download Configuration") + + # Ask if user wants to download the backup + downloadBackup = self.yesOrNo("Do you want to download the backup archive before restore") + + if downloadBackup: + self.setParam("download_backup", "true") + + self.printDescription([ + "Development mode is enabled. 
Choose download location:" + ]) + downloadDestination = self.promptForListSelect( + "Select download location", + ["S3", "Artifactory"], + "download_destination", + default=1 + ) + + if downloadDestination == "S3": + # Prompt for S3 credentials + self.printH2("S3 Configuration") + awsAccessKeyId = self.promptForString("S3 Access Key ID") + self.setParam("aws_access_key_id", awsAccessKeyId) + + awsSecretAccessKey = self.promptForString("S3 Secret Access Key", isPassword=True) + self.setParam("aws_secret_access_key", awsSecretAccessKey) + + s3BucketName = self.promptForString("S3 Bucket Name") + self.setParam("s3_bucket_name", s3BucketName) + + s3Region = self.promptForString("S3 Region", default="us-east-1") + self.setParam("s3_region", s3Region) + + s3EndpointUrl = self.promptForString("S3 Endpoint URL(can be empty for AWS)", default="") + self.setParam("s3_endpoint_url", s3EndpointUrl) + else: + # Prompt for Artifactory credentials + self.printH2("Artifactory Configuration") + + # Check if artifactory credentials are already set from dev mode + if not self.getParam("artifactory_username"): + artifactoryUsername = self.promptForString("Artifactory Username") + self.setParam("artifactory_username", artifactoryUsername) + + if not self.getParam("artifactory_token"): + artifactoryToken = self.promptForString("Artifactory Token", isPassword=True) + self.setParam("artifactory_token", artifactoryToken) + + artifactoryUrl = self.promptForString("Artifactory URL") + self.setParam("artifactory_url", artifactoryUrl) + + artifactoryRepository = self.promptForString("Artifactory Repository") + self.setParam("artifactory_repository", artifactoryRepository) + + cleanBackup = self.yesOrNo("Clean the downloaded backup files after completion") + if cleanBackup: + self.setParam("clean_backup", "true") + else: + self.setParam("clean_backup", "false") + else: + self.setParam("download_backup", "false") + + def promptForManageAppRestore(self) -> None: + """Prompt user for Manage 
application restore configuration""" + self.printH1("Manage Application Restore") + self.printDescription([ + "In addition to restoring the MAS Suite, you can also restore the Manage application.", + "This includes DB2, Manage namespace resources and persistent volume data." + ]) + + restoreManageApp = self.yesOrNo("Do you want to restore the Manage application") + + if restoreManageApp: + self.setParam("restore_manage_app", "true") + + # Ask about DB2 restore + self.printH2("Manage Database Restore") + self.printDescription([ + "- The Manage application uses a Db2 database that should also be restored.", + "- This will restore the incluster Db2 database associated with the Manage workspace.", + "- Note: This will be offline restore and the Manage application will be unavailable during the restore." + ]) + + restoreDb2 = self.yesOrNo("Do you want to restore the Manage database (Db2)") + + # Always set to disk for pipeline as s3 download is handled for the whole pipeline + self.setParam("manage_db2_restore_vendor", "disk") + if restoreDb2: + self.setParam("restore_manage_db", "true") + else: + self.setParam("restore_manage_db", "false") + else: + self.setParam("restore_manage_app", "false") + self.setParam("restore_manage_db", "false") + + def configStorageClasses(self): + self.printH1("Configure Storage Class during Restore") + self.printDescription([ + " - You can override the storage class for components during restore.", + " - This is useful when restoring to a cluster with different storage classes." 
+ ]) + overrideStorageClasses = not self.yesOrNo("Do you want to use the storage classes from backup") + + if overrideStorageClasses: + defaultStorageClasses = getDefaultStorageClasses(self.dynamicClient) + if defaultStorageClasses.provider is not None: + print_formatted_text(HTML(f"Storage provider auto-detected: {defaultStorageClasses.providerName}")) + print_formatted_text(HTML(f" - Storage class (ReadWriteOnce): {defaultStorageClasses.rwo}")) + print_formatted_text(HTML(f" - Storage class (ReadWriteMany): {defaultStorageClasses.rwx}")) + self.storageClassProvider = defaultStorageClasses.provider + self.params["storage_class_rwo"] = defaultStorageClasses.rwo + self.params["storage_class_rwx"] = defaultStorageClasses.rwx + + customSC = False + if "storage_class_rwx" in self.params and self.params["storage_class_rwx"] != "": + customSC = not self.yesOrNo("Use the auto-detected storage classes") + + if "storage_class_rwx" not in self.params or self.params["storage_class_rwx"] == "" or customSC: + self.storageClassProvider = "custom" + + self.printDescription([ + "Select the ReadWriteOnce and ReadWriteMany storage classes to use from the list below:", + "Enter 'none' for the ReadWriteMany storage class if you do not have a suitable class available in the cluster, however this will limit what can be restored" + ]) + for storageClass in getStorageClasses(self.dynamicClient): + print_formatted_text(HTML(f" - {storageClass.metadata.name}")) + + self.params["storage_class_rwo"] = prompt(HTML('ReadWriteOnce (RWO) storage class '), validator=StorageClassValidator(), validate_while_typing=False) + self.params["storage_class_rwx"] = prompt(HTML('ReadWriteMany (RWX) storage class '), validator=StorageClassValidator(), validate_while_typing=False) + + # Configure mongodb storage class override, preferable with RWO + if self.getParam("storage_class_rwo") is not None and self.getParam("storage_class_rwo") != "": + self.setParam("override_mongodb_storageclass", "true") + 
self.setParam("mongodb_storageclass_name", self.getParam("storage_class_rwo")) + + # Configure manage app storage class override + if (self.getParam("storage_class_rwo") is not None and self.getParam("storage_class_rwx") != "") and (self.getParam("storage_class_rwx") is not None and self.getParam("storage_class_rwo") != ""): + self.setParam("manage_app_override_storageclass", "true") + self.setParam("manage_app_storage_class_rwx", self.getParam("storage_class_rwx")) + self.setParam("manage_app_storage_class_rwo", self.getParam("storage_class_rwo")) + self.setParam("manage_db_override_storageclass", "true") + self.setParam("manage_db_storage_class_rwx", self.getParam("storage_class_rwx")) + self.setParam("manage_db_storage_class_rwo", self.getParam("storage_class_rwo")) + + else: + self.setParam("override_mongodb_storageclass", "false") + self.setParam("manage_app_override_storageclass", "false") + self.setParam("manage_db_override_storageclass", "false") + + def addFilesToSecret(self, secretDict: dict, configPath: str, extension: str = '', keyPrefix: str = '') -> dict: + """ + Add file (or files) to a secret + """ + filesToProcess = [] + if path.isdir(configPath): + logger.debug(f"Adding all config files in directory {configPath}") + if extension: + filesToProcess = glob(f"{configPath}/*.{extension}") + else: + filesToProcess = glob(f"{configPath}/*") + else: + logger.debug(f"Adding config file {configPath}") + filesToProcess = [configPath] + + for fileToProcess in filesToProcess: + logger.debug(f" * Processing config file {fileToProcess}") + fileName = path.basename(fileToProcess) + + # Load the file + with open(fileToProcess, 'r') as file: + data = file.read() + + # Add/update an entry to the secret data + if "data" not in secretDict: + secretDict["data"] = {} + secretDict["data"][keyPrefix + fileName] = b64encode(data.encode('ascii')).decode("ascii") + + return secretDict + + def createConfigSecrets(self) -> None: + """ + Create a single secret for SLS and DRO 
configuration files if provided + """ + self.configSecret = None + + slsCfgFile = self.getParam("sls_cfg_file") + droCfgFile = self.getParam("dro_cfg_file") + include_dro = self.getParam("include_dro") + + # Check if either config file is provided + if slsCfgFile or droCfgFile: + # Validate SLS config file exists if provided + if slsCfgFile and not path.exists(slsCfgFile): + self.fatalError(f"SLS configuration file not found: {slsCfgFile}") + + # Validate DRO config file exists if provided + if include_dro != "true" and droCfgFile and not path.exists(droCfgFile): + self.fatalError(f"DRO configuration file not found: {droCfgFile}") + + # Create a single secret for both config files + configSecret = { + "apiVersion": "v1", + "kind": "Secret", + "type": "Opaque", + "metadata": { + "name": "pipeline-restore-configs" + } + } + + # Add SLS config file to secret if provided + if slsCfgFile: + configSecret = self.addFilesToSecret(configSecret, slsCfgFile, '') + # Update the param to point to the mounted file path + self.setParam("sls_cfg_file", f"/workspace/restore/{path.basename(slsCfgFile)}") + logger.debug(f"Added SLS config file to secret: {path.basename(slsCfgFile)}") + + # Add DRO config file to secret if provided + if include_dro != "true" and droCfgFile: + configSecret = self.addFilesToSecret(configSecret, droCfgFile, '') + # Update the param to point to the mounted file path + self.setParam("dro_cfg_file", f"/workspace/restore/{path.basename(droCfgFile)}") + logger.debug(f"Added DRO config file to secret: {path.basename(droCfgFile)}") + + self.configSecret = configSecret diff --git a/python/src/mas/cli/restore/argParser.py b/python/src/mas/cli/restore/argParser.py new file mode 100644 index 00000000000..ca45d72616e --- /dev/null +++ b/python/src/mas/cli/restore/argParser.py @@ -0,0 +1,431 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import argparse + +from .. import __version__ as packageVersion +from ..cli import getHelpFormatter + +restoreArgParser = argparse.ArgumentParser( + prog='mas restore', + description="\n".join([ + f"IBM Maximo Application Suite Admin CLI v{packageVersion}", + "Restore a MAS instance from backup by configuring and launching the MAS Restore Tekton Pipeline.\n", + "Interactive Mode:", + "Omitting the --instance-id option will trigger an interactive prompt" + ]), + epilog="Refer to the online documentation for more information: https://ibm-mas.github.io/cli/", + formatter_class=getHelpFormatter(), + add_help=False +) + +masArgGroup = restoreArgParser.add_argument_group( + 'MAS Instance', + 'Specify the MAS instance to restore.' +) +masArgGroup.add_argument( + '-i', '--instance-id', + dest='mas_instance_id', + required=False, + help="MAS Instance ID to restore, must match the instance ID of the backup." +) + +masArgGroup.add_argument( + '--mas-domain-restore', + dest='mas_domain_on_restore', + required=False, + help="MAS Domain to restore. If not specified, the domain will be taken from the backup." +) + +masArgGroup.add_argument( + '--sls-url-restore', + dest='sls_url_on_restore', + required=False, + help="SLS URL to restore in Suite configuration. If not specified, the url will be taken from the backup." +) + +masArgGroup.add_argument( + '--dro-url-restore', + dest='dro_url_on_restore', + required=False, + help="DRO URL to restore in Suite configuration. If not specified, the url will be taken from the backup." 
+) + +masArgGroup.add_argument( + '--include-slscfg-from-backup', + dest='include_slscfg_from_backup', + required=False, + action="store_const", + const="true", + default="true", + help="Use SLS config from backup during Suite restore." +) + +masArgGroup.add_argument( + '--exclude-slscfg-from-backup', + dest='include_slscfg_from_backup', + required=False, + action="store_const", + const="false", + help="Exclude SLS config from backup during Suite restore." +) + +masArgGroup.add_argument( + '--sls-cfg-file', + dest='sls_cfg_file', + required=False, + help="SLS config file path to restore, must be provided if own SLS is used." +) + +masArgGroup.add_argument( + '--dro-cfg-file', + dest='dro_cfg_file', + required=False, + help="DRO config file path to restore, must be provided if own DRO is used." +) + +masArgGroup.add_argument( + '--include-drocfg-from-backup', + dest='include_drocfg_from_backup', + required=False, + action="store_const", + const="true", + default="true", + help="Include DRO config from backup during Suite restore." +) + +masArgGroup.add_argument( + '--exclude-drocfg-from-backup', + dest='include_drocfg_from_backup', + required=False, + action="store_const", + const="false", + help="Exclude DRO config from backup during Suite restore." +) + +restoreArgGroup = restoreArgParser.add_argument_group( + 'Restore Configuration', + 'Configure backup version to be restored and storage size.' +) +restoreArgGroup.add_argument( + '--restore-version', + required=False, + help="Version/timestamp used in backup. Example: YYYYMMDD-HHMMSS" +) +restoreArgGroup.add_argument( + '--backup-storage-size', + required=False, + help="Size of the PVC storage, must be bigger than backup archive size. 
(default: 20Gi)" +) +restoreArgGroup.add_argument( + '--clean-backup', + dest='clean_backup', + required=False, + action="store_const", + const="true", + default="true", + help="Clean backup and config workspaces after completion (default: true)" +) +restoreArgGroup.add_argument( + '--no-clean-backup', + dest='clean_backup', + required=False, + action="store_const", + const="false", + help="Do not clean backup and config workspaces after completion" +) + +mongoArgGroup = restoreArgParser.add_argument_group( + 'MongoDB Restore Configuration', + 'Configure MongoDB for restore.' +) + +mongoArgGroup.add_argument( + '--override-mongodb-storageclass', + dest='override_mongodb_storageclass', + required=False, + action="store_const", + const="true", + help="Override MongoDb PVC storageclass" +) + +mongoArgGroup.add_argument( + '--mongodb-storage-class-name', + required=False, + dest="mongodb_storageclass_name", + help="ReadWriteOnce Storage class for MongoDb PVC" +) + +downloadArgGroup = restoreArgParser.add_argument_group( + 'Download Configuration', + 'Configure backup archive download from S3 or Artifactory.' 
+) +downloadArgGroup.add_argument( + '--download-backup', + required=False, + action='store_true', + default=False, + help="Download the backup archive from S3 or Artifactory" +) +downloadArgGroup.add_argument( + '--aws-access-key-id', + required=False, + help="AWS Access Key ID for S3 download" +) +downloadArgGroup.add_argument( + '--aws-secret-access-key', + required=False, + help="AWS Secret Access Key for S3 download" +) +downloadArgGroup.add_argument( + '--s3-bucket-name', + required=False, + help="S3 bucket name for backup download" +) +downloadArgGroup.add_argument( + '--s3-region', + required=False, + help="AWS region for S3 bucket" +) +downloadArgGroup.add_argument( + '--s3-endpoint-url', + required=False, + help="Endpoint url for S3 bucket" +) +downloadArgGroup.add_argument( + '--artifactory-url', + required=False, + help="Artifactory URL for backup download" +) +downloadArgGroup.add_argument( + '--artifactory-repository', + required=False, + help="Artifactory repository for backup download" +) +downloadArgGroup.add_argument( + '--custom-backup-archive-name', + required=False, + dest="backup_archive_name", + help="Custom backup archive name to download from S3 or Artifactory" +) + +manageAppArgGroup = restoreArgParser.add_argument_group( + 'Manage Application Restore', + 'Configure restore of the Manage application and its database.' 
+) +manageAppArgGroup.add_argument( + '--restore-manage-app', + dest='restore_manage_app', + required=False, + action="store_const", + const="true", + help="Restore the Manage application" +) +manageAppArgGroup.add_argument( + '--restore-manage-db', + dest='restore_manage_db', + required=False, + action="store_const", + const="true", + help="Restore the Manage application database (Db2)" +) + +manageAppArgGroup.add_argument( + '--override-manage-app-storageclass', + dest='manage_app_override_storageclass', + required=False, + action="store_const", + const="true", + help="Override Manage Application PVC storageclass" +) + +manageAppArgGroup.add_argument( + '--manage-app-storage-class-rwx', + required=False, + dest="manage_app_storage_class_rwx", + help="ReadWriteMany Storage class for Manage App PVC" +) + +manageAppArgGroup.add_argument( + '--manage-app-storage-class-rwo', + required=False, + dest="manage_app_storage_class_rwo", + help="ReadWriteOnce Storage class for Manage App PVC" +) + +manageAppArgGroup.add_argument( + '--override-manage-db-storageclass', + dest='manage_db_override_storageclass', + required=False, + action="store_const", + const="true", + help="Override database (Db2) PVC storageclass" +) + +manageAppArgGroup.add_argument( + '--manage-db-storage-class-rwo', + required=False, + dest="manage_db_storage_class_rwo", + help="RWO Storage class for DB2 storage" +) + +manageAppArgGroup.add_argument( + '--manage-db-storage-class-rwx', + required=False, + dest="manage_db_storage_class_rwx", + help="RWX Storage class for DB2 storage" +) + +componentsArgGroup = restoreArgParser.add_argument_group( + 'Components', + 'Configure which components to include in the restore.' 
+) + +componentsArgGroup.add_argument( + '--include-grafana', + required=False, + action="store_const", + const="true", + default="true", + help="Include Grafana in restore (default: true)" +) +componentsArgGroup.add_argument( + '--exclude-grafana', + dest='include_grafana', + required=False, + action="store_const", + const="false", + help="Skip installing Grafana." +) + +componentsArgGroup.add_argument( + '--include-dro', + required=False, + action="store_const", + const="true", + default="true", + help="Include DRO in restore (default: true)" +) +componentsArgGroup.add_argument( + '--exclude-dro', + dest='include_dro', + required=False, + action="store_const", + const="false", + help="Skip installing DRO." +) + +componentsArgGroup.add_argument( + '--include-sls', + required=False, + action="store_const", + const="true", + default="true", + help="Include SLS in restore (default: true)" +) +componentsArgGroup.add_argument( + '--exclude-sls', + dest='include_sls', + required=False, + action="store_const", + const="false", + help="Exclude SLS from restore (use if SLS is external)" +) + +slsArgGroup = restoreArgParser.add_argument_group( + "IBM Suite License Service Operator", + "Configure IBM Suite License Service Operator (SLS) domain during restore." +) +slsArgGroup.add_argument( + "--sls-domain", + dest='sls_domain', + required=False, + help="SLS domain to use during SLS instance restore (optional)." +) + +droArgGroup = restoreArgParser.add_argument_group( + "IBM Data Reporting Operator", + "Configure IBM Data Reporting Operator (DRO) with contact information and namespace settings for usage data collection." 
+) +droArgGroup.add_argument( + "--ibm-entitlement-key", + required=False, + help="IBM entitlement key" +) +droArgGroup.add_argument( + "--contact-email", + "--uds-email", + dest="dro_contact_email", + required=False, + help="Contact e-mail address" +) +droArgGroup.add_argument( + "--contact-firstname", + "--uds-firstname", + dest="dro_contact_firstname", + required=False, + help="Contact first name" +) +droArgGroup.add_argument( + "--contact-lastname", + "--uds-lastname", + dest="dro_contact_lastname", + required=False, + help="Contact last name" +) +droArgGroup.add_argument( + "--dro-namespace", + required=False, + help="Namespace for DRO" +) + +# More Options +# ----------------------------------------------------------------------------- +otherArgGroup = restoreArgParser.add_argument_group( + 'More', + 'Additional options including development mode, Artifactory credentials, and confirmation prompts.' +) +otherArgGroup.add_argument( + "--artifactory-username", + required=False, + help="Username for access to development builds on Artifactory" +) +otherArgGroup.add_argument( + "--artifactory-token", + required=False, + help="API Token for access to development builds on Artifactory" +) +otherArgGroup.add_argument( + "--dev-mode", + required=False, + action="store_true", + default=False, + help="Configure restore in development mode" +) +otherArgGroup.add_argument( + '--no-confirm', + required=False, + action='store_true', + default=False, + help="Launch the backup without prompting for confirmation" +) +otherArgGroup.add_argument( + '--skip-pre-check', + required=False, + action='store_true', + default=False, + help="Skips the 'pre-restore-check' task in the restore pipeline" +) +otherArgGroup.add_argument( + '-h', "--help", + action='help', + default=False, + help="Show this help message and exit" +) diff --git a/tekton/generate-tekton-pipelines.yml b/tekton/generate-tekton-pipelines.yml index 2b4821e6468..444a79dae0a 100644 --- 
a/tekton/generate-tekton-pipelines.yml +++ b/tekton/generate-tekton-pipelines.yml @@ -28,6 +28,8 @@ dest: "{{ pipeline_target_dir }}/{{ item }}.yaml" with_items: # MAS Pipelines + - mas-backup + - mas-restore - mas-install - mas-update - mas-upgrade diff --git a/tekton/generate-tekton-tasks.yml b/tekton/generate-tekton-tasks.yml index 426b8344797..3518e1ce5e8 100644 --- a/tekton/generate-tekton-tasks.yml +++ b/tekton/generate-tekton-tasks.yml @@ -136,12 +136,19 @@ src: "{{ task_src_dir }}/{{ item }}.yml.j2" dest: "{{ task_target_dir }}/{{ item }}.yaml" with_items: + - clean-workspaces + - prepare-backup-workspace + - download-backup-archive - gencfg-workspace - must-gather + - suite-app-backup - suite-app-install + - suite-app-restore + - suite-app-rollback - suite-app-uninstall - suite-app-upgrade - - suite-app-rollback + - suite-backup + - suite-restore - suite-certs - suite-config - suite-db2-setup-for-manage @@ -152,10 +159,11 @@ - suite-uninstall - suite-upgrade - suite-verify + - upload-backup-archive # 7. 
Generate Tasks (OCP) # ------------------------------------------------------------------------- - - name: Generate Tasks (Suite) + - name: Generate Tasks (OCP) ansible.builtin.template: src: "{{ task_src_dir }}/ocp/{{ item }}.yml.j2" dest: "{{ task_target_dir }}/{{ item }}.yaml" diff --git a/tekton/src/params/backup.yml.j2 b/tekton/src/params/backup.yml.j2 new file mode 100644 index 00000000000..d461392f8e0 --- /dev/null +++ b/tekton/src/params/backup.yml.j2 @@ -0,0 +1,342 @@ +# OCP Verification Parameters +# ----------------------------------------------------------------------------- +- name: skip_pre_check + type: string + description: Skip pre-backup verification checks + default: "False" + +- name: ocp_ingress_tls_secret_name + type: string + description: Name of the TLS secret for OCP ingress + default: "" + +# Backup/Restore Configuration +# ----------------------------------------------------------------------------- +- name: mas_backup_dir + type: string + description: Directory to store backup files (e.g., /tmp/mas_backups) + default: "" + +- name: backup_version + type: string + description: Version/timestamp for the backup (auto-generated if not provided) + default: "" + +- name: restore_version + type: string + description: Version/timestamp for the backup to restore from + default: "" + +# Cleanup Configuration +# ----------------------------------------------------------------------------- +- name: clean_backup + type: string + description: Whether to clean backup and config workspaces after completion (true/false) + default: "true" + +# Upload Configuration +# ----------------------------------------------------------------------------- +- name: upload_backup + type: string + description: Whether to upload the backup archive (true/false) + default: "false" + +# Download Configuration +# ----------------------------------------------------------------------------- +- name: download_backup + type: string + description: Whether to download the backup 
archive (true/false) + default: "false" + +- name: backup_archive_name + type: string + description: Custom backup archive name including tar.gz to download from S3 or Artifactory + default: "" + +# S3 Upload Configuration +# ----------------------------------------------------------------------------- +- name: aws_access_key_id + type: string + description: AWS Access Key ID for S3 upload + default: "" + +- name: aws_secret_access_key + type: string + description: AWS Secret Access Key for S3 upload + default: "" + +- name: s3_bucket_name + type: string + description: S3 bucket name for backup upload + default: "" + +- name: s3_region + type: string + description: AWS region for S3 bucket + default: "" + +- name: s3_endpoint_url + type: string + description: S3 endpoint url for S3 bucket + default: "" + +# Artifactory Upload Configuration +# ----------------------------------------------------------------------------- +- name: artifactory_url + type: string + description: Artifactory URL for backup upload + default: "" + +- name: artifactory_repository + type: string + description: Artifactory repository for backup upload + default: "" + +# Component Flags +# ----------------------------------------------------------------------------- +- name: include_grafana + type: string + description: Set to false to skip Grafana install + default: "true" + +- name: include_sls + type: string + description: Set to false to skip SLS backup/restore (if SLS is external) + default: "true" + +- name: include_dro + type: string + description: Set to false to skip DRO install (if DRO is external) + default: "true" + +# MongoDB Configuration +# ----------------------------------------------------------------------------- +- name: mongodb_namespace + type: string + description: MongoDB namespace + default: "mongoce" + +- name: mongodb_instance_name + type: string + description: MongoDB instance name + default: "mas-mongo-ce" + +- name: mongodb_provider + type: string + description: 
MongoDB provider (only community is supported for backup) + default: "community" + +- name: override_mongodb_storageclass + type: string + description: Whether to override the storage class for MongoDB during restore (true/false) + default: "false" + +- name: mongodb_storageclass_name + type: string + description: Storage class name for MongoDB when override_mongodb_storageclass is true + default: "" + +# SLS Configuration +# ----------------------------------------------------------------------------- +- name: sls_namespace + type: string + description: SLS namespace + default: "ibm-sls" + +- name: sls_domain + type: string + description: SLS URL to restore in Suite configuration. + default: "" + +# Certificate Manager Configuration +# ----------------------------------------------------------------------------- +- name: cert_manager_provider + type: string + default: "redhat" + description: Certificate manager provider + +# Grafana Configuration +# ----------------------------------------------------------------------------- +- name: grafana_instance_storage_class + type: string + default: "" + description: Storage class for Grafana instance (used during restore) + +# DRO Configuration (for restore) +# ----------------------------------------------------------------------------- +- name: dro_contact_email + type: string + default: "" + description: Required for DRO installation during restore + +- name: dro_contact_firstname + type: string + default: "" + description: Required for DRO installation during restore + +- name: dro_contact_lastname + type: string + default: "" + description: Required for DRO installation during restore + +- name: dro_namespace + type: string + default: "redhat-marketplace" + description: Required for DRO installation during restore + +- name: dro_storage_class + type: string + default: "" + description: Required for DRO installation during restore + +- name: ibm_entitlement_key + type: string + default: "" + description: Required for 
DRO installation during restore + +# Restore-specific Configuration +# ----------------------------------------------------------------------------- +- name: mas_domain_on_restore + type: string + default: "" + description: Domain to use when restoring to a different cluster + +- name: sls_url_on_restore + type: string + default: "" + description: SLS URL when restoring with external SLS + +- name: include_slscfg_from_backup + type: string + default: "" + description: Use SLSCfg from backup during restore + +- name: sls_cfg_file + type: string + default: "" + description: SLS cfg file when restoring with external SLS + +- name: dro_url_on_restore + type: string + default: "" + description: DRO URL when restoring with external DRO + +- name: include_drocfg_from_backup + type: string + default: "" + description: Use DROCfg from backup during restore + +- name: dro_cfg_file + type: string + default: "" + description: DRO cfg file when restoring with external DRO + +- name: mas_config_dir + type: string + default: "" + description: Directory for MAS configuration files during restore + +# Manage Application Backup/Restore Configuration +# ----------------------------------------------------------------------------- +- name: backup_manage_app + type: string + description: Whether to backup the Manage application (true/false) + default: "false" + +- name: backup_manage_db + type: string + description: Whether to backup the Manage database (Db2) (true/false) + default: "false" + +- name: restore_manage_app + type: string + description: Whether to restore the Manage application (true/false) + default: "false" + +- name: restore_manage_db + type: string + description: Whether to restore the Manage database (Db2) (true/false) + default: "false" + +- name: manage_workspace_id + type: string + description: Manage workspace ID for application backup + default: "" + +- name: manage_app_override_storageclass + type: string + description: Whether to override the storage class in 
Manage App PVC (true/false) + default: "false" + +- name: manage_app_storage_class_rwx + type: string + description: Storage class for Manage ReadWriteMany Storage + default: "" + +- name: manage_app_storage_class_rwo + type: string + description: Storage class for Manage ReadWriteOnce Storage + default: "" + + +# Manage Db2 Backup/Restore Configuration +# ----------------------------------------------------------------------------- +- name: manage_db2_namespace + type: string + description: Manage Db2 namespace for backup + default: "" + +- name: manage_db2_instance_name + type: string + description: Manage Db2 instance name for backup + default: "" + +- name: manage_db2_backup_type + type: string + description: Type of Manage Db2 backup (online or offline) + default: "online" + +- name: manage_db2_backup_vendor + type: string + description: Storage backend for Manage Db2 backup files (disk or s3) + default: "disk" + +- name: manage_db2_restore_vendor + type: string + description: Storage backend for Manage Db2 restore files (disk or s3) + default: "disk" + +- name: manage_db_override_storageclass + type: string + description: Whether to override the storage class in Manage database (Db2) PVC (true/false) + default: "false" + +- name: manage_db_storage_class_rwx + type: string + description: Storage class for Manage DB2 ReadWriteMany Storage + default: "" + +- name: manage_db_storage_class_rwo + type: string + description: Storage class for Manage DB2 ReadWriteOnce Storage + default: "" + +- name: backup_s3_endpoint + type: string + description: S3 endpoint URL for Db2 backups + default: "" + +- name: backup_s3_bucket + type: string + description: S3 bucket name for Db2 backups + default: "" + +- name: backup_s3_access_key + type: string + description: S3 access key for Db2 backups + default: "" + +- name: backup_s3_secret_key + type: string + description: S3 secret key for Db2 backups + default: "" \ No newline at end of file diff --git 
a/tekton/src/pipelines/mas-backup.yml.j2 b/tekton/src/pipelines/mas-backup.yml.j2 new file mode 100644 index 00000000000..9d6ce4610dd --- /dev/null +++ b/tekton/src/pipelines/mas-backup.yml.j2 @@ -0,0 +1,412 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: mas-backup +spec: + workspaces: + # The backup storage + - name: shared-backups + + params: + # 1. Common Parameters + # ------------------------------------------------------------------------- + {{ lookup('template', params_src_dir ~ '/common.yml.j2') | indent(4) }} + + # 2. Backup/Restore Configuration + # ------------------------------------------------------------------------- + {{ lookup('template', params_src_dir ~ '/backup.yml.j2') | indent(4) }} + + tasks: + # Content + # ------- + # 1. Pipeline Start + # 2. Pre-backup Verification + # 3. Prepare backup workspace + # 4. Backup IBM Catalogs + # 5. Backup Certificate Manager + # 6. Backup MongoDB + # 7. Backup SLS + # 8. Backup MAS Suite + # 9. Backup Manage Database (Db2) - Optional + # 10. Backup Manage Application - Optional + + # 1. Pipeline Start + # ------------------------------------------------------------------------- + - name: pipeline-start + timeout: "0" + taskRef: + kind: Task + name: mas-devops-update-pipeline-status + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + - name: pipeline_status + value: Started + - name: pipeline_name + value: $(context.pipeline.name) + - name: pipelinerun_name + value: $(context.pipelineRun.name) + - name: pipelinerun_namespace + value: $(context.pipelineRun.namespace) + + # 2. Pre-backup Verification + # ------------------------------------------------------------------------- + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/cluster-setup/ocp-verify.yml.j2', template_vars={'name': 'pre-backup-check', 'devops_suite_name': 'pre-backup-check'}) | indent(4) }} + runAfter: + - pipeline-start + + # 3. 
Prepare backup workspace + # ------------------------------------------------------------------------- + - name: prepare-backup-workspace + timeout: "0" + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + taskRef: + kind: Task + name: mas-devops-prepare-backup-workspace + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - pre-backup-check + + # 4. Backup IBM Catalogs + # ------------------------------------------------------------------------- + - name: ibm-catalogs + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: backup-ibm-catalogs + + - name: ibm_catalogs_backup_version + value: $(params.backup_version) + - name: ibm_catalogs_action + value: backup + + - name: artifactory_username + value: $(params.artifactory_username) + - name: artifactory_token + value: $(params.artifactory_token) + + taskRef: + kind: Task + name: mas-devops-ibm-catalogs + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + + # 5. Backup Certificate Manager + # ------------------------------------------------------------------------- + - name: cert-manager + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: backup-cert-manager + + - name: certmanager_backup_version + value: $(params.backup_version) + - name: cert_manager_action + value: backup + + - name: cert_manager_provider + value: $(params.cert_manager_provider) + + taskRef: + kind: Task + name: mas-devops-cert-manager + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + + # 6. 
Backup MongoDB + # ------------------------------------------------------------------------- + - name: mongodb + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: backup-mongodb + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: mongodb_backup_version + value: $(params.backup_version) + - name: mongodb_action + value: backup + + - name: mongodb_namespace + value: $(params.mongodb_namespace) + - name: mongodb_instance_name + value: $(params.mongodb_instance_name) + - name: mongodb_provider + value: $(params.mongodb_provider) + + taskRef: + kind: Task + name: mas-devops-mongodb + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + + # 7. Backup SLS + # ------------------------------------------------------------------------- + - name: sls + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: devops_suite_name + value: backup-sls + + - name: sls_backup_version + value: $(params.backup_version) + - name: sls_action + value: backup + + - name: sls_namespace + value: $(params.sls_namespace) + + when: + - input: "$(params.include_sls)" + operator: in + values: ["true", "True"] + + taskRef: + kind: Task + name: mas-devops-sls + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + + # 8. 
Backup MAS Suite + # ------------------------------------------------------------------------- + - name: suite-backup + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: devops_suite_name + value: backup-suite + + - name: suite_backup_version + value: $(params.backup_version) + + taskRef: + kind: Task + name: mas-devops-suite-backup + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - ibm-catalogs + - cert-manager + - mongodb + - sls + + # 9. Backup Manage Database (Db2) - Optional + # ------------------------------------------------------------------------- + - name: manage-db2-backup + timeout: "0" + when: + - input: "$(params.backup_manage_db)" + operator: in + values: ["true", "True"] + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: db2_namespace + value: $(params.manage_db2_namespace) + - name: db2_instance_name + value: $(params.manage_db2_instance_name) + - name: db2_action + value: backup + - name: mas_application_id + value: manage + + # Backup specific parameters + - name: db2_backup_version + value: $(params.backup_version) + - name: backup_type + value: $(params.manage_db2_backup_type) + - name: backup_vendor + value: $(params.manage_db2_backup_vendor) + - name: backup_s3_endpoint + value: $(params.backup_s3_endpoint) + - name: backup_s3_bucket + value: $(params.backup_s3_bucket) + - name: backup_s3_access_key + value: $(params.backup_s3_access_key) + - name: backup_s3_secret_key + value: $(params.backup_s3_secret_key) + + taskRef: + kind: Task + name: mas-devops-db2 + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + + # 10. 
Backup Manage Application - Optional + # ------------------------------------------------------------------------- + - name: manage-app-backup + timeout: "0" + when: + - input: "$(params.backup_manage_app)" + operator: in + values: ["true", "True"] + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + - name: mas_workspace_id + value: $(params.manage_workspace_id) + - name: mas_app_id + value: manage + + - name: mas_backup_dir + value: /workspace/backups + - name: mas_app_backup_version + value: $(params.backup_version) + + taskRef: + kind: Task + name: mas-devops-suite-app-backup + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - suite-backup + + # 11. Upload Backup Archive (Optional) + # ------------------------------------------------------------------------- + - name: upload-backup-archive + timeout: "0" + when: + - input: "$(params.upload_backup)" + operator: in + values: ["true", "True"] + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: backup_version + value: $(params.backup_version) + - name: mas_instance_id + value: $(params.mas_instance_id) + + # S3 parameters + - name: aws_access_key_id + value: $(params.aws_access_key_id) + - name: aws_secret_access_key + value: $(params.aws_secret_access_key) + - name: s3_bucket_name + value: $(params.s3_bucket_name) + - name: s3_region + value: $(params.s3_region) + - name: s3_endpoint_url + value: $(params.s3_endpoint_url) + + # Artifactory parameters + - name: artifactory_username + value: $(params.artifactory_username) + - name: artifactory_token + value: $(params.artifactory_token) + - name: artifactory_url + value: $(params.artifactory_url) + - name: artifactory_repository + value: $(params.artifactory_repository) + + taskRef: + kind: Task + name: mas-devops-upload-backup-archive + workspaces: + - name: 
backups + workspace: shared-backups + runAfter: + - suite-backup + - manage-db2-backup + - manage-app-backup + + finally: + # 1. Clean Backup Workspaces (Optional) + # ------------------------------------------------------------------------- + - name: clean-backup-workspaces + timeout: "0" + when: + - input: "$(params.clean_backup)" + operator: in + values: ["true", "True"] + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + taskRef: + kind: Task + name: mas-devops-clean-workspaces + workspaces: + - name: backups + workspace: shared-backups + + # 2. Update synchronization configmap + # ------------------------------------------------------------------------- + - name: sync-backup + timeout: "0" + taskRef: + kind: Task + name: mas-devops-update-configmap + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + - name: configmap_name + value: sync-backup + - name: configmap_value + # An aggregate status of all the pipelineTasks under the tasks section (excluding the finally section). + # This variable is only available in the finally tasks and can have any one of the values (Succeeded, Failed, Completed, or None) + value: $(tasks.status) + + # 3. Pipeline Finish + # ------------------------------------------------------------------------- + - name: pipeline-finish + timeout: "0" + taskRef: + kind: Task + name: mas-devops-update-pipeline-status + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + - name: pipeline_status + # An aggregate status of all the pipelineTasks under the tasks section (excluding the finally section). 
+ # This variable is only available in the finally tasks and can have any one of the values (Succeeded, Failed, Completed, or None) + value: $(tasks.status) + - name: pipeline_name + value: $(context.pipeline.name) + - name: pipelinerun_name + value: $(context.pipelineRun.name) + - name: pipelinerun_namespace + value: $(context.pipelineRun.namespace) diff --git a/tekton/src/pipelines/mas-restore.yml.j2 b/tekton/src/pipelines/mas-restore.yml.j2 new file mode 100644 index 00000000000..d974bca6680 --- /dev/null +++ b/tekton/src/pipelines/mas-restore.yml.j2 @@ -0,0 +1,532 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: mas-restore +spec: + workspaces: + # The backup storage + - name: shared-backups + # Configs to restore like slscfg and drocfg + - name: restore-configurations + + params: + # 1. Common Parameters + # ------------------------------------------------------------------------- + {{ lookup('template', params_src_dir ~ '/common.yml.j2') | indent(4) }} + + # 2. Backup/Restore Configuration + # ------------------------------------------------------------------------- + {{ lookup('template', params_src_dir ~ '/backup.yml.j2') | indent(4) }} + + tasks: + # Content + # ------- + # 1. Pipeline Start + # 2. Pre-restore Verification + # 3. Prepare backup workspace + # 4. Download backup archive (if required) + # 5. Restore IBM Catalogs + # 6. Restore Certificate Manager + # 7. Install Grafana (if required) + # 8. Restore MongoDB + # 9. Restore SLS (if required) + # 10. Install DRO (if required) + # 11. Restore MAS Suite + # 12. Post Restore Suite Verify + # 13. Restore DB2 (if required) + # 14. Restore Manage (if required) + + # 1. 
Pipeline Start + # ------------------------------------------------------------------------- + - name: pipeline-start + timeout: "0" + taskRef: + kind: Task + name: mas-devops-update-pipeline-status + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + - name: pipeline_status + value: Started + - name: pipeline_name + value: $(context.pipeline.name) + - name: pipelinerun_name + value: $(context.pipelineRun.name) + - name: pipelinerun_namespace + value: $(context.pipelineRun.namespace) + + # 2. Pre-restore Verification + # ------------------------------------------------------------------------- + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/cluster-setup/ocp-verify.yml.j2', template_vars={'name': 'pre-restore-check', 'devops_suite_name': 'pre-restore-check'}) | indent(4) }} + runAfter: + - pipeline-start + + # 3. Prepare backup workspace + # ------------------------------------------------------------------------- + - name: prepare-backup-workspace + timeout: "0" + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + taskRef: + kind: Task + name: mas-devops-prepare-backup-workspace + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - pre-restore-check + + # 4. 
Download backup archive (if required) + # ------------------------------------------------------------------------- + - name: download-backup-archive + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: download-backup-archive + - name: backup_archive_name + value: $(params.backup_archive_name) + - name: mas_instance_id + value: $(params.mas_instance_id) + + # Archive specific + - name: include_sls_archive + value: $(params.include_sls) + - name: include_manage_db_archive + value: $(params.restore_manage_db) + - name: include_manage_app_archive + value: $(params.restore_manage_app) + + # S3 parameters + - name: aws_access_key_id + value: $(params.aws_access_key_id) + - name: aws_secret_access_key + value: $(params.aws_secret_access_key) + - name: s3_bucket_name + value: $(params.s3_bucket_name) + - name: s3_region + value: $(params.s3_region) + - name: s3_endpoint_url + value: $(params.s3_endpoint_url) + + # Artifactory parameters + - name: artifactory_username + value: $(params.artifactory_username) + - name: artifactory_token + value: $(params.artifactory_token) + - name: artifactory_url + value: $(params.artifactory_url) + - name: artifactory_repository + value: $(params.artifactory_repository) + + when: + - input: "$(params.download_backup)" + operator: in + values: ["true", "True"] + + taskRef: + kind: Task + name: mas-devops-download-backup-archive + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + + # 5. 
Restore IBM Catalogs + # ------------------------------------------------------------------------- + - name: ibm-catalogs + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: restore-ibm-catalogs + + - name: ibm_catalogs_backup_version + value: $(params.restore_version) + - name: ibm_catalogs_action + value: restore + + taskRef: + kind: Task + name: mas-devops-ibm-catalogs + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - prepare-backup-workspace + - download-backup-archive + + # 6. Restore Certificate Manager + # ------------------------------------------------------------------------- + - name: cert-manager + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: restore-cert-manager + + - name: certmanager_backup_version + value: $(params.restore_version) + - name: cert_manager_action + value: restore + + taskRef: + kind: Task + name: mas-devops-cert-manager + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - ibm-catalogs + + + # 7. 
Install Grafana + # ------------------------------------------------------------------------- + - name: grafana + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: restore-grafana + + - name: grafana_action + value: install + + when: + - input: "$(params.include_grafana)" + operator: in + values: ["true", "True"] + + taskRef: + kind: Task + name: mas-devops-grafana + runAfter: + - cert-manager + + # 8. Restore MongoDB + # ------------------------------------------------------------------------- + - name: mongodb + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: devops_suite_name + value: restore-mongodb + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: mongodb_backup_version + value: $(params.restore_version) + - name: mongodb_action + value: restore + + - name: override_mongodb_storageclass + value: $(params.override_mongodb_storageclass) + - name: mongodb_storage_class + value: $(params.mongodb_storageclass_name) + - name: mas_config_dir + value: /workspace/backups/configs + + taskRef: + kind: Task + name: mas-devops-mongodb + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - cert-manager + - grafana + + # 9. 
Restore SLS + # ------------------------------------------------------------------------- + - name: sls + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: devops_suite_name + value: restore-sls + + - name: sls_backup_version + value: $(params.restore_version) + - name: sls_action + value: restore + - name: sls_domain + value: $(params.sls_domain) + - name: mas_config_dir + value: /workspace/backups/configs + + when: + - input: "$(params.include_sls)" + operator: in + values: ["true", "True"] + + taskRef: + kind: Task + name: mas-devops-sls + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - mongodb + + # 10. Install DRO + # ------------------------------------------------------------------------- + - name: dro + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: devops_suite_name + value: restore-dro + - name: mas_config_dir + value: /workspace/backups/configs + + - name: dro_action + value: install + - name: dro_contact_email + value: "$(params.dro_contact_email)" + - name: dro_contact_firstname + value: "$(params.dro_contact_firstname)" + - name: dro_contact_lastname + value: "$(params.dro_contact_lastname)" + - name: ibm_entitlement_key + value: $(params.ibm_entitlement_key) + - name: dro_namespace + value: $(params.dro_namespace) + - name: dro_storage_class + value: $(params.dro_storage_class) + + when: + - input: "$(params.include_dro)" + operator: in + values: ["true", "True"] + + taskRef: + kind: Task + name: mas-devops-dro + 
workspaces: + - name: backups + workspace: shared-backups + runAfter: + - ibm-catalogs + + # 11. Restore MAS Suite + # ------------------------------------------------------------------------- + - name: suite-restore + timeout: "0" + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/backup-restore/restore-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + + - name: devops_suite_name + value: restore-suite + + - name: suite_backup_version + value: $(params.restore_version) + - name: mas_domain_on_restore + value: $(params.mas_domain_on_restore) + + - name: include_slscfg_from_backup + value: $(params.include_slscfg_from_backup) + - name: sls_url_on_restore + value: $(params.sls_url_on_restore) + - name: sls_cfg_file + value: $(params.sls_cfg_file) + + - name: include_drocfg_from_backup + value: $(params.include_drocfg_from_backup) + - name: dro_url_on_restore + value: $(params.dro_url_on_restore) + - name: dro_cfg_file + value: $(params.dro_cfg_file) + + taskRef: + kind: Task + name: mas-devops-suite-restore + workspaces: + - name: backups + workspace: shared-backups + - name: restore + workspace: restore-configurations + runAfter: + - ibm-catalogs + - cert-manager + - mongodb + - sls + - dro + + # 12. Post Restore Suite Verification + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/core/suite-verify.yml.j2', template_vars={'name': 'suite-verify', 'devops_suite_name': 'suite-verify'}) | indent(4) }} + runAfter: + - suite-restore + when: + - input: "$(params.mas_instance_id)" + operator: notin + values: [""] + + # 13. 
Restore Manage Database (Db2) - Optional + # ------------------------------------------------------------------------- + - name: manage-db2-restore + timeout: "0" + when: + - input: "$(params.restore_manage_db)" + operator: in + values: ["true", "True"] + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: db2_action + value: restore + - name: mas_application_id + value: manage + - name: mas_config_dir + value: /workspace/backups/configs + + # restore specific parameters + - name: db2_backup_version + value: $(params.restore_version) + - name: backup_vendor + value: $(params.manage_db2_restore_vendor) + - name: override_storageclass + value: $(params.manage_db_override_storageclass) + - name: custom_storage_class_rwx + value: $(params.manage_db_storage_class_rwx) + - name: custom_storage_class_rwo + value: $(params.manage_db_storage_class_rwo) + + taskRef: + kind: Task + name: mas-devops-db2 + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - cert-manager + + # 14. 
Restore Manage Application - Optional + # ------------------------------------------------------------------------- + - name: manage-app-restore + timeout: "0" + when: + - input: "$(params.restore_manage_app)" + operator: in + values: ["true", "True"] + params: + {{ lookup('template', pipeline_src_dir ~ '/taskdefs/common/cli-params.yml.j2') | indent(8) }} + + - name: mas_instance_id + value: $(params.mas_instance_id) + - name: mas_app_id + value: manage + + - name: mas_backup_dir + value: /workspace/backups + - name: mas_app_backup_version + value: $(params.restore_version) + + - name: override_storageclass + value: $(params.manage_app_override_storageclass) + - name: manage_custom_storage_class_rwx + value: $(params.manage_app_storage_class_rwx) + - name: manage_custom_storage_class_rwo + value: $(params.manage_app_storage_class_rwo) + + taskRef: + kind: Task + name: mas-devops-suite-app-restore + workspaces: + - name: backups + workspace: shared-backups + runAfter: + - manage-db2-restore + - suite-verify + + finally: + # 1. Clean Backup Workspaces (Optional) + # ------------------------------------------------------------------------- + - name: clean-backup-workspaces + timeout: "0" + when: + - input: "$(params.clean_backup)" + operator: in + values: ["true", "True"] + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + taskRef: + kind: Task + name: mas-devops-clean-workspaces + workspaces: + - name: backups + workspace: shared-backups + + # 2. Update synchronization configmap + # ------------------------------------------------------------------------- + - name: sync-restore + timeout: "0" + taskRef: + kind: Task + name: mas-devops-update-configmap + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + - name: configmap_name + value: sync-restore + - name: configmap_value + # An aggregate status of all the pipelineTasks under the tasks section (excluding the finally section). 
+ # This variable is only available in the finally tasks and can have any one of the values (Succeeded, Failed, Completed, or None) + value: $(tasks.status) + + # 3. Pipeline Finish + # ------------------------------------------------------------------------- + - name: pipeline-finish + timeout: "0" + taskRef: + kind: Task + name: mas-devops-update-pipeline-status + params: + - name: image_pull_policy + value: $(params.image_pull_policy) + - name: pipeline_status + # An aggregate status of all the pipelineTasks under the tasks section (excluding the finally section). + # This variable is only available in the finally tasks and can have any one of the values (Succeeded, Failed, Completed, or None) + value: $(tasks.status) + - name: pipeline_name + value: $(context.pipeline.name) + - name: pipelinerun_name + value: $(context.pipelineRun.name) + - name: pipelinerun_namespace + value: $(context.pipelineRun.namespace) diff --git a/tekton/src/pipelines/taskdefs/backup-restore/restore-params.yml.j2 b/tekton/src/pipelines/taskdefs/backup-restore/restore-params.yml.j2 new file mode 100644 index 00000000000..93bc5e4d2db --- /dev/null +++ b/tekton/src/pipelines/taskdefs/backup-restore/restore-params.yml.j2 @@ -0,0 +1,2 @@ +- name: restore_version + value: $(params.restore_version) \ No newline at end of file diff --git a/tekton/src/tasks/clean-workspaces.yml.j2 b/tekton/src/tasks/clean-workspaces.yml.j2 new file mode 100644 index 00000000000..9d898867361 --- /dev/null +++ b/tekton/src/tasks/clean-workspaces.yml.j2 @@ -0,0 +1,79 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-clean-workspaces +spec: + params: + - name: image_pull_policy + type: string + default: IfNotPresent + description: Image pull policy for the container image + + workspaces: + - name: configs + description: Configuration workspace to clean + optional: true + - name: backups + description: Backup workspace to clean + optional: true + + steps: + - name: clean-workspaces + 
image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + script: | + #!/bin/bash + set -e + + echo "==========================================" + echo "Workspace Cleanup - Starting" + echo "==========================================" + echo "" + + # Clean backup workspace + if [ -d "/workspace/backups" ]; then + echo "Backup workspace contents BEFORE cleanup:" + echo "------------------------------------------" + ls -lah /workspace/backups/ || echo "Unable to list directory" + echo "" + + echo "Cleaning /workspace/backups..." + rm -rf /workspace/backups/* + + echo "" + echo "Backup workspace contents AFTER cleanup:" + echo "------------------------------------------" + ls -lah /workspace/backups/ || echo "Unable to list directory" + echo "✓ Backup workspace cleaned" + else + echo "⚠ /workspace/backups does not exist" + fi + + echo "" + echo "==========================================" + echo "" + + # Clean config workspace + if [ -d "/workspace/configs" ]; then + echo "Config workspace contents BEFORE cleanup:" + echo "------------------------------------------" + ls -lah /workspace/configs/ || echo "Unable to list directory" + echo "" + + echo "Cleaning /workspace/configs..." 
+ rm -rf /workspace/configs/* + + echo "" + echo "Config workspace contents AFTER cleanup:" + echo "------------------------------------------" + ls -lah /workspace/configs/ || echo "Unable to list directory" + echo "✓ Config workspace cleaned" + else + echo "⚠ /workspace/configs does not exist" + fi + + echo "" + echo "==========================================" + echo "Workspace cleanup completed successfully" + echo "==========================================" \ No newline at end of file diff --git a/tekton/src/tasks/dependencies/cert-manager.yml.j2 b/tekton/src/tasks/dependencies/cert-manager.yml.j2 index da88ed2bccd..dd6949b4e96 100644 --- a/tekton/src/tasks/dependencies/cert-manager.yml.j2 +++ b/tekton/src/tasks/dependencies/cert-manager.yml.j2 @@ -18,6 +18,12 @@ spec: description: Optional. Used to uninstall Cert Manager default: "" + # Backup/Restore specific parameters + - name: certmanager_backup_version + type: string + description: Optional. Version/timestamp for the backup + default: "" + stepTemplate: env: {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} @@ -28,6 +34,12 @@ spec: # Optional parameter to uninstall Cert Manager - name: CERT_MANAGER_ACTION value: $(params.cert_manager_action) + + # Backup/Restore specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: CERTMANAGER_BACKUP_VERSION + value: $(params.certmanager_backup_version) steps: - name: cert-manager command: @@ -36,3 +48,7 @@ spec: image: quay.io/ibmmas/cli:latest imagePullPolicy: $(params.image_pull_policy) workingDir: /workspace/configs + + workspaces: + - name: backups + optional: true diff --git a/tekton/src/tasks/dependencies/db2.yml.j2 b/tekton/src/tasks/dependencies/db2.yml.j2 index e708019f5ac..35997b9c737 100644 --- a/tekton/src/tasks/dependencies/db2.yml.j2 +++ b/tekton/src/tasks/dependencies/db2.yml.j2 @@ -148,6 +148,10 @@ spec: default: "" # JDBCCfg + - name: mas_config_dir + type: string + description: config directory path + 
default: "/workspace/configs" - name: mas_config_scope type: string default: "" @@ -168,6 +172,44 @@ spec: description: Optional MAS custom labels, comma separated list of key=value pairs default: "" + # Backup/Restore specific parameters + - name: db2_backup_version + type: string + description: Optional. Version/timestamp for the backup + default: "" + - name: mas_backup_dir + type: string + description: Directory to store backup files + default: "" + - name: backup_type + type: string + description: Type of backup (online or offline) + default: "" + - name: override_storageclass + type: string + description: Override database (Db2) PVC storageclass during restore + default: "false" + - name: backup_vendor + type: string + description: Storage backend for database backup files (disk or s3) + default: "" + - name: backup_s3_endpoint + type: string + description: S3 endpoint URL for backups + default: "" + - name: backup_s3_bucket + type: string + description: S3 bucket name for backups + default: "" + - name: backup_s3_access_key + type: string + description: S3 access key for backups + default: "" + - name: backup_s3_secret_key + type: string + description: S3 secret key for backups + default: "" + stepTemplate: env: {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} @@ -272,7 +314,7 @@ spec: # MAS - JdbcCfg - name: MAS_CONFIG_DIR - value: /workspace/configs + value: $(params.mas_config_dir) - name: MAS_CONFIG_SCOPE value: $(params.mas_config_scope) - name: MAS_INSTANCE_ID @@ -285,6 +327,26 @@ spec: # Custom labels support - name: CUSTOM_LABELS value: $(params.custom_labels) + + # Backup/Restore specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: DB2_BACKUP_VERSION + value: $(params.db2_backup_version) + - name: DB2_BACKUP_TYPE + value: $(params.backup_type) + - name: BACKUP_VENDOR + value: $(params.backup_vendor) + - name: BACKUP_S3_ENDPOINT + value: $(params.backup_s3_endpoint) + - name: BACKUP_S3_BUCKET + value: 
$(params.backup_s3_bucket) + - name: BACKUP_S3_ACCESS_KEY + value: $(params.backup_s3_access_key) + - name: BACKUP_S3_SECRET_KEY + value: $(params.backup_s3_secret_key) + - name: OVERRIDE_STORAGECLASS + value: $(params.override_storageclass) steps: - name: db2 command: @@ -292,8 +354,10 @@ spec: - db2 image: quay.io/ibmmas/cli:latest imagePullPolicy: $(params.image_pull_policy) - workingDir: /workspace/configs + workingDir: $(params.mas_config_dir) workspaces: - name: configs optional: true + - name: backups + optional: true diff --git a/tekton/src/tasks/dependencies/dro.yml.j2 b/tekton/src/tasks/dependencies/dro.yml.j2 index 79531091a71..fcb4251b675 100644 --- a/tekton/src/tasks/dependencies/dro.yml.j2 +++ b/tekton/src/tasks/dependencies/dro.yml.j2 @@ -24,6 +24,10 @@ spec: - name: ocp_ingress_tls_secret_name type: string default: "" + - name: mas_config_dir + type: string + description: config directory path + default: "/workspace/configs" - name: ibm_entitlement_key type: string @@ -48,7 +52,7 @@ spec: - name: dro image: quay.io/ibmmas/cli:latest imagePullPolicy: $(params.image_pull_policy) - workingDir: /workspace/configs + workingDir: $(params.mas_config_dir) script: | #!/bin/bash @@ -70,7 +74,7 @@ spec: # Properties for generating a MAS configuration - name: MAS_CONFIG_DIR - value: /workspace/configs + value: $(params.mas_config_dir) - name: MAS_INSTANCE_ID value: $(params.mas_instance_id) - name: AISERVICE_INSTANCE_ID @@ -98,3 +102,5 @@ spec: optional: true - name: pod-templates optional: true + - name: backups + optional: true diff --git a/tekton/src/tasks/dependencies/ibm-catalogs.yml.j2 b/tekton/src/tasks/dependencies/ibm-catalogs.yml.j2 index d7dae089f0f..b2dc8aa7cba 100644 --- a/tekton/src/tasks/dependencies/ibm-catalogs.yml.j2 +++ b/tekton/src/tasks/dependencies/ibm-catalogs.yml.j2 @@ -30,6 +30,12 @@ spec: description: Optional. 
Used to uninstall IBM Catalogs default: "" + # Backup/Restore specific parameters + - name: ibm_catalogs_backup_version + type: string + description: Optional. Version/timestamp for the backup + default: "" + stepTemplate: env: {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} @@ -49,6 +55,12 @@ spec: - name: IBM_CATALOGS_ACTION value: $(params.ibm_catalogs_action) + # Backup/Restore specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: IBM_CATALOGS_BACKUP_VERSION + value: $(params.ibm_catalogs_backup_version) + steps: - name: ibm-catalogs command: @@ -57,3 +69,7 @@ spec: image: quay.io/ibmmas/cli:latest imagePullPolicy: $(params.image_pull_policy) workingDir: /workspace/configs + + workspaces: + - name: backups + optional: true diff --git a/tekton/src/tasks/dependencies/mongodb.yml.j2 b/tekton/src/tasks/dependencies/mongodb.yml.j2 index bd5d61d60d9..3773b5e3c42 100644 --- a/tekton/src/tasks/dependencies/mongodb.yml.j2 +++ b/tekton/src/tasks/dependencies/mongodb.yml.j2 @@ -19,6 +19,10 @@ spec: type: string description: Optional MAS custom labels, comma separated list of key=value pairs default: "" + - name: mas_config_dir + type: string + description: config directory path + default: "/workspace/configs" # Storage Class - name: mongodb_storage_class @@ -67,6 +71,20 @@ spec: description: Approves the MongoDb upgrade to version 8 if needed default: "" + # Backup/Restore specific parameters + - name: mongodb_backup_version + type: string + description: Optional. Version/timestamp for the backup + default: "" + - name: mongodb_instance_name + type: string + description: Optional. 
MongoDB instance name for backup + default: "" + - name: override_mongodb_storageclass + type: string + description: Override database PVC storageclass during restore + default: "false" + # Dependencies - IBM Cloud MongoDb # ------------------------------------------------------------------------- - name: ibmcloud_resourcegroup @@ -100,7 +118,7 @@ spec: # General - name: MAS_CONFIG_DIR - value: /workspace/configs + value: $(params.mas_config_dir) - name: MAS_INSTANCE_ID value: $(params.mas_instance_id) - name: AISERVICE_INSTANCE_ID @@ -148,6 +166,16 @@ spec: - name: IBMCLOUD_APIKEY value: $(params.ibmcloud_apikey) + # Backup/Restore specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: MONGODB_BACKUP_VERSION + value: $(params.mongodb_backup_version) + - name: MONGODB_INSTANCE_NAME + value: $(params.mongodb_instance_name) + - name: OVERRIDE_STORAGECLASS + value: $(params.override_mongodb_storageclass) + steps: - name: mongodb script: | @@ -162,8 +190,10 @@ spec: image: quay.io/ibmmas/cli:latest imagePullPolicy: $(params.image_pull_policy) - workingDir: /workspace/configs + workingDir: $(params.mas_config_dir) workspaces: - name: configs optional: true + - name: backups + optional: true diff --git a/tekton/src/tasks/dependencies/sls.yml.j2 b/tekton/src/tasks/dependencies/sls.yml.j2 index aacc0892c52..5dfb878262e 100644 --- a/tekton/src/tasks/dependencies/sls.yml.j2 +++ b/tekton/src/tasks/dependencies/sls.yml.j2 @@ -19,6 +19,10 @@ spec: type: string description: Optional MAS custom labels, comma separated list of key=value pairs default: "" + - name: mas_config_dir + type: string + description: config directory path + default: "/workspace/configs" - name: sls_namespace type: string @@ -35,6 +39,9 @@ spec: - name: sls_icr_cpopen type: string default: "" + - name: sls_domain + type: string + default: "" # New way of bootstrapping license file since SLS 3.7.0 - name: sls_entitlement_file @@ -61,13 +68,23 @@ spec: description: Optional. 
Used to uninstall SLS default: "" + # Backup/Restore specific parameters + - name: sls_backup_version + type: string + description: Optional. Version/timestamp for the backup + default: "" + - name: include_sls + type: string + description: Optional. Set to false to skip SLS backup + default: "true" + stepTemplate: env: {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} # General - name: MAS_CONFIG_DIR - value: /workspace/configs + value: $(params.mas_config_dir) - name: MAS_INSTANCE_ID value: $(params.mas_instance_id) - name: AISERVICE_INSTANCE_ID @@ -103,6 +120,14 @@ spec: - name: SLS_ACTION value: $(params.sls_action) + # Backup/Restore specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: SLS_BACKUP_VERSION + value: $(params.sls_backup_version) + - name: SLS_DOMAIN + value: $(params.sls_domain) + - name: MAS_POD_TEMPLATES_DIR value: /workspace/pod-templates @@ -120,7 +145,7 @@ spec: image: quay.io/ibmmas/cli:latest imagePullPolicy: $(params.image_pull_policy) - workingDir: /workspace/configs + workingDir: $(params.mas_config_dir) workspaces: - name: configs @@ -131,3 +156,5 @@ spec: optional: true - name: pod-templates optional: true + - name: backups + optional: true diff --git a/tekton/src/tasks/download-backup-archive.yml.j2 b/tekton/src/tasks/download-backup-archive.yml.j2 new file mode 100644 index 00000000000..e3ee3256ab7 --- /dev/null +++ b/tekton/src/tasks/download-backup-archive.yml.j2 @@ -0,0 +1,131 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-download-backup-archive +spec: + params: + {{ lookup('template', task_src_dir ~ '/common/cli-params.yml.j2') | indent(4) }} + + # Backup specific parameters + - name: restore_version + type: string + description: Version/timestamp for the backup + default: "" + - name: backup_archive_name + type: string + description: name of the backup archive including `.tar.gz` extension. 
+ default: "" + - name: mas_instance_id + type: string + description: Instance ID + + # Archives specific parameters + - name: include_sls_archive + type: string + description: Download SLS archive + default: "false" + - name: include_manage_db_archive + type: string + description: Download Manage DB2 archive + default: "false" + - name: include_manage_app_archive + type: string + description: Download Manage App archive + default: "false" + + # S3 download parameters + - name: aws_access_key_id + type: string + description: AWS Access Key ID for S3 download + default: "" + - name: aws_secret_access_key + type: string + description: AWS Secret Access Key for S3 download + default: "" + - name: s3_bucket_name + type: string + description: S3 bucket name for backup download + default: "" + - name: s3_region + type: string + description: AWS region for S3 bucket + default: "" + - name: s3_endpoint_url + type: string + description: S3 endpoint url for S3 bucket + default: "" + + # Artifactory download parameters + - name: artifactory_username + type: string + description: Artifactory username for backup download + default: "" + - name: artifactory_token + type: string + description: Artifactory token for backup download + default: "" + - name: artifactory_url + type: string + description: Artifactory URL for backup download + default: "" + - name: artifactory_repository + type: string + description: Artifactory repository for backup download + default: "" + + stepTemplate: + env: + {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} + + # Backup specific + - name: MAS_RESTORE_DIR + value: /workspace/backups + - name: MAS_INSTANCE_ID + value: $(params.mas_instance_id) + - name: BACKUP_VERSION + value: $(params.restore_version) + - name: BACKUP_ARCHIVE_NAME + value: $(params.backup_archive_name) + + # Archive specific + - name: INCLUDE_SLS_ARCHIVE + value: $(params.include_sls_archive) + - name: INCLUDE_MANAGE_DB_ARCHIVE + value: 
$(params.include_manage_db_archive) + - name: INCLUDE_MANAGE_APP_ARCHIVE + value: $(params.include_manage_app_archive) + + # S3 credentials + - name: S3_ACCESS_KEY_ID + value: $(params.aws_access_key_id) + - name: S3_SECRET_ACCESS_KEY + value: $(params.aws_secret_access_key) + - name: S3_BUCKET_NAME + value: $(params.s3_bucket_name) + - name: S3_REGION + value: $(params.s3_region) + - name: S3_ENDPOINT_URL + value: $(params.s3_endpoint_url) + + # Artifactory credentials + - name: ARTIFACTORY_USERNAME + value: $(params.artifactory_username) + - name: ARTIFACTORY_TOKEN + value: $(params.artifactory_token) + - name: ARTIFACTORY_URL + value: $(params.artifactory_url) + - name: ARTIFACTORY_REPOSITORY + value: $(params.artifactory_repository) + + steps: + - name: download-backup-archive + command: + - /opt/app-root/src/run-role.sh + - download_backup_archive + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + workingDir: /workspace/backups + + workspaces: + - name: backups diff --git a/tekton/src/tasks/prepare-backup-workspace.yml.j2 b/tekton/src/tasks/prepare-backup-workspace.yml.j2 new file mode 100644 index 00000000000..768a1566f44 --- /dev/null +++ b/tekton/src/tasks/prepare-backup-workspace.yml.j2 @@ -0,0 +1,69 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-prepare-backup-workspace +spec: + params: + - name: image_pull_policy + type: string + default: IfNotPresent + description: Image pull policy for the container image + + workspaces: + - name: backups + description: Backup workspace to prepare + + steps: + - name: prepare-backup-workspace + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + script: | + #!/bin/bash + set -e + + echo "==========================================" + echo "Workspace Preparation - Starting" + echo "==========================================" + echo "" + + # Prepare backup workspace configs directory + if [ -d "/workspace/backups" ]; then + 
echo "Backup workspace exists, preparing configs directory..." + echo "" + + # Remove existing configs directory if it exists + if [ -d "/workspace/backups/configs" ]; then + echo "Removing existing /workspace/backups/configs directory..." + rm -rf /workspace/backups/configs + echo "✓ Existing configs directory removed" + echo "" + fi + + # Create clean configs directory + echo "Creating clean /workspace/backups/configs directory..." + mkdir -p /workspace/backups/configs + echo "✓ Clean configs directory created" + echo "" + + # Verify the directory was created + echo "Verifying directory structure:" + echo "------------------------------" + ls -lah /workspace/backups/ || echo "Unable to list directory" + echo "" + + if [ -d "/workspace/backups/configs" ]; then + echo "✓ /workspace/backups/configs directory is ready" + else + echo "✗ Failed to create /workspace/backups/configs directory" + exit 1 + fi + else + echo "✗ /workspace/backups does not exist" + exit 1 + fi + + echo "" + echo "==========================================" + echo "Workspace preparation completed successfully" + echo "==========================================" \ No newline at end of file diff --git a/tekton/src/tasks/suite-app-backup.yml.j2 b/tekton/src/tasks/suite-app-backup.yml.j2 new file mode 100644 index 00000000000..3629702e2c0 --- /dev/null +++ b/tekton/src/tasks/suite-app-backup.yml.j2 @@ -0,0 +1,65 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-suite-app-backup +spec: + params: + {{ lookup('template', task_src_dir ~ '/common/cli-params.yml.j2') | indent(4) }} + + - name: mas_instance_id + type: string + description: Instance ID + + - name: mas_workspace_id + type: string + description: Workspace ID + default: "" + + - name: mas_app_id + type: string + description: Application ID (e.g., manage) + default: "" + + # Backup specific parameters + - name: mas_backup_dir + type: string + description: Directory to store backup files + default: "" + + - 
name: mas_app_backup_version + type: string + description: Version/timestamp for the MAS app backup + default: "" + + stepTemplate: + env: + {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} + + - name: MAS_INSTANCE_ID + value: $(params.mas_instance_id) + + - name: MAS_WORKSPACE_ID + value: $(params.mas_workspace_id) + + - name: MAS_APP_ID + value: $(params.mas_app_id) + + # Backup specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + + - name: MAS_APP_BACKUP_VERSION + value: $(params.mas_app_backup_version) + + steps: + - name: suite-app-backup + command: + - /opt/app-root/src/run-role.sh + - suite_app_backup + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + workingDir: /workspace/backups + + workspaces: + - name: backups diff --git a/tekton/src/tasks/suite-app-restore.yml.j2 b/tekton/src/tasks/suite-app-restore.yml.j2 new file mode 100644 index 00000000000..546c47c01a2 --- /dev/null +++ b/tekton/src/tasks/suite-app-restore.yml.j2 @@ -0,0 +1,77 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-suite-app-restore +spec: + params: + {{ lookup('template', task_src_dir ~ '/common/cli-params.yml.j2') | indent(4) }} + + - name: mas_instance_id + type: string + description: Instance ID + + - name: mas_app_id + type: string + description: Application ID (e.g., manage) + default: "" + + # Backup specific parameters + - name: mas_backup_dir + type: string + description: Directory to store backup files + default: "" + + - name: mas_app_backup_version + type: string + description: Version/timestamp for the MAS app backup + default: "" + + - name: override_storageclass + type: string + description: Override App PVC storageclass during restore + default: "false" + - name: manage_custom_storage_class_rwx + type: string + default: "" + - name: manage_custom_storage_class_rwo + type: string + default: "" + + stepTemplate: + env: + {{ lookup('template', task_src_dir ~ 
'/common/cli-env.yml.j2') | indent(6) }} + + - name: MAS_INSTANCE_ID + value: $(params.mas_instance_id) + + - name: MAS_APP_ID + value: $(params.mas_app_id) + + # Backup specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + + - name: MAS_APP_BACKUP_VERSION + value: $(params.mas_app_backup_version) + + - name: OVERRIDE_STORAGECLASS + value: $(params.override_storageclass) + + # Manage specific + - name: MANAGE_CUSTOM_STORAGE_CLASS_RWX + value: $(params.manage_custom_storage_class_rwx) + - name: MANAGE_CUSTOM_STORAGE_CLASS_RWO + value: $(params.manage_custom_storage_class_rwo) + + steps: + - name: suite-app-restore + command: + - /opt/app-root/src/run-role.sh + - suite_app_restore + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + workingDir: /workspace/backups + + workspaces: + - name: backups diff --git a/tekton/src/tasks/suite-backup.yml.j2 b/tekton/src/tasks/suite-backup.yml.j2 new file mode 100644 index 00000000000..f79254eaa73 --- /dev/null +++ b/tekton/src/tasks/suite-backup.yml.j2 @@ -0,0 +1,49 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-suite-backup +spec: + params: + {{ lookup('template', task_src_dir ~ '/common/cli-params.yml.j2') | indent(4) }} + + - name: mas_instance_id + type: string + description: Instance ID + + # Backup specific parameters + - name: mas_backup_dir + type: string + description: Directory to store backup files + default: "" + - name: suite_backup_version + type: string + description: Version/timestamp for the backup + default: "" + + stepTemplate: + env: + {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} + + - name: MAS_CONFIG_DIR + value: /workspace/backups/configs + - name: MAS_INSTANCE_ID + value: $(params.mas_instance_id) + + # Backup specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: SUITE_BACKUP_VERSION + value: $(params.suite_backup_version) + + steps: + - name: suite-backup + command: + - 
/opt/app-root/src/run-role.sh + - suite_backup + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + workingDir: /workspace/backups + + workspaces: + - name: backups diff --git a/tekton/src/tasks/suite-restore.yml.j2 b/tekton/src/tasks/suite-restore.yml.j2 new file mode 100644 index 00000000000..46dc2493689 --- /dev/null +++ b/tekton/src/tasks/suite-restore.yml.j2 @@ -0,0 +1,94 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-suite-restore +spec: + params: + {{ lookup('template', task_src_dir ~ '/common/cli-params.yml.j2') | indent(4) }} + + - name: mas_instance_id + type: string + description: Instance ID + + # Backup specific parameters + - name: mas_backup_dir + type: string + description: Directory to store backup files + default: "" + - name: suite_backup_version + type: string + description: Version/timestamp for the backup + default: "" + - name: mas_domain_on_restore + type: string + description: The domain to use for the MAS Suite instance + default: "NO_OVERRIDE" # Default value is used to indicate that the domain should not be overridden + + - name: include_slscfg_from_backup + type: string + description: Whether to include SLS from backup + default: "true" + - name: sls_url_on_restore + type: string + default: "" + description: SLS URL when restoring with external SLS + - name: sls_cfg_file + type: string + default: "" + description: SLS cfg file when restoring with external SLS + + - name: include_drocfg_from_backup + type: string + description: Whether to include DRO from backup + default: "true" + - name: dro_url_on_restore + type: string + default: "" + description: BAS/DRO URL when restoring with external BAS + - name: dro_cfg_file + type: string + default: "" + description: BAS cfg file when restoring with external BAS + + stepTemplate: + env: + {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} + + - name: MAS_CONFIG_DIR + value: /workspace/backups/configs + 
- name: MAS_INSTANCE_ID + value: $(params.mas_instance_id) + + # restore specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: SUITE_BACKUP_VERSION + value: $(params.suite_backup_version) + - name: MAS_DOMAIN + value: $(params.mas_domain_on_restore) + - name: INCLUDE_SLS_FROM_BACKUP + value: $(params.include_slscfg_from_backup) + - name: SLS_URL + value: $(params.sls_url_on_restore) + - name: SLS_CFG_FILE + value: $(params.sls_cfg_file) + - name: INCLUDE_DRO_FROM_BACKUP + value: $(params.include_drocfg_from_backup) + - name: BAS_URL + value: $(params.dro_url_on_restore) + - name: DRO_CFG_FILE + value: $(params.dro_cfg_file) + + steps: + - name: suite-restore + command: + - /opt/app-root/src/run-role.sh + - suite_restore + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + workingDir: /workspace/backups + + workspaces: + - name: backups + - name: restore diff --git a/tekton/src/tasks/upload-backup-archive.yml.j2 b/tekton/src/tasks/upload-backup-archive.yml.j2 new file mode 100644 index 00000000000..8a8ec3e3793 --- /dev/null +++ b/tekton/src/tasks/upload-backup-archive.yml.j2 @@ -0,0 +1,103 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: mas-devops-upload-backup-archive +spec: + params: + {{ lookup('template', task_src_dir ~ '/common/cli-params.yml.j2') | indent(4) }} + + # Backup specific parameters + - name: backup_version + type: string + description: Version/timestamp for the backup + default: "" + - name: mas_instance_id + type: string + description: Instance ID + + # S3 Upload parameters + - name: aws_access_key_id + type: string + description: AWS Access Key ID for S3 upload + default: "" + - name: aws_secret_access_key + type: string + description: AWS Secret Access Key for S3 upload + default: "" + - name: s3_bucket_name + type: string + description: S3 bucket name for backup upload + default: "" + - name: s3_region + type: string + description: AWS region for S3 bucket + default: "" + - 
name: s3_endpoint_url + type: string + description: S3 endpoint url for S3 bucket + default: "" + + # Artifactory Upload parameters + - name: artifactory_username + type: string + description: Artifactory username for backup upload + default: "" + - name: artifactory_token + type: string + description: Artifactory token for backup upload + default: "" + - name: artifactory_url + type: string + description: Artifactory URL for backup upload + default: "" + - name: artifactory_repository + type: string + description: Artifactory repository for backup upload + default: "" + + stepTemplate: + env: + {{ lookup('template', task_src_dir ~ '/common/cli-env.yml.j2') | indent(6) }} + + # Backup specific + - name: MAS_BACKUP_DIR + value: /workspace/backups + - name: BACKUP_VERSION + value: $(params.backup_version) + - name: MAS_INSTANCE_ID + value: $(params.mas_instance_id) + + # S3 credentials + - name: S3_ACCESS_KEY_ID + value: $(params.aws_access_key_id) + - name: S3_SECRET_ACCESS_KEY + value: $(params.aws_secret_access_key) + - name: S3_BUCKET_NAME + value: $(params.s3_bucket_name) + - name: S3_REGION + value: $(params.s3_region) + - name: S3_ENDPOINT_URL + value: $(params.s3_endpoint_url) + + # Artifactory credentials + - name: ARTIFACTORY_USERNAME + value: $(params.artifactory_username) + - name: ARTIFACTORY_TOKEN + value: $(params.artifactory_token) + - name: ARTIFACTORY_URL + value: $(params.artifactory_url) + - name: ARTIFACTORY_REPOSITORY + value: $(params.artifactory_repository) + + steps: + - name: upload-backup-archive + command: + - /opt/app-root/src/run-role.sh + - upload_backup_archive + image: quay.io/ibmmas/cli:latest + imagePullPolicy: $(params.image_pull_policy) + workingDir: /workspace/backups + + workspaces: + - name: backups