Skip to content

Commit 357b40b

Browse files
committed
chore: clean up before opening PR
1 parent bea8f14 commit 357b40b

6 files changed

Lines changed: 281 additions & 269 deletions

configs/scripts/build_inference_script.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,5 +29,5 @@ methods_to_sweep: [
2929
] # the methods to sweep
3030
vina_binding_site_methods_to_sweep: ["p2rank"] # the Vina binding site prediction methods to sweep
3131
ensemble_ranking_methods_to_sweep: ["consensus"] # the ensemble ranking methods to sweep - NOTE: must be one of (`consensus`, `ff`)
32-
datasets_to_sweep: ["posebusters_benchmark", "astex_diverse", "dockgen"] # the datasets to sweep
32+
datasets_to_sweep: ["posebusters_benchmark", "astex_diverse", "dockgen", "casp15"] # the datasets to sweep
3333
num_sweep_repeats: 3 # the number of repeats to run for each method-dataset sweep (if the method is a generative method)

configs/scripts/build_interaction_analysis_script.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,4 +18,4 @@ methods_to_sweep: [
1818
"alphafold3_ss",
1919
"alphafold3",
2020
] # the methods to sweep
21-
datasets_to_sweep: ["astex_diverse", "dockgen", "posebusters_benchmark"] # the datasets to sweep
21+
datasets_to_sweep: ["astex_diverse", "dockgen", "posebusters_benchmark", "casp15"] # the datasets to sweep

notebooks/posebusters_method_interaction_analysis_plotting.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@
142142
" \"alphafold3\": \"AF3\",\n",
143143
"}\n",
144144
"\n",
145-
"MAX_POSEBUSTERS_BENCHMARK_ANALYSIS_PROTEIN_SEQUENCE_LENGTH = 2000 # Only PoseBusters Benchmark targets with protein sequences below this threshold can be analyzed"
145+
"MAX_POSEBUSTERS_BENCHMARK_ANALYSIS_PROTEIN_SEQUENCE_LENGTH = 700 # Only PoseBusters Benchmark targets with protein sequences below this threshold can be analyzed"
146146
]
147147
},
148148
{

notebooks/posebusters_method_interaction_analysis_plotting.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@
107107
"alphafold3": "AF3",
108108
}
109109

110-
MAX_POSEBUSTERS_BENCHMARK_ANALYSIS_PROTEIN_SEQUENCE_LENGTH = 2000 # Only PoseBusters Benchmark targets with protein sequences below this threshold can be analyzed
110+
MAX_POSEBUSTERS_BENCHMARK_ANALYSIS_PROTEIN_SEQUENCE_LENGTH = 700 # Only PoseBusters Benchmark targets with protein sequences below this threshold can be analyzed
111111

112112
# %% [markdown]
113113
# #### Define utility functions

posebench/models/ensemble_generation.py

Lines changed: 23 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -144,27 +144,29 @@ def insert_hpc_headers(
144144
string.
145145
:return: Batch headers string for SLURM job scheduling.
146146
"""
147-
return """######################### Batch Headers #########################
148-
#SBATCH --qos=shared # use specified partition for job
149-
#SBATCH --image=registry.nersc.gov/m5008/acmwhb/posebench:0.0.1 # use specified container image
150-
#SBATCH --account=m5008 # use specified account for billing (e.g., `m5008` for AI4Science projects)
151-
#SBATCH --nodes=1 # NOTE: this needs to match Lightning's `Trainer(num_nodes=...)`
152-
#SBATCH --ntasks-per-node=1 # NOTE: this needs to be `1` on SLURM clusters when using Lightning's `ddp_spawn` strategy`; otherwise, set to match Lightning's quantity of `Trainer(devices=...)`
153-
#SBATCH --time=00-05:00:00 # time limit for the job (up to 2 days: `02-00:00:00`)
154-
#SBATCH --job-name=inference_analysis_sweep # job name
155-
#SBATCH --output=scripts/perlmutter/regular/logs/inference_analysis_sweep%j.out # output log file
156-
#SBATCH --error=scripts/perlmutter/regular/logs/inference_analysis_sweep%j.err # error log file
157-
158-
# Wait for 5-10 seconds randomly to avoid race condition
159-
sleep $((RANDOM % 6 + 5))
160-
161-
# Determine location of the project's directory
162-
# PROJECT_ID="m5008"
163-
# PROJECT_DIR="/global/cfs/cdirs/$PROJECT_ID/$USER/Repositories/posebench" # long term storage community drive
164-
PROJECT_DIR="/pscratch/sd/a/$USER/Repositories/posebench" # high-performance storage scratch drive with an 8-week purge policy
165-
cd "$PROJECT_DIR" || exit
166-
167-
"""
147+
return f"""######################### Batch Headers #########################
148+
#SBATCH --partition {gpu_partition} # use reserved partition `chengji-lab-gpu`
149+
#SBATCH --account {gpu_account} # NOTE: this must be specified to use the reserved partition above
150+
#SBATCH --nodes=1 # NOTE: this needs to match Lightning's `Trainer(num_nodes=...)`
151+
#SBATCH --gres gpu:{f'{gpu_type}:' if gpu_type else ''}1 # request {gpu_type} GPU resource(s)
152+
#SBATCH --ntasks-per-node=1 # NOTE: this needs to be `1` on SLURM clusters when using Lightning's `ddp_spawn` strategy; otherwise, set to match Lightning's quantity of `Trainer(devices=...)`
153+
#SBATCH --mem={cpu_memory_in_gb}G # NOTE: use `--mem=0` to request all memory "available" on the assigned node
154+
#SBATCH -t {time_limit} # time limit for the job (up to two days: `2-00:00:00`)
155+
#SBATCH -J posebench_{method}_ensembling # job name
156+
#SBATCH --output=R-%x.%j.out # output log file
157+
#SBATCH --error=R-%x.%j.err # error log file
158+
module purge
159+
module load cuda/11.8.0_gcc_9.5.0
160+
# determine location of the project directory
161+
use_private_project_dir=false # NOTE: customize as needed
162+
if [ "$use_private_project_dir" = true ]; then
163+
project_dir="/home/$USER/data/Repositories/Lab_Repositories/PoseBench"
164+
else
165+
project_dir="/cluster/pixstor/chengji-lab/$USER/Repositories/Lab_Repositories/PoseBench"
166+
fi
167+
# shellcheck source=/dev/null
168+
source /home/$USER/mambaforge/etc/profile.d/conda.sh
169+
cd "$project_dir" || exit"""
168170

169171

170172
def create_diffdock_bash_script(

0 commit comments

Comments
 (0)