Initial commit: RoseTTAFold-All-Atom configured for Wes with Harbor images and s3:// paths

commit 6eef3bb748
Date: 2026-03-17 17:57:24 +01:00
108 changed files with 28144 additions and 0 deletions


@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Script to benchmark inference performance, without bases precomputation
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
CUDA_VISIBLE_DEVICES=0 python -m se3_transformer.runtime.inference \
  --amp "$AMP" \
  --batch_size "$BATCH_SIZE" \
  --use_layer_norm \
  --norm \
  --task homo \
  --seed 42 \
  --benchmark
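
The `${N:-default}` expansions make both arguments optional: the default is used whenever the corresponding positional parameter is unset or empty. A minimal illustration of the same parameter-expansion pattern, runnable in any POSIX-compatible shell:

set -- 64                     # simulate calling the script with one positional argument
echo "${1:-240} ${2:-true}"   # prints "64 true": $1 overrides the default, $2 falls back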


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Script to benchmark single-GPU training performance, with bases precomputation
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
CUDA_VISIBLE_DEVICES=0 python -m se3_transformer.runtime.training \
  --amp "$AMP" \
  --batch_size "$BATCH_SIZE" \
  --epochs 6 \
  --use_layer_norm \
  --norm \
  --save_ckpt_path model_qm9.pth \
  --task homo \
  --precompute_bases \
  --seed 42 \
  --benchmark
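
For a concrete invocation, assuming this script is saved as benchmark_train.sh (the filename is not preserved in this view), the following would benchmark single-GPU training at batch size 120 with AMP disabled:

bash benchmark_train.sh 120 false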


@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Script to benchmark multi-GPU training performance, with bases precomputation
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
python -m torch.distributed.run --nnodes=1 --nproc_per_node=gpu --max_restarts 0 --module \
  se3_transformer.runtime.training \
  --amp "$AMP" \
  --batch_size "$BATCH_SIZE" \
  --epochs 6 \
  --use_layer_norm \
  --norm \
  --save_ckpt_path model_qm9.pth \
  --task homo \
  --precompute_bases \
  --seed 42 \
  --benchmark
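
python -m torch.distributed.run is the module form of the torchrun launcher bundled with PyTorch, so an equivalent way to start the same multi-GPU benchmark is (defaults spelled out as literals):

torchrun --nnodes=1 --nproc_per_node=gpu --max_restarts 0 --module \
  se3_transformer.runtime.training \
  --amp true --batch_size 240 --epochs 6 \
  --use_layer_norm --norm --save_ckpt_path model_qm9.pth \
  --task homo --precompute_bases --seed 42 --benchmark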


@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
# choices: 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv',
# 'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'
TASK=homo
python -m torch.distributed.run --nnodes=1 --nproc_per_node=gpu --max_restarts 0 --module \
se3_transformer.runtime.inference \
--amp "$AMP" \
--batch_size "$BATCH_SIZE" \
--use_layer_norm \
--norm \
--load_ckpt_path model_qm9.pth \
--task "$TASK"
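
Note that TASK is hardcoded rather than read from the command line, and the checkpoint loaded from model_qm9.pth must have been trained on that same task, since the saved weights were fit to a single regression target. If per-run selection is wanted, one hypothetical variant (not part of this commit) is to expose it as a third positional argument:

TASK=${3:-homo}   # hypothetical: accept the task as an optional third CLI argument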


@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Script to train a model on a single GPU, with bases precomputation
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
NUM_EPOCHS=${3:-100}
LEARNING_RATE=${4:-0.002}
WEIGHT_DECAY=${5:-0.1}
# choices: 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv',
# 'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'
TASK=homo
python -m se3_transformer.runtime.training \
  --amp "$AMP" \
  --batch_size "$BATCH_SIZE" \
  --epochs "$NUM_EPOCHS" \
  --lr "$LEARNING_RATE" \
  --weight_decay "$WEIGHT_DECAY" \
  --use_layer_norm \
  --norm \
  --save_ckpt_path model_qm9.pth \
  --precompute_bases \
  --seed 42 \
  --task "$TASK"
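
The five positional parameters map, in order, onto batch size, AMP, number of epochs, learning rate, and weight decay. Assuming this script is saved as train.sh (the filename is not preserved in this view), a shorter run at a lower learning rate would look like:

bash train.sh 120 false 50 0.001 0.05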


@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Script to train a model on multiple GPUs, with bases precomputation
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
NUM_EPOCHS=${3:-130}
LEARNING_RATE=${4:-0.01}
WEIGHT_DECAY=${5:-0.1}
# choices: 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv',
# 'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'
TASK=homo
python -m torch.distributed.run --nnodes=1 --nproc_per_node=gpu --max_restarts 0 --module \
  se3_transformer.runtime.training \
  --amp "$AMP" \
  --batch_size "$BATCH_SIZE" \
  --epochs "$NUM_EPOCHS" \
  --lr "$LEARNING_RATE" \
  --min_lr 0.00001 \
  --weight_decay "$WEIGHT_DECAY" \
  --use_layer_norm \
  --norm \
  --save_ckpt_path model_qm9.pth \
  --precompute_bases \
  --seed 42 \
  --task "$TASK"
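
The launcher line above targets a single node. torch.distributed.run also accepts rendezvous flags for multi-node jobs; as a sketch only, with node0:29500 standing in for the real head-node address and port, the same training could be launched on each of two nodes with:

python -m torch.distributed.run --nnodes=2 --nproc_per_node=gpu --max_restarts 0 \
  --rdzv_backend=c10d --rdzv_endpoint=node0:29500 --module \
  se3_transformer.runtime.training \
  --amp "$AMP" --batch_size "$BATCH_SIZE" --epochs "$NUM_EPOCHS" \
  --lr "$LEARNING_RATE" --min_lr 0.00001 --weight_decay "$WEIGHT_DECAY" \
  --use_layer_norm --norm --save_ckpt_path model_qm9.pth \
  --precompute_bases --seed 42 --task "$TASK"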