diff --git a/scripts/benchmark_sbatch_submit.sh b/scripts/benchmark_sbatch_submit.sh
index f51c9960ce624e34af7bfd5ab8778b303126ae37..a2aa7b627081079846650f24a820d17d92489991 100644
--- a/scripts/benchmark_sbatch_submit.sh
+++ b/scripts/benchmark_sbatch_submit.sh
@@ -1,4 +1,4 @@
-cd /proj/nsc/users/xuan/ngc/DeepLearningExamples/PyTorch/Segmentation/nnUNet
+cd /proj/nsc_testing/xuan/Berzelius-nnU-Net-Benchmark
 
 dim=2
 
@@ -9,7 +9,7 @@ for gpus in {1..8}; do
             echo dim ${dim}, nodes ${nodes}, gpus ${gpus}, batch_size ${batch_size}, iteration ${iteration}
 
             # For single node
-            sbatch -o sbatch_out/benchmark_dim${dim}_nodes${nodes}_gpus${gpus}_batchsize${batch_size}_iteration${iteration}.out benchmark_single_node.sbatch ${dim} ${nodes} ${gpus} ${batch_size} ${iteration}
+            sbatch -o sbatch_out/benchmark_dim${dim}_nodes${nodes}_gpus${gpus}_batchsize${batch_size}_iteration${iteration}.out scripts/benchmark_single_node.sbatch ${dim} ${nodes} ${gpus} ${batch_size} ${iteration}
             sleep 1 # pause to be kind to the scheduler
 
@@ -23,7 +23,7 @@ for nodes in {2..8}; do
         for iteration in {1..100}; do
 
             # For multi node
-            sbatch -o sbatch_out/benchmark_dim${dim}_nodes${nodes}_gpus${gpus}_batchsize${batch_size}_iteration${iteration}.out benchmark_multi_node.sbatch ${dim} ${nodes} ${gpus} ${batch_size} ${iteration}
+            sbatch -o sbatch_out/benchmark_dim${dim}_nodes${nodes}_gpus${gpus}_batchsize${batch_size}_iteration${iteration}.out scripts/benchmark_multi_node.sbatch ${dim} ${nodes} ${gpus} ${batch_size} ${iteration}
             sleep 1 # pause to be kind to the scheduler
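
Because the .sbatch files are now referenced through the scripts/ prefix, they resolve relative to the repository root that the cd on the first line switches into, so the submitter is meant to be run from that root. A minimal usage sketch, assuming a plain bash invocation (only the directory and file paths come from the diff; the mkdir step is an assumption about sbatch needing the -o target directory to exist):

    # Hypothetical invocation; only the paths are taken from the diff above.
    cd /proj/nsc_testing/xuan/Berzelius-nnU-Net-Benchmark
    mkdir -p sbatch_out                      # assumed: directory for the -o output files
    bash scripts/benchmark_sbatch_submit.sh  # submits the single- and multi-node benchmark jobs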