Add global docker/process config and fix k8s_gpu profile
- Enable docker globally (required by WES)
- Set default container, memory (32 GB), and CPUs (4) at process level
- Add NVIDIA_VISIBLE_DEVICES env var for GPU visibility in the k8s_gpu profile

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,15 +1,18 @@
|
||||
aws {
|
||||
client {
|
||||
endpoint = 'https://s3.cluster.omic.ai'
|
||||
s3PathStyleAccess = true
|
||||
docker {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
// Defaults applied to every process unless a profile or the pipeline
// overrides them (commit message says docker/these defaults are required
// by WES — TODO confirm against the WES deployment).
process {
    // Default container image when a process does not declare its own.
    container = 'harbor.cluster.omic.ai/omic/chai1:latest'
    memory = '32 GB'  // default memory request per task
    cpus = 4          // default CPU request per task
}
|
||||
|
||||
profiles {
|
||||
// Local-execution profile: runs tasks in Docker on the submitting host.
standard {
    docker {
        enabled = true
        temp = 'auto'              // let Nextflow manage the container temp dir
        runOptions = '--gpus all'  // expose all host GPUs to the container
    }
}
|
||||
|
||||
@@ -17,9 +20,6 @@ profiles {
|
||||
// Run tasks as Kubernetes pods (k8s profile).
process {
    executor = 'k8s'
}
|
||||
// Container execution must stay enabled for pod-based tasks.
docker {
    enabled = true
}
|
||||
k8s {
|
||||
storageClaimName = 'eureka-pvc'
|
||||
storageMountPath = '/omic/eureka'
|
||||
@@ -31,12 +31,9 @@ profiles {
|
||||
k8s_gpu {
|
||||
// GPU-enabled Kubernetes execution (k8s_gpu profile).
process {
    executor = 'k8s'
    // Schedule only onto GPU nodes and export NVIDIA_VISIBLE_DEVICES=all so
    // the NVIDIA runtime exposes the devices inside the container.
    // NOTE: the previous duplicate `pod` assignment (nodeSelector only) was
    // dead code — Groovy config keeps only the last assignment — so it is
    // removed and the merged form kept.
    pod = [[nodeSelector: 'nvidia.com/gpu.present=true'], [env: 'NVIDIA_VISIBLE_DEVICES', value: 'all']]
    // Request one NVIDIA GPU per task.
    accelerator = [request: 1, type: 'nvidia.com/gpu']
}
|
||||
// Container execution must stay enabled for pod-based tasks.
docker {
    enabled = true
}
|
||||
k8s {
|
||||
storageClaimName = 'eureka-pvc'
|
||||
storageMountPath = '/omic/eureka'
|
||||
|
||||
Reference in New Issue
Block a user