choices.json
{
"madwizard/apriori/use-gpu": "don't use gpus",
"madwizard/apriori/arch": "x64",
"madwizard/apriori/platform": "darwin",
"madwizard/apriori/mac-installer": "Homebrew",
"madwizard/apriori/in-terminal": "HTML",
"Training####Fine Tuning": "Fine Tuning",
"GLUE": "GLUE",
"AWS####IBM": "AWS",
"S3 Bucket for Run Data.expand([ -n \"$MC_CONFIG_DIR\" ] && mc -q --config-dir ${MC_CONFIG_DIR} ls s3 | awk '{print substr($NF, 1, length($NF) - 1)}', S3 Buckets)####separator####📁 Create a new bucket": "browsey",
"Run Locally####Run on a Kubernetes Cluster": "Run on a Kubernetes Cluster",
"Choose the bucket that contains your model and glue data.madwizard/apriori/platform": "Darwin",
"expand(kubectl config get-contexts -o name, Kubernetes contexts)": "default/api-codeflare-train-v11-codeflare-openshift-com:6443/kube:admin",
"expand([ -z ${KUBE_CONTEXT} ] && exit 1 || kubectl --context ${KUBE_CONTEXT} get ns -o name | grep -Ev 'openshift|kube-' | sed 's#namespace/##', Kubernetes namespaces)####Create a namespace": "nvidia-gpu-operator",
"Number of CPUs####Number of GPUs####Minimum Workers####Maximum Workers####Worker Memory####Head Memory": "{\"Number of CPUs\":\"1\",\"Number of GPUs\":\"1\",\"Minimum Workers\":\"1\",\"Maximum Workers\":\"1\",\"Worker Memory\":\"32Gi\",\"Head Memory\":\"32Gi\"}",
"Choose the bucket that contains your model and glue data.expand([ -n \"$MC_CONFIG_DIR\" ] && mc -q --config-dir ${MC_CONFIG_DIR} ls s3 | awk '{print substr($NF, 1, length($NF) - 1)}', S3 Buckets)####separator####📁 Create a new bucket": "browsey",
"Choose your Model File.expand([ -n \"$MC_CONFIG_DIR\" ] && [ -n \"$S3_FILEPATH\" ] && [ -n \"$S3_FILEPATH${S3_BUCKET_SUFFIX}\" ] && mc -q --config-dir ${MC_CONFIG_DIR} ls \"s3/$S3_FILEPATH${S3_BUCKET_SUFFIX}\" | awk '{print $NF}', S3 Objects)": "roberta-base",
"Choose your Glue Data File.expand([ -n \"$MC_CONFIG_DIR\" ] && [ -n \"$S3_FILEPATH\" ] && [ -n \"$S3_FILEPATH${S3_BUCKET_SUFFIX}\" ] && mc -q --config-dir ${MC_CONFIG_DIR} ls \"s3/$S3_FILEPATH${S3_BUCKET_SUFFIX}\" | awk '{print $NF}', S3 Objects)": "glue_data",
"BERT": "BERT",
"Example: Using Ray Tasks to Parallelize a Function####Example: Using Ray Actors to Parallelize a Class####Example: Creating and Transforming Datasets####Example: Training Using PyTorch####Example: Hyperparameter Tuning####Example: Serving a scikit-learn gradient boosting classifier": "Example: Using Ray Tasks to Parallelize a Function",
"Number of CPUs####Number of GPUs": "{\"Number of CPUs\":4,\"Number of GPUs\":3}",
"expand(echo ${A-error} ; echo ${B-4} ; echo ${C-5})": "3",
"XXXXXX.11111####222222": "11111",
"YYYYYY.11111####222222": "222222",
"My Cluster is Running Locally####My Cluster is Runing on Kubernetes": "My Cluster is Runing on Kubernetes",
"expand([ -n \"$KUI_RAY_ADDRESS\" ] && ray job list --address $KUI_RAY_ADDRESS | tail +2 | awk '{print $1}' | sed \"s/[:{' ]//g\", Ray Runs)": "07a2647f-3656-4e3e-836c-95a2fa841af6",
"expand([ -n \"$KUI_RAY_ADDRESS\" ] && curl $KUI_RAY_ADDRESS/api/jobs/ | jq -r 'keys | .[]', Ray Runs)": "d5a0d68f-a675-49ca-bff7-4ae762e6b146",
"My Cluster is Running Locally####My Cluster is Running on Kubernetes": "My Cluster is Running on Kubernetes",
"expand([ -n \"$KUI_RAY_ADDRESS\" ] && curl $KUI_RAY_ADDRESS/api/jobs/ | jq -r 'to_entries | sort_by(.value.start_time) | reverse | .[] | \"\\(.key) \\(.value.status) \\(.value.entrypoint)\"' | sed -E 's/python3 ([^[:space:]])+ //g' | awk '{a=$1;b=$2; $1=\"\";$2=\"\";print \"\\033;1m\" a, \"\\033[0;33m\" b \"\\033[0;2m\" $0 \"\\033[0m\"}', Ray Runs)": "\u001b;1ma88d4632-ab5c-4350-a770-d39a955c42c8 \u001b[0;33mRUNNING\u001b[0;2m -v --datapath /tmp/ --modelpath /tmp/ --logpath /tmp/ --tblogpath s3://browsey/codeflare/a88d4632-ab5c-4350-a770-d39a955c42c8/tensorboard/ --num_workers 1\u001b[0m",
"expand([ -n \"$KUI_RAY_ADDRESS\" ] && curl $KUI_RAY_ADDRESS/api/jobs/ | jq -r 'to_entries | sort_by(.value.start_time) | reverse | .[] | \"\\(.key) \\(.value.status) \\(.value.entrypoint)\"' | sed -E 's/python3 ([^[:space:]])+ //g' | awk '{a=$1;b=$2; $1=\"\";$2=\"\";print a, \"\\033[33m\" b \"\\033[0;2m\" $0 \"\\033[0m\"}', Ray Runs)": "a88d4632-ab5c-4350-a770-d39a955c42c8 \u001b[33mRUNNING\u001b[0;2m -v --datapath /tmp/ --modelpath /tmp/ --logpath /tmp/ --tblogpath s3://browsey/codeflare/a88d4632-ab5c-4350-a770-d39a955c42c8/tensorboard/ --num_workers 1\u001b[0m",
"expand([ -n \"$KUI_RAY_ADDRESS\" ] && curl $KUI_RAY_ADDRESS/api/jobs/ | jq -r 'to_entries | sort_by(.value.start_time) | reverse | .[] | \"\\(.key) \\(.value.status) \\(.value.start_time / 1000 | strflocaltime(\"%Y-%m-%dT%H:%M:%S\")) \\(.value.entrypoint)\"' | sed -E 's/python3 ([^[:space:]])+ //g' | awk '{a=$1;b=$2;c=$3; $1=\"\";$2=\"\";$3=\"\"; print a, \"\\033[0;36m\" c, \"\\033[0;1;33m\" b \"\\033[0;2m\" $0 \"\\033[0m\"}', Ray Runs)": "505b98b6-a258-4afd-bdc6-ddf84d3f2862 \u001b[0;36m2022-07-07T13:27:54 \u001b[0;1;33mRUNNING\u001b[0;2m -v --datapath /tmp/ --modelpath /tmp/ --logpath /tmp/ --tblogpath s3://browsey/codeflare/505b98b6-a258-4afd-bdc6-ddf84d3f2862/tensorboard/ --num_workers 1\u001b[0m"
}
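
The keys in this file follow three patterns: plain a-priori settings (e.g. `madwizard/apriori/platform`), `####`-joined option lists where the value records the option that was selected, and keys containing `expand(<shell command>, <group name>)`, whose options are produced at run time by executing the embedded shell command. A minimal sketch of how a consumer might classify and look up these recorded choices is below; the `Choices` type and `classifyKey` helper are illustrative assumptions, not madwizard's actual API.

```typescript
// Sketch only: classify keys in a recorded choices.json and look up an answer.
// The type and helper names here are hypothetical, not madwizard's real API.
import { readFileSync } from "fs";

type Choices = Record<string, string>;

// Load the recorded choice map from disk.
const choices: Choices = JSON.parse(readFileSync("choices.json", "utf8"));

// Distinguish the three key shapes seen in this file.
function classifyKey(key: string): "apriori" | "expand" | "multiple-choice" | "other" {
  if (key.startsWith("madwizard/apriori/")) return "apriori"; // fixed environment facts
  if (key.includes("expand(")) return "expand";               // options generated by a shell command
  if (key.includes("####")) return "multiple-choice";         // static "####"-separated option list
  return "other";
}

// Example lookup: which training mode was recorded?
const key = "Training####Fine Tuning";
console.log(classifyKey(key), "->", choices[key]); // multiple-choice -> Fine Tuning
```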