sample.json
{
"guid": "030C6A71-39DA-436E-9644-4FC25C7C5907",
"name": "TensorFlow Fine-tuning and Inference for LLMs with Bfloat16",
"categories": ["Toolkit/oneAPI AI And Analytics/Features and Functionality"],
"description": "This sample illustrates how to fine-tune and do inference of a TensorFlow LLM model using Bfloat16",
"builder": ["cli"],
"languages": [{ "python": {} }],
"os": ["linux"],
"targetDevice": ["CPU"],
"cpuInstructionSets": ["AVX512", "AMX"],
"ciTests": {
"linux": [{
"env": [
"source /intel/oneapi/intelpython/bin/activate",
"conda activate tensorflow",
"pip install uv",
"uv init",
"uv python pin $(which python)",
"uv venv --system-site-packages",
"uv add -r requirements.txt",
"uv add py-cpuinfo nbformat nbconvert",
"uv add --dev ipykernel",
"uv run ipython kernel install --user --name tensorflow",
"uv add notebook nbconvert"
],
"id": "intel llm bf16 infrence and fine-tuning",
"steps": [
"uv run python GPTJ_finetuning.py",
"uv run python ci_test.py"
]
}]
},
"expertise": "Code Optimization"
}
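
A minimal sketch of how a CI harness might consume the `ciTests` block above, assuming `sample.json` sits in the working directory. The `run_ci_tests` helper, the sequential single-shell execution, and the use of `bash -lc` are illustrative assumptions, not the actual test runner used by the repository.

```python
# Hypothetical CI driver: reads sample.json and executes each Linux CI test.
# The env setup commands and test steps are plain shell strings, so they are
# chained in one shell invocation so that activations (conda, venv) persist.
import json
import subprocess

def run_ci_tests(path="sample.json"):
    with open(path) as f:
        sample = json.load(f)

    for test in sample["ciTests"]["linux"]:
        commands = test["env"] + test["steps"]
        script = " && ".join(commands)
        print(f"Running CI test: {test['id']}")
        subprocess.run(["bash", "-lc", script], check=True)

if __name__ == "__main__":
    run_ci_tests()
```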