Skip to content

Commit 9aa4bf1

Browse files
authored
Merge branch 'master' into 2023.1.1_AIKit
2 parents f1f2efb + db68fa5 commit 9aa4bf1

File tree

923 files changed

+123415
-10576
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

923 files changed

+123415
-10576
lines changed
Original file line numberDiff line numberDiff line change
@@ -1,31 +1,22 @@
11
{
2-
"guid": "111213A0-C930-45B4-820F-02472BABBF34",
3-
"name": "Intel® Tensorflow Getting Started",
4-
"categories": ["Toolkit/oneAPI AI And Analytics/AI Getting Started Samples"],
5-
"description": "This sample illustrates how to train a TensorFlow model and run inference with oneMKL and oneDNN.",
6-
"builder": ["cli"],
7-
"languages": [{"python":{}}],
8-
"os":["linux","windows"],
9-
"targetDevice": ["CPU"],
10-
"ciTests": {
11-
"linux": [
12-
{
13-
"id": "tensorflow hello world",
14-
"steps": [
15-
"source activate tensorflow",
16-
"python TensorFlow_HelloWorld.py"
17-
]
18-
}
19-
],
20-
"windows": [
21-
{
22-
"id": "tensorflow hello world",
23-
"steps": [
24-
"source activate tensorflow",
25-
"python TensorFlow_HelloWorld.py"
26-
]
27-
}
28-
]
29-
},
30-
"expertise": "Getting Started"
2+
"guid": "111213A0-C930-45B4-820F-02472BABBF34",
3+
"name": "Intel® Tensorflow Getting Started",
4+
"categories": ["Toolkit/oneAPI AI And Analytics/AI Getting Started Samples"],
5+
"description": "This sample illustrates how to train a TensorFlow model and run inference with oneMKL and oneDNN.",
6+
"builder": ["cli"],
7+
"languages": [{
8+
"python": {}
9+
}],
10+
"os": ["linux", "windows"],
11+
"targetDevice": ["CPU"],
12+
"ciTests": {
13+
"linux": [{
14+
"id": "tensorflow hello world",
15+
"steps": [
16+
"source activate tensorflow",
17+
"python TensorFlow_HelloWorld.py"
18+
]
19+
}]
20+
},
21+
"expertise": "Getting Started"
3122
}

AI-and-Analytics/Jupyter/Predictive_Modeling_Training/AI_Kit_XGBoost_Predictive_Modeling.complete/00_Local_Setup/Local_Setup.ipynb

+57-315
Large diffs are not rendered by default.

AI-and-Analytics/Jupyter/Predictive_Modeling_Training/AI_Kit_XGBoost_Predictive_Modeling.complete/03_XGBoost/XGBoost.ipynb

+36-34
Original file line numberDiff line numberDiff line change
@@ -33,16 +33,6 @@
3333
"[Images Source](https://commons.wikimedia.org/wiki/File:3D_view_of_an_event_recorded_with_the_CMS_detector_in_2012_at_a_proton-proton_centre_of_mass_energy_of_8_TeV.png)"
3434
]
3535
},
36-
{
37-
"cell_type": "markdown",
38-
"id": "2f66db55-d1f8-407e-b7bb-b353ce78fa56",
39-
"metadata": {},
40-
"source": [
41-
"# Example environment creation:\n",
42-
"\n",
43-
"This environment is the latest [Intel® oneAPI AI Analytics Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html) base environment, which includes data analytics and machine learning workflows and Intel optimizations for XGboost. See [here](https://software.intel.com/content/www/us/en/develop/articles/installing-ai-kit-with-conda.html) for more installation information."
44-
]
45-
},
4636
{
4737
"cell_type": "markdown",
4838
"id": "57efb70b-7073-4baa-b544-253551c7bb58",
@@ -61,18 +51,21 @@
6151
"outputs": [],
6252
"source": [
6353
"import sklearn\n",
64-
"#from sklearnex import patch_sklearn\n",
65-
"#patch_sklearn()\n",
54+
"from sklearnex import patch_sklearn\n",
55+
"patch_sklearn()\n",
6656
"#unpatch_sklearn()\n",
6757
"from sklearn.model_selection import train_test_split\n",
6858
"from sklearn.metrics import mean_squared_error\n",
59+
"import warnings\n",
60+
"warnings.simplefilter(action='ignore', category=FutureWarning)\n",
6961
"import pandas as pd\n",
62+
"from pandas import MultiIndex, Int16Dtype # if you don't import in this order you will get a pandas.Int64Index FutureWarning error.\n",
7063
"import xgboost as xgb\n",
7164
"import numpy as np\n",
72-
"import warnings\n",
7365
"from time import perf_counter\n",
7466
"print(\"XGB Version : \", xgb.__version__)\n",
75-
"print(\"Scikit-Learn Version : \", sklearn.__version__)"
67+
"print(\"Scikit-Learn Version : \", sklearn.__version__)\n",
68+
"print(\"Pandas Version : \", pd.__version__)"
7669
]
7770
},
7871
{
@@ -169,11 +162,21 @@
169162
"source": [
170163
"filename = 'HIGGS.csv'\n",
171164
"names = ['class_label', 'lepton pT', 'lepton eta', 'lepton phi', 'missing energy magnitude', 'missing energy phi', 'jet 1 pt', 'jet 1 eta', 'jet 1 phi', 'jet 1 b-tag', 'jet 2 pt', 'jet 2 eta', 'jet 2 phi', 'jet 2 b-tag', 'jet 3 pt', 'jet 3 eta', 'jet 3 phi', 'jet 3 b-tag', 'jet 4 pt', 'jet 4 eta', 'jet 4 phi', 'jet 4 b-tag', 'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb']\n",
172-
"data = pd.read_csv(filename, names=names, delimiter=\",\", nrows=100000)\n",
173-
"#data = pd.read_csv(filename, names=names, delimiter=\",\", nrows=1100000)\n",
165+
"#data = pd.read_csv(filename, names=names, delimiter=\",\", nrows=100000)\n",
166+
"data = pd.read_csv(filename, names=names, delimiter=\",\", nrows=1100000)\n",
174167
"print(data.shape)"
175168
]
176169
},
170+
{
171+
"cell_type": "code",
172+
"execution_count": null,
173+
"id": "505ce472-a525-42c3-b995-ddc2c3aa2b43",
174+
"metadata": {},
175+
"outputs": [],
176+
"source": [
177+
"%time p_df = pd.read_csv(\"HIGGS.csv\")"
178+
]
179+
},
177180
{
178181
"cell_type": "markdown",
179182
"id": "0d7249bc-4e6b-4a28-8894-00b14c61d4f2",
@@ -233,7 +236,7 @@
233236
"id": "d339991a-3485-49ef-8151-c5c8020fc586",
234237
"metadata": {},
235238
"source": [
236-
"* In this scenario loading 100000 rows the balance isn't too skewed. "
239+
"* In this scenario loading 100000 rows the balance isn't too skewed, the next cell is optional."
237240
]
238241
},
239242
{
@@ -293,7 +296,7 @@
293296
"outputs": [],
294297
"source": [
295298
"# This is the y target vector -- the ones we want to predict.\n",
296-
"# print(data.iloc[:,0])"
299+
"print(data.iloc[:,0])"
297300
]
298301
},
299302
{
@@ -383,7 +386,7 @@
383386
"}\n",
384387
"\n",
385388
"# Train the model\n",
386-
"warnings.filterwarnings(\"ignore\", category=UserWarning)\n",
389+
"warnings.simplefilter(action='ignore', category=UserWarning)\n",
387390
"t1_start = perf_counter() # Time fit function\n",
388391
"model_xgb= xgb.XGBClassifier(**xgb_params)\n",
389392
"model_xgb.fit(X_train,y_train)\n",
@@ -515,17 +518,17 @@
515518
" 'disable_default_eval_metric': 'true',\n",
516519
" 'tree_method': 'hist', \n",
517520
"}\n",
521+
"# Necessary for now to suppress multi-threaded Future errors with respect to pandas and XGBoost\n",
522+
"import os\n",
523+
"os.environ['PYTHONWARNINGS']='ignore::FutureWarning'\n",
518524
"\n",
519525
"# Train the model\n",
520-
"warnings.filterwarnings(\"ignore\", category=UserWarning)\n",
521526
"model_xgb= xgb.XGBClassifier(**xgb_params2, use_label_encoder=False)\n",
522527
"\n",
523-
"\n",
524528
"# Setup grid search n_jobs=-1 uses all cores, reducing cv from 5 to 3 for speed, scoring is done using area under curve.\n",
525529
"grid_cv = GridSearchCV(model_xgb, param_grid, n_jobs=-1, cv=3, scoring=\"roc_auc\")\n",
526530
"\n",
527531
"# This fit function takes a while--hours, make sure you are ready.\n",
528-
"\n",
529532
"_ = grid_cv.fit(X_train, y_train)"
530533
]
531534
},
@@ -556,7 +559,7 @@
556559
"source": [
557560
"### Results\n",
558561
"\n",
559-
" grid_cv.best_score_ = 0.8002252116945674 grid cv.best_params\n",
562+
" grid_cv.best_score_ = 0.80 grid cv.best_params\n",
560563
"\n",
561564
" {'colsample_bytree': 1, 'gamma': 0, 'learning_rate': 0.1, 'max_depth': 8, 'reg_lambda': 10, 'scale_pos_weight': 1, 'subsample': 1}\n",
562565
"\n",
@@ -609,7 +612,7 @@
609612
"\n",
610613
" n_estimators:, default=100\n",
611614
"\n",
612-
"The number of trees in the forest. A good way to see how many trees might be useful is to plot the learning curve. since this is a classification problem we will use log loss as our measurement where lower values are better. \n",
615+
"n_estimators represents the number of trees in the forest. A good way to see how many trees might be useful is to plot the learning curve. Since this is a classification problem we will use log loss as our measurement where lower values are better. \n",
613616
"\n",
614617
"Our original fit function needs to be modified to include eval_metric with the type set to logloss. In addition we need to define the evaluation data set so that the results are evaluated after each round in order to plot them.\n"
615618
]
@@ -632,7 +635,7 @@
632635
"metadata": {},
633636
"outputs": [],
634637
"source": [
635-
"# fit the model\n",
638+
"# Fit the model\n",
636639
"model_xgb.fit(X_train, y_train, eval_metric='logloss', eval_set=evalset)"
637640
]
638641
},
@@ -716,16 +719,15 @@
716719
" 'reg_lambda': 10,\n",
717720
" 'scale_pos_weight': 1,\n",
718721
" 'tree_method': 'hist', \n",
719-
" 'n_estimators': 250\n",
722+
" 'n_estimators': 1000,\n",
720723
"}\n",
721724
"\n",
722725
"# Train the model\n",
723-
"warnings.filterwarnings(\"ignore\", category=UserWarning)\n",
724-
"t = time.process_time() # Time fit function\n",
726+
"t1_start = perf_counter() # Time fit function\n",
725727
"model_xgb= xgb.XGBClassifier(**xgb_params)\n",
726728
"model_xgb.fit(X_train,y_train, eval_metric='logloss', eval_set=evalset, verbose=True)\n",
727-
"elapsed_time = time.process_time() - t\n",
728-
"print (\"It took\",elapsed_time,\" to fit.\")"
729+
"t1_stop = perf_counter()\n",
730+
"print (\"It took\", t1_stop-t1_start,\"seconds to fit.\")"
729731
]
730732
},
731733
{
@@ -780,7 +782,7 @@
780782
"source": [
781783
"## So how many trees do we need really?\n",
782784
"\n",
783-
"* It takes awhile to watch 250 trees get evaluated, a great performance improvement is to use the XGBoost early stopping capbility.\n",
785+
"* It takes awhile to watch 1000 trees get evaluated, a great performance improvement is to use the XGBoost early stopping capability.\n",
784786
"\n",
785787
"* Modify the fit function to stop the training after 10 to 15 rounds of no improvement. \n",
786788
" \n",
@@ -823,9 +825,9 @@
823825
],
824826
"metadata": {
825827
"kernelspec": {
826-
"display_name": "Python 3 (ipykernel)",
828+
"display_name": "Python 3 (Intel® oneAPI 2023.0)",
827829
"language": "python",
828-
"name": "python3"
830+
"name": "c009-intel_distribution_of_python_3_oneapi-beta05-python"
829831
},
830832
"language_info": {
831833
"codemirror_mode": {
@@ -837,7 +839,7 @@
837839
"name": "python",
838840
"nbconvert_exporter": "python",
839841
"pygments_lexer": "ipython3",
840-
"version": "3.9.7"
842+
"version": "3.9.15"
841843
},
842844
"nbTranslate": {
843845
"displayLangs": [
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
Copyright Intel Corporation
2+
3+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4+
5+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6+
7+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

0 commit comments

Comments
 (0)