
Commit 44a1475

EZNAS - Reproduce results
1 parent 1e7a882 commit 44a1475

13 files changed (+1822, -0 lines)

EZNAS/README.md

+70 lines

@@ -0,0 +1,70 @@
# EZNAS: Evolving Zero-Cost Proxies For Neural Architecture Scoring

EZNAS is a genetic-programming-driven methodology for the automatic discovery of Zero-Cost Neural Architecture Scoring Metrics (ZC-NASMs). It aims to provide an interpretable, generalizable, and efficient approach to ranking neural networks without expensive training routines, significantly reducing the carbon footprint of Neural Architecture Search (NAS).

## Installation

Follow these steps to set up and run EZNAS:

### Step 1: Base Set-up

Run the provided setup_script.sh to install all necessary packages and dependencies.

```bash
bash setup_script.sh
```

This script handles:

1. Installation of the required Python packages.
2. Cloning of external GitHub repositories.
3. Setting up the datasets and additional files needed to run the project (see the sketch after this list).
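
For orientation only, the snippet below is a hypothetical sketch of the kind of commands such a script runs; the package names are inferred from the imports in dataset_utils.py (torch, torchvision, xautodl, nasbench), and the actual setup_script.sh in the repository remains authoritative.

```bash
# Hypothetical sketch -- the real steps live in setup_script.sh.
# Python packages inferred from the imports in dataset_utils.py.
pip install numpy torch torchvision xautodl

# NASBench-101 API, used via `from nasbench import api`.
git clone https://github.com/google-research/nasbench.git
pip install -e ./nasbench
```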

### Step 2: Set Environment Variable

Set the PROJ_HOME environment variable to the path of your project:

```bash
export PROJ_HOME="<Path to your project>"
```

### Step 3: Run evaluation

For SLURM-based execution, modify runjob.sh according to your server's specification.

To reproduce the results for a specific dataset, run the corresponding quoted command from the reproduce.sh file, for example:

```bash
python verify_scores.py --batch_size 16 --search_space NASBench201 --dataset cifar10 --nds_space ''
```
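
The same script covers the other search spaces through the --search_space and --nds_space flags defined in dataset_utils.py. As an illustration (the exact command strings are listed in reproduce.sh and may differ slightly):

```bash
python verify_scores.py --batch_size 16 --search_space NDS --dataset cifar10 --nds_space nds_darts
```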

### Results

| Search Space            | Kendall τ    | Spearman ρ   |
|-------------------------|--------------|--------------|
| NASBench-201 CIFAR-10   | 0.6195383854 | 0.8084988792 |
| NASBench-201 CIFAR-100  | 0.6168760649 | 0.7983379022 |
| NATSBench-SSS           | 0.7073727282 | 0.8873359833 |
| NDS DARTS               | 0.5466290384 | 0.7364709542 |
| NDS Amoeba              | 0.4130041903 | 0.5775007582 |
| NDS ENAS                | 0.5111310224 | 0.6932549307 |
| NDS PNAS                | 0.4781835008 | 0.656343803  |
| NDS NASNet              | 0.4312498051 | 0.6050820615 |

Note that the above table is for a batch size of 16. A higher batch size yields better results; for instance, for NATSBench-SSS at a batch size of 64, the Spearman ρ is 0.91.
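
Assuming the same verify_scores.py interface shown above, a higher batch size can be requested directly through the --batch_size flag, for example:

```bash
python verify_scores.py --batch_size 64 --search_space NATSBench --dataset cifar10 --nds_space ''
```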

# Citation

If you use the code or data in your research, please use the following BibTeX entry:

```
@inproceedings{
akhauri2022eznas,
title={{EZNAS}: Evolving Zero-Cost Proxies For Neural Architecture Scoring},
author={Yash Akhauri and Juan Pablo Munoz and Nilesh Jain and Ravishankar Iyer},
booktitle={Advances in Neural Information Processing Systems},
editor={Alice H. Oh and Alekh Agarwal and Danielle Belgrave and Kyunghyun Cho},
year={2022},
url={https://openreview.net/forum?id=lSqaDG4dvdt}
}
```

EZNAS/dataset_utils.py

+156 lines

@@ -0,0 +1,156 @@
import argparse
import numpy as np
import random
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import os
# Secondary Imports
from xautodl.datasets.get_dataset_with_transform import get_datasets
from xautodl.datasets.DownsampledImageNet import ImageNet16
from nasbench import api


def get_imagenet_224_train_loader(batch_size=4):
    # ImageNet (ILSVRC2012) validation split, resized and center-cropped to 224x224.
    valdir = os.path.join(os.environ['ilsvrc2012_PATH'], 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=batch_size, shuffle=False,
        num_workers=1, pin_memory=True)
    return val_loader

def get_imagenet_train_loader(batch_size=4):
    # ImageNet16-120: 16x16 downsampled ImageNet with 120 classes.
    mean = [x / 255 for x in [122.68, 116.66, 104.01]]
    std = [x / 255 for x in [63.22, 61.26, 65.09]]
    lists = [
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(16, padding=2),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    train_transform = transforms.Compose(lists)
    xshape = (1, 3, 16, 16)
    # Use the path from the 'IN16120_PATH' environment variable.
    train_data = ImageNet16(os.environ['IN16120_PATH'], True, train_transform, 120)
    assert len(train_data) == 151700
    # Wrap the dataset in a DataLoader so this helper returns a loader like the
    # other get_*_loader helpers used by get_eznas_trainloader.
    return torch.utils.data.DataLoader(
        train_data, batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True)

def get_cifar100_train_loader(batch_size=4):
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR100(
        root='./cifardata', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=1)
    return trainloader


def get_train_loader(batch_size=4):
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(
        root='./cifardata', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True)
    return trainloader


def get_networks(NUM_NETS, nasbench, ALLOWED_OPS, INPUT, OUTPUT):
    # Sample NUM_NETS random NASBench-101 cells. Each 25-bit individual encodes
    # the upper-triangular adjacency matrix (first 15 bits) and five 2-bit
    # operation choices (last 10 bits); invalid specs are resampled.
    net_list = []
    while len(net_list) < NUM_NETS:
        # print(len(net_list), end=", ")
        try:
            n = 7
            individual = [int(random.random() > 0.7) for _ in range(25)]
            test_matrix = np.asarray([[0, 1, 0, 0, 0, 0, 0],  # input layer
                                      [0, 0, 1, 0, 0, 0, 0],  # 1x1 conv
                                      [0, 0, 0, 1, 0, 0, 0],  # 3x3 conv
                                      [0, 0, 0, 0, 1, 0, 0],  # 5x5 conv (replaced by two 3x3's)
                                      [0, 0, 0, 0, 0, 1, 0],  # 5x5 conv (replaced by two 3x3's)
                                      [0, 0, 0, 0, 0, 0, 1],  # 3x3 max-pool
                                      [0, 0, 0, 0, 0, 0, 0]])
            triu = np.triu_indices(n, 2)  # upper-triangular indices (offset 2) of the n x n matrix
            test_matrix[triu] = individual[:15]  # assign the first 15 bits to the upper triangle
            indv = [str(x) for x in individual]
            ops = [INPUT] + [ALLOWED_OPS[int(''.join(indv[15 + i:17 + i]), 2)] for i in range(0, 10, 2)] + [OUTPUT]
            model_spec = api.ModelSpec(matrix=test_matrix, ops=ops)
            val_acc = nasbench.query(model_spec)['validation_accuracy']
            net_list.append((test_matrix, [str(x) for x in individual[15:]]))
        except Exception:
            # Invalid spec or out-of-range op index: resample.
            pass
    return net_list


def get_eznas_trainloader(batch_size, dataset):
    # Dispatch to the appropriate data loader for the requested dataset.
    if dataset == "cifar100":
        train_loader = get_cifar100_train_loader(batch_size=batch_size)
    elif dataset == "cifar10":
        train_loader = get_train_loader(batch_size=batch_size)
    elif dataset == "ImageNet16-120":
        train_loader = get_imagenet_train_loader(batch_size=batch_size)
    elif dataset == "ImageNet":
        train_loader = get_imagenet_224_train_loader(batch_size=batch_size)
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return train_loader


def get_args():
    parser = argparse.ArgumentParser(description='NAS Without Training')
    parser.add_argument('--data_loc', default='../cifardata/', type=str, help='dataset folder')
    parser.add_argument('--api_loc', default='nasbench_only108.tfrecord',
                        type=str, help='path to API')
    parser.add_argument('--save_loc', default='results', type=str, help='folder to save results')
    parser.add_argument('--save_string', default='naswot', type=str, help='prefix of results file')
    parser.add_argument('--score', default='hook_logdet', type=str, help='the score to evaluate')
    parser.add_argument('--nasspace', default='nasbench101', type=str, help='the NAS search space to use')
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--repeat', default=1, type=int, help='how often to repeat a single image within a batch')
    parser.add_argument('--augtype', default='none', type=str, help='which perturbations to use')
    parser.add_argument('--sigma', default=0.05, type=float, help='noise level if augtype is "gaussnoise"')
    parser.add_argument('--init', default='', type=str)
    parser.add_argument('--GPU', default='0', type=str)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--trainval', action='store_true')
    parser.add_argument('--dropout', action='store_true')
    parser.add_argument('--maxofn', default=1, type=int, help='score is the max of this many evaluations of the network')
    parser.add_argument('--n_samples', default=-1, type=int)
    parser.add_argument('--n_runs', default=500, type=int)
    parser.add_argument('--stem_out_channels', default=16, type=int, help='output channels of stem convolution (nasbench101)')
    parser.add_argument('--num_stacks', default=3, type=int, help='#stacks of modules (nasbench101)')
    parser.add_argument('--num_modules_per_stack', default=3, type=int, help='#modules per stack (nasbench101)')
    parser.add_argument('--num_labels', default=10, type=int, help='#classes (nasbench101)')

    parser.add_argument('--dataset', default='cifar10', choices=['cifar10', 'cifar100', 'ImageNet16-120', 'ImageNet'], type=str, help='Dataset to be used')
    parser.add_argument('--search_space', default='NASBench201', choices=['NASBench201', 'NATSBench', 'NDS'], type=str, help='Search space')
    # '' is accepted so that commands passing --nds_space '' (as in the README) pass the choices check.
    parser.add_argument('--nds_space', default='', choices=['', 'nds_amoeba', 'nds_darts', 'nds_enas', 'nds_pnas', 'nds_nasnet'], type=str, help='NDS space specification')
    args = parser.parse_args()
    return args
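
A minimal sketch of how these helpers fit together; the call pattern is inferred from the function signatures above, and verify_scores.py remains the authoritative entry point:

```python
# Hedged usage sketch based on the helpers defined in dataset_utils.py.
from dataset_utils import get_args, get_eznas_trainloader

args = get_args()  # e.g. --batch_size 16 --dataset cifar10 --search_space NASBench201
train_loader = get_eznas_trainloader(args.batch_size, args.dataset)

# Grab a single mini-batch, as a zero-cost proxy evaluation would.
inputs, targets = next(iter(train_loader))
print(inputs.shape, targets.shape)  # e.g. torch.Size([16, 3, 32, 32]) for CIFAR-10
```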

EZNAS/evol_config.yaml

+21 lines

@@ -0,0 +1,21 @@
NUM_MATH_OPS: 28
STATIC_ADDRS: 22
LEN_IND_ADDR: 3
NUM_DYNAMIC_ADDR_SPACES: 5
NEW_INST_MIN_LEN: 8
NEW_INST_MAX_LEN: 24
NGEN: 10
POPSIZE: 50
TOURSIZE: 4
MU: 25
lambda_: 50
CXPR: 0.4
MUTPR: 0.4
nproc: 16
NUM_NETS: 500
SUBSAMPLE_NETS: 20
NUM_SAMPLING_EVAL: 4
rangemix: True
MIN_TREE_DEPTH: 2
MAX_TREE_DEPTH: 6
data_folder: "data2"
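
A minimal sketch of how this configuration could be read in Python with PyYAML; the loader choice and the fields accessed are assumptions, and the project's own scripts may consume the file differently:

```python
import yaml  # PyYAML

# Hedged sketch: load the evolution hyperparameters from evol_config.yaml.
with open("EZNAS/evol_config.yaml") as f:
    cfg = yaml.safe_load(f)

# Access a few of the genetic-programming settings defined above.
print(cfg["NGEN"], cfg["POPSIZE"], cfg["MU"], cfg["lambda_"])
print(cfg["MIN_TREE_DEPTH"], cfg["MAX_TREE_DEPTH"])
```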
