diff --git a/blueoil/cmd/tune_ray.py b/blueoil/cmd/tune_ray.py index 2306a4b41..9266af5f7 100644 --- a/blueoil/cmd/tune_ray.py +++ b/blueoil/cmd/tune_ray.py @@ -20,7 +20,7 @@ import click import six import tensorflow as tf -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import ray from blueoil.datasets.base import ObjectDetectionBase @@ -265,19 +265,19 @@ def run(config_file, tunable_id, local_dir): register_trainable(tunable_id, TrainTunable) lm_config = config_util.load(config_file) - def easydict_to_dict(config): - if isinstance(config, EasyDict): + def smartdict_to_dict(config): + if isinstance(config, SmartDict): config = dict(config) for key, value in config.items(): - if isinstance(value, EasyDict): + if isinstance(value, SmartDict): value = dict(value) - easydict_to_dict(value) + smartdict_to_dict(value) config[key] = value return config - tune_space = easydict_to_dict(lm_config['TUNE_SPACE']) - tune_spec = easydict_to_dict(lm_config['TUNE_SPEC']) + tune_space = smartdict_to_dict(lm_config['TUNE_SPACE']) + tune_spec = smartdict_to_dict(lm_config['TUNE_SPEC']) tune_spec['run'] = tunable_id tune_spec['config'] = {'lm_config': os.path.join(os.getcwd(), config_file)} tune_spec['local_dir'] = local_dir diff --git a/blueoil/configs/convert_weight_from_darknet/darknet19.py b/blueoil/configs/convert_weight_from_darknet/darknet19.py index 19cb20def..0fd69867f 100644 --- a/blueoil/configs/convert_weight_from_darknet/darknet19.py +++ b/blueoil/configs/convert_weight_from_darknet/darknet19.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from blueoil.common import Tasks from blueoil.networks.classification.darknet import Darknet @@ -49,13 +49,13 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.IMAGE_SIZE = IMAGE_SIZE NETWORK.BATCH_SIZE = BATCH_SIZE NETWORK.DATA_FORMAT = DATA_FORMAT # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/convert_weight_from_darknet/yolo_v2.py b/blueoil/configs/convert_weight_from_darknet/yolo_v2.py index 1b33ca3fa..ffb22bf5c 100644 --- a/blueoil/configs/convert_weight_from_darknet/yolo_v2.py +++ b/blueoil/configs/convert_weight_from_darknet/yolo_v2.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from blueoil.common import Tasks from blueoil.networks.object_detection.yolo_v2 import YoloV2 @@ -69,7 +69,7 @@ NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.IMAGE_SIZE = IMAGE_SIZE NETWORK.BATCH_SIZE = BATCH_SIZE NETWORK.DATA_FORMAT = DATA_FORMAT @@ -79,7 +79,7 @@ NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/darknet_cifar10.py b/blueoil/configs/core/classification/darknet_cifar10.py index 01f604932..17862e3d4 100644 --- a/blueoil/configs/core/classification/darknet_cifar10.py +++ b/blueoil/configs/core/classification/darknet_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -83,7 +83,7 @@ NETWORK.WEIGHT_DECAY_RATE = 0.0005 # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/darknet_quantize_cifar10.py b/blueoil/configs/core/classification/darknet_quantize_cifar10.py index 6cea2dcc0..28818b485 100644 --- a/blueoil/configs/core/classification/darknet_quantize_cifar10.py +++ b/blueoil/configs/core/classification/darknet_quantize_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -71,7 +71,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -95,7 +95,7 @@ NETWORK.QUANTIZE_LAST_CONVOLUTION = False # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/darknet_quantize_ilsvrc_2012.py b/blueoil/configs/core/classification/darknet_quantize_ilsvrc_2012.py index 59b747b7e..15f94467e 100644 --- a/blueoil/configs/core/classification/darknet_quantize_ilsvrc_2012.py +++ b/blueoil/configs/core/classification/darknet_quantize_ilsvrc_2012.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -75,7 +75,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.polynomial_decay @@ -96,7 +96,7 @@ NETWORK.QUANTIZE_LAST_CONVOLUTION = False # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lm_resnet_quantize_cifar10.py b/blueoil/configs/core/classification/lm_resnet_quantize_cifar10.py index f9a1387d1..a9a7e5aed 100644 --- a/blueoil/configs/core/classification/lm_resnet_quantize_cifar10.py +++ b/blueoil/configs/core/classification/lm_resnet_quantize_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -75,7 +75,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -96,7 +96,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_cifar10.py b/blueoil/configs/core/classification/lmnet_cifar10.py index f84a16e38..c11ce2087 100644 --- a/blueoil/configs/core/classification/lmnet_cifar10.py +++ b/blueoil/configs/core/classification/lmnet_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -83,7 +83,7 @@ NETWORK.WEIGHT_DECAY_RATE = 0.0005 # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_cifar100.py b/blueoil/configs/core/classification/lmnet_cifar100.py index 0c656a29d..8f611a9d7 100644 --- a/blueoil/configs/core/classification/lmnet_cifar100.py +++ b/blueoil/configs/core/classification/lmnet_cifar100.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -83,7 +83,7 @@ NETWORK.WEIGHT_DECAY_RATE = 0.0005 # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_openimagesv4.py b/blueoil/configs/core/classification/lmnet_openimagesv4.py index e39a41df2..700e19039 100644 --- a/blueoil/configs/core/classification/lmnet_openimagesv4.py +++ b/blueoil/configs/core/classification/lmnet_openimagesv4.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -67,7 +67,7 @@ # SUMMARISE_STEPS = 2 # IS_DEBUG = True -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -81,7 +81,7 @@ NETWORK.WEIGHT_DECAY_RATE = 0.0005 # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.PRE_PROCESSOR = PRE_PROCESSOR DATASET.AUGMENTOR = Sequence([ diff --git a/blueoil/configs/core/classification/lmnet_quantize_cifar10.py b/blueoil/configs/core/classification/lmnet_quantize_cifar10.py index fc7b88b93..84a5d1a77 100644 --- a/blueoil/configs/core/classification/lmnet_quantize_cifar10.py +++ b/blueoil/configs/core/classification/lmnet_quantize_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -72,7 +72,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -94,7 +94,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_quantize_cifar100.py b/blueoil/configs/core/classification/lmnet_quantize_cifar100.py index 7c612232f..fe4d54d62 100644 --- a/blueoil/configs/core/classification/lmnet_quantize_cifar100.py +++ b/blueoil/configs/core/classification/lmnet_quantize_cifar100.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -71,7 +71,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -93,7 +93,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_v1_cifar10.py b/blueoil/configs/core/classification/lmnet_v1_cifar10.py index 3be363910..044296715 100644 --- a/blueoil/configs/core/classification/lmnet_v1_cifar10.py +++ b/blueoil/configs/core/classification/lmnet_v1_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -67,7 +67,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -82,7 +82,7 @@ NETWORK.WEIGHT_DECAY_RATE = 0.0005 # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10.py b/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10.py index 3d7942641..6768744e1 100644 --- a/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10.py +++ b/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -71,7 +71,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -93,7 +93,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10_tune.py b/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10_tune.py index cd84e9aa3..7c5c6cc43 100644 --- a/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10_tune.py +++ b/blueoil/configs/core/classification/lmnet_v1_quantize_cifar10_tune.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -104,7 +104,7 @@ 'weight_decay_rate': 0.0001, } -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = None NETWORK.OPTIMIZER_KWARGS = {} NETWORK.LEARNING_RATE_FUNC = None @@ -122,7 +122,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/lmnet_v1_quantize_fer_2013.py b/blueoil/configs/core/classification/lmnet_v1_quantize_fer_2013.py index a3d4d9d3d..e7e9a917f 100644 --- a/blueoil/configs/core/classification/lmnet_v1_quantize_fer_2013.py +++ b/blueoil/configs/core/classification/lmnet_v1_quantize_fer_2013.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -61,7 +61,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.cosine_decay @@ -81,7 +81,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/mobilenet_v2_cifar10.py b/blueoil/configs/core/classification/mobilenet_v2_cifar10.py index 30f6d8306..dcb5693cf 100644 --- a/blueoil/configs/core/classification/mobilenet_v2_cifar10.py +++ b/blueoil/configs/core/classification/mobilenet_v2_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -83,7 +83,7 @@ NETWORK.WEIGHT_DECAY_RATE = 0.00004 # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/classification/resnet_cifar10.py b/blueoil/configs/core/classification/resnet_cifar10.py index 77bb25da8..dfb32f688 100644 --- a/blueoil/configs/core/classification/resnet_cifar10.py +++ b/blueoil/configs/core/classification/resnet_cifar10.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -69,7 +69,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -82,7 +82,7 @@ NETWORK.DATA_FORMAT = DATA_FORMAT # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/keypoint_detection/lm_single_pose_v1_quantize_mscoco.py b/blueoil/configs/core/keypoint_detection/lm_single_pose_v1_quantize_mscoco.py index e6e762cbe..7ff58d578 100644 --- a/blueoil/configs/core/keypoint_detection/lm_single_pose_v1_quantize_mscoco.py +++ b/blueoil/configs/core/keypoint_detection/lm_single_pose_v1_quantize_mscoco.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -84,7 +84,7 @@ step_per_epoch = 149813 // BATCH_SIZE -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -104,7 +104,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.IMAGE_SIZE = IMAGE_SIZE DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT diff --git a/blueoil/configs/core/object_detection/lm_fyolo_bdd100k.py b/blueoil/configs/core/object_detection/lm_fyolo_bdd100k.py index 34098551d..835d878bc 100644 --- a/blueoil/configs/core/object_detection/lm_fyolo_bdd100k.py +++ b/blueoil/configs/core/object_detection/lm_fyolo_bdd100k.py @@ -14,7 +14,7 @@ # limitations under the License. 
# ============================================================================= import tensorflow as tf -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from blueoil.common import Tasks from blueoil.data_augmentor import (Brightness, Color, Contrast, FlipLeftRight, @@ -79,7 +79,7 @@ max_output_size=nms_max_output_size, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -112,7 +112,7 @@ NETWORK.LOSS_WARMUP_STEPS = int(8000 / BATCH_SIZE) # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/object_detection/lm_fyolo_quantize_pascalvoc_2007_2012.py b/blueoil/configs/core/object_detection/lm_fyolo_quantize_pascalvoc_2007_2012.py index a52a51d82..bbd64094c 100644 --- a/blueoil/configs/core/object_detection/lm_fyolo_quantize_pascalvoc_2007_2012.py +++ b/blueoil/configs/core/object_detection/lm_fyolo_quantize_pascalvoc_2007_2012.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -98,7 +98,7 @@ NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -137,7 +137,7 @@ NETWORK.QUANTIZE_LAST_CONVOLUTION = False # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/object_detection/yolo_v2_quantize_pascalvoc_2007_2012.py b/blueoil/configs/core/object_detection/yolo_v2_quantize_pascalvoc_2007_2012.py index 56ffd734f..95adb4ac9 100644 --- a/blueoil/configs/core/object_detection/yolo_v2_quantize_pascalvoc_2007_2012.py +++ b/blueoil/configs/core/object_detection/yolo_v2_quantize_pascalvoc_2007_2012.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -186,7 +186,7 @@ NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -225,7 +225,7 @@ NETWORK.QUANTIZE_LAST_CONVOLUTION = False # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/lm_bisenet_quantize_camvid.py b/blueoil/configs/core/segmentation/lm_bisenet_quantize_camvid.py index b9c801e52..1f679c303 100644 --- a/blueoil/configs/core/segmentation/lm_bisenet_quantize_camvid.py +++ b/blueoil/configs/core/segmentation/lm_bisenet_quantize_camvid.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -79,7 +79,7 @@ ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -98,7 +98,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/lm_segnet_v0_quantize_camvid.py b/blueoil/configs/core/segmentation/lm_segnet_v0_quantize_camvid.py index ad1acc028..31ada0673 100644 --- a/blueoil/configs/core/segmentation/lm_segnet_v0_quantize_camvid.py +++ b/blueoil/configs/core/segmentation/lm_segnet_v0_quantize_camvid.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -67,7 +67,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -81,7 +81,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/lm_segnet_v1_quantize_camvid.py b/blueoil/configs/core/segmentation/lm_segnet_v1_quantize_camvid.py index 739cad815..348289776 100644 --- a/blueoil/configs/core/segmentation/lm_segnet_v1_quantize_camvid.py +++ b/blueoil/configs/core/segmentation/lm_segnet_v1_quantize_camvid.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -71,7 +71,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -85,7 +85,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/segnet_camvid.py b/blueoil/configs/core/segmentation/segnet_camvid.py index a422bb893..f11f9f316 100644 --- a/blueoil/configs/core/segmentation/segnet_camvid.py +++ b/blueoil/configs/core/segmentation/segnet_camvid.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -65,14 +65,14 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE NETWORK.BATCH_SIZE = BATCH_SIZE NETWORK.DATA_FORMAT = DATA_FORMAT -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/segnet_quantize_bdd100k.py b/blueoil/configs/core/segmentation/segnet_quantize_bdd100k.py index c4e3da3d0..f878d0e9c 100644 --- a/blueoil/configs/core/segmentation/segnet_quantize_bdd100k.py +++ b/blueoil/configs/core/segmentation/segnet_quantize_bdd100k.py @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================= import tensorflow as tf -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from blueoil.common import Tasks from blueoil.data_augmentor import (Brightness, Color, Contrast, FlipLeftRight, @@ -65,7 +65,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -79,7 +79,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/segnet_quantize_camvid.py b/blueoil/configs/core/segmentation/segnet_quantize_camvid.py index 1441965a5..419bd537b 100644 --- a/blueoil/configs/core/segmentation/segnet_quantize_camvid.py +++ b/blueoil/configs/core/segmentation/segnet_quantize_camvid.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -69,7 +69,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -83,7 +83,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/segnet_quantize_camvid_tune.py b/blueoil/configs/core/segmentation/segnet_quantize_camvid_tune.py index 8d5652472..de50805e7 100644 --- a/blueoil/configs/core/segmentation/segnet_quantize_camvid_tune.py +++ b/blueoil/configs/core/segmentation/segnet_quantize_camvid_tune.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -101,7 +101,7 @@ ), } -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = None NETWORK.OPTIMIZER_KWARGS = {} NETWORK.LEARNING_RATE_FUNC = None @@ -117,7 +117,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/core/segmentation/segnet_quantize_cityscapes.py b/blueoil/configs/core/segmentation/segnet_quantize_cityscapes.py index 43f78a377..88d1108e7 100644 --- a/blueoil/configs/core/segmentation/segnet_quantize_cityscapes.py +++ b/blueoil/configs/core/segmentation/segnet_quantize_cityscapes.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -71,7 +71,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -85,7 +85,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/example/classification.py b/blueoil/configs/example/classification.py index d5eee32eb..9b262437d 100644 --- a/blueoil/configs/example/classification.py +++ b/blueoil/configs/example/classification.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -79,7 +79,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -95,7 +95,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/configs/example/object_detection.py b/blueoil/configs/example/object_detection.py index c8a85b451..c0347d5dc 100644 --- a/blueoil/configs/example/object_detection.py +++ b/blueoil/configs/example/object_detection.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -87,7 +87,7 @@ NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9} NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant @@ -123,7 +123,7 @@ NETWORK.QUANTIZE_LAST_CONVOLUTION = False # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/templates/lmnet/classification.tpl.py b/blueoil/templates/lmnet/classification.tpl.py index 184692463..7b78334ae 100644 --- a/blueoil/templates/lmnet/classification.tpl.py +++ b/blueoil/templates/lmnet/classification.tpl.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -65,7 +65,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = {{optimizer_class}} NETWORK.OPTIMIZER_KWARGS = {{optimizer_kwargs}} @@ -87,7 +87,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/templates/lmnet/keypoint_detection.tpl.py b/blueoil/templates/lmnet/keypoint_detection.tpl.py index f2373ce62..d3e675f55 100644 --- a/blueoil/templates/lmnet/keypoint_detection.tpl.py +++ b/blueoil/templates/lmnet/keypoint_detection.tpl.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -79,7 +79,7 @@ step_per_epoch = int(149813 / BATCH_SIZE) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = {{optimizer_class}} NETWORK.OPTIMIZER_KWARGS = {{optimizer_kwargs}} NETWORK.LEARNING_RATE_FUNC = {{learning_rate_func}} @@ -97,7 +97,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.IMAGE_SIZE = IMAGE_SIZE DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT diff --git a/blueoil/templates/lmnet/object_detection.tpl.py b/blueoil/templates/lmnet/object_detection.tpl.py index d92486fca..9ab95fd42 100644 --- a/blueoil/templates/lmnet/object_detection.tpl.py +++ b/blueoil/templates/lmnet/object_detection.tpl.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -85,7 +85,7 @@ NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = {{optimizer_class}} @@ -120,7 +120,7 @@ NETWORK.QUANTIZE_LAST_CONVOLUTION = False # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/templates/lmnet/semantic_segmentation.tpl.py b/blueoil/templates/lmnet/semantic_segmentation.tpl.py index 90cda5faf..7d11ae13c 100644 --- a/blueoil/templates/lmnet/semantic_segmentation.tpl.py +++ b/blueoil/templates/lmnet/semantic_segmentation.tpl.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -69,7 +69,7 @@ ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = {{optimizer_class}} NETWORK.OPTIMIZER_KWARGS = {{optimizer_kwargs}} NETWORK.LEARNING_RATE_FUNC = {{learning_rate_func}} @@ -86,7 +86,7 @@ NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/blueoil/utils/config.py b/blueoil/utils/config.py index 9427f0694..c465e2323 100644 --- a/blueoil/utils/config.py +++ b/blueoil/utils/config.py @@ -18,7 +18,7 @@ from abc import ABCMeta import yaml -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from yaml.representer import Representer from blueoil.data_processor import Processor, Sequence @@ -109,7 +109,7 @@ def check_config(config, mode="inference"): def load(config_file): """dynamically load a config file as module. 
- Return: EasyDict object + Return: SmartDict object """ filename, file_extension = os.path.splitext(config_file) if file_extension.lower() in '.py': @@ -133,28 +133,28 @@ def _load_py(config_file): exec(source, globals(), config) # use only upper key. - return EasyDict({ + return SmartDict({ key: value for key, value in config.items() if key.isupper() }) -def _easy_dict_to_dict(config): - if isinstance(config, EasyDict): +def _smart_dict_to_dict(config): + if isinstance(config, SmartDict): config = dict(config) for key, value in config.items(): - if isinstance(value, EasyDict): + if isinstance(value, SmartDict): value = dict(value) - _easy_dict_to_dict(value) + _smart_dict_to_dict(value) config[key] = value return config def _save_meta_yaml(output_dir, config): output_file_name = 'meta.yaml' - config_dict = _easy_dict_to_dict(config) + config_dict = _smart_dict_to_dict(config) meta_dict = {key: value for key, value in config_dict.items() if key in PARAMS_FOR_EXPORT} @@ -213,7 +213,7 @@ def processor_representer(dumper, data): def _save_config_yaml(output_dir, config): file_name = 'config.yaml' - config_dict = _easy_dict_to_dict(config) + config_dict = _smart_dict_to_dict(config) file_path = os.path.join(output_dir, file_name) class Dumper(yaml.Dumper): @@ -259,14 +259,14 @@ def _load_yaml(config_file): # use only upper key. keys = [key for key in config.keys() if key.isupper()] config_dict = {key: config[key] for key in keys} - config = EasyDict(config_dict) + config = SmartDict(config_dict) return config def load_from_experiment(): """Load saved experiment config as module. - Return: EasyDict object + Return: SmartDict object """ config_file = _saved_config_file_path() return load(config_file) @@ -288,12 +288,12 @@ def copy_to_experiment_dir(config_file): def merge(base_config, override_config): """merge config. - Return: merged config (EasyDict object). + Return: merged config (SmartDict object). """ - result = EasyDict(base_config) + result = SmartDict(base_config) for k, v in override_config.items(): - if type(v) is EasyDict: + if type(v) is SmartDict: v = merge(base_config[k], override_config[k]) result[k] = v diff --git a/blueoil/visualize.py b/blueoil/visualize.py index ce27e9cba..485b7a6c1 100644 --- a/blueoil/visualize.py +++ b/blueoil/visualize.py @@ -31,7 +31,7 @@ def visualize_classification(image, post_processed, config): image (np.ndarray): A inference input RGB image to be draw. post_processed (np.ndarray): A one batch output of model be already applied post process. format is defined at https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md - config (EasyDict): Inference config. + config (SmartDict): Inference config. Returns: PIL.Image.Image: drawn image object. @@ -66,7 +66,7 @@ def visualize_object_detection(image, post_processed, config): image (np.ndarray): A inference input RGB image to be draw. post_processed (np.ndarray): A one batch output of model be already applied post process. format is defined at https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md - config (EasyDict): Inference config. + config (SmartDict): Inference config. Returns: PIL.Image.Image: drawn image object. @@ -108,7 +108,7 @@ def visualize_semantic_segmentation(image, post_processed, config): image (np.ndarray): A inference input RGB image to be draw. post_processed (np.ndarray): A one batch output of model be already applied post process. 
format is defined at https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md - config (EasyDict): Inference config. + config (SmartDict): Inference config. Returns: PIL.Image.Image: drawn image object. diff --git a/output_template/python/config.py b/output_template/python/config.py index 6a3ae6a88..e7e2b74c2 100644 --- a/output_template/python/config.py +++ b/output_template/python/config.py @@ -16,9 +16,9 @@ from importlib import import_module import yaml -from easydict import EasyDict from blueoil.data_processor import Sequence +from blueoil.utils.smartdict import SmartDict from blueoil import post_processor, pre_processor @@ -29,7 +29,7 @@ def load_yaml(config_file): config_file (str): Path of the configuration file. Returns: - EasyDict: Dictionary object of loaded configuration file. + SmartDict: Dictionary object of loaded configuration file. Examples: >>> config = load_yaml("/path/of/meta.yaml") @@ -37,7 +37,7 @@ def load_yaml(config_file): with open(config_file) as config_file_stream: config = yaml.load(config_file_stream, Loader=yaml.Loader) # use only upper key. - return EasyDict({k: v for k, v in config.items() if k.isupper()}) + return SmartDict({k: v for k, v in config.items() if k.isupper()}) def build_pre_process(pre_processor_config): diff --git a/output_template/python/requirements.txt b/output_template/python/requirements.txt index 24ed2d8bc..98bc9298d 100644 --- a/output_template/python/requirements.txt +++ b/output_template/python/requirements.txt @@ -1,5 +1,4 @@ click==7.1.2 -easydict==1.9 numpy==1.19.1 Pillow==7.2. PyYAML==5.3.1 diff --git a/setup.cfg b/setup.cfg index dc5b81429..12008e979 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,7 +20,6 @@ python_requires = ~=3.6 install_requires = jinja2 click==7.1.2 - easydict==1.9 matplotlib==3.3.0 numpy==1.19.1 Pillow==7.2.0 diff --git a/tests/unit/fixtures/configs/for_build_tfds_classification.py b/tests/unit/fixtures/configs/for_build_tfds_classification.py index 786d0df99..9280ca1ee 100644 --- a/tests/unit/fixtures/configs/for_build_tfds_classification.py +++ b/tests/unit/fixtures/configs/for_build_tfds_classification.py @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================= import tensorflow as tf -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from blueoil.common import Tasks from blueoil.networks.classification.lmnet_v0 import LmnetV0Quantize @@ -69,7 +69,7 @@ class ClassificationDataset(ImageFolderBase): ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -85,7 +85,7 @@ class ClassificationDataset(ImageFolderBase): NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = None diff --git a/tests/unit/fixtures/configs/for_build_tfds_object_detection.py b/tests/unit/fixtures/configs/for_build_tfds_object_detection.py index 6b2901d66..89a7253d2 100644 --- a/tests/unit/fixtures/configs/for_build_tfds_object_detection.py +++ b/tests/unit/fixtures/configs/for_build_tfds_object_detection.py @@ -14,7 +14,7 @@ # limitations under the License. 
# ============================================================================= import tensorflow as tf -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict from blueoil.common import Tasks from blueoil.networks.object_detection.lm_fyolo import LMFYoloQuantize @@ -90,7 +90,7 @@ class ObjectDetectionDataset(OpenImagesV4BoundingBoxBase): NMS(iou_threshold=0.5, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -107,7 +107,7 @@ class ObjectDetectionDataset(OpenImagesV4BoundingBoxBase): NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = None diff --git a/tests/unit/fixtures/configs/for_build_tfds_segmentation.py b/tests/unit/fixtures/configs/for_build_tfds_segmentation.py index 803a60b1c..07c3ec06b 100644 --- a/tests/unit/fixtures/configs/for_build_tfds_segmentation.py +++ b/tests/unit/fixtures/configs/for_build_tfds_segmentation.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -74,7 +74,7 @@ class SegmentationDataset(CamvidCustom): ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -88,7 +88,7 @@ class SegmentationDataset(CamvidCustom): NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer NETWORK.WEIGHT_QUANTIZER_KWARGS = {} -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/tests/unit/fixtures/configs/for_export.py b/tests/unit/fixtures/configs/for_export.py index 51ca04884..9547a7b2f 100644 --- a/tests/unit/fixtures/configs/for_export.py +++ b/tests/unit/fixtures/configs/for_export.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ class Dummy(ImageFolderBase): ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -84,7 +84,7 @@ class Dummy(ImageFolderBase): NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/tests/unit/fixtures/configs/for_predict_classification.py b/tests/unit/fixtures/configs/for_predict_classification.py index 51ca04884..9547a7b2f 100644 --- a/tests/unit/fixtures/configs/for_predict_classification.py +++ b/tests/unit/fixtures/configs/for_predict_classification.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ class Dummy(ImageFolderBase): ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -84,7 +84,7 @@ class Dummy(ImageFolderBase): NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/tests/unit/fixtures/configs/for_predict_object_detection.py b/tests/unit/fixtures/configs/for_predict_object_detection.py index 217c64e7d..850118900 100644 --- a/tests/unit/fixtures/configs/for_predict_object_detection.py +++ b/tests/unit/fixtures/configs/for_predict_object_detection.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -82,7 +82,7 @@ NMS(iou_threshold=0.5, classes=CLASSES,), ]) -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -99,7 +99,7 @@ NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/tests/unit/fixtures/configs/for_profile.py b/tests/unit/fixtures/configs/for_profile.py index 51ca04884..9547a7b2f 100644 --- a/tests/unit/fixtures/configs/for_profile.py +++ b/tests/unit/fixtures/configs/for_profile.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -from easydict import EasyDict +from blueoil.utils.smartdict import SmartDict import tensorflow as tf from blueoil.common import Tasks @@ -68,7 +68,7 @@ class Dummy(ImageFolderBase): ]) POST_PROCESSOR = None -NETWORK = EasyDict() +NETWORK = SmartDict() NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001} NETWORK.IMAGE_SIZE = IMAGE_SIZE @@ -84,7 +84,7 @@ class Dummy(ImageFolderBase): NETWORK.WEIGHT_QUANTIZER_KWARGS = {} # dataset -DATASET = EasyDict() +DATASET = SmartDict() DATASET.BATCH_SIZE = BATCH_SIZE DATASET.DATA_FORMAT = DATA_FORMAT DATASET.PRE_PROCESSOR = PRE_PROCESSOR diff --git a/tests/unit/fixtures/configs/for_train.py b/tests/unit/fixtures/configs/for_train.py index 8ed47955f..0f37fb3f9 100644 --- a/tests/unit/fixtures/configs/for_train.py +++ b/tests/unit/fixtures/configs/for_train.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 # =============================================================================
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 import tensorflow as tf
 
 from blueoil.common import Tasks
@@ -68,7 +68,7 @@ class Dummy(ImageFolderBase):
 ])
 POST_PROCESSOR = None
 
-NETWORK = EasyDict()
+NETWORK = SmartDict()
 NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
 NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
 NETWORK.IMAGE_SIZE = IMAGE_SIZE
@@ -83,7 +83,7 @@ class Dummy(ImageFolderBase):
 NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
 
 # dataset
-DATASET = EasyDict()
+DATASET = SmartDict()
 DATASET.BATCH_SIZE = BATCH_SIZE
 DATASET.DATA_FORMAT = DATA_FORMAT
 DATASET.PRE_PROCESSOR = PRE_PROCESSOR
diff --git a/tests/unit/networks_tests/classification_test/test_darknet.py b/tests/unit/networks_tests/classification_test/test_darknet.py
index d8a59bc1b..f0e27a837 100644
--- a/tests/unit/networks_tests/classification_test/test_darknet.py
+++ b/tests/unit/networks_tests/classification_test/test_darknet.py
@@ -15,7 +15,7 @@
 # =============================================================================
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -36,7 +36,7 @@ class Dummy(ImageFolderBase):
 
 def test_training():
     """Test only no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = Darknet
     config.DATASET_CLASS = Dummy
 
@@ -53,14 +53,14 @@ def test_training():
     config.TASK = Tasks.CLASSIFICATION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
     config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
 
     # daasegt config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
 
diff --git a/tests/unit/networks_tests/classification_test/test_example_quantize.py b/tests/unit/networks_tests/classification_test/test_example_quantize.py
index b7f3afd52..192f40cfe 100644
--- a/tests/unit/networks_tests/classification_test/test_example_quantize.py
+++ b/tests/unit/networks_tests/classification_test/test_example_quantize.py
@@ -15,7 +15,7 @@
 # =============================================================================
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -40,7 +40,7 @@ class Dummy(ImageFolderBase):
 
 def test_training():
     """Test only that no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = SampleNetworkQuantize
     config.DATASET_CLASS = Dummy
 
@@ -57,7 +57,7 @@ def test_training():
     config.TASK = Tasks.CLASSIFICATION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -72,7 +72,7 @@ def test_training():
     config.NETWORK.DATA_FORMAT = "NHWC"
 
     # dataset config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
     config.DATASET.DATA_FORMAT = "NHWC"
diff --git a/tests/unit/networks_tests/classification_test/test_lm_resnet_quantize.py b/tests/unit/networks_tests/classification_test/test_lm_resnet_quantize.py
index 61f66443a..a0abe6215 100644
--- a/tests/unit/networks_tests/classification_test/test_lm_resnet_quantize.py
+++ b/tests/unit/networks_tests/classification_test/test_lm_resnet_quantize.py
@@ -15,7 +15,7 @@
 # =============================================================================
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -40,7 +40,7 @@ class Dummy(ImageFolderBase):
 
 def test_training():
     """Test only that no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = LmResnetQuantize
     config.DATASET_CLASS = Dummy
 
@@ -57,7 +57,7 @@ def test_training():
     config.TASK = Tasks.CLASSIFICATION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -71,7 +71,7 @@ def test_training():
     config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
 
     # dataset config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
 
diff --git a/tests/unit/networks_tests/classification_test/test_lmnet_quantize.py b/tests/unit/networks_tests/classification_test/test_lmnet_quantize.py
index c8f14d8b0..8fb022989 100644
--- a/tests/unit/networks_tests/classification_test/test_lmnet_quantize.py
+++ b/tests/unit/networks_tests/classification_test/test_lmnet_quantize.py
@@ -15,7 +15,7 @@
 # =============================================================================
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -40,7 +40,7 @@ class Dummy(ImageFolderBase):
 
 def test_training():
     """Test only that no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = LmnetQuantize
     config.DATASET_CLASS = Dummy
 
@@ -57,7 +57,7 @@ def test_training():
     config.TASK = Tasks.CLASSIFICATION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -71,7 +71,7 @@ def test_training():
     config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
 
     # dataset config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
 
diff --git a/tests/unit/networks_tests/keypoint_detection_tests/test_lm_single_pose_v1.py b/tests/unit/networks_tests/keypoint_detection_tests/test_lm_single_pose_v1.py
index 56a7b8d82..ad1107a93 100644
--- a/tests/unit/networks_tests/keypoint_detection_tests/test_lm_single_pose_v1.py
+++ b/tests/unit/networks_tests/keypoint_detection_tests/test_lm_single_pose_v1.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 import pytest
 import tensorflow as tf
@@ -43,7 +43,7 @@
 
 def test_training():
     """Test only no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = LmSinglePoseV1Quantize
     config.DATASET_CLASS = MscocoSinglePersonKeypoints
 
@@ -61,7 +61,7 @@ def test_training():
     config.TASK = Tasks.KEYPOINT_DETECTION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -75,7 +75,7 @@ def test_training():
     config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
 
     # daasegt config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = Sequence([
         ResizeWithJoints(image_size=config.IMAGE_SIZE),
         JointsToGaussianHeatmap(image_size=config.IMAGE_SIZE, stride=2),
diff --git a/tests/unit/networks_tests/object_detection_tests/test_yolo_v1.py b/tests/unit/networks_tests/object_detection_tests/test_yolo_v1.py
index 30ea10460..cdb9c1790 100644
--- a/tests/unit/networks_tests/object_detection_tests/test_yolo_v1.py
+++ b/tests/unit/networks_tests/object_detection_tests/test_yolo_v1.py
@@ -16,7 +16,7 @@
 import numpy as np
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -266,7 +266,7 @@ def test_convert_boxes_space_inverse():
 
 def test_training():
     """Test only that no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = YoloV1
     config.DATASET_CLASS = Pascalvoc2007
 
@@ -283,12 +283,12 @@ def test_training():
     config.TASK = Tasks.OBJECT_DETECTION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
     config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
 
     # daasegt config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
 
diff --git a/tests/unit/networks_tests/object_detection_tests/test_yolo_v2.py b/tests/unit/networks_tests/object_detection_tests/test_yolo_v2.py
index 2952f1f05..0ff654c96 100644
--- a/tests/unit/networks_tests/object_detection_tests/test_yolo_v2.py
+++ b/tests/unit/networks_tests/object_detection_tests/test_yolo_v2.py
@@ -16,7 +16,7 @@
 import numpy as np
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -696,7 +696,7 @@ def test_reorg():
 
 def test_training():
     """Test only that no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = YoloV2
     config.DATASET_CLASS = Pascalvoc2007
 
@@ -713,7 +713,7 @@ def test_training():
     config.TASK = Tasks.OBJECT_DETECTION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -721,7 +721,7 @@
     config.NETWORK.DATA_FORMAT = "NHWC"
 
     # dataset config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
     config.DATASET.DATA_FORMAT = "NHWC"
diff --git a/tests/unit/networks_tests/object_detection_tests/test_yolo_v2_quantize.py b/tests/unit/networks_tests/object_detection_tests/test_yolo_v2_quantize.py
index 3c4d1209a..a85b8dd12 100644
--- a/tests/unit/networks_tests/object_detection_tests/test_yolo_v2_quantize.py
+++ b/tests/unit/networks_tests/object_detection_tests/test_yolo_v2_quantize.py
@@ -15,7 +15,7 @@
 # =============================================================================
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -37,7 +37,7 @@
 
 def test_training():
     """Test only no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = YoloV2Quantize
     config.DATASET_CLASS = Pascalvoc2007
 
@@ -54,7 +54,7 @@ def test_training():
     config.TASK = Tasks.OBJECT_DETECTION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -68,7 +68,7 @@ def test_training():
     config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
 
     # daasegt config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
 
diff --git a/tests/unit/networks_tests/segmentation_tests/test_lm_bisenet.py b/tests/unit/networks_tests/segmentation_tests/test_lm_bisenet.py
index f335d52db..bee0458cd 100644
--- a/tests/unit/networks_tests/segmentation_tests/test_lm_bisenet.py
+++ b/tests/unit/networks_tests/segmentation_tests/test_lm_bisenet.py
@@ -16,7 +16,7 @@
 import numpy as np
 import pytest
 import tensorflow as tf
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.cmd.train import start_training
 from blueoil import environment
@@ -38,7 +38,7 @@ class DummyCamvid(Camvid):
 
 def test_training():
     """Verify only that no error raised."""
-    config = EasyDict()
+    config = SmartDict()
     config.NETWORK_CLASS = LMBiSeNet
     config.DATASET_CLASS = DummyCamvid
 
@@ -55,7 +55,7 @@ def test_training():
     config.TASK = Tasks.SEMANTIC_SEGMENTATION
 
     # network model config
-    config.NETWORK = EasyDict()
+    config.NETWORK = SmartDict()
     config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
     config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
     config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
@@ -63,7 +63,7 @@ def test_training():
     config.NETWORK.DATA_FORMAT = "NHWC"
 
     # daasegt config
-    config.DATASET = EasyDict()
+    config.DATASET = SmartDict()
     config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
     config.DATASET.BATCH_SIZE = config.BATCH_SIZE
     config.DATASET.DATA_FORMAT = "NHWC"
diff --git a/tests/unit/test_visualize.py b/tests/unit/test_visualize.py
index 0cc0c0f0d..56a994c4f 100644
--- a/tests/unit/test_visualize.py
+++ b/tests/unit/test_visualize.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 import numpy as np
 import PIL.Image
@@ -42,7 +42,7 @@ def test_classification():
     """Verify just image is changed."""
     input_image = PIL.Image.new("RGB", size=(100, 200))
     results = np.array([0.1, 0.3, 0.4, 0.2])
-    config = EasyDict({"CLASSES": ["a", "b", "c", "d"]})
+    config = SmartDict({"CLASSES": ["a", "b", "c", "d"]})
 
     result_image = visualize_classification(np.array(input_image), results, config)
 
@@ -53,7 +53,7 @@ def test_object_detection():
     """Verify just image is changed."""
     input_image = PIL.Image.new("RGB", size=(100, 200))
     results = np.array([[32, 20, 10, 5, 2, 0.5], [2, 4, 2, 4, 1, 0.5]])
-    config = EasyDict({"IMAGE_SIZE": (64, 64), "CLASSES": ["a", "b", "c", "d"]})
+    config = SmartDict({"IMAGE_SIZE": (64, 64), "CLASSES": ["a", "b", "c", "d"]})
 
     result_image = visualize_object_detection(np.array(input_image), results, config)
 
@@ -64,7 +64,7 @@ def test_semantic_segmentation():
     """Verify just image is changed."""
     input_image = PIL.Image.new("RGB", size=(100, 200))
     results = np.random.random_sample(size=(64, 64, 4))
-    config = EasyDict({"IMAGE_SIZE": (64, 64), "CLASSES": ["a", "b", "c", "d"]})
+    config = SmartDict({"IMAGE_SIZE": (64, 64), "CLASSES": ["a", "b", "c", "d"]})
 
     result_image = visualize_semantic_segmentation(np.array(input_image), results, config)
 
diff --git a/tests/unit/util_tests/test_config.py b/tests/unit/util_tests/test_config.py
index 2a3abf10e..483e52a03 100644
--- a/tests/unit/util_tests/test_config.py
+++ b/tests/unit/util_tests/test_config.py
@@ -13,17 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-from easydict import EasyDict
+from blueoil.utils.smartdict import SmartDict
 
 from blueoil.utils import config as config_util
 
 
 def test_merge():
-    base_config = EasyDict({"a": "aa", "nest": EasyDict({"b": "bb", "c": "cc"}), "d": "dd"})
-    override_config = EasyDict({"a": "_a", "nest": EasyDict({"b": "_b"})})
+    base_config = SmartDict({"a": "aa", "nest": SmartDict({"b": "bb", "c": "cc"}), "d": "dd"})
+    override_config = SmartDict({"a": "_a", "nest": SmartDict({"b": "_b"})})
 
-    expected = EasyDict({"a": "_a", "nest": EasyDict({"b": "_b", "c": "cc"}), "d": "dd"})
+    expected = SmartDict({"a": "_a", "nest": SmartDict({"b": "_b", "c": "cc"}), "d": "dd"})
 
     config = config_util.merge(base_config, override_config)
 
     assert config == expected
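
Note for reviewers: the hunks above only rewrite call sites from EasyDict to SmartDict; blueoil/utils/smartdict.py itself is not part of this section of the patch. As a rough mental model only (an assumption for illustration, not the actual implementation), the attribute-style access used throughout these configs and tests can be provided by a small dict subclass like the sketch below.

# Hypothetical sketch only -- the real blueoil/utils/smartdict.py is not shown
# in this diff, so the class body below is an assumption for illustration.
class SmartDict(dict):
    """A dict whose keys can also be read and written as attributes."""

    def __getattr__(self, name):
        # Called only when normal attribute lookup fails; fall back to the dict.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Store attribute assignments as dictionary items.
        self[name] = value


# Usage mirroring the config files in this patch:
NETWORK = SmartDict()
NETWORK.BATCH_SIZE = 32
assert NETWORK["BATCH_SIZE"] == 32
assert SmartDict({"CLASSES": ["a", "b"]}).CLASSES == ["a", "b"]

Unlike easydict, a sketch like this would not recursively convert nested plain dicts into SmartDict instances, which is consistent with the diff keeping explicit nested SmartDict(...) constructors in test_config.py.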