# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import os.path as osp
import shutil
import tempfile
from unittest import TestCase

import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader, Dataset

from mmengine.config import Config
from mmengine.data import DefaultSampler
from mmengine.evaluator import BaseMetric, Evaluator
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, Hook,
                            IterTimerHook, LoggerHook, ParamSchedulerHook,
                            RuntimeInfoHook)
from mmengine.logging import LogProcessor, MessageHub, MMLogger
from mmengine.model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from mmengine.optim import (DefaultOptimWrapperConstructor, MultiStepLR,
                            OptimWrapper, OptimWrapperDict, StepLR)
from mmengine.registry import (DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
                               LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
                               OPTIM_WRAPPER_CONSTRUCTORS, PARAM_SCHEDULERS,
                               RUNNERS, Registry)
from mmengine.runner import (BaseLoop, EpochBasedTrainLoop, IterBasedTrainLoop,
                             Runner, TestLoop, ValLoop)
from mmengine.runner.loops import _InfiniteDataloaderIterator
from mmengine.runner.priority import Priority, get_priority
from mmengine.utils import TORCH_VERSION, digit_version, is_list_of
from mmengine.visualization import Visualizer


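# A minimal BaseModel used throughout these tests. Its ``forward`` follows
# the mode-switch convention shared by the fixtures below: 'tensor' returns
# raw outputs, 'loss' returns a dict of losses, and 'predict' returns
# predictions.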
@MODELS.register_module()
class ToyModel(BaseModel):

    def __init__(self, data_preprocessor=None):
        super().__init__(data_preprocessor=data_preprocessor)
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 1)

    def forward(self, batch_inputs, labels, mode='tensor'):
        labels = torch.stack(labels)
        outputs = self.linear1(batch_inputs)
        outputs = self.linear2(outputs)

        if mode == 'tensor':
            return outputs
        elif mode == 'loss':
            # 'loss' mode must return a dict of losses
            loss = (labels - outputs).sum()
            outputs = dict(loss=loss)
            return outputs
        elif mode == 'predict':
            return outputs


@MODELS.register_module()
class ToyModel1(ToyModel):
    def __init__(self):
        super().__init__()


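# Contains a SyncBatchNorm layer; test_wrap_model uses it to check that the
# runner reverts SyncBatchNorm to plain BatchNorm in non-distributed runs.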
@MODELS.register_module()
class ToySyncBNModel(BaseModel):

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 2)
        self.bn = nn.SyncBatchNorm(8)

    def forward(self, batch_inputs, labels, mode='tensor'):
        labels = torch.stack(labels)
        outputs = self.conv(batch_inputs)
        outputs = self.bn(outputs)

        if mode == 'tensor':
            return outputs
        elif mode == 'loss':
            loss = (labels - outputs).sum()
            outputs = dict(loss=loss)
            return outputs
        elif mode == 'predict':
            outputs = dict(log_vars=dict(a=1, b=0.5))
            return outputs

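# A GAN-style toy model whose two linear branches are updated by separate
# optimizers in ``train_step``; it pairs with the
# ToyMultipleOptimizerConstructor fixture below. (The class name is assumed:
# the original header was lost here.)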
@MODELS.register_module()
class ToyGANModel(BaseModel):

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(2, 1)
        self.linear2 = nn.Linear(2, 1)

    def forward(self, batch_inputs, labels, mode='tensor'):
        labels = torch.stack(labels)
        output1 = self.linear1(batch_inputs)
        output2 = self.linear2(batch_inputs)

        if mode == 'tensor':
            return output1, output2
        elif mode == 'loss':
            loss1 = (labels - output1).sum()
            loss2 = (labels - output2).sum()
            outputs = dict(linear1=loss1, linear2=loss2)
            return outputs
        elif mode == 'predict':
            return output1, output2

    def train_step(self, data, optim_wrapper):
        batch_inputs, batch_labels = self.data_preprocessor(data)
        loss = self(batch_inputs, batch_labels, mode='loss')
        optim_wrapper['linear1'].update_params(loss['linear1'])
        optim_wrapper['linear2'].update_params(loss['linear2'])
        return loss


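# Registered in MODEL_WRAPPERS so tests can exercise the runner's
# ``model_wrapper_cfg`` option (see test_wrap_model).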
@MODEL_WRAPPERS.register_module()
class CustomModelWrapper(nn.Module):

    def __init__(self, module):
        super().__init__()
        self.model = module


@OPTIM_WRAPPER_CONSTRUCTORS.register_module()
class ToyMultipleOptimizerConstructor:

    def __init__(self, optim_wrapper_cfg, paramwise_cfg=None):
        if not isinstance(optim_wrapper_cfg, dict):
            raise TypeError('optimizer_cfg should be a dict, '
                            f'but got {type(optim_wrapper_cfg)}')
        assert paramwise_cfg is None, (
            'paramwise_cfg should be set in each optimizer separately')
        self.optim_wrapper_cfg = optim_wrapper_cfg
        self.constructors = {}
        for key, cfg in self.optim_wrapper_cfg.items():
            _cfg = cfg.copy()
            paramwise_cfg_ = _cfg.pop('paramwise_cfg', None)
            self.constructors[key] = DefaultOptimWrapperConstructor(
                _cfg, paramwise_cfg_)

    def __call__(self, model: nn.Module) -> OptimWrapperDict:
        optimizers = {}
        while hasattr(model, 'module'):
            model = model.module

        for key, constructor in self.constructors.items():
            module = getattr(model, key)
            optimizers[key] = constructor(module)
        return OptimWrapperDict(**optimizers)


@DATASETS.register_module()
class ToyDataset(Dataset):
    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    @property
    def metainfo(self):
        return self.METAINFO

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])


@DATASETS.register_module()
class ToyDatasetNoMeta(Dataset):
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])


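# Stub metrics that always report ``acc=1``; the tests only verify that the
# evaluation plumbing is wired up, not the metric values themselves.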
@METRICS.register_module()
class ToyMetric1(BaseMetric):
    def __init__(self, collect_device='cpu', dummy_metrics=None):
        super().__init__(collect_device=collect_device)
        self.dummy_metrics = dummy_metrics

    def process(self, data_samples, predictions):
        result = {'acc': 1}
        self.results.append(result)

    def compute_metrics(self, results):
        return dict(acc=1)


@METRICS.register_module()
class ToyMetric2(BaseMetric):

    def __init__(self, collect_device='cpu', dummy_metrics=None):
        super().__init__(collect_device=collect_device)
        self.dummy_metrics = dummy_metrics

    def process(self, data_samples, predictions):
        result = {'acc': 1}
        self.results.append(result)

    def compute_metrics(self, results):
        return dict(acc=1)


@HOOKS.register_module()
class ToyHook(Hook):
    priority = 'Lowest'

    def before_train_epoch(self, runner):
        pass


@HOOKS.register_module()
class ToyHook2(Hook):
    priority = 'Lowest'

    def after_train_epoch(self, runner):
        pass


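# Minimal loops registered in LOOPS, letting the tests verify that the
# runner can build user-defined train/val/test loops from config.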
@LOOPS.register_module()
class CustomTrainLoop(BaseLoop):

    def __init__(self, runner, dataloader, max_epochs):
        super().__init__(runner, dataloader)
        self._max_epochs = max_epochs

    def run(self) -> None:
        pass


@LOOPS.register_module()
class CustomValLoop(BaseLoop):

    def __init__(self, runner, dataloader, evaluator):
        super().__init__(runner, dataloader)
        self._runner = runner

        if isinstance(evaluator, dict) or is_list_of(evaluator, dict):
            self.evaluator = runner.build_evaluator(evaluator)  # type: ignore
        else:
            self.evaluator = evaluator

    def run(self) -> None:
        pass


@LOOPS.register_module()
class CustomTestLoop(BaseLoop):

    def __init__(self, runner, dataloader, evaluator):
        super().__init__(runner, dataloader)
        self._runner = runner

        if isinstance(evaluator, dict) or is_list_of(evaluator, dict):
            self.evaluator = runner.build_evaluator(evaluator)  # type: ignore
        else:
            self.evaluator = evaluator

    def run(self) -> None:
        pass


@LOG_PROCESSORS.register_module()
class CustomLogProcessor(LogProcessor):

    def __init__(self, window_size=10, by_epoch=True, custom_cfg=None):
        self.window_size = window_size
        self.by_epoch = by_epoch
        self.custom_cfg = custom_cfg if custom_cfg else []
        self._check_custom_cfg()


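# A Runner subclass with no-op ``__init__`` and ``setup_env``, registered in
# RUNNERS so configs can request a custom runner type.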
@RUNNERS.register_module()
class CustomRunner(Runner):

    def __init__(self,
                 model,
                 work_dir,
                 train_dataloader=None,
                 val_dataloader=None,
                 test_dataloader=None,
                 train_cfg=None,
                 val_cfg=None,
                 test_cfg=None,
                 param_scheduler=None,
                 val_evaluator=None,
                 test_evaluator=None,
                 default_hooks=None,
                 custom_hooks=None,
                 load_from=None,
                 resume=False,
                 launcher='none',
                 env_cfg=dict(dist_cfg=dict(backend='nccl')),
                 log_processor=None,
                 log_level='INFO',
                 visualizer=None,
                 default_scope=None,
                 randomness=dict(seed=None),
                 experiment_name=None,
                 cfg=None):
        pass

    def setup_env(self, env_cfg):
        pass


@EVALUATOR.register_module()
class ToyEvaluator(Evaluator):

    def __init__(self, metrics):
        super().__init__(metrics)


def collate_fn(data_batch):
    return data_batch


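# Each test constructs runners from fresh copies of the configs built in
# setUp, giving every construction a unique ``experiment_name`` so the
# per-experiment logger and message hub singletons do not collide.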
class TestRunner(TestCase):

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        epoch_based_cfg = dict(
            model=dict(type='ToyModel'),
            work_dir=self.temp_dir,
            train_dataloader=dict(
                dataset=dict(type='ToyDataset'),
                sampler=dict(type='DefaultSampler', shuffle=True),
                batch_size=3,
                num_workers=0),
            val_dataloader=dict(
                dataset=dict(type='ToyDataset'),
                sampler=dict(type='DefaultSampler', shuffle=False),
                batch_size=3,
                num_workers=0),
            test_dataloader=dict(
                dataset=dict(type='ToyDataset'),
                sampler=dict(type='DefaultSampler', shuffle=False),
                batch_size=3,
                num_workers=0),
            auto_scale_lr=dict(base_batch_size=16, enable=False),
            optim_wrapper=dict(
                type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01)),
            param_scheduler=dict(type='MultiStepLR', milestones=[1, 2]),
            val_evaluator=dict(type='ToyMetric1'),
            test_evaluator=dict(type='ToyMetric1'),
            train_cfg=dict(
                by_epoch=True, max_epochs=3, val_interval=1, val_begin=1),
            val_cfg=dict(),
            test_cfg=dict(),
            custom_hooks=[],
            default_hooks=dict(
                runtime_info=dict(type='RuntimeInfoHook'),
                timer=dict(type='IterTimerHook'),
                logger=dict(type='LoggerHook'),
                param_scheduler=dict(type='ParamSchedulerHook'),
                checkpoint=dict(
                    type='CheckpointHook', interval=1, by_epoch=True),
                sampler_seed=dict(type='DistSamplerSeedHook')),
            launcher='none',
            env_cfg=dict(dist_cfg=dict(backend='nccl')),
        )
        self.epoch_based_cfg = Config(epoch_based_cfg)
        self.iter_based_cfg = copy.deepcopy(self.epoch_based_cfg)
        self.iter_based_cfg.train_dataloader = dict(
            dataset=dict(type='ToyDataset'),
            sampler=dict(type='InfiniteSampler', shuffle=True),
            batch_size=3,
            num_workers=0)
        self.iter_based_cfg.train_cfg = dict(by_epoch=False, max_iters=12)
        self.iter_based_cfg.default_hooks = dict(
            runtime_info=dict(type='RuntimeInfoHook'),
            timer=dict(type='IterTimerHook'),
            logger=dict(type='LoggerHook'),
            param_scheduler=dict(type='ParamSchedulerHook'),
            checkpoint=dict(type='CheckpointHook', interval=1, by_epoch=False),
            sampler_seed=dict(type='DistSamplerSeedHook'))
    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_init(self):
        # 1. test arguments
        # 1.1 train_dataloader, train_cfg, optimizer and param_scheduler
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init1'
        cfg.pop('train_cfg')
        with self.assertRaisesRegex(ValueError, 'either all None or not None'):
            Runner(**cfg)

        # when all training-related configs are None, param_scheduler must
        # also be None
        cfg.experiment_name = 'test_init2'
        cfg.pop('train_dataloader')
        cfg.pop('optim_wrapper')
        cfg.pop('param_scheduler')
        runner = Runner(**cfg)
        self.assertIsInstance(runner, Runner)

        # all training-related configs are not None
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init3'
        runner = Runner(**cfg)
        self.assertIsInstance(runner, Runner)

        # all training-related configs are not None, and param_scheduler
        # may still be None
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init4'
        cfg.pop('param_scheduler')
        runner = Runner(**cfg)
        self.assertIsInstance(runner, Runner)
        self.assertEqual(runner.param_schedulers, None)
        # param_scheduler should be None when optimizer is None
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init5'
        cfg.pop('train_cfg')
        cfg.pop('train_dataloader')
        cfg.pop('optim_wrapper')
        with self.assertRaisesRegex(ValueError, 'should be None'):
            runner = Runner(**cfg)

        # 1.2 val_dataloader, val_evaluator, val_cfg
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init6'
        cfg.pop('val_cfg')
        with self.assertRaisesRegex(ValueError, 'either all None or not None'):
            Runner(**cfg)

        cfg.experiment_name = 'test_init7'
        cfg.pop('val_dataloader')
        cfg.pop('val_evaluator')
        runner = Runner(**cfg)
        self.assertIsInstance(runner, Runner)

        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init8'
        runner = Runner(**cfg)
        self.assertIsInstance(runner, Runner)

        # 1.3 test_dataloader, test_evaluator and test_cfg
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init9'
        cfg.pop('test_cfg')
        with self.assertRaisesRegex(ValueError, 'either all None or not None'):
            runner = Runner(**cfg)

        cfg.experiment_name = 'test_init10'
        cfg.pop('test_dataloader')
        cfg.pop('test_evaluator')
        runner = Runner(**cfg)
        self.assertIsInstance(runner, Runner)

        # 1.4 test env params
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init11'
        runner = Runner(**cfg)
        self.assertFalse(runner.distributed)
        self.assertFalse(runner.deterministic)

        # 1.5 message_hub, logger and visualizer
        # they are all not specified
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init12'
        runner = Runner(**cfg)
        self.assertIsInstance(runner.logger, MMLogger)
        self.assertIsInstance(runner.message_hub, MessageHub)
        self.assertIsInstance(runner.visualizer, Visualizer)

        # they are all specified
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init13'
        cfg.log_level = 'INFO'
        cfg.visualizer = None
        runner = Runner(**cfg)
        self.assertIsInstance(runner.logger, MMLogger)
        self.assertIsInstance(runner.message_hub, MessageHub)
        self.assertIsInstance(runner.visualizer, Visualizer)

        assert runner.distributed is False
        assert runner.seed is not None
        assert runner.work_dir == self.temp_dir

        # 2 model should be initialized
        self.assertIsInstance(runner.model,
                              (nn.Module, DistributedDataParallel))
        self.assertEqual(runner.model_name, 'ToyModel')

        # 3. test lazy initialization
        self.assertIsInstance(runner._train_dataloader, dict)
        self.assertIsInstance(runner._val_dataloader, dict)
        self.assertIsInstance(runner._test_dataloader, dict)
        self.assertIsInstance(runner.optim_wrapper, dict)
        self.assertIsInstance(runner.param_schedulers, dict)

        # After calling runner.train(), the train and val dataloaders should
        # be initialized, but the test dataloader should still be a dict
        runner.train()

        self.assertIsInstance(runner._train_loop, BaseLoop)
        self.assertIsInstance(runner.train_dataloader, DataLoader)
        self.assertIsInstance(runner.optim_wrapper, OptimWrapper)
        self.assertIsInstance(runner.param_schedulers[0], MultiStepLR)
        self.assertIsInstance(runner._val_loop, BaseLoop)
        self.assertIsInstance(runner._val_loop.dataloader, DataLoader)
        self.assertIsInstance(runner._val_loop.evaluator, Evaluator)
        # After calling runner.test(), test_dataloader should be initialized
        self.assertIsInstance(runner._test_loop, dict)
        runner.test()
        self.assertIsInstance(runner._test_loop, BaseLoop)
        self.assertIsInstance(runner._test_loop.dataloader, DataLoader)
        self.assertIsInstance(runner._test_loop.evaluator, Evaluator)
        # 4. initialize runner with objects rather than config
        model = ToyModel()
        optim_wrapper = OptimWrapper(SGD(
            model.parameters(),
            lr=0.01,
        ))
        toy_hook = ToyHook()
        toy_hook2 = ToyHook2()

        train_dataloader = DataLoader(ToyDataset(), collate_fn=collate_fn)
        val_dataloader = DataLoader(ToyDataset(), collate_fn=collate_fn)
        test_dataloader = DataLoader(ToyDataset(), collate_fn=collate_fn)
        runner = Runner(
            model=model,
            work_dir=self.temp_dir,
            train_cfg=dict(
                by_epoch=True, max_epochs=3, val_interval=1, val_begin=1),
            train_dataloader=train_dataloader,
            optim_wrapper=optim_wrapper,
            param_scheduler=MultiStepLR(optim_wrapper, milestones=[1, 2]),
            val_dataloader=val_dataloader,
            val_evaluator=ToyMetric1(),
            test_cfg=dict(),
            test_dataloader=test_dataloader,
            test_evaluator=ToyMetric1(),
            default_hooks=dict(param_scheduler=toy_hook),
            custom_hooks=[toy_hook2],
            experiment_name='test_init14')
        runner.train()
        runner.test()
        # 5. Test building multiple runners
        if torch.cuda.is_available():
            cfg = copy.deepcopy(self.epoch_based_cfg)
            cfg.experiment_name = 'test_init15'
            cfg.launcher = 'pytorch'
            os.environ['MASTER_ADDR'] = '127.0.0.1'
            os.environ['MASTER_PORT'] = '29600'
            os.environ['RANK'] = '0'
            os.environ['WORLD_SIZE'] = '1'
            os.environ['LOCAL_RANK'] = '0'
            runner = Runner(**cfg)
            cfg.experiment_name = 'test_init16'
            runner = Runner(**cfg)

        # 6.1 Test initializing with empty scheduler.
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_init17'
        cfg.param_scheduler = None
        runner = Runner(**cfg)
        self.assertIsNone(runner.param_schedulers)

        # 6.2 Test initializing single scheduler.
        cfg.experiment_name = 'test_init18'
        cfg.param_scheduler = dict(type='MultiStepLR', milestones=[1, 2])
        Runner(**cfg)

        # 6.3 Test initializing a list of schedulers.
        cfg.param_scheduler = [
            dict(type='MultiStepLR', milestones=[1, 2]),
            dict(type='MultiStepLR', milestones=[2, 3])
        ]
        cfg.experiment_name = 'test_init19'
        Runner(**cfg)

        # 6.4 Test initializing 2 schedulers for 2 optimizers.
        cfg.param_scheduler = dict(
            linear1=dict(type='MultiStepLR', milestones=[1, 2]),
            linear2=dict(type='MultiStepLR', milestones=[1, 2]),
        )
        cfg.experiment_name = 'test_init20'
        Runner(**cfg)

        # 6.5 Test initializing 2 lists of schedulers for 2 optimizers.
        cfg.param_scheduler = dict(
            linear1=[dict(type='MultiStepLR', milestones=[1, 2])],
            linear2=[dict(type='MultiStepLR', milestones=[1, 2])],
        )
        cfg.experiment_name = 'test_init21'
        Runner(**cfg)

        # 6.6 Test initializing with `_ParameterScheduler`.
        optimizer = SGD(nn.Linear(1, 1).parameters(), lr=0.1)
        cfg.param_scheduler = MultiStepLR(
            milestones=[1, 2], optimizer=optimizer)
        cfg.experiment_name = 'test_init22'
        Runner(**cfg)

        # 6.7 Test initializing with list of `_ParameterScheduler`.
        cfg.param_scheduler = [
            MultiStepLR(milestones=[1, 2], optimizer=optimizer)
        ]
        cfg.experiment_name = 'test_init23'
        Runner(**cfg)

        # 6.8 Test initializing with 2 `_ParameterScheduler` for 2 optimizers.
        cfg.param_scheduler = dict(
            linear1=MultiStepLR(milestones=[1, 2], optimizer=optimizer),
            linear2=MultiStepLR(milestones=[1, 2], optimizer=optimizer))
        cfg.experiment_name = 'test_init24'
        Runner(**cfg)

        # 6.9 Test initializing with 2 list of `_ParameterScheduler` for 2
        # optimizers.
        cfg.param_scheduler = dict(
            linear1=[MultiStepLR(milestones=[1, 2], optimizer=optimizer)],
            linear2=[MultiStepLR(milestones=[1, 2], optimizer=optimizer)])
        cfg.experiment_name = 'test_init25'
        Runner(**cfg)

        # 6.10 Test initializing with an invalid scheduler type.
        cfg.param_scheduler = dict(linear1='error_type')
        cfg.experiment_name = 'test_init26'
        with self.assertRaisesRegex(AssertionError, 'Each value of'):
            Runner(**cfg)

        cfg.param_scheduler = 'error_type'
        cfg.experiment_name = 'test_init27'
        with self.assertRaisesRegex(TypeError,
                                    '`param_scheduler` should be a'):
            Runner(**cfg)

    def test_dump_config(self):
        # dump config from dict.
        cfg = copy.deepcopy(self.epoch_based_cfg)
        for idx, cfg in enumerate((cfg, cfg._cfg_dict)):
            cfg.experiment_name = f'test_dump{idx}'
            runner = Runner.from_cfg(cfg=cfg)
            assert osp.exists(
                osp.join(runner.work_dir, f'{runner.timestamp}.py'))
            # dump config from file.
            with tempfile.TemporaryDirectory() as temp_config_dir:
                temp_config_file = tempfile.NamedTemporaryFile(
                    dir=temp_config_dir, suffix='.py')
                file_cfg = Config(
                    self.epoch_based_cfg._cfg_dict,
                    filename=temp_config_file.name)
                file_cfg.experiment_name = f'test_dump2{idx}'
                runner = Runner.from_cfg(cfg=file_cfg)
                assert osp.exists(
                    osp.join(runner.work_dir,
                             osp.basename(temp_config_file.name)))

    def test_from_cfg(self):
        runner = Runner.from_cfg(cfg=self.epoch_based_cfg)
        self.assertIsInstance(runner, Runner)

    def test_setup_env(self):
        # TODO
        pass

    def test_build_logger(self):
        self.epoch_based_cfg.experiment_name = 'test_build_logger1'
        runner = Runner.from_cfg(self.epoch_based_cfg)
        self.assertIsInstance(runner.logger, MMLogger)
        self.assertEqual(runner.experiment_name, runner.logger.instance_name)

        # input is a dict
        logger = runner.build_logger(name='test_build_logger2')
        self.assertIsInstance(logger, MMLogger)
        self.assertEqual(logger.instance_name, 'test_build_logger2')

        # input is a dict but does not contain name key
        runner._experiment_name = 'test_build_logger3'
        logger = runner.build_logger()
        self.assertIsInstance(logger, MMLogger)
        self.assertEqual(logger.instance_name, 'test_build_logger3')

    def test_build_message_hub(self):
        self.epoch_based_cfg.experiment_name = 'test_build_message_hub1'
        runner = Runner.from_cfg(self.epoch_based_cfg)
        self.assertIsInstance(runner.message_hub, MessageHub)
        self.assertEqual(runner.message_hub.instance_name,
                         runner.experiment_name)

        # input is a dict
        message_hub_cfg = dict(name='test_build_message_hub2')
        message_hub = runner.build_message_hub(message_hub_cfg)
        self.assertIsInstance(message_hub, MessageHub)
        self.assertEqual(message_hub.instance_name, 'test_build_message_hub2')

        # input is a dict but does not contain name key
        runner._experiment_name = 'test_build_message_hub3'
        message_hub_cfg = dict()
        message_hub = runner.build_message_hub(message_hub_cfg)
        self.assertIsInstance(message_hub, MessageHub)
        self.assertEqual(message_hub.instance_name, 'test_build_message_hub3')

        # input is not a valid type
        with self.assertRaisesRegex(TypeError, 'message_hub should be'):
            runner.build_message_hub('invalid-type')

    def test_build_visualizer(self):
        self.epoch_based_cfg.experiment_name = 'test_build_visualizer1'
        runner = Runner.from_cfg(self.epoch_based_cfg)
        self.assertIsInstance(runner.visualizer, Visualizer)
        self.assertEqual(runner.experiment_name,
                         runner.visualizer.instance_name)
        # input is a Visualizer object
        self.assertEqual(
            id(runner.build_visualizer(runner.visualizer)),
            id(runner.visualizer))

        # input is a dict
        visualizer_cfg = dict(type='Visualizer', name='test_build_visualizer2')
        visualizer = runner.build_visualizer(visualizer_cfg)
        self.assertIsInstance(visualizer, Visualizer)
        self.assertEqual(visualizer.instance_name, 'test_build_visualizer2')

        # input is a dict but does not contain name key
        runner._experiment_name = 'test_build_visualizer3'
        visualizer_cfg = None
        visualizer = runner.build_visualizer(visualizer_cfg)
        self.assertIsInstance(visualizer, Visualizer)
        self.assertEqual(visualizer.instance_name, 'test_build_visualizer3')

        # input is not a valid type
        with self.assertRaisesRegex(TypeError, 'visualizer should be'):
            runner.build_visualizer('invalid-type')

    def test_default_scope(self):
        TOY_SCHEDULERS = Registry(
            'parameter scheduler', parent=PARAM_SCHEDULERS, scope='toy')

        @TOY_SCHEDULERS.register_module()
        class ToyScheduler(MultiStepLR):

            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)

        self.epoch_based_cfg.param_scheduler = dict(
            type='ToyScheduler', milestones=[1, 2])
        self.epoch_based_cfg.default_scope = 'toy'
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_default_scope'
        runner = Runner.from_cfg(cfg)
        runner.train()
        self.assertIsInstance(runner.param_schedulers[0], ToyScheduler)

    def test_build_model(self):
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_build_model'
        runner = Runner.from_cfg(cfg)
        self.assertIsInstance(runner.model, ToyModel)
        self.assertIsInstance(runner.model.data_preprocessor,
                              BaseDataPreprocessor)

        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_data_preprocessor'
        cfg.data_preprocessor = dict(type='ImgDataPreprocessor')
        runner = Runner.from_cfg(cfg)
        # the outer data_preprocessor config is used when the model config
        # does not define its own `data_preprocessor`.
        self.assertIsInstance(runner.model.data_preprocessor,
                              ImgDataPreprocessor)
        # input should be a nn.Module object or dict
        with self.assertRaisesRegex(TypeError, 'model should be'):
            runner.build_model('invalid-type')
        # input is a nn.Module object
        _model = ToyModel1()
        model = runner.build_model(_model)
        self.assertEqual(id(model), id(_model))
        # input is a dict
        model = runner.build_model(dict(type='ToyModel1'))
        self.assertIsInstance(model, ToyModel1)

    def test_wrap_model(self):
        # revert sync batchnorm
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_revert_syncbn'
        cfg.model = dict(type='ToySyncBNModel')
        runner = Runner.from_cfg(cfg)
        self.assertIsInstance(runner.model, BaseModel)
        assert not isinstance(runner.model.bn, nn.SyncBatchNorm)

        # custom model wrapper
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_wrap_model'
        cfg.model_wrapper_cfg = dict(type='CustomModelWrapper')
        runner = Runner.from_cfg(cfg)
        self.assertIsInstance(runner.model, BaseModel)
        if torch.cuda.is_available():
            os.environ['MASTER_ADDR'] = '127.0.0.1'
            os.environ['MASTER_PORT'] = '29515'
            os.environ['RANK'] = str(0)
            os.environ['WORLD_SIZE'] = str(1)
            cfg.launcher = 'pytorch'
            cfg.experiment_name = 'test_wrap_model1'
            runner = Runner.from_cfg(cfg)
            self.assertIsInstance(runner.model, CustomModelWrapper)

    def test_scale_lr(self):
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_scale_lr'
        runner = Runner.from_cfg(cfg)

        # When auto_scale_lr has no base_batch_size, an AssertionError
        # is raised.
        auto_scale_lr = dict(enable=True)
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        with self.assertRaises(AssertionError):
            runner.scale_lr(optim_wrapper, auto_scale_lr)
        # When auto_scale_lr is None or enable is False, the lr will
        # not be linearly scaled.
        auto_scale_lr = dict(base_batch_size=16, enable=False)
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        runner.scale_lr(optim_wrapper)
        self.assertEqual(optim_wrapper.optimizer.param_groups[0]['lr'], 0.01)
        runner.scale_lr(optim_wrapper, auto_scale_lr)
        self.assertEqual(optim_wrapper.optimizer.param_groups[0]['lr'], 0.01)

        # When auto_scale_lr is correct and enable is True, the lr will
        # be linearly scaled.
        auto_scale_lr = dict(base_batch_size=16, enable=True)
        real_bs = runner.world_size * cfg.train_dataloader['batch_size']
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        runner.scale_lr(optim_wrapper, auto_scale_lr)
        self.assertEqual(optim_wrapper.optimizer.param_groups[0]['lr'],
                         0.01 * (real_bs / 16))

        # Test when optim_wrapper is an OptimWrapperDict
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        wrapper_dict = OptimWrapperDict(wrapper=optim_wrapper)
        runner.scale_lr(wrapper_dict, auto_scale_lr)
        scaled_lr = wrapper_dict['wrapper'].optimizer.param_groups[0]['lr']
        self.assertEqual(scaled_lr, 0.01 * (real_bs / 16))

    def test_build_optim_wrapper(self):
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_build_optim_wrapper'
        runner = Runner.from_cfg(cfg)

        # input should be an Optimizer object or dict
        with self.assertRaisesRegex(TypeError, 'optimizer wrapper should be'):
            runner.build_optim_wrapper('invalid-type')
        # 1. test one optimizer
        # 1.1 input is an Optimizer object
        optimizer = SGD(runner.model.parameters(), lr=0.01)
        optim_wrapper = OptimWrapper(optimizer)
        optim_wrapper = runner.build_optim_wrapper(optim_wrapper)
        self.assertEqual(id(optimizer), id(optim_wrapper.optimizer))
        # 1.2 input is a dict
        optim_wrapper = runner.build_optim_wrapper(
            dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01)))
        self.assertIsInstance(optim_wrapper, OptimWrapper)
        # 1.3 use default OptimWrapper type.
        optim_wrapper = runner.build_optim_wrapper(
            dict(optimizer=dict(type='SGD', lr=0.01)))
        self.assertIsInstance(optim_wrapper, OptimWrapper)

        # 2. test multiple optimizers
        # 2.1 input is a dict which contains multiple optimizer objects
        optimizer1 = SGD(runner.model.linear1.parameters(), lr=0.01)
        optim_wrapper1 = OptimWrapper(optimizer1)
        optimizer2 = Adam(runner.model.linear2.parameters(), lr=0.02)
        optim_wrapper2 = OptimWrapper(optimizer2)
        optim_wrapper_cfg = dict(key1=optim_wrapper1, key2=optim_wrapper2)
        optim_wrapper = runner.build_optim_wrapper(optim_wrapper_cfg)
        self.assertIsInstance(optim_wrapper, OptimWrapperDict)
        self.assertIsInstance(optim_wrapper['key1'].optimizer, SGD)
        self.assertIsInstance(optim_wrapper['key2'].optimizer, Adam)

        # 2.2 each item must be an optimizer object when "type" and
        # "constructor" are not in the config
        optimizer1 = SGD(runner.model.linear1.parameters(), lr=0.01)
        optim_wrapper1 = OptimWrapper(optimizer1)
        optim_wrapper2 = dict(
            type='OptimWrapper', optimizer=dict(type='Adam', lr=0.01))
        optim_cfg = dict(key1=optim_wrapper1, key2=optim_wrapper2)
        with self.assertRaisesRegex(ValueError,
                                    'each item mush be an optimizer object'):
            runner.build_optim_wrapper(optim_cfg)

        # 2.3 input is a dict which contains multiple configs
        optim_wrapper_cfg = dict(
            linear1=dict(
                type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01)),
            linear2=dict(
                type='OptimWrapper', optimizer=dict(type='Adam', lr=0.02)),
            constructor='ToyMultipleOptimizerConstructor')
        optim_wrapper = runner.build_optim_wrapper(optim_wrapper_cfg)
        self.assertIsInstance(optim_wrapper, OptimWrapperDict)
        self.assertIsInstance(optim_wrapper['linear1'].optimizer, SGD)
        self.assertIsInstance(optim_wrapper['linear2'].optimizer, Adam)

    def test_build_param_scheduler(self):
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.experiment_name = 'test_build_param_scheduler'
        runner = Runner.from_cfg(cfg)
        # `build_optim_wrapper` should be called before
        # `build_param_scheduler`
        cfg = dict(type='MultiStepLR', milestones=[1, 2])
        runner.optim_wrapper = dict(
            key1=dict(
                type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01)),
            key2=dict(
                type='OptimWrapper', optimizer=dict(type='Adam', lr=0.02)),
        )
        with self.assertRaisesRegex(AssertionError, 'should be called before'):
            runner.build_param_scheduler(cfg)

        runner.optim_wrapper = runner.build_optim_wrapper(
            dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01)))
        param_schedulers = runner.build_param_scheduler(cfg)
        self.assertIsInstance(param_schedulers, list)
        self.assertEqual(len(param_schedulers), 1)
        self.assertIsInstance(param_schedulers[0], MultiStepLR)
        # 1. test one optimizer and one parameter scheduler
        # 1.1 input is a ParamScheduler object
        param_scheduler = MultiStepLR(runner.optim_wrapper, milestones=[1, 2])
        param_schedulers = runner.build_param_scheduler(param_scheduler)
        self.assertEqual(len(param_schedulers), 1)
        self.assertEqual(id(param_schedulers[0]), id(param_scheduler))

        # 1.2 input is a dict
        param_schedulers = runner.build_param_scheduler(
            dict(type='MultiStepLR', milestones=[1, 2]))
        self.assertEqual(len(param_schedulers), 1)
        self.assertIsInstance(param_schedulers[0], MultiStepLR)

        # 2. test one optimizer and list of parameter schedulers
        # 2.1 input is a list of dict
        cfg = [
            dict(type='MultiStepLR', milestones=[1, 2]),
            dict(type='StepLR', step_size=1)
        ]
        param_schedulers = runner.build_param_scheduler(cfg)
        self.assertEqual(len(param_schedulers), 2)
        self.assertIsInstance(param_schedulers[0], MultiStepLR)
        self.assertIsInstance(param_schedulers[1], StepLR)

        # 2.2 input is a list and some items are ParamScheduler objects
        cfg = [param_scheduler, dict(type='StepLR', step_size=1)]
        param_schedulers = runner.build_param_scheduler(cfg)
        self.assertEqual(len(param_schedulers), 2)
        self.assertIsInstance(param_schedulers[0], MultiStepLR)
        self.assertIsInstance(param_schedulers[1], StepLR)

        # 3. test multiple optimizers and list of parameter schedulers
        optimizer1 = SGD(runner.model.linear1.parameters(), lr=0.01)
        optim_wrapper1 = OptimWrapper(optimizer1)
        optimizer2 = Adam(runner.model.linear2.parameters(), lr=0.02)
        optim_wrapper2 = OptimWrapper(optimizer2)
        optim_wrapper_cfg = dict(key1=optim_wrapper1, key2=optim_wrapper2)
        runner.optim_wrapper = runner.build_optim_wrapper(optim_wrapper_cfg)
        cfg = [
            dict(type='MultiStepLR', milestones=[1, 2]),
            dict(type='StepLR', step_size=1)
        ]
        param_schedulers = runner.build_param_scheduler(cfg)
        self.assertIsInstance(param_schedulers, dict)
        self.assertEqual(len(param_schedulers), 2)
        self.assertEqual(len(param_schedulers['key1']), 2)
        self.assertEqual(len(param_schedulers['key2']), 2)