Unverified commit 7a5d3c83, authored 2 years ago by jbwang1997, committed by GitHub 2 years ago.
[Fix] Replace auto_scale_lr_cfg to auto_scale_lr (#286)
* Replace auto_scale_lr_cfg to auto_scale_lr
* Update
Parent: 931db990
Showing 2 changed files with 26 additions and 27 deletions:
  mmengine/runner/runner.py (+14 −15)
  tests/test_runner/test_runner.py (+12 −12)
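For downstream code and configs the change is a pure rename of the keyword: the dict keeps the same two fields (``base_batch_size``, the batch size the configured lr was tuned for, and ``enable``, the on/off switch). A minimal sketch of the rename, based only on what this diff shows:

    # Old keyword / config key (removed by this commit):
    auto_scale_lr_cfg = dict(base_batch_size=16, enable=False)

    # New keyword / config key (accepted by Runner.__init__ and Runner.from_cfg):
    auto_scale_lr = dict(base_batch_size=16, enable=False)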
mmengine/runner/runner.py (+14 −15)
...
...
@@ -102,7 +102,7 @@ class Runner:
            If ``test_cfg`` specified, :attr:`test_dataloader` should also be
            specified. Defaults to None.
            See :meth:`build_test_loop` for more details.
-        auto_scale_lr_cfg (dict, Optional): Config to scale the learning rate
+        auto_scale_lr (dict, Optional): Config to scale the learning rate
            automatically. It includes ``base_batch_size`` and ``enable``.
            ``base_batch_size`` is the batch size that the optimizer lr is
            based on. ``enable`` is the switch to turn on and off the feature.
...
...
@@ -189,7 +189,7 @@ class Runner:
        >>>         sampler=dict(type='DefaultSampler', shuffle=False),
        >>>         batch_size=1,
        >>>         num_workers=0),
-        >>>     auto_scale_lr_cfg=dict(base_batch_size=16, enable=False),
+        >>>     auto_scale_lr=dict(base_batch_size=16, enable=False),
        >>>     optim_wrapper=dict(type='OptimizerWrapper', optimizer=dict(
        >>>         type='SGD', lr=0.01)),
        >>>     param_scheduler=dict(type='MultiStepLR', milestones=[1, 2]),
...
...
@@ -231,7 +231,7 @@ class Runner:
                 train_cfg: Optional[Dict] = None,
                 val_cfg: Optional[Dict] = None,
                 test_cfg: Optional[Dict] = None,
-                 auto_scale_lr_cfg: Optional[Dict] = None,
+                 auto_scale_lr: Optional[Dict] = None,
                 optim_wrapper: Optional[Union[OptimWrapper, Dict]] = None,
                 param_scheduler: Optional[Union[_ParamScheduler, Dict, List]] = None,
                 val_evaluator: Optional[Union[Evaluator, Dict, List]] = None,
...
...
@@ -279,7 +279,7 @@ class Runner:
        self.optim_wrapper: Optional[Union[OptimWrapper, dict]]
        self.optim_wrapper = optim_wrapper
-        self.auto_scale_lr_cfg = auto_scale_lr_cfg
+        self.auto_scale_lr = auto_scale_lr
        # If there is no need to adjust learning rate, momentum or other
        # parameters of optimizer, param_scheduler can be None
...
...
@@ -420,7 +420,7 @@ class Runner:
            train_cfg=cfg.get('train_cfg'),
            val_cfg=cfg.get('val_cfg'),
            test_cfg=cfg.get('test_cfg'),
-            auto_scale_lr_cfg=cfg.get('auto_scale_lr_cfg'),
+            auto_scale_lr=cfg.get('auto_scale_lr'),
            optim_wrapper=cfg.get('optim_wrapper'),
            param_scheduler=cfg.get('param_scheduler'),
            val_evaluator=cfg.get('val_evaluator'),
...
...
@@ -830,7 +830,7 @@ class Runner:
    def scale_lr(self,
                 optim_wrapper: OptimWrapper,
-                 auto_scale_lr_cfg: Optional[Dict] = None) -> None:
+                 auto_scale_lr: Optional[Dict] = None) -> None:
        """Automatically scaling learning rate in training according to the
        ratio of ``base_batch_size`` in ``autoscalelr_cfg`` and real batch
        size.
...
...
@@ -845,23 +845,22 @@ class Runner:
        Args:
            optim_wrapper (OptimWrapper): An OptimWrapper object whose
                parameter groups' learning rate need to be scaled.
-            auto_scale_lr_cfg (Dict, Optional): Config to scale the learning
+            auto_scale_lr (Dict, Optional): Config to scale the learning
                rate automatically. It includes ``base_batch_size`` and
                ``enable``. ``base_batch_size`` is the batch size that the
                optimizer lr is based on. ``enable`` is the switch to turn on
                and off the feature.
        """
-        if (auto_scale_lr_cfg is None
-                or not auto_scale_lr_cfg.get('enable', False)):
+        if (auto_scale_lr is None or not auto_scale_lr.get('enable', False)):
            return None
-        assert 'base_batch_size' in auto_scale_lr_cfg, \
-            'Lack of `base_batch_size` in `auto_scale_lr_cfg`.'
+        assert 'base_batch_size' in auto_scale_lr, \
+            'Lack of `base_batch_size` in `auto_scale_lr`.'
        dataloader: Union[DataLoader, Dict] = self._train_dataloader
        bs = dataloader.batch_size if isinstance(
            dataloader, DataLoader) else dataloader['batch_size']
        real_bs = self.world_size * bs
-        base_bs = auto_scale_lr_cfg['base_batch_size']
+        base_bs = auto_scale_lr['base_batch_size']
        ratio = float(real_bs) / float(base_bs)
        self.logger.info(f'LR is set based on batch size of {base_bs} '
                         f'and the current batch size is {real_bs}. '
...
...
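As a worked example of the arithmetic in this hunk (the numbers are illustrative, not from the diff): with ``world_size=4``, a per-process ``batch_size=8`` and ``base_batch_size=16``, the real batch size is 32, the ratio is 2.0, and a configured lr of 0.01 ends up as 0.02, which matches the ``0.01 * (real_bs / 16)`` checks in the updated tests further down. The multiplication itself sits in context lines collapsed out of this hunk; a minimal standalone sketch of the same computation, independent of mmengine:

    def linearly_scaled_lr(lr, world_size, batch_size, base_batch_size):
        """Sketch of the scaling arithmetic shown above (illustrative only)."""
        real_bs = world_size * batch_size            # effective batch size across all ranks
        ratio = float(real_bs) / float(base_batch_size)
        return lr * ratio

    # world_size=4 and batch_size=8 give real_bs=32, ratio=2.0, so lr 0.01 -> 0.02
    assert linearly_scaled_lr(0.01, world_size=4, batch_size=8, base_batch_size=16) == 0.02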
@@ -1521,7 +1520,7 @@ class Runner:
        self.optim_wrapper = self.build_optim_wrapper(self.optim_wrapper)
        # Automatically scaling lr by linear scaling rule
-        self.scale_lr(self.optim_wrapper, self.auto_scale_lr_cfg)
+        self.scale_lr(self.optim_wrapper, self.auto_scale_lr)
        if self.param_schedulers:
            self.param_schedulers = self.build_param_scheduler(  # type: ignore
...
...
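The call above is where the linear scaling rule is applied while building the optimizer wrapper. Conceptually it multiplies the lr of every optimizer param group by the computed ratio, as the ``param_groups[0]['lr']`` assertions in the updated tests confirm. A rough sketch of that step against a bare ``torch.optim.Optimizer`` rather than mmengine's ``OptimWrapper``, purely for illustration and not mmengine's exact implementation:

    import torch
    from torch.optim import SGD

    def apply_lr_ratio(optimizer, ratio):
        # Multiply the learning rate of every param group by the scaling ratio.
        for group in optimizer.param_groups:
            group['lr'] = group['lr'] * ratio

    model = torch.nn.Linear(4, 2)          # toy model, just to create parameters
    opt = SGD(model.parameters(), lr=0.01)
    apply_lr_ratio(opt, ratio=2.0)          # e.g. real_bs=32 against base_batch_size=16
    assert opt.param_groups[0]['lr'] == 0.02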
@@ -1797,8 +1796,8 @@ class Runner:
                self.logger.info('Number of GPU used for current experiment is not '
                                 'consistent with resuming from checkpoint')
-                if (self.auto_scale_lr_cfg is None
-                        or not self.auto_scale_lr_cfg.get('enable', False)):
+                if (self.auto_scale_lr is None
+                        or not self.auto_scale_lr.get('enable', False)):
                    raise RuntimeError(
                        'Cannot automatically rescale lr in resuming. Please '
                        'make sure the number of GPU is consistent with the '
...
...
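Practically, the check above means that resuming a checkpoint on a different number of GPUs only goes through when the renamed option is enabled, so the runner can rescale the lr to the new effective batch size. A hedged config sketch; ``cfg`` is assumed to be an existing mmengine config, and the ``resume``/``load_from`` fields are standard Runner options rather than part of this diff:

    from mmengine.runner import Runner

    cfg.auto_scale_lr = dict(base_batch_size=16, enable=True)   # allow lr rescaling on resume
    cfg.resume = True                        # assumed standard Runner option, not in this diff
    cfg.load_from = 'work_dir/epoch_3.pth'   # hypothetical checkpoint path
    runner = Runner.from_cfg(cfg)
    runner.train()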
tests/test_runner/test_runner.py (+12 −12)
...
...
@@ -260,7 +260,7 @@ class CustomRunner(Runner):
                 train_cfg=None,
                 val_cfg=None,
                 test_cfg=None,
-                 auto_scale_lr_cfg=None,
+                 auto_scale_lr=None,
                 optim_wrapper=None,
                 param_scheduler=None,
                 val_evaluator=None,
...
...
@@ -310,7 +310,7 @@ class TestRunner(TestCase):
                sampler=dict(type='DefaultSampler', shuffle=False),
                batch_size=3,
                num_workers=0),
-            auto_scale_lr_cfg=dict(base_batch_size=16, enable=False),
+            auto_scale_lr=dict(base_batch_size=16, enable=False),
            optim_wrapper=dict(
                type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01)),
            param_scheduler=dict(type='MultiStepLR', milestones=[1, 2]),
...
...
@@ -697,35 +697,35 @@ class TestRunner(TestCase):
        cfg.experiment_name = 'test_scale_lr'
        runner = Runner.from_cfg(cfg)

-        # When no base_batch_size in auto_scale_lr_cfg, an
+        # When no base_batch_size in auto_scale_lr, an
        # assertion error will raise.
-        auto_scale_lr_cfg = dict(enable=True)
+        auto_scale_lr = dict(enable=True)
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        with self.assertRaises(AssertionError):
-            runner.scale_lr(optim_wrapper, auto_scale_lr_cfg)
+            runner.scale_lr(optim_wrapper, auto_scale_lr)

-        # When auto_scale_lr_cfg is None or enable is False, the lr will
+        # When auto_scale_lr is None or enable is False, the lr will
        # not be linearly scaled.
-        auto_scale_lr_cfg = dict(base_batch_size=16, enable=False)
+        auto_scale_lr = dict(base_batch_size=16, enable=False)
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        runner.scale_lr(optim_wrapper)
        self.assertEqual(optim_wrapper.optimizer.param_groups[0]['lr'], 0.01)
-        runner.scale_lr(optim_wrapper, auto_scale_lr_cfg)
+        runner.scale_lr(optim_wrapper, auto_scale_lr)
        self.assertEqual(optim_wrapper.optimizer.param_groups[0]['lr'], 0.01)

-        # When auto_scale_lr_cfg is correct and enable is True, the lr will
+        # When auto_scale_lr is correct and enable is True, the lr will
        # be linearly scaled.
-        auto_scale_lr_cfg = dict(base_batch_size=16, enable=True)
+        auto_scale_lr = dict(base_batch_size=16, enable=True)
        real_bs = runner.world_size * cfg.train_dataloader['batch_size']
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
-        runner.scale_lr(optim_wrapper, auto_scale_lr_cfg)
+        runner.scale_lr(optim_wrapper, auto_scale_lr)
        self.assertEqual(optim_wrapper.optimizer.param_groups[0]['lr'],
                         0.01 * (real_bs / 16))

        # Test when optim_wrapper is an OptimWrapperDict
        optim_wrapper = OptimWrapper(SGD(runner.model.parameters(), lr=0.01))
        wrapper_dict = OptimWrapperDict(wrapper=optim_wrapper)
-        runner.scale_lr(wrapper_dict, auto_scale_lr_cfg)
+        runner.scale_lr(wrapper_dict, auto_scale_lr)
        scaled_lr = wrapper_dict['wrapper'].optimizer.param_groups[0]['lr']
        self.assertEqual(scaled_lr, 0.01 * (real_bs / 16))
...
...
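The touched test file can be run directly with pytest, e.g. `pytest tests/test_runner/test_runner.py -k scale_lr`, assuming pytest and torch are installed and that the test method's name contains "scale_lr", as the `experiment_name` set in the hunk suggests.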