Tianshou/tianshou/utils/lr_scheduler.py
Michael Panchenko 600f4bbd55
Python 3.9, black + ruff formatting (#921)
Preparation for #914 and #920

Changes formatting to ruff and black. Remove python 3.8

## Additional Changes

- Removed flake8 dependencies
- Adjusted pre-commit. Now CI and Make use pre-commit, reducing the
duplication of linting calls
- Removed check-docstyle option (ruff is doing that)
- Merged format and lint. In CI the format-lint step fails if any
changes are done, so it fulfills the lint functionality.

---------

Co-authored-by: Jiayi Weng <jiayi@openai.com>
2023-08-25 14:40:56 -07:00

41 lines
1.3 KiB
Python

import torch
class MultipleLRSchedulers:
    """A wrapper for multiple learning rate schedulers.

    Every time :meth:`~tianshou.utils.MultipleLRSchedulers.step` is called,
    it calls the step() method of each of the schedulers that it contains.
    Example usage:
    ::

        scheduler1 = ConstantLR(opt1, factor=0.1, total_iters=2)
        scheduler2 = ExponentialLR(opt2, gamma=0.9)
        scheduler = MultipleLRSchedulers(scheduler1, scheduler2)
        policy = PPOPolicy(..., lr_scheduler=scheduler)
    """

    def __init__(self, *args: torch.optim.lr_scheduler.LambdaLR) -> None:
        # Stored as the varargs tuple; iteration order below follows
        # the order in which the schedulers were passed.
        self.schedulers = args

    def step(self) -> None:
        """Take a step in each of the learning rate schedulers."""
        for scheduler in self.schedulers:
            scheduler.step()

    def state_dict(self) -> list[dict]:
        """Get state_dict for each of the learning rate schedulers.

        :return: A list of state_dict of learning rate schedulers.
        """
        return [s.state_dict() for s in self.schedulers]

    def load_state_dict(self, state_dict: list[dict]) -> None:
        """Load states from state_dict.

        :param state_dict: A list of learning rate scheduler state_dict,
            in the same order as the schedulers.
        """
        # NOTE(review): restores via __dict__.update rather than the
        # scheduler's own load_state_dict — presumably deliberate (e.g. to
        # sidestep LambdaLR's special handling of lr_lambdas); confirm
        # before changing.
        for s, sd in zip(self.schedulers, state_dict):
            s.__dict__.update(sd)