Feature/algo eval (#1074)
# Changes

## Dependencies
- New extra "eval"

## API Extensions
- `Experiment` and `ExperimentConfig` now have a `name`, which can be overridden when `Experiment.run()` is called
- When building an `Experiment` from an `ExperimentConfig`, the user has the option to add seed information to the name.
- New method `build_default_seeded_experiments` (on the experiment builders) for creating multiple identically configured experiments that differ only in their non-intersecting seeds
- `SamplingConfig` now has an explicit `train_seed`; the `test_seed` is inferred from it.
- New `evaluation` package for repeating the same experiment with multiple seeds and aggregating the results (an important extension!). Currently in alpha state.
- Loggers can now restore logged data into Python via the new `restore_logged_data`

## Breaking Changes
- `AtariEnvFactory` (in examples) now receives explicit train and test seeds
- `EnvFactoryRegistered` now requires an explicit `test_seed`
- `BaseLogger.prepare_dict_for_logging` is now abstract

---------

Co-authored-by: Maximilian Huettenrauch <m.huettenrauch@appliedai.de>
Co-authored-by: Michael Panchenko <m.panchenko@appliedai.de>
Co-authored-by: Michael Panchenko <35432522+MischaPanch@users.noreply.github.com>
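The new multi-seed evaluation workflow can be seen end to end in the example script added by this commit (`examples/mujoco/mujoco_ppo_hl_multi.py`, included in the diff below). The following is a condensed sketch of that workflow, not a verbatim excerpt: parameter values are illustrative, the algorithm-specific builder configuration is omitted, and fields not set explicitly are assumed to have defaults.

```python
# Condensed sketch of the multi-seed evaluation workflow introduced in this PR,
# based on examples/mujoco/mujoco_ppo_hl_multi.py. Values are illustrative.
from examples.mujoco.mujoco_env import MujocoEnvFactory
from tianshou.evaluation.launcher import RegisteredExpLauncher
from tianshou.evaluation.rliable_evaluation_hl import RLiableExperimentResult
from tianshou.highlevel.config import SamplingConfig
from tianshou.highlevel.experiment import ExperimentConfig, PPOExperimentBuilder

experiment_config = ExperimentConfig()  # assuming default config values
experiment_config.persistence_base_dir = "log/Ant-v4/ppo/multi"
experiment_config.watch = False

# Other SamplingConfig fields are left at their defaults here.
sampling_config = SamplingConfig(num_epochs=3, step_per_epoch=30000)

# Train and test seeds now come from SamplingConfig instead of a single experiment seed.
env_factory = MujocoEnvFactory(
    "Ant-v4",
    train_seed=sampling_config.train_seed,
    test_seed=sampling_config.test_seed,
    obs_norm=True,
)

# One experiment per seed, with non-intersecting policy/train/test seeds.
# (PPO parameters and actor/critic factories omitted; see the full script in the diff.)
experiments = PPOExperimentBuilder(
    env_factory, experiment_config, sampling_config
).build_default_seeded_experiments(5)

launcher = RegisteredExpLauncher.sequential.create_launcher()  # or RegisteredExpLauncher.joblib
launcher.launch(experiments)

# Aggregate the per-seed results with the rliable-based evaluation.
rliable_result = RLiableExperimentResult.load_from_disk(experiment_config.persistence_base_dir)
rliable_result.eval_results(show_plots=True, save_plots=True)
```

Sequential execution is the default; the `joblib` launcher enables parallel runs, with the caveats noted in the example script's docstring.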
parent 9c0b3e7292
commit ade85ab32b
.github/workflows/lint_and_docs.yml (vendored, 2 changes)
@@ -29,7 +29,7 @@ jobs:
           key: venv-${{ hashFiles('poetry.lock') }}
       - name: Install the project dependencies
         run: |
-          poetry install --with dev
+          poetry install --with dev --extras "eval"
       - name: Lint
         run: poetry run poe lint
       - name: Types
.github/workflows/pytest.yml (vendored, 2 changes)
@@ -33,7 +33,7 @@ jobs:
           key: venv-${{ hashFiles('poetry.lock') }}
       - name: Install the project dependencies
         run: |
-          poetry install --with dev --extras "envpool"
+          poetry install --with dev --extras "envpool eval"
       - name: wandb login
         run: |
           poetry run wandb login e2366d661b89f2bee877c40bee15502d67b7abef
@@ -261,4 +261,8 @@ BA
BH
BO
BD

configs
postfix
backend
rliable
hl
@@ -66,7 +66,13 @@ def main(
         replay_buffer_save_only_last_obs=True,
     )

-    env_factory = AtariEnvFactory(task, experiment_config.seed, frames_stack, scale=scale_obs)
+    env_factory = AtariEnvFactory(
+        task,
+        sampling_config.train_seed,
+        sampling_config.test_seed,
+        frames_stack,
+        scale=scale_obs,
+    )

     builder = (
         DQNExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -98,7 +104,7 @@ def main(
     )

     experiment = builder.build()
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -65,7 +65,13 @@ def main(
         replay_buffer_save_only_last_obs=True,
     )

-    env_factory = AtariEnvFactory(task, experiment_config.seed, frames_stack, scale=scale_obs)
+    env_factory = AtariEnvFactory(
+        task,
+        sampling_config.train_seed,
+        sampling_config.test_seed,
+        frames_stack,
+        scale=scale_obs,
+    )

     experiment = (
         IQNExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -90,7 +96,7 @@ def main(
         .with_epoch_stop_callback(AtariEpochStopCallback(task))
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -70,7 +70,13 @@ def main(
         replay_buffer_save_only_last_obs=True,
     )

-    env_factory = AtariEnvFactory(task, experiment_config.seed, frames_stack)
+    env_factory = AtariEnvFactory(
+        task,
+        sampling_config.train_seed,
+        sampling_config.test_seed,
+        frames_stack,
+        scale=scale_obs,
+    )

     builder = (
         PPOExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -109,7 +115,7 @@ def main(
         ),
     )
     experiment = builder.build()
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -66,7 +66,13 @@ def main(
         replay_buffer_save_only_last_obs=True,
     )

-    env_factory = AtariEnvFactory(task, experiment_config.seed, frames_stack, scale=scale_obs)
+    env_factory = AtariEnvFactory(
+        task,
+        sampling_config.train_seed,
+        sampling_config.test_seed,
+        frames_stack,
+        scale=scale_obs,
+    )

     builder = (
         DiscreteSACExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -97,7 +103,7 @@ def main(
         ),
     )
     experiment = builder.build()
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -383,7 +383,7 @@ def make_atari_env(

     :return: a tuple of (single env, training envs, test envs).
     """
-    env_factory = AtariEnvFactory(task, seed, frame_stack, scale=bool(scale))
+    env_factory = AtariEnvFactory(task, seed, seed + training_num, frame_stack, scale=bool(scale))
     envs = env_factory.create_envs(training_num, test_num)
     return envs.env, envs.train_envs, envs.test_envs

@@ -392,7 +392,8 @@ class AtariEnvFactory(EnvFactoryRegistered):
     def __init__(
         self,
         task: str,
-        seed: int,
+        train_seed: int,
+        test_seed: int,
         frame_stack: int,
         scale: bool = False,
         use_envpool_if_available: bool = True,
@@ -409,7 +410,8 @@ class AtariEnvFactory(EnvFactoryRegistered):
             log.info("Not using envpool, because it is not available")
         super().__init__(
             task=task,
-            seed=seed,
+            train_seed=train_seed,
+            test_seed=test_seed,
             venv_type=VectorEnvType.SUBPROC_SHARED_MEM,
             envpool_factory=envpool_factory,
         )
@@ -16,7 +16,13 @@ from tianshou.utils.logging import run_main
 def main() -> None:
     experiment = (
         DQNExperimentBuilder(
-            EnvFactoryRegistered(task="CartPole-v1", seed=0, venv_type=VectorEnvType.DUMMY),
+            EnvFactoryRegistered(
+                task="CartPole-v1",
+                seed=0,
+                venv_type=VectorEnvType.DUMMY,
+                train_seed=0,
+                test_seed=10,
+            ),
             ExperimentConfig(
                 persistence_enabled=False,
                 watch=True,
@@ -54,7 +54,12 @@ def main(
         repeat_per_collect=repeat_per_collect,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=True)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=True,
+    )

     experiment = (
         A2CExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -78,7 +83,7 @@ def main(
         .with_critic_factory_default(hidden_sizes, nn.Tanh)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -51,7 +51,12 @@ def main(
         start_timesteps_random=True,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=False)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=False,
+    )

     experiment = (
         DDPGExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -69,7 +74,7 @@ def main(
         .with_critic_factory_default(hidden_sizes)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -37,7 +37,7 @@ def make_mujoco_env(

     :return: a tuple of (single env, training envs, test envs).
     """
-    envs = MujocoEnvFactory(task, seed, obs_norm=obs_norm).create_envs(
+    envs = MujocoEnvFactory(task, seed, seed + num_train_envs, obs_norm=obs_norm).create_envs(
         num_train_envs,
         num_test_envs,
     )
@@ -73,13 +73,15 @@ class MujocoEnvFactory(EnvFactoryRegistered):
     def __init__(
         self,
         task: str,
-        seed: int,
+        train_seed: int,
+        test_seed: int,
         obs_norm: bool = True,
         venv_type: VectorEnvType = VectorEnvType.SUBPROC_SHARED_MEM,
     ) -> None:
         super().__init__(
             task=task,
-            seed=seed,
+            train_seed=train_seed,
+            test_seed=test_seed,
             venv_type=venv_type,
             envpool_factory=EnvPoolFactory() if envpool_is_available else None,
         )
@@ -56,7 +56,12 @@ def main(
         repeat_per_collect=repeat_per_collect,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=True)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=True,
+    )

     experiment = (
         NPGExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -80,7 +85,7 @@ def main(
         .with_critic_factory_default(hidden_sizes, torch.nn.Tanh)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -61,7 +61,12 @@ def main(
         repeat_per_collect=repeat_per_collect,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=True)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=True,
+    )

     experiment = (
         PPOExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -90,7 +95,7 @@ def main(
         .with_critic_factory_default(hidden_sizes, torch.nn.Tanh)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
examples/mujoco/mujoco_ppo_hl_multi.py (new file, 184 additions)
@@ -0,0 +1,184 @@
#!/usr/bin/env python3
"""The high-level multi experiment script demonstrates how to use the high-level API of TianShou to train
a single configuration of an experiment (here a PPO agent on mujoco) with multiple non-intersecting seeds.
Thus, the experiment will be repeated `num_experiments` times.
For each repetition, a policy seed, train env seeds, and test env seeds are set that
are non-intersecting with the seeds of the other experiments.
Each experiment's results are stored in a separate subdirectory.

The final results are aggregated and turned into useful statistics with the rliable API.
The call to `eval_experiments` will load the results from the log directory and
create an interp-quantile mean plot for the returns as well as a performance profile plot.
These plots are saved in the log directory and displayed in the console.
"""

import os
import sys
from collections.abc import Sequence
from typing import Literal

import torch

from examples.mujoco.mujoco_env import MujocoEnvFactory
from tianshou.evaluation.launcher import RegisteredExpLauncher
from tianshou.evaluation.rliable_evaluation_hl import RLiableExperimentResult
from tianshou.highlevel.config import SamplingConfig
from tianshou.highlevel.env import VectorEnvType
from tianshou.highlevel.experiment import (
    ExperimentConfig,
    PPOExperimentBuilder,
)
from tianshou.highlevel.logger import LoggerFactoryDefault
from tianshou.highlevel.params.dist_fn import (
    DistributionFunctionFactoryIndependentGaussians,
)
from tianshou.highlevel.params.lr_scheduler import LRSchedulerFactoryLinear
from tianshou.highlevel.params.policy_params import PPOParams
from tianshou.utils import logging
from tianshou.utils.logging import datetime_tag

log = logging.getLogger(__name__)


def main(
    experiment_config: ExperimentConfig,
    task: str = "Ant-v4",
    num_experiments: int = 5,
    buffer_size: int = 4096,
    hidden_sizes: Sequence[int] = (64, 64),
    lr: float = 3e-4,
    gamma: float = 0.99,
    epoch: int = 3,
    step_per_epoch: int = 30000,
    step_per_collect: int = 2048,
    repeat_per_collect: int = 10,
    batch_size: int = 64,
    training_num: int = 10,
    test_num: int = 10,
    rew_norm: bool = True,
    vf_coef: float = 0.25,
    ent_coef: float = 0.0,
    gae_lambda: float = 0.95,
    bound_action_method: Literal["clip", "tanh"] | None = "clip",
    lr_decay: bool = True,
    max_grad_norm: float = 0.5,
    eps_clip: float = 0.2,
    dual_clip: float | None = None,
    value_clip: bool = False,
    norm_adv: bool = False,
    recompute_adv: bool = True,
    run_experiments_sequentially: bool = True,
) -> str:
    """Use the high-level API of TianShou to evaluate the PPO algorithm on a MuJoCo environment with multiple seeds for
    a given configuration. The results for each run are stored in separate sub-folders. After the agents are trained,
    the results are evaluated using the rliable API.

    :param experiment_config:
    :param task: a mujoco task name
    :param num_experiments: how many experiments to run with different seeds
    :param buffer_size:
    :param hidden_sizes:
    :param lr:
    :param gamma:
    :param epoch:
    :param step_per_epoch:
    :param step_per_collect:
    :param repeat_per_collect:
    :param batch_size:
    :param training_num:
    :param test_num:
    :param rew_norm:
    :param vf_coef:
    :param ent_coef:
    :param gae_lambda:
    :param bound_action_method:
    :param lr_decay:
    :param max_grad_norm:
    :param eps_clip:
    :param dual_clip:
    :param value_clip:
    :param norm_adv:
    :param recompute_adv:
    :param run_experiments_sequentially: if True, the experiments are run sequentially, otherwise in parallel.
        LIMITATIONS: currently, the parallel execution does not seem to work properly on linux.
        It might generally be undesired to run multiple experiments in parallel on the same machine,
        as a single experiment already uses all available CPU cores by default.
    :return: the directory where the results are stored
    """
    persistence_dir = os.path.abspath(os.path.join("log", task, "ppo", datetime_tag()))

    experiment_config.persistence_base_dir = persistence_dir
    log.info(f"Will save all experiment results to {persistence_dir}.")
    experiment_config.watch = False

    sampling_config = SamplingConfig(
        num_epochs=epoch,
        step_per_epoch=step_per_epoch,
        batch_size=batch_size,
        num_train_envs=training_num,
        num_test_envs=test_num,
        num_test_episodes=test_num,
        buffer_size=buffer_size,
        step_per_collect=step_per_collect,
        repeat_per_collect=repeat_per_collect,
    )

    env_factory = MujocoEnvFactory(
        task,
        train_seed=sampling_config.train_seed,
        test_seed=sampling_config.test_seed,
        obs_norm=True,
        venv_type=VectorEnvType.SUBPROC_SHARED_MEM_FORK_CONTEXT
        if sys.platform == "darwin"
        else VectorEnvType.SUBPROC_SHARED_MEM,
    )

    experiments = (
        PPOExperimentBuilder(env_factory, experiment_config, sampling_config)
        .with_ppo_params(
            PPOParams(
                discount_factor=gamma,
                gae_lambda=gae_lambda,
                action_bound_method=bound_action_method,
                reward_normalization=rew_norm,
                ent_coef=ent_coef,
                vf_coef=vf_coef,
                max_grad_norm=max_grad_norm,
                value_clip=value_clip,
                advantage_normalization=norm_adv,
                eps_clip=eps_clip,
                dual_clip=dual_clip,
                recompute_advantage=recompute_adv,
                lr=lr,
                lr_scheduler_factory=LRSchedulerFactoryLinear(sampling_config)
                if lr_decay
                else None,
                dist_fn=DistributionFunctionFactoryIndependentGaussians(),
            ),
        )
        .with_actor_factory_default(hidden_sizes, torch.nn.Tanh, continuous_unbounded=True)
        .with_critic_factory_default(hidden_sizes, torch.nn.Tanh)
        .with_logger_factory(LoggerFactoryDefault("tensorboard"))
        .build_default_seeded_experiments(num_experiments)
    )

    if run_experiments_sequentially:
        launcher = RegisteredExpLauncher.sequential.create_launcher()
    else:
        launcher = RegisteredExpLauncher.joblib.create_launcher()
    launcher.launch(experiments)

    return persistence_dir


def eval_experiments(log_dir: str) -> RLiableExperimentResult:
    """Evaluate the experiments in the given log directory using the rliable API."""
    rliable_result = RLiableExperimentResult.load_from_disk(log_dir)
    rliable_result.eval_results(show_plots=True, save_plots=True)
    return rliable_result


if __name__ == "__main__":
    log_dir = logging.run_cli(main, level=logging.INFO)
    assert isinstance(log_dir, str)  # for mypy
    evaluation_result = eval_experiments(log_dir)
@@ -57,7 +57,12 @@ def main(
         start_timesteps_random=True,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=False)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=False,
+    )

     experiment = (
         REDQExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -78,7 +83,7 @@ def main(
         .with_critic_ensemble_factory_default(hidden_sizes)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -49,7 +49,12 @@ def main(
         repeat_per_collect=repeat_per_collect,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=True)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=True,
+    )

     experiment = (
         PGExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -67,7 +72,7 @@ def main(
         .with_actor_factory_default(hidden_sizes, torch.nn.Tanh, continuous_unbounded=True)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -52,7 +52,12 @@ def main(
         start_timesteps_random=True,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=False)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=False,
+    )

     experiment = (
         SACExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -75,7 +80,7 @@ def main(
         .with_common_critic_factory_default(hidden_sizes)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -58,7 +58,12 @@ def main(
         start_timesteps_random=True,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=False)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=False,
+    )

     experiment = (
         TD3ExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -80,7 +85,7 @@ def main(
         .with_common_critic_factory_default(hidden_sizes, torch.nn.Tanh)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
@@ -58,7 +58,12 @@ def main(
         repeat_per_collect=repeat_per_collect,
     )

-    env_factory = MujocoEnvFactory(task, experiment_config.seed, obs_norm=True)
+    env_factory = MujocoEnvFactory(
+        task,
+        train_seed=sampling_config.train_seed,
+        test_seed=sampling_config.test_seed,
+        obs_norm=True,
+    )

     experiment = (
         TRPOExperimentBuilder(env_factory, experiment_config, sampling_config)
@@ -84,7 +89,7 @@ def main(
         .with_critic_factory_default(hidden_sizes, torch.nn.Tanh)
         .build()
     )
-    experiment.run(log_name)
+    experiment.run(override_experiment_name=log_name)


 if __name__ == "__main__":
poetry.lock (generated, 690 changes)
Regenerated lock file. It adds the packages pulled in by the new "eval" extra and its plotting/statistics dependencies, each with the usual generated metadata (description, supported Python versions, wheel/sdist hashes, dependencies and extras), among them:

- arch 5.3.1 (optional)
- contourpy 1.2.1
- cycler 0.12.1
- fonttools 4.51.0
- joblib 1.4.0 (optional)
- kiwisolver 1.4.5
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matplotlib"
|
||||
version = "3.8.4"
|
||||
description = "Python plotting package"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
files = [
|
||||
{file = "matplotlib-3.8.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:abc9d838f93583650c35eca41cfcec65b2e7cb50fd486da6f0c49b5e1ed23014"},
|
||||
{file = "matplotlib-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f65c9f002d281a6e904976007b2d46a1ee2bcea3a68a8c12dda24709ddc9106"},
|
||||
{file = "matplotlib-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce1edd9f5383b504dbc26eeea404ed0a00656c526638129028b758fd43fc5f10"},
|
||||
{file = "matplotlib-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd79298550cba13a43c340581a3ec9c707bd895a6a061a78fa2524660482fc0"},
|
||||
{file = "matplotlib-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:90df07db7b599fe7035d2f74ab7e438b656528c68ba6bb59b7dc46af39ee48ef"},
|
||||
{file = "matplotlib-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:ac24233e8f2939ac4fd2919eed1e9c0871eac8057666070e94cbf0b33dd9c338"},
|
||||
{file = "matplotlib-3.8.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:72f9322712e4562e792b2961971891b9fbbb0e525011e09ea0d1f416c4645661"},
|
||||
{file = "matplotlib-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:232ce322bfd020a434caaffbd9a95333f7c2491e59cfc014041d95e38ab90d1c"},
|
||||
{file = "matplotlib-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6addbd5b488aedb7f9bc19f91cd87ea476206f45d7116fcfe3d31416702a82fa"},
|
||||
{file = "matplotlib-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc4ccdc64e3039fc303defd119658148f2349239871db72cd74e2eeaa9b80b71"},
|
||||
{file = "matplotlib-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b7a2a253d3b36d90c8993b4620183b55665a429da8357a4f621e78cd48b2b30b"},
|
||||
{file = "matplotlib-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:8080d5081a86e690d7688ffa542532e87f224c38a6ed71f8fbed34dd1d9fedae"},
|
||||
{file = "matplotlib-3.8.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6485ac1f2e84676cff22e693eaa4fbed50ef5dc37173ce1f023daef4687df616"},
|
||||
{file = "matplotlib-3.8.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c89ee9314ef48c72fe92ce55c4e95f2f39d70208f9f1d9db4e64079420d8d732"},
|
||||
{file = "matplotlib-3.8.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50bac6e4d77e4262c4340d7a985c30912054745ec99756ce213bfbc3cb3808eb"},
|
||||
{file = "matplotlib-3.8.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f51c4c869d4b60d769f7b4406eec39596648d9d70246428745a681c327a8ad30"},
|
||||
{file = "matplotlib-3.8.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b12ba985837e4899b762b81f5b2845bd1a28f4fdd1a126d9ace64e9c4eb2fb25"},
|
||||
{file = "matplotlib-3.8.4-cp312-cp312-win_amd64.whl", hash = "sha256:7a6769f58ce51791b4cb8b4d7642489df347697cd3e23d88266aaaee93b41d9a"},
|
||||
{file = "matplotlib-3.8.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:843cbde2f0946dadd8c5c11c6d91847abd18ec76859dc319362a0964493f0ba6"},
|
||||
{file = "matplotlib-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c13f041a7178f9780fb61cc3a2b10423d5e125480e4be51beaf62b172413b67"},
|
||||
{file = "matplotlib-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb44f53af0a62dc80bba4443d9b27f2fde6acfdac281d95bc872dc148a6509cc"},
|
||||
{file = "matplotlib-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:606e3b90897554c989b1e38a258c626d46c873523de432b1462f295db13de6f9"},
|
||||
{file = "matplotlib-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9bb0189011785ea794ee827b68777db3ca3f93f3e339ea4d920315a0e5a78d54"},
|
||||
{file = "matplotlib-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:6209e5c9aaccc056e63b547a8152661324404dd92340a6e479b3a7f24b42a5d0"},
|
||||
{file = "matplotlib-3.8.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c7064120a59ce6f64103c9cefba8ffe6fba87f2c61d67c401186423c9a20fd35"},
|
||||
{file = "matplotlib-3.8.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0e47eda4eb2614300fc7bb4657fced3e83d6334d03da2173b09e447418d499f"},
|
||||
{file = "matplotlib-3.8.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:493e9f6aa5819156b58fce42b296ea31969f2aab71c5b680b4ea7a3cb5c07d94"},
|
||||
{file = "matplotlib-3.8.4.tar.gz", hash = "sha256:8aac397d5e9ec158960e31c381c5ffc52ddd52bd9a47717e2a694038167dffea"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
contourpy = ">=1.0.1"
|
||||
cycler = ">=0.10"
|
||||
fonttools = ">=4.22.0"
|
||||
kiwisolver = ">=1.3.1"
|
||||
numpy = ">=1.21"
|
||||
packaging = ">=20.0"
|
||||
pillow = ">=8"
|
||||
pyparsing = ">=2.3.1"
|
||||
python-dateutil = ">=2.7"
|
||||
|
||||
[[package]]
|
||||
name = "matplotlib-inline"
|
||||
version = "0.1.6"
|
||||
@ -3257,6 +3611,7 @@ optional = false
|
||||
python-versions = ">=3"
|
||||
files = [
|
||||
{file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"},
|
||||
{file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux2014_aarch64.whl", hash = "sha256:211a63e7b30a9d62f1a853e19928fbb1a750e3f17a13a3d1f98ff0ced19478dd"},
|
||||
{file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"},
|
||||
]
|
||||
|
||||
@ -3402,6 +3757,133 @@ files = [
|
||||
{file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pandas"
|
||||
version = "2.1.0"
|
||||
description = "Powerful data structures for data analysis, time series, and statistics"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
files = [
|
||||
{file = "pandas-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:40dd20439ff94f1b2ed55b393ecee9cb6f3b08104c2c40b0cb7186a2f0046242"},
|
||||
{file = "pandas-2.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d4f38e4fedeba580285eaac7ede4f686c6701a9e618d8a857b138a126d067f2f"},
|
||||
{file = "pandas-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6a0fe052cf27ceb29be9429428b4918f3740e37ff185658f40d8702f0b3e09"},
|
||||
{file = "pandas-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d81e1813191070440d4c7a413cb673052b3b4a984ffd86b8dd468c45742d3cc"},
|
||||
{file = "pandas-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eb20252720b1cc1b7d0b2879ffc7e0542dd568f24d7c4b2347cb035206936421"},
|
||||
{file = "pandas-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:38f74ef7ebc0ffb43b3d633e23d74882bce7e27bfa09607f3c5d3e03ffd9a4a5"},
|
||||
{file = "pandas-2.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cda72cc8c4761c8f1d97b169661f23a86b16fdb240bdc341173aee17e4d6cedd"},
|
||||
{file = "pandas-2.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d97daeac0db8c993420b10da4f5f5b39b01fc9ca689a17844e07c0a35ac96b4b"},
|
||||
{file = "pandas-2.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c58b1113892e0c8078f006a167cc210a92bdae23322bb4614f2f0b7a4b510f"},
|
||||
{file = "pandas-2.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:629124923bcf798965b054a540f9ccdfd60f71361255c81fa1ecd94a904b9dd3"},
|
||||
{file = "pandas-2.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:70cf866af3ab346a10debba8ea78077cf3a8cd14bd5e4bed3d41555a3280041c"},
|
||||
{file = "pandas-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d53c8c1001f6a192ff1de1efe03b31a423d0eee2e9e855e69d004308e046e694"},
|
||||
{file = "pandas-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86f100b3876b8c6d1a2c66207288ead435dc71041ee4aea789e55ef0e06408cb"},
|
||||
{file = "pandas-2.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28f330845ad21c11db51e02d8d69acc9035edfd1116926ff7245c7215db57957"},
|
||||
{file = "pandas-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9a6ccf0963db88f9b12df6720e55f337447aea217f426a22d71f4213a3099a6"},
|
||||
{file = "pandas-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d99e678180bc59b0c9443314297bddce4ad35727a1a2656dbe585fd78710b3b9"},
|
||||
{file = "pandas-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b31da36d376d50a1a492efb18097b9101bdbd8b3fbb3f49006e02d4495d4c644"},
|
||||
{file = "pandas-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0164b85937707ec7f70b34a6c3a578dbf0f50787f910f21ca3b26a7fd3363437"},
|
||||
{file = "pandas-2.1.0.tar.gz", hash = "sha256:62c24c7fc59e42b775ce0679cfa7b14a5f9bfb7643cfbe708c960699e05fb918"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
numpy = {version = ">=1.23.2", markers = "python_version >= \"3.11\""}
|
||||
python-dateutil = ">=2.8.2"
|
||||
pytz = ">=2020.1"
|
||||
tzdata = ">=2022.1"
|
||||
|
||||
[package.extras]
|
||||
all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
|
||||
aws = ["s3fs (>=2022.05.0)"]
|
||||
clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"]
|
||||
compression = ["zstandard (>=0.17.0)"]
|
||||
computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"]
|
||||
consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
|
||||
excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"]
|
||||
feather = ["pyarrow (>=7.0.0)"]
|
||||
fss = ["fsspec (>=2022.05.0)"]
|
||||
gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"]
|
||||
hdf5 = ["tables (>=3.7.0)"]
|
||||
html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"]
|
||||
mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"]
|
||||
output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"]
|
||||
parquet = ["pyarrow (>=7.0.0)"]
|
||||
performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"]
|
||||
plot = ["matplotlib (>=3.6.1)"]
|
||||
postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"]
|
||||
spss = ["pyreadstat (>=1.1.5)"]
|
||||
sql-other = ["SQLAlchemy (>=1.4.36)"]
|
||||
test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
|
||||
xml = ["lxml (>=4.8.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "pandas"
|
||||
version = "2.2.2"
|
||||
description = "Powerful data structures for data analysis, time series, and statistics"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
files = [
|
||||
{file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"},
|
||||
{file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"},
|
||||
{file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"},
|
||||
{file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"},
|
||||
{file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"},
|
||||
{file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"},
|
||||
{file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"},
|
||||
{file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"},
|
||||
{file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"},
|
||||
{file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"},
|
||||
{file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
numpy = {version = ">=1.23.2", markers = "python_version == \"3.11\""}
|
||||
python-dateutil = ">=2.8.2"
|
||||
pytz = ">=2020.1"
|
||||
tzdata = ">=2022.7"
|
||||
|
||||
[package.extras]
|
||||
all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
|
||||
aws = ["s3fs (>=2022.11.0)"]
|
||||
clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
|
||||
compression = ["zstandard (>=0.19.0)"]
|
||||
computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
|
||||
consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
|
||||
excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
|
||||
feather = ["pyarrow (>=10.0.1)"]
|
||||
fss = ["fsspec (>=2022.11.0)"]
|
||||
gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
|
||||
hdf5 = ["tables (>=3.8.0)"]
|
||||
html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
|
||||
mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
|
||||
output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
|
||||
parquet = ["pyarrow (>=10.0.1)"]
|
||||
performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
|
||||
plot = ["matplotlib (>=3.6.3)"]
|
||||
postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"]
|
||||
pyarrow = ["pyarrow (>=10.0.1)"]
|
||||
spss = ["pyreadstat (>=1.2.0)"]
|
||||
sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"]
|
||||
test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
|
||||
xml = ["lxml (>=4.9.2)"]
|
||||
|
||||
[[package]]
|
||||
name = "pandocfilters"
|
||||
version = "1.5.0"
|
||||
@ -3460,6 +3942,24 @@ files = [
|
||||
{file = "pathtools-0.1.2.tar.gz", hash = "sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "patsy"
|
||||
version = "0.5.6"
|
||||
description = "A Python package for describing statistical models and for building design matrices."
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "patsy-0.5.6-py2.py3-none-any.whl", hash = "sha256:19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6"},
|
||||
{file = "patsy-0.5.6.tar.gz", hash = "sha256:95c6d47a7222535f84bff7f63d7303f2e297747a598db89cf5c67f0c0c7d2cdb"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
numpy = ">=1.4"
|
||||
six = "*"
|
||||
|
||||
[package.extras]
|
||||
test = ["pytest", "pytest-cov", "scipy"]
|
||||
|
||||
[[package]]
|
||||
name = "pettingzoo"
|
||||
version = "1.24.2"
|
||||
@ -3503,7 +4003,7 @@ ptyprocess = ">=0.5"
|
||||
name = "pillow"
|
||||
version = "10.2.0"
|
||||
description = "Python Imaging Library (Fork)"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"},
|
||||
@ -3709,6 +4209,17 @@ files = [
|
||||
[package.dependencies]
|
||||
wcwidth = "*"
|
||||
|
||||
[[package]]
|
||||
name = "property-cached"
|
||||
version = "1.6.4"
|
||||
description = "A decorator for caching properties in classes (forked from cached-property)."
|
||||
optional = true
|
||||
python-versions = ">= 3.5"
|
||||
files = [
|
||||
{file = "property-cached-1.6.4.zip", hash = "sha256:3e9c4ef1ed3653909147510481d7df62a3cfb483461a6986a6f1dcd09b2ebb73"},
|
||||
{file = "property_cached-1.6.4-py2.py3-none-any.whl", hash = "sha256:135fc059ec969c1646424a0db15e7fbe1b5f8c36c0006d0b3c91ba568c11e7d8"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "3.20.3"
|
||||
@ -4094,6 +4605,20 @@ files = [
|
||||
{file = "PyOpenGL-3.1.7.tar.gz", hash = "sha256:eef31a3888e6984fd4d8e6c9961b184c9813ca82604d37fe3da80eb000a76c86"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyparsing"
|
||||
version = "3.1.2"
|
||||
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
|
||||
optional = false
|
||||
python-versions = ">=3.6.8"
|
||||
files = [
|
||||
{file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"},
|
||||
{file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
diagrams = ["jinja2", "railroad-diagrams"]
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "7.4.3"
|
||||
@ -4157,6 +4682,17 @@ files = [
|
||||
{file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytz"
|
||||
version = "2024.1"
|
||||
description = "World timezone definitions, modern and historical"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
|
||||
{file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pywin32"
|
||||
version = "306"
|
||||
@ -4537,6 +5073,28 @@ files = [
|
||||
{file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"},
|
||||
]
|
||||
|
||||
[[package]]
name = "rliable"
version = "1.0.8"
description = "rliable: Reliable evaluation on reinforcement learning and machine learning benchmarks."
optional = true
python-versions = "*"
files = []
develop = false

[package.dependencies]
absl-py = ">=0.9.0"
arch = "5.3.1"
numpy = ">=1.16.4"
scipy = ">=1.7.0"
seaborn = ">=0.11.2"

[package.source]
type = "git"
url = "https://github.com/aai-institute/rliable.git"
reference = "HEAD"
resolved_reference = "c756ac408d15507481166edb252f5b61cf5628ff"

[[package]]
|
||||
name = "rpds-py"
|
||||
version = "0.13.0"
|
||||
@ -4804,6 +5362,27 @@ dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyl
|
||||
doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
|
||||
test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
|
||||
|
||||
[[package]]
|
||||
name = "seaborn"
|
||||
version = "0.13.2"
|
||||
description = "Statistical data visualization"
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987"},
|
||||
{file = "seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
matplotlib = ">=3.4,<3.6.1 || >3.6.1"
|
||||
numpy = ">=1.20,<1.24.0 || >1.24.0"
|
||||
pandas = ">=1.2"
|
||||
|
||||
[package.extras]
|
||||
dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"]
|
||||
docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx (<6.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-issues"]
|
||||
stats = ["scipy (>=1.7)", "statsmodels (>=0.12)"]
|
||||
|
||||
[[package]]
|
||||
name = "send2trash"
|
||||
version = "1.8.2"
|
||||
@ -5631,6 +6210,101 @@ pure-eval = "*"
|
||||
[package.extras]
|
||||
tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
|
||||
|
||||
[[package]]
|
||||
name = "statsmodels"
|
||||
version = "0.14.0"
|
||||
description = "Statistical computations and models for Python"
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "statsmodels-0.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:16bfe0c96a53b20fa19067e3b6bd2f1d39e30d4891ea0d7bc20734a0ae95942d"},
|
||||
{file = "statsmodels-0.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5a6a0a1a06ff79be8aa89c8494b33903442859add133f0dda1daf37c3c71682e"},
|
||||
{file = "statsmodels-0.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77b3cd3a5268ef966a0a08582c591bd29c09c88b4566c892a7c087935234f285"},
|
||||
{file = "statsmodels-0.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c64ebe9cf376cba0c31aed138e15ed179a1d128612dd241cdf299d159e5e882"},
|
||||
{file = "statsmodels-0.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:229b2f676b4a45cb62d132a105c9c06ca8a09ffba060abe34935391eb5d9ba87"},
|
||||
{file = "statsmodels-0.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb471f757fc45102a87e5d86e87dc2c8c78b34ad4f203679a46520f1d863b9da"},
|
||||
{file = "statsmodels-0.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:582f9e41092e342aaa04920d17cc3f97240e3ee198672f194719b5a3d08657d6"},
|
||||
{file = "statsmodels-0.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7ebe885ccaa64b4bc5ad49ac781c246e7a594b491f08ab4cfd5aa456c363a6f6"},
|
||||
{file = "statsmodels-0.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b587ee5d23369a0e881da6e37f78371dce4238cf7638a455db4b633a1a1c62d6"},
|
||||
{file = "statsmodels-0.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef7fa4813c7a73b0d8a0c830250f021c102c71c95e9fe0d6877bcfb56d38b8c"},
|
||||
{file = "statsmodels-0.14.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afe80544ef46730ea1b11cc655da27038bbaa7159dc5af4bc35bbc32982262f2"},
|
||||
{file = "statsmodels-0.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:a6ad7b8aadccd4e4dd7f315a07bef1bca41d194eeaf4ec600d20dea02d242fce"},
|
||||
{file = "statsmodels-0.14.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0eea4a0b761aebf0c355b726ac5616b9a8b618bd6e81a96b9f998a61f4fd7484"},
|
||||
{file = "statsmodels-0.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4c815ce7a699047727c65a7c179bff4031cff9ae90c78ca730cfd5200eb025dd"},
|
||||
{file = "statsmodels-0.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:575f61337c8e406ae5fa074d34bc6eb77b5a57c544b2d4ee9bc3da6a0a084cf1"},
|
||||
{file = "statsmodels-0.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8be53cdeb82f49c4cb0fda6d7eeeb2d67dbd50179b3e1033510e061863720d93"},
|
||||
{file = "statsmodels-0.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6f7d762df4e04d1dde8127d07e91aff230eae643aa7078543e60e83e7d5b40db"},
|
||||
{file = "statsmodels-0.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:fc2c7931008a911e3060c77ea8933f63f7367c0f3af04f82db3a04808ad2cd2c"},
|
||||
{file = "statsmodels-0.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3757542c95247e4ab025291a740efa5da91dc11a05990c033d40fce31c450dc9"},
|
||||
{file = "statsmodels-0.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:de489e3ed315bdba55c9d1554a2e89faa65d212e365ab81bc323fa52681fc60e"},
|
||||
{file = "statsmodels-0.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76e290f4718177bffa8823a780f3b882d56dd64ad1c18cfb4bc8b5558f3f5757"},
|
||||
{file = "statsmodels-0.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71054f9dbcead56def14e3c9db6f66f943110fdfb19713caf0eb0f08c1ec03fd"},
|
||||
{file = "statsmodels-0.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:d7fda067837df94e0a614d93d3a38fb6868958d37f7f50afe2a534524f2660cb"},
|
||||
{file = "statsmodels-0.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1c7724ad573af26139a98393ae64bc318d1b19762b13442d96c7a3e793f495c3"},
|
||||
{file = "statsmodels-0.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b0a135f3bfdeec987e36e3b3b4c53e0bb87a8d91464d2fcc4d169d176f46fdb"},
|
||||
{file = "statsmodels-0.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce28eb1c397dba437ec39b9ab18f2101806f388c7a0cf9cdfd8f09294ad1c799"},
|
||||
{file = "statsmodels-0.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b1c768dd94cc5ba8398121a632b673c625491aa7ed627b82cb4c880a25563f"},
|
||||
{file = "statsmodels-0.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d1e3e10dfbfcd58119ba5a4d3c7d519182b970a2aebaf0b6f539f55ae16058d"},
|
||||
{file = "statsmodels-0.14.0.tar.gz", hash = "sha256:6875c7d689e966d948f15eb816ab5616f4928706b180cf470fd5907ab6f647a4"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
numpy = ">=1.18"
|
||||
packaging = ">=21.3"
|
||||
pandas = ">=1.0"
|
||||
patsy = ">=0.5.2"
|
||||
scipy = ">=1.4,<1.9.2 || >1.9.2"
|
||||
|
||||
[package.extras]
|
||||
build = ["cython (>=0.29.26)"]
|
||||
develop = ["colorama", "cython (>=0.29.26)", "cython (>=0.29.28,<3.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.0.1,<7.1.0)", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=7.0.0,<7.1.0)"]
|
||||
docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"]
|
||||
|
||||
[[package]]
|
||||
name = "statsmodels"
|
||||
version = "0.14.2"
|
||||
description = "Statistical computations and models for Python"
|
||||
optional = true
|
||||
python-versions = ">=3.9"
|
||||
files = [
|
||||
{file = "statsmodels-0.14.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df5d6f95c46f0341da6c79ee7617e025bf2b371e190a8e60af1ae9cabbdb7a97"},
|
||||
{file = "statsmodels-0.14.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a87ef21fadb445b650f327340dde703f13aec1540f3d497afb66324499dea97a"},
|
||||
{file = "statsmodels-0.14.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5827a12e3ede2b98a784476d61d6bec43011fedb64aa815f2098e0573bece257"},
|
||||
{file = "statsmodels-0.14.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f2b7611a61adb7d596a6d239abdf1a4d5492b931b00d5ed23d32844d40e48e"},
|
||||
{file = "statsmodels-0.14.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c254c66142f1167b4c7d031cf8db55294cc62ff3280e090fc45bd10a7f5fd029"},
|
||||
{file = "statsmodels-0.14.2-cp310-cp310-win_amd64.whl", hash = "sha256:0e46e9d59293c1af4cc1f4e5248f17e7e7bc596bfce44d327c789ac27f09111b"},
|
||||
{file = "statsmodels-0.14.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:50fcb633987779e795142f51ba49fb27648d46e8a1382b32ebe8e503aaabaa9e"},
|
||||
{file = "statsmodels-0.14.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:876794068abfaeed41df71b7887000031ecf44fbfa6b50d53ccb12ebb4ab747a"},
|
||||
{file = "statsmodels-0.14.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a91f6c4943de13e3ce2e20ee3b5d26d02bd42300616a421becd53756f5deb37"},
|
||||
{file = "statsmodels-0.14.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4864a1c4615c5ea5f2e3b078a75bdedc90dd9da210a37e0738e064b419eccee2"},
|
||||
{file = "statsmodels-0.14.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afbd92410e0df06f3d8c4e7c0e2e71f63f4969531f280fb66059e2ecdb6e0415"},
|
||||
{file = "statsmodels-0.14.2-cp311-cp311-win_amd64.whl", hash = "sha256:8e004cfad0e46ce73fe3f3812010c746f0d4cfd48e307b45c14e9e360f3d2510"},
|
||||
{file = "statsmodels-0.14.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb0ba1ad3627705f5ae20af6b2982f500546d43892543b36c7bca3e2f87105e7"},
|
||||
{file = "statsmodels-0.14.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90fd2f0110b73fc3fa5a2f21c3ca99b0e22285cccf38e56b5b8fd8ce28791b0f"},
|
||||
{file = "statsmodels-0.14.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac780ad9ff552773798829a0b9c46820b0faa10e6454891f5e49a845123758ab"},
|
||||
{file = "statsmodels-0.14.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55d1742778400ae67acb04b50a2c7f5804182f8a874bd09ca397d69ed159a751"},
|
||||
{file = "statsmodels-0.14.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f870d14a587ea58a3b596aa994c2ed889cc051f9e450e887d2c83656fc6a64bf"},
|
||||
{file = "statsmodels-0.14.2-cp312-cp312-win_amd64.whl", hash = "sha256:f450fcbae214aae66bd9d2b9af48e0f8ba1cb0e8596c6ebb34e6e3f0fec6542c"},
|
||||
{file = "statsmodels-0.14.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:201c3d00929c4a67cda1fe05b098c8dcf1b1eeefa88e80a8f963a844801ed59f"},
|
||||
{file = "statsmodels-0.14.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9edefa4ce08e40bc1d67d2f79bc686ee5e238e801312b5a029ee7786448c389a"},
|
||||
{file = "statsmodels-0.14.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c78a7601fdae1aa32104c5ebff2e0b72c26f33e870e2f94ab1bcfd927ece9b"},
|
||||
{file = "statsmodels-0.14.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f36494df7c03d63168fccee5038a62f469469ed6a4dd6eaeb9338abedcd0d5f5"},
|
||||
{file = "statsmodels-0.14.2-cp39-cp39-win_amd64.whl", hash = "sha256:8875823bdd41806dc853333cc4e1b7ef9481bad2380a999e66ea42382cf2178d"},
|
||||
{file = "statsmodels-0.14.2.tar.gz", hash = "sha256:890550147ad3a81cda24f0ba1a5c4021adc1601080bd00e191ae7cd6feecd6ad"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
numpy = ">=1.22.3"
|
||||
packaging = ">=21.3"
|
||||
pandas = ">=1.4,<2.1.0 || >2.1.0"
|
||||
patsy = ">=0.5.6"
|
||||
scipy = ">=1.8,<1.9.2 || >1.9.2"
|
||||
|
||||
[package.extras]
|
||||
build = ["cython (>=0.29.33)"]
|
||||
develop = ["colorama", "cython (>=0.29.33)", "cython (>=3.0.10,<4)", "flake8", "isort", "joblib", "matplotlib (>=3)", "pytest (>=7.3.0,<8)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=8.0,<9.0)"]
|
||||
docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"]
|
||||
|
||||
[[package]]
|
||||
name = "swig"
|
||||
version = "4.2.0"
|
||||
@ -5972,6 +6646,17 @@ files = [
|
||||
{file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tzdata"
|
||||
version = "2024.1"
|
||||
description = "Provider of IANA time zone data"
|
||||
optional = false
|
||||
python-versions = ">=2"
|
||||
files = [
|
||||
{file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"},
|
||||
{file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uc-micro-py"
|
||||
version = "1.0.2"
|
||||
@ -6245,6 +6930,7 @@ atari = ["ale-py", "autorom", "opencv_python", "shimmy"]
box2d = ["box2d_py", "pygame", "swig"]
classic-control = ["pygame"]
envpool = ["envpool"]
eval = ["docstring-parser", "joblib", "jsonargparse", "rliable", "scipy"]
mujoco = ["imageio", "mujoco"]
mujoco-py = ["cython", "mujoco-py"]
pybullet = ["pybullet"]
@ -6254,4 +6940,4 @@ vizdoom = ["vizdoom"]

[metadata]
lock-version = "2.0"
python-versions = "^3.11"
content-hash = "a7aa80de549e7af1147d14f9bdd48659b7018732af34022cc734565af1f742e9"
content-hash = "719c5e7c9a09f731789aa753aeb7c37443c48a65e9bb058344c4763e17b7fa58"

@ -28,10 +28,12 @@ python = "^3.11"
deepdiff = "^7.0.1"
gymnasium = "^0.28.0"
h5py = "^3.9.0"
matplotlib = ">=3.0.0"
numba = "^0.57.1"
numpy = "^1"
overrides = "^7.4.0"
packaging = "*"
pandas = ">=2.0.0"
pettingzoo = "^1.22"
tensorboard = "^2.5.0"
# Torch 2.0.1 causes problems, see https://github.com/pytorch/pytorch/issues/100974
@ -57,6 +59,7 @@ docstring-parser = { version = "^0.15", optional = true }
envpool = { version = "^0.8.2", optional = true, markers = "sys_platform != 'darwin'"}
gymnasium-robotics = { version = "*", optional = true }
imageio = { version = ">=2.14.1", optional = true }
joblib = { version = "*", optional = true }
jsonargparse = {version = "^4.24.1", optional = true}
# we need <3 b/c of https://github.com/Farama-Foundation/Gymnasium/issues/749
mujoco = { version = ">=2.1.5, <3", optional = true }
@ -64,6 +67,8 @@ mujoco-py = { version = ">=2.1,<2.2", optional = true }
opencv_python = { version = "*", optional = true }
pybullet = { version = "*", optional = true }
pygame = { version = ">=2.1.3", optional = true }
rliable = {optional = true, git = "https://github.com/aai-institute/rliable.git"}
scipy = { version = "*", optional = true }
shimmy = { version = ">=0.1.0,<1.0", optional = true }
swig = { version = "4.*", optional = true }
vizdoom = { version = "*", optional = true }
@ -78,6 +83,7 @@ pybullet = ["pybullet"]
envpool = ["envpool"]
robotics = ["gymnasium-robotics"]
vizdoom = ["vizdoom"]
eval = ["rliable", "joblib", "scipy", "jsonargparse", "docstring-parser"]


[tool.poetry.group.dev]

@ -2,11 +2,12 @@ from typing import Literal

import numpy as np
import pytest
from torch.utils.tensorboard import SummaryWriter

from tianshou.utils import BaseLogger
from tianshou.utils import TensorboardLogger


class TestBaseLogger:
class TestTensorBoardLogger:
    @staticmethod
    @pytest.mark.parametrize(
        "input_dict, expected_output",
@ -20,7 +21,8 @@ class TestBaseLogger:
        | dict[str, dict[str, dict[str, int]]],
        expected_output: dict[str, int],
    ) -> None:
        result = BaseLogger.prepare_dict_for_logging(input_dict)
        logger = TensorboardLogger(SummaryWriter("log/logger"))
        result = logger.prepare_dict_for_logging(input_dict)
        assert result == expected_output

    @staticmethod
@ -36,7 +38,8 @@ class TestBaseLogger:
        delimiter: Literal["|", "."],
        expected_output: dict[str, int],
    ) -> None:
        result = BaseLogger.prepare_dict_for_logging(input_dict, delimiter=delimiter)
        logger = TensorboardLogger(SummaryWriter("log/logger"))
        result = logger.prepare_dict_for_logging(input_dict, delimiter=delimiter)
        assert result == expected_output

    @staticmethod
@ -56,7 +59,8 @@ class TestBaseLogger:
        exclude_arrays: bool,
        expected_output: dict[str, np.ndarray],
    ) -> None:
        result = BaseLogger.prepare_dict_for_logging(input_dict, exclude_arrays=exclude_arrays)
        logger = TensorboardLogger(SummaryWriter("log/logger"))
        result = logger.prepare_dict_for_logging(input_dict, exclude_arrays=exclude_arrays)
        assert result.keys() == expected_output.keys()
        for val1, val2 in zip(result.values(), expected_output.values(), strict=True):
            assert np.all(val1 == val2)
@ -72,5 +76,6 @@ class TestBaseLogger:
        input_dict: dict[str, tuple[Literal[1]] | dict[str, str | dict[str, int]]],
        expected_output: dict[str, int],
    ) -> None:
        result = BaseLogger.prepare_dict_for_logging(input_dict)
        logger = TensorboardLogger(SummaryWriter("log/logger"))
        result = logger.prepare_dict_for_logging(input_dict)
        assert result == expected_output

@ -6,9 +6,19 @@ from tianshou.highlevel.env import (

class DiscreteTestEnvFactory(EnvFactoryRegistered):
    def __init__(self) -> None:
        super().__init__(task="CartPole-v0", seed=42, venv_type=VectorEnvType.DUMMY)
        super().__init__(
            task="CartPole-v0",
            train_seed=42,
            test_seed=1337,
            venv_type=VectorEnvType.DUMMY,
        )


class ContinuousTestEnvFactory(EnvFactoryRegistered):
    def __init__(self) -> None:
        super().__init__(task="Pendulum-v1", seed=42, venv_type=VectorEnvType.DUMMY)
        super().__init__(
            task="Pendulum-v1",
            train_seed=42,
            test_seed=1337,
            venv_type=VectorEnvType.DUMMY,
        )

@ -49,7 +49,7 @@ def test_experiment_builder_continuous_default_params(builder_cls: type[Experime
        sampling_config=sampling_config,
    )
    experiment = builder.build()
    experiment.run("test")
    experiment.run(override_experiment_name="test")
    print(experiment)


@ -77,5 +77,32 @@ def test_experiment_builder_discrete_default_params(builder_cls: type[Experiment
        sampling_config=sampling_config,
    )
    experiment = builder.build()
    experiment.run("test")
    experiment.run(override_experiment_name="test")
    print(experiment)


def test_temp_builder_modification() -> None:
    env_factory = DiscreteTestEnvFactory()
    sampling_config = SamplingConfig(
        num_epochs=1,
        step_per_epoch=100,
        num_train_envs=2,
        num_test_envs=2,
    )
    builder = PPOExperimentBuilder(
        experiment_config=ExperimentConfig(persistence_enabled=False),
        env_factory=env_factory,
        sampling_config=sampling_config,
    )
    original_seed = builder.experiment_config.seed
    original_train_seed = builder.sampling_config.train_seed

    with builder.temp_config_mutation():
        builder.experiment_config.seed += 12345
        builder.sampling_config.train_seed += 456
        exp = builder.build()

    assert builder.experiment_config.seed == original_seed
    assert builder.sampling_config.train_seed == original_train_seed
    assert exp.config.seed == original_seed + 12345
    assert exp.sampling_config.train_seed == original_train_seed + 456

@ -6,6 +6,12 @@ import torch
from tianshou.data import Batch
from tianshou.data.batch import BatchProtocol, arr_type

TNestedDictValue = np.ndarray | dict[str, "TNestedDictValue"]


d: dict[str, TNestedDictValue] = {"a": {"b": np.array([1, 2, 3])}}
d["c"] = np.array([1, 2, 3])


class ObsBatchProtocol(BatchProtocol, Protocol):
    """Observations of an environment that a policy can turn into actions.

tianshou/evaluation/__init__.py (new file, 0 lines)

tianshou/evaluation/launcher.py (new file, 75 lines)
@ -0,0 +1,75 @@
"""Provides a basic interface for launching experiments. The API is experimental and subject to change!."""

import logging
from abc import ABC, abstractmethod
from collections.abc import Sequence
from copy import copy
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Literal

from joblib import Parallel, delayed

from tianshou.highlevel.experiment import Experiment

log = logging.getLogger(__name__)


@dataclass
class JoblibConfig:
    n_jobs: int = -1
    """The maximum number of concurrently running jobs. If -1, all CPUs are used."""
    backend: Literal["loky", "multiprocessing", "threading"] | None = "loky"
    """Allows to hard-code backend, otherwise inferred based on prefer and require."""
    verbose: int = 10
    """If greater than zero, prints progress messages."""


class ExpLauncher(ABC):
    @abstractmethod
    def launch(self, experiments: Sequence[Experiment]) -> None:
        pass


class SequentialExpLauncher(ExpLauncher):
    def launch(self, experiments: Sequence[Experiment]) -> None:
        for exp in experiments:
            exp.run()


class JoblibExpLauncher(ExpLauncher):
    def __init__(self, joblib_cfg: JoblibConfig | None = None) -> None:
        self.joblib_cfg = copy(joblib_cfg) if joblib_cfg is not None else JoblibConfig()
        # Joblib's backend is hard-coded to loky since the threading backend produces different results
        if self.joblib_cfg.backend != "loky":
            log.warning(
                f"Ignoring the user provided joblib backend {self.joblib_cfg.backend} and using loky instead. "
                f"The current implementation requires loky to work and will be relaxed soon",
            )
            self.joblib_cfg.backend = "loky"

    def launch(self, experiments: Sequence[Experiment]) -> None:
        Parallel(**asdict(self.joblib_cfg))(delayed(self._safe_execute)(exp) for exp in experiments)

    @staticmethod
    def _safe_execute(exp: Experiment) -> None:
        try:
            exp.run()
        except BaseException as e:
            log.error(e)


class RegisteredExpLauncher(Enum):
    joblib = "joblib"
    sequential = "sequential"

    def create_launcher(self) -> ExpLauncher:
        match self:
            case RegisteredExpLauncher.joblib:
                return JoblibExpLauncher()
            case RegisteredExpLauncher.sequential:
                return SequentialExpLauncher()
            case _:
                raise NotImplementedError(
                    f"Launcher {self} is not yet implemented.",
                )

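A minimal usage sketch for the launcher API above; the list of experiments is assumed to be built elsewhere (e.g. the same experiment configured with several seeds), and my_experiments is a hypothetical name:

    from tianshou.evaluation.launcher import RegisteredExpLauncher

    # my_experiments: a sequence of fully configured Experiment objects (hypothetical)
    launcher = RegisteredExpLauncher.joblib.create_launcher()
    launcher.launch(my_experiments)

RegisteredExpLauncher.sequential gives the non-parallel variant, which is handy for debugging since failures are not swallowed by worker processes.
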
tianshou/evaluation/rliable_evaluation_hl.py (new file, 218 lines)
@ -0,0 +1,218 @@
|
||||
"""The rliable-evaluation module provides a high-level interface to evaluate the results of an experiment with multiple runs
|
||||
on different seeds using the rliable library. The API is experimental and subject to change!.
|
||||
"""
|
||||
|
||||
import os
|
||||
from dataclasses import asdict, dataclass, fields
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import scipy.stats as sst
|
||||
from rliable import library as rly
|
||||
from rliable import plot_utils
|
||||
|
||||
from tianshou.highlevel.experiment import Experiment
|
||||
from tianshou.utils import logging
|
||||
from tianshou.utils.logger.base import DataScope
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LoggedSummaryData:
|
||||
mean: np.ndarray
|
||||
std: np.ndarray
|
||||
max: np.ndarray
|
||||
min: np.ndarray
|
||||
|
||||
|
||||
@dataclass
|
||||
class LoggedCollectStats:
|
||||
env_step: np.ndarray | None = None
|
||||
n_collected_episodes: np.ndarray | None = None
|
||||
n_collected_steps: np.ndarray | None = None
|
||||
collect_time: np.ndarray | None = None
|
||||
collect_speed: np.ndarray | None = None
|
||||
returns_stat: LoggedSummaryData | None = None
|
||||
lens_stat: LoggedSummaryData | None = None
|
||||
|
||||
@classmethod
|
||||
def from_data_dict(cls, data: dict) -> "LoggedCollectStats":
|
||||
"""Create a LoggedCollectStats object from a dictionary.
|
||||
|
||||
Converts SequenceSummaryStats from dict format to dataclass format and ignores fields that are not present.
|
||||
"""
|
||||
field_names = [f.name for f in fields(cls)]
|
||||
for k, v in data.items():
|
||||
if k not in field_names:
|
||||
data.pop(k)
|
||||
if isinstance(v, dict):
|
||||
data[k] = LoggedSummaryData(**v)
|
||||
return cls(**data)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RLiableExperimentResult:
|
||||
"""The result of an experiment that can be used with the rliable library."""
|
||||
|
||||
exp_dir: str
|
||||
"""The base directory where each sub-directory contains the results of one experiment run."""
|
||||
|
||||
test_episode_returns_RE: np.ndarray
|
||||
"""The test episodes for each run of the experiment where each row corresponds to one run."""
|
||||
|
||||
env_steps_E: np.ndarray
|
||||
"""The number of environment steps at which the test episodes were evaluated."""
|
||||
|
||||
@classmethod
|
||||
def load_from_disk(cls, exp_dir: str) -> "RLiableExperimentResult":
|
||||
"""Load the experiment result from disk.
|
||||
|
||||
:param exp_dir: The directory from where the experiment results are restored.
|
||||
"""
|
||||
test_episode_returns = []
|
||||
env_step_at_test = None
|
||||
|
||||
# TODO: env_step_at_test should not be defined in a loop and overwritten at each iteration
|
||||
# just for retrieving them. We might need a cleaner directory structure.
|
||||
for entry in os.scandir(exp_dir):
|
||||
if entry.name.startswith(".") or not entry.is_dir():
|
||||
continue
|
||||
|
||||
exp = Experiment.from_directory(entry.path)
|
||||
logger = exp.logger_factory.create_logger(
|
||||
entry.path,
|
||||
entry.name,
|
||||
None,
|
||||
asdict(exp.config),
|
||||
)
|
||||
data = logger.restore_logged_data(entry.path)
|
||||
|
||||
if DataScope.TEST.value not in data or not data[DataScope.TEST.value]:
|
||||
continue
|
||||
restored_test_data = data[DataScope.TEST.value]
|
||||
if not isinstance(restored_test_data, dict):
|
||||
raise RuntimeError(
|
||||
f"Expected entry with key {DataScope.TEST.value} data to be a dictionary, "
|
||||
f"but got {restored_test_data=}.",
|
||||
)
|
||||
test_data = LoggedCollectStats.from_data_dict(restored_test_data)
|
||||
|
||||
if test_data.returns_stat is None:
|
||||
continue
|
||||
test_episode_returns.append(test_data.returns_stat.mean)
|
||||
env_step_at_test = test_data.env_step
|
||||
|
||||
if not test_episode_returns or env_step_at_test is None:
|
||||
raise ValueError(f"No experiment data found in {exp_dir}.")
|
||||
|
||||
return cls(
|
||||
test_episode_returns_RE=np.array(test_episode_returns),
|
||||
env_steps_E=np.array(env_step_at_test),
|
||||
exp_dir=exp_dir,
|
||||
)
|
||||
|
||||
def _get_rliable_data(
|
||||
self,
|
||||
algo_name: str | None = None,
|
||||
score_thresholds: np.ndarray | None = None,
|
||||
) -> tuple[dict, np.ndarray, np.ndarray]:
|
||||
"""Return the data in the format expected by the rliable library.
|
||||
|
||||
:param algo_name: The name of the algorithm to be shown in the figure legend. If None, the name of the algorithm
|
||||
is set to the experiment dir.
|
||||
:param score_thresholds: The score thresholds for the performance profile. If None, the thresholds are inferred
|
||||
from the minimum and maximum test episode returns.
|
||||
|
||||
:return: A tuple score_dict, env_steps, and score_thresholds.
|
||||
"""
|
||||
if score_thresholds is None:
|
||||
score_thresholds = np.linspace(
|
||||
np.min(self.test_episode_returns_RE),
|
||||
np.max(self.test_episode_returns_RE),
|
||||
101,
|
||||
)
|
||||
|
||||
if algo_name is None:
|
||||
algo_name = os.path.basename(self.exp_dir)
|
||||
|
||||
score_dict = {algo_name: self.test_episode_returns_RE}
|
||||
|
||||
return score_dict, self.env_steps_E, score_thresholds
|
||||
|
||||
def eval_results(
self,
algo_name: str | None = None,
score_thresholds: np.ndarray | None = None,
save_plots: bool = False,
show_plots: bool = True,
) -> tuple[plt.Figure, plt.Axes, plt.Figure, plt.Axes]:
"""Evaluate the results of an experiment and create a sample efficiency curve and a performance profile.

:param algo_name: The name of the algorithm to be shown in the figure legend. If None, the name of the algorithm
is set to the experiment dir.
:param score_thresholds: The score thresholds for the performance profile. If None, the thresholds are inferred
from the minimum and maximum test episode returns.
:param save_plots: If True, the figures are saved to the experiment directory.
:param show_plots: If True, the figures are shown.

:return: The created figures and axes.
"""
score_dict, env_steps, score_thresholds = self._get_rliable_data(
algo_name,
score_thresholds,
)

iqm = lambda scores: sst.trim_mean(scores, proportiontocut=0.25, axis=0)
iqm_scores, iqm_cis = rly.get_interval_estimates(score_dict, iqm)

# Plot IQM sample efficiency curve
fig_iqm, ax_iqm = plt.subplots(ncols=1, figsize=(7, 5), constrained_layout=True)
plot_utils.plot_sample_efficiency_curve(
env_steps,
iqm_scores,
iqm_cis,
algorithms=None,
xlabel="env step",
ylabel="IQM episode return",
ax=ax_iqm,
)
if show_plots:
plt.show(block=False)

if save_plots:
iqm_sample_efficiency_curve_path = os.path.abspath(
os.path.join(
self.exp_dir,
"iqm_sample_efficiency_curve.png",
),
)
log.info(f"Saving iqm sample efficiency curve to {iqm_sample_efficiency_curve_path}.")
fig_iqm.savefig(iqm_sample_efficiency_curve_path)

final_score_dict = {algo: returns[:, [-1]] for algo, returns in score_dict.items()}
score_distributions, score_distributions_cis = rly.create_performance_profile(
final_score_dict,
score_thresholds,
)

# Plot score distributions
fig_profile, ax_profile = plt.subplots(ncols=1, figsize=(7, 5), constrained_layout=True)
plot_utils.plot_performance_profiles(
score_distributions,
score_thresholds,
performance_profile_cis=score_distributions_cis,
xlabel=r"Episode return $(\tau)$",
ax=ax_profile,
)

if save_plots:
profile_curve_path = os.path.abspath(
os.path.join(self.exp_dir, "performance_profile.png"),
)
log.info(f"Saving performance profile curve to {profile_curve_path}.")
fig_profile.savefig(profile_curve_path)
if show_plots:
plt.show(block=False)

return fig_iqm, ax_iqm, fig_profile, ax_profile
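The aggregation above is the interquartile mean (IQM): returns across runs are trimmed by 25% on each side before averaging, which is more robust to outlier seeds than a plain mean. A small self-contained sketch of the same computation on synthetic numbers (scipy only, no rliable required):

```python
import numpy as np
import scipy.stats as sst

# Hypothetical returns of 8 seeded runs at a single evaluation step.
returns = np.array([90.0, 95.0, 98.0, 100.0, 101.0, 103.0, 105.0, 400.0])

plain_mean = returns.mean()                                 # dragged up by the outlier seed
iqm = sst.trim_mean(returns, proportiontocut=0.25, axis=0)  # mean of the middle 50% only
print(f"{plain_mean=:.1f}, {iqm=:.1f}")  # plain_mean=136.5, iqm=100.5
```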
@ -53,6 +53,9 @@ class SamplingConfig(ToStringMixin):
num_train_envs: int = -1
"""the number of training environments to use. If set to -1, use number of CPUs/threads."""

train_seed: int = 42
"""the seed to use for the training environments."""

num_test_envs: int = 1
"""the number of test environments to use"""

@ -127,6 +130,10 @@ class SamplingConfig(ToStringMixin):
temporal aspects (e.g. velocities of moving objects for which only positions are observed).
"""

@property
def test_seed(self) -> int:
return self.train_seed + self.num_train_envs

def __post_init__(self) -> None:
if self.num_train_envs == -1:
self.num_train_envs = multiprocessing.cpu_count()
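Under this scheme the test seed is derived rather than configured: it starts right after the block of seeds consumed by the training environments, so train and test environments never share a seed. A hedged sketch of the arithmetic (the fields used are the ones shown above; the import path is assumed from the high-level API layout and the remaining fields are assumed to keep their defaults):

```python
from tianshou.highlevel.config import SamplingConfig

# 8 training envs seeded 42..49, so the test envs start at seed 50.
sampling_config = SamplingConfig(train_seed=42, num_train_envs=8, num_test_envs=2)
assert sampling_config.test_seed == 42 + 8  # == 50
```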
@ -412,7 +412,8 @@ class EnvFactoryRegistered(EnvFactory):
self,
*,
task: str,
seed: int,
train_seed: int,
test_seed: int,
venv_type: VectorEnvType,
envpool_factory: EnvPoolFactory | None = None,
render_mode_train: str | None = None,
@ -434,7 +435,8 @@ class EnvFactoryRegistered(EnvFactory):
super().__init__(venv_type)
self.task = task
self.envpool_factory = envpool_factory
self.seed = seed
self.train_seed = train_seed
self.test_seed = test_seed
self.render_modes = {
EnvMode.TRAIN: render_mode_train,
EnvMode.TEST: render_mode_test,
@ -462,15 +464,16 @@ class EnvFactoryRegistered(EnvFactory):
return gymnasium.make(self.task, **kwargs)

def create_venv(self, num_envs: int, mode: EnvMode) -> BaseVectorEnv:
seed = self.train_seed if mode == EnvMode.TRAIN else self.test_seed
if self.envpool_factory is not None:
return self.envpool_factory.create_venv(
self.task,
num_envs,
mode,
self.seed,
seed,
self._create_kwargs(mode),
)
else:
venv = super().create_venv(num_envs, mode)
venv.seed(self.seed)
venv.seed(seed)
return venv
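The net effect of this hunk is that a single factory hands out differently seeded vectorized environments depending on whether training or test envs are requested. A hedged usage sketch (module path and enum members are assumed from the identifiers visible in the diff; adjust to the actual package layout):

```python
from tianshou.highlevel.env import EnvFactoryRegistered, EnvMode, VectorEnvType

factory = EnvFactoryRegistered(
    task="CartPole-v1",
    train_seed=0,
    test_seed=1000,
    venv_type=VectorEnvType.DUMMY,
)
train_envs = factory.create_venv(num_envs=4, mode=EnvMode.TRAIN)  # seeded with 0
test_envs = factory.create_venv(num_envs=2, mode=EnvMode.TEST)    # seeded with 1000
```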
@ -1,10 +1,12 @@
import os
import pickle
from abc import abstractmethod
from collections.abc import Sequence
from collections.abc import Iterator, Sequence
from contextlib import contextmanager
from copy import copy
from dataclasses import dataclass
from pprint import pformat
from typing import Self
from typing import Literal, Self

import numpy as np
import torch
@ -144,6 +146,7 @@ class Experiment(ToStringMixin):
agent_factory: AgentFactory,
sampling_config: SamplingConfig,
logger_factory: LoggerFactory | None = None,
name: str | Literal["DATETIME_TAG"] = "DATETIME_TAG",
):
if logger_factory is None:
logger_factory = LoggerFactoryDefault()
@ -152,6 +155,22 @@ class Experiment(ToStringMixin):
self.env_factory = env_factory
self.agent_factory = agent_factory
self.logger_factory = logger_factory
if name == "DATETIME_TAG":
name = datetime_tag()
self.name = name

def get_seeding_info_as_str(self) -> str:
"""Useful for creating unique experiment names based on seeds.

A typical example is to do `experiment.name = f"{experiment.name}_{experiment.get_seeding_info_as_str()}"`.
"""
return "_".join(
[
f"exp_seed={self.config.seed}",
f"train_seed={self.sampling_config.train_seed}",
f"test_seed={self.sampling_config.test_seed}",
],
)
@classmethod
def from_directory(cls, directory: str, restore_policy: bool = True) -> "Experiment":
@ -186,35 +205,42 @@ class Experiment(ToStringMixin):

def run(
self,
experiment_name: str | None = None,
override_experiment_name: str | Literal["DATETIME_TAG"] | None = None,
logger_run_id: str | None = None,
raise_error_on_dirname_collision: bool = True,
) -> ExperimentResult:
"""Run the experiment and return the results.

:param experiment_name: the experiment name, which corresponds to the directory (within the logging
:param override_experiment_name: if not None, will adjust the current instance's `name` attribute.
The name corresponds to the directory (within the logging
directory) where all results associated with the experiment will be saved.
The name may contain path separators (i.e. `os.path.sep`, as used by `os.path.join`), in which case
a nested directory structure will be created.
If None, use a name containing the current date and time.
If "DATETIME_TAG" is passed, use a name containing the current date and time. This option
is useful for preventing file-name collisions if a single experiment is executed repeatedly.
:param logger_run_id: Run identifier to use for logger initialization/resumption (applies when
using wandb, in particular).
:param raise_error_on_dirname_collision: set to `False` e.g., when continuing a previously executed
experiment with the same name.
:return:
"""
if experiment_name is None:
experiment_name = datetime_tag()
if override_experiment_name is not None:
if override_experiment_name == "DATETIME_TAG":
override_experiment_name = datetime_tag()
self.name = override_experiment_name

# initialize persistence directory
use_persistence = self.config.persistence_enabled
persistence_dir = os.path.join(self.config.persistence_base_dir, experiment_name)
persistence_dir = os.path.join(self.config.persistence_base_dir, self.name)
if use_persistence:
os.makedirs(persistence_dir, exist_ok=True)
os.makedirs(persistence_dir, exist_ok=not raise_error_on_dirname_collision)

with logging.FileLoggerContext(
os.path.join(persistence_dir, self.LOG_FILENAME),
enabled=use_persistence and self.config.log_file_enabled,
):
# log initial information
log.info(f"Running experiment (name='{experiment_name}'):\n{self.pprints()}")
log.info(f"Running experiment (name='{self.name}'):\n{self.pprints()}")
log.info(f"Working directory: {os.getcwd()}")

self._set_seed()
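In other words, the experiment's directory name now lives on the `Experiment` instance itself, and `run()` only overrides it on request. A hedged sketch of the call site (assuming `experiment` was produced by one of the experiment builders):

```python
# Keep the name the experiment was built with:
result = experiment.run()

# Or override it, e.g. with a date-time tag, to avoid directory collisions
# when the same experiment is launched repeatedly:
result = experiment.run(override_experiment_name="DATETIME_TAG")
```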
@ -245,7 +271,7 @@ class Experiment(ToStringMixin):
if use_persistence:
logger = self.logger_factory.create_logger(
log_dir=persistence_dir,
experiment_name=experiment_name,
experiment_name=self.name,
run_id=logger_run_id,
config_dict=full_config,
)
@ -338,6 +364,32 @@ class ExperimentBuilder:
self._optim_factory: OptimizerFactory | None = None
self._policy_wrapper_factory: PolicyWrapperFactory | None = None
self._trainer_callbacks: TrainerCallbacks = TrainerCallbacks()
self._experiment_name: str = ""

@contextmanager
def temp_config_mutation(self) -> Iterator[Self]:
"""Yields the builder instance itself; any modifications made to the configs inside the context are reverted on exit."""
original_sampling_config = copy(self.sampling_config)
original_experiment_config = copy(self.experiment_config)
yield self
self.sampling_config = original_sampling_config
self.experiment_config = original_experiment_config
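The context manager is what makes it safe to reuse one builder for several experiment variants. A hedged usage sketch (builder creation elided; `builder` stands for any concrete `ExperimentBuilder`):

```python
# Build a variant with a different seed without permanently mutating the builder.
with builder.temp_config_mutation():
    builder.experiment_config.seed += 1
    variant = builder.build()

# Outside the context the original configs are back in place.
baseline = builder.build()
```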
@property
def experiment_config(self) -> ExperimentConfig:
return self._config

@experiment_config.setter
def experiment_config(self, experiment_config: ExperimentConfig) -> None:
self._config = experiment_config

@property
def sampling_config(self) -> SamplingConfig:
return self._sampling_config

@sampling_config.setter
def sampling_config(self, sampling_config: SamplingConfig) -> None:
self._sampling_config = sampling_config

def with_logger_factory(self, logger_factory: LoggerFactory) -> Self:
"""Allows to customize the logger factory to use.
@ -415,6 +467,20 @@ class ExperimentBuilder:
self._trainer_callbacks.epoch_stop_callback = callback
return self

def with_experiment_name(
self,
experiment_name: str | Literal["DATETIME_TAG"] = "DATETIME_TAG",
) -> Self:
"""Sets the name of the experiment.

:param experiment_name: the name. If "DATETIME_TAG" (default) is given, the current date and time will be used.
:return: the builder
"""
if experiment_name == "DATETIME_TAG":
experiment_name = datetime_tag()
self._experiment_name = experiment_name
return self

@abstractmethod
def _create_agent_factory(self) -> AgentFactory:
pass
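The builder-level name setter plugs into the same mechanism: whatever is set here becomes the `Experiment.name` used for the persistence directory. A hedged sketch with a hypothetical concrete builder (`MyExperimentBuilder` is a stand-in for any subclass; the constructor arguments mirror the usual builder signature):

```python
experiment = (
    MyExperimentBuilder(env_factory, experiment_config, sampling_config)
    .with_experiment_name("dqn_pong_baseline")
    .build()
)
experiment.run()  # results land under <persistence_base_dir>/dqn_pong_baseline
```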
@ -425,9 +491,12 @@ class ExperimentBuilder:
else:
return self._optim_factory

def build(self) -> Experiment:
def build(self, add_seeding_info_to_name: bool = False) -> Experiment:
"""Creates the experiment based on the options specified via this builder.

:param add_seeding_info_to_name: whether to add a postfix to the experiment name that contains
info about the training seeds. Useful for creating multiple experiments that only differ
by seeds.
:return: the experiment
"""
agent_factory = self._create_agent_factory()
@ -440,9 +509,30 @@ class ExperimentBuilder:
agent_factory,
self._sampling_config,
self._logger_factory,
name=self._experiment_name,
)
if add_seeding_info_to_name:
if not experiment.name:
experiment.name = experiment.get_seeding_info_as_str()
else:
experiment.name = f"{experiment.name}_{experiment.get_seeding_info_as_str()}"
return experiment

def build_default_seeded_experiments(self, num_experiments: int) -> list[Experiment]:
"""Creates a list of experiments with non-overlapping seeds, starting from the configured seed.

Each experiment will have a unique name that is created from the original experiment name and the seeds used.
"""
num_train_envs = self.sampling_config.num_train_envs

seeded_experiments = []
for i in range(num_experiments):
with self.temp_config_mutation():
self.experiment_config.seed += i
self.sampling_config.train_seed += i * num_train_envs
seeded_experiments.append(self.build(add_seeding_info_to_name=True))
return seeded_experiments
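This is the entry point for the multi-seed evaluation workflow introduced in this PR: each generated experiment gets its own experiment seed and a non-overlapping block of training-environment seeds, plus a name postfix encoding those seeds. A hedged sketch of how the list might be consumed (the launcher loop here is deliberately naive; the new `evaluation` package offers more elaborate ways to run and aggregate such experiments):

```python
seeded_experiments = builder.build_default_seeded_experiments(num_experiments=5)

for experiment in seeded_experiments:
    # Each run ends up in its own directory, named e.g. like
    # ".../exp_seed=2_train_seed=16_test_seed=24" (illustrative values).
    experiment.run()
```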
class _BuilderMixinActorFactory(ActorFutureProviderProtocol):
def __init__(self, continuous_actor_type: ContinuousActorType):

@ -32,7 +32,7 @@ class LoggerFactory(ToStringMixin, ABC):
class LoggerFactoryDefault(LoggerFactory):
def __init__(
self,
logger_type: Literal["tensorboard", "wandb"] = "tensorboard",
logger_type: Literal["tensorboard", "wandb", "pandas"] = "tensorboard",
wandb_project: str | None = None,
):
if logger_type == "wandb" and wandb_project is None:
@ -47,17 +47,18 @@ class LoggerFactoryDefault(LoggerFactory):
run_id: str | None,
config_dict: dict,
) -> TLogger:
writer = SummaryWriter(log_dir)
writer.add_text(
"args",
str(
dict(
log_dir=log_dir,
logger_type=self.logger_type,
wandb_project=self.wandb_project,
if self.logger_type in ["wandb", "tensorboard"]:
writer = SummaryWriter(log_dir)
writer.add_text(
"args",
str(
dict(
log_dir=log_dir,
logger_type=self.logger_type,
wandb_project=self.wandb_project,
),
),
),
)
)
match self.logger_type:
case "wandb":
wandb_logger = WandbLogger(
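The factory now recognizes a third backend besides TensorBoard and W&B; for the new "pandas" option no `SummaryWriter` is created, which appears intended for keeping logged data directly accessible from Python in the evaluation tooling. A hedged configuration sketch (only the `logger_type` value is taken from the diff; the import path is an assumption about the high-level API layout):

```python
from tianshou.highlevel.logger import LoggerFactoryDefault

logger_factory = LoggerFactoryDefault(logger_type="pandas")
# typically passed to a builder via .with_logger_factory(logger_factory)
```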
@ -24,7 +24,7 @@ class World:
trainer: Optional["BaseTrainer"] = None

def persist_path(self, filename: str) -> str:
return os.path.join(self.persist_directory, filename)
return os.path.abspath(os.path.join(self.persist_directory, filename))

def restore_path(self, filename: str) -> str:
if self.restore_directory is None:

@ -1,7 +1,7 @@
"""Utils package."""

from tianshou.utils.logger.base import BaseLogger, LazyLogger
from tianshou.utils.logger.tensorboard import BasicLogger, TensorboardLogger
from tianshou.utils.logger.tensorboard import TensorboardLogger
from tianshou.utils.logger.wandb import WandbLogger
from tianshou.utils.lr_scheduler import MultipleLRSchedulers
from tianshou.utils.progress_bar import DummyTqdm, tqdm_config
@ -15,7 +15,6 @@ __all__ = [
"DummyTqdm",
"BaseLogger",
"TensorboardLogger",
"BasicLogger",
"LazyLogger",
"WandbLogger",
"deprecation",
@ -3,14 +3,14 @@ from abc import ABC, abstractmethod
from collections.abc import Callable
from enum import Enum
from numbers import Number
from typing import Any

import numpy as np

VALID_LOG_VALS_TYPE = int | Number | np.number | np.ndarray | float
VALID_LOG_VALS = typing.get_args(
VALID_LOG_VALS_TYPE,
) # I know it's stupid, but we can't use Union type in isinstance
# It's unfortunate, but we can't use Union type in isinstance, hence we resort to this
VALID_LOG_VALS = typing.get_args(VALID_LOG_VALS_TYPE)

TRestoredData = dict[str, np.ndarray | dict[str, "TRestoredData"]]


class DataScope(Enum):
@ -21,15 +21,7 @@ class DataScope(Enum):


class BaseLogger(ABC):
"""The base class for any logger which is compatible with trainer.

Try to overwrite write() method to use your own writer.

:param train_interval: the log interval in log_train_data(). Default to 1000.
:param test_interval: the log interval in log_test_data(). Default to 1.
:param update_interval: the log interval in log_update_data(). Default to 1000.
:param info_interval: the log interval in log_info_data(). Default to 1.
"""
"""The base class for any logger which is compatible with trainer."""

def __init__(
self,
@ -37,12 +29,20 @@ class BaseLogger(ABC):
test_interval: int = 1,
update_interval: int = 1000,
info_interval: int = 1,
exclude_arrays: bool = True,
) -> None:
""":param train_interval: the log interval in log_train_data(). Default to 1000.
:param test_interval: the log interval in log_test_data(). Default to 1.
:param update_interval: the log interval in log_update_data(). Default to 1000.
:param info_interval: the log interval in log_info_data(). Default to 1.
:param exclude_arrays: whether to exclude numpy arrays from the logger's output
"""
super().__init__()
self.train_interval = train_interval
self.test_interval = test_interval
self.update_interval = update_interval
self.info_interval = info_interval
self.exclude_arrays = exclude_arrays
self.last_log_train_step = -1
self.last_log_test_step = -1
self.last_log_update_step = -1
@ -57,46 +57,15 @@ class BaseLogger(ABC):
:param data: the data to write with format ``{key: value}``.
"""

@staticmethod
def prepare_dict_for_logging(
input_dict: dict[str, Any],
parent_key: str = "",
delimiter: str = "/",
exclude_arrays: bool = True,
) -> dict[str, VALID_LOG_VALS_TYPE]:
"""Flattens and filters a nested dictionary by recursively traversing all levels and compressing the keys.
@abstractmethod
def prepare_dict_for_logging(self, log_data: dict) -> dict[str, VALID_LOG_VALS_TYPE]:
"""Prepare the dict for logging by filtering out invalid data types.

Filtering is performed with respect to valid logging data types.
If necessary, reformulate the dict to be compatible with the writer.

:param input_dict: The nested dictionary to be flattened and filtered.
:param parent_key: The parent key used as a prefix before the input_dict keys.
:param delimiter: The delimiter used to separate the keys.
:param exclude_arrays: Whether to exclude numpy arrays from the output.
:return: A flattened dictionary where the keys are compressed and values are filtered.
:param log_data: the dict to be prepared for logging.
:return: the prepared dict.
"""
result = {}

def add_to_result(
cur_dict: dict,
prefix: str = "",
) -> None:
for key, value in cur_dict.items():
if exclude_arrays and isinstance(value, np.ndarray):
continue

new_key = prefix + delimiter + key
new_key = new_key.lstrip(delimiter)

if isinstance(value, dict):
add_to_result(
value,
new_key,
)
elif isinstance(value, VALID_LOG_VALS):
result[new_key] = value

add_to_result(input_dict, prefix=parent_key)
return result
def log_train_data(self, log_data: dict, step: int) -> None:
"""Use writer to log statistics generated during training.
@ -106,8 +75,8 @@ class BaseLogger(ABC):
"""
# TODO: move interval check to calling method
if step - self.last_log_train_step >= self.train_interval:
log_data = self.prepare_dict_for_logging(log_data, parent_key=DataScope.TRAIN.value)
self.write("train/env_step", step, log_data)
log_data = self.prepare_dict_for_logging(log_data)
self.write(f"{DataScope.TRAIN.value}/env_step", step, log_data)
self.last_log_train_step = step

def log_test_data(self, log_data: dict, step: int) -> None:
@ -118,8 +87,8 @@ class BaseLogger(ABC):
"""
# TODO: move interval check to calling method (stupid because log_test_data is only called from function in utils.py, not from BaseTrainer)
if step - self.last_log_test_step >= self.test_interval:
log_data = self.prepare_dict_for_logging(log_data, parent_key=DataScope.TEST.value)
self.write(DataScope.TEST.value + "/env_step", step, log_data)
log_data = self.prepare_dict_for_logging(log_data)
self.write(f"{DataScope.TEST.value}/env_step", step, log_data)
self.last_log_test_step = step

def log_update_data(self, log_data: dict, step: int) -> None:
@ -130,8 +99,8 @@ class BaseLogger(ABC):
"""
# TODO: move interval check to calling method
if step - self.last_log_update_step >= self.update_interval:
log_data = self.prepare_dict_for_logging(log_data, parent_key=DataScope.UPDATE.value)
self.write(DataScope.UPDATE.value + "/gradient_step", step, log_data)
log_data = self.prepare_dict_for_logging(log_data)
self.write(f"{DataScope.UPDATE.value}/gradient_step", step, log_data)
self.last_log_update_step = step

def log_info_data(self, log_data: dict, step: int) -> None:
@ -143,8 +112,8 @@ class BaseLogger(ABC):
if (
step - self.last_log_info_step >= self.info_interval
): # TODO: move interval check to calling method
log_data = self.prepare_dict_for_logging(log_data, parent_key=DataScope.INFO.value)
self.write(DataScope.INFO.value + "/epoch", step, log_data)
log_data = self.prepare_dict_for_logging(log_data)
self.write(f"{DataScope.INFO.value}/epoch", step, log_data)
self.last_log_info_step = step

@abstractmethod
@ -166,7 +135,7 @@ class BaseLogger(ABC):

@abstractmethod
def restore_data(self) -> tuple[int, int, int]:
"""Return the metadata from existing log.
"""Restore internal data if present and return the metadata from existing log for continuation of training.

If it finds nothing or an error occurs during the recover process, it will
return the default parameters.
@ -174,6 +143,16 @@ class BaseLogger(ABC):
:return: epoch, env_step, gradient_step.
"""

@abstractmethod
def restore_logged_data(
self,
log_path: str,
) -> TRestoredData:
"""Load the logged data from disk for post-processing.

:return: a dict containing the logged data.
"""
class LazyLogger(BaseLogger):
"""A logger that does nothing. Used as the placeholder in trainer."""
@ -181,6 +160,12 @@ class LazyLogger(BaseLogger):
def __init__(self) -> None:
super().__init__()

def prepare_dict_for_logging(
self,
data: dict[str, VALID_LOG_VALS_TYPE],
) -> dict[str, VALID_LOG_VALS_TYPE]:
return data

def write(self, step_type: str, step: int, data: dict[str, VALID_LOG_VALS_TYPE]) -> None:
"""The LazyLogger writes nothing."""

@ -195,3 +180,6 @@ class LazyLogger(BaseLogger):

def restore_data(self) -> tuple[int, int, int]:
return 0, 0, 0

def restore_logged_data(self, log_path: str) -> dict:
return {}
@ -1,11 +1,17 @@
from collections.abc import Callable
from typing import Any

import numpy as np
from matplotlib.figure import Figure
from tensorboard.backend.event_processing import event_accumulator
from torch.utils.tensorboard import SummaryWriter

from tianshou.utils.logger.base import VALID_LOG_VALS_TYPE, BaseLogger
from tianshou.utils.warning import deprecation
from tianshou.utils.logger.base import (
VALID_LOG_VALS,
VALID_LOG_VALS_TYPE,
BaseLogger,
TRestoredData,
)
class TensorboardLogger(BaseLogger):
@ -38,9 +44,58 @@ class TensorboardLogger(BaseLogger):
self.last_save_step = -1
self.writer = writer

def write(self, step_type: str, step: int, data: dict[str, VALID_LOG_VALS_TYPE]) -> None:
def prepare_dict_for_logging(
self,
input_dict: dict[str, Any],
parent_key: str = "",
delimiter: str = "/",
exclude_arrays: bool = True,
) -> dict[str, VALID_LOG_VALS_TYPE]:
"""Flattens and filters a nested dictionary by recursively traversing all levels and compressing the keys.

Filtering is performed with respect to valid logging data types.

:param input_dict: The nested dictionary to be flattened and filtered.
:param parent_key: The parent key used as a prefix before the input_dict keys.
:param delimiter: The delimiter used to separate the keys.
:param exclude_arrays: Whether to exclude numpy arrays from the output.
:return: A flattened dictionary where the keys are compressed and values are filtered.
"""
result = {}

def add_to_result(
cur_dict: dict,
prefix: str = "",
) -> None:
for key, value in cur_dict.items():
if exclude_arrays and isinstance(value, np.ndarray):
continue

new_key = prefix + delimiter + key
new_key = new_key.lstrip(delimiter)

if isinstance(value, dict):
add_to_result(
value,
new_key,
)
elif isinstance(value, VALID_LOG_VALS):
result[new_key] = value

add_to_result(input_dict, prefix=parent_key)
return result
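To make the flattening concrete, here is a small illustration of what the method shown above produces for a nested statistics dict (`logger` stands for a `TensorboardLogger` instance; the input values are made up, and the numpy array is dropped because `exclude_arrays` defaults to True):

```python
import numpy as np

nested = {
    "returns_stat": {"mean": 100.0, "std": 12.5},
    "lens_stat": {"mean": 200.0},
    "obs": np.zeros(4),  # filtered out
}
flat = logger.prepare_dict_for_logging(nested, parent_key="test")
# flat == {"test/returns_stat/mean": 100.0,
#          "test/returns_stat/std": 12.5,
#          "test/lens_stat/mean": 200.0}
```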
def write(self, step_type: str, step: int, data: dict[str, Any]) -> None:
scope, step_name = step_type.split("/")
self.writer.add_scalar(step_type, step, global_step=step)
for k, v in data.items():
self.writer.add_scalar(k, v, global_step=step)
scope_key = f"{scope}/{k}"
if isinstance(v, np.ndarray):
self.writer.add_histogram(scope_key, v, global_step=step, bins="auto")
elif isinstance(v, Figure):
self.writer.add_figure(scope_key, v, global_step=step)
else:
self.writer.add_scalar(scope_key, v, global_step=step)
if self.write_flush: # issue 580
self.writer.flush() # issue #482
@ -81,16 +136,56 @@ class TensorboardLogger(BaseLogger):

return epoch, env_step, gradient_step

def restore_logged_data(
self,
log_path: str,
) -> TRestoredData:
"""Restores the logged data from the tensorboard log directory.

class BasicLogger(TensorboardLogger):
"""BasicLogger has changed its name to TensorboardLogger in #427.
The result is a nested dictionary where the keys are the tensorboard keys
and the values are the corresponding numpy arrays. The keys in each level
form a nested structure, where the hierarchy is represented by the slashes
in the tensorboard key-strings.
"""
ea = event_accumulator.EventAccumulator(log_path)
ea.Reload()

This class is for compatibility.
"""
def add_value_to_innermost_nested_dict(
data_dict: dict[str, Any],
key_string: str,
value: Any,
) -> None:
"""Walks through the keys in the
`key_string` and adds the value to the `data_dict` in a nested manner,
creating nested dictionaries on the fly if necessary, or updating existing ones.
The value is added only to the innermost-nested dictionary.

def __init__(self, *args: Any, **kwargs: Any) -> None:
deprecation(
"Class BasicLogger is marked as deprecated and will be removed soon. "
"Please use TensorboardLogger instead.",
)
super().__init__(*args, **kwargs)

Example:
-------
>>> data_dict = {}
>>> add_value_to_innermost_nested_dict(data_dict, "a/b/c", 1)
>>> data_dict
{"a": {"b": {"c": 1}}}
"""
keys = key_string.split("/")

cur_nested_dict = data_dict
# walk through the intermediate keys to reach the innermost-nested dict,
# creating nested dictionaries on the fly if necessary
for k in keys[:-1]:
cur_nested_dict = cur_nested_dict.setdefault(k, {})
# After the loop above,
# this is the innermost-nested dict, where the value is finally set
# for the last key in the key_string
cur_nested_dict[keys[-1]] = value

restored_data: dict[str, np.ndarray | dict] = {}
for key_string in ea.scalars.Keys():
add_value_to_innermost_nested_dict(
restored_data,
key_string,
np.array([s.value for s in ea.scalars.Items(key_string)]),
)

return restored_data
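This is the concrete implementation behind the new `restore_logged_data` API: scalars are read back from the TensorBoard event files and reassembled into a nested dict of numpy arrays. A hedged usage sketch (the log directory path and the logged keys are illustrative only):

```python
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils import TensorboardLogger

log_path = "log/my_experiment"  # hypothetical directory containing event files
logger = TensorboardLogger(SummaryWriter(log_path))

data = logger.restore_logged_data(log_path)
# e.g. data["test"]["returns_stat"]["mean"] -> np.ndarray of per-evaluation mean returns
```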
@ -6,7 +6,7 @@ from collections.abc import Callable
from torch.utils.tensorboard import SummaryWriter

from tianshou.utils import BaseLogger, TensorboardLogger
from tianshou.utils.logger.base import VALID_LOG_VALS_TYPE
from tianshou.utils.logger.base import VALID_LOG_VALS_TYPE, TRestoredData

with contextlib.suppress(ImportError):
import wandb
@ -79,8 +79,18 @@ class WandbLogger(BaseLogger):
if not wandb.run
else wandb.run
)
# TODO: don't access private attribute!
self.wandb_run._label(repo="tianshou") # type: ignore
self.tensorboard_logger: TensorboardLogger | None = None
self.writer: SummaryWriter | None = None

def prepare_dict_for_logging(self, log_data: dict) -> dict[str, VALID_LOG_VALS_TYPE]:
if self.tensorboard_logger is None:
raise Exception(
"`logger` needs to load the Tensorboard Writer before "
"preparing data for logging. Try `logger.load(SummaryWriter(log_path))`",
)
return self.tensorboard_logger.prepare_dict_for_logging(log_data)

def load(self, writer: SummaryWriter) -> None:
self.writer = writer
@ -95,7 +105,7 @@ class WandbLogger(BaseLogger):

def write(self, step_type: str, step: int, data: dict[str, VALID_LOG_VALS_TYPE]) -> None:
if self.tensorboard_logger is None:
raise Exception(
raise RuntimeError(
"`logger` needs to load the Tensorboard Writer before "
"writing data. Try `logger.load(SummaryWriter(log_path))`",
)
@ -156,3 +166,12 @@ class WandbLogger(BaseLogger):
except KeyError:
env_step = 0
return epoch, env_step, gradient_step

def restore_logged_data(self, log_path: str) -> TRestoredData:
if self.tensorboard_logger is None:
raise NotImplementedError(
"Restoring logged data directly from W&B is not yet implemented. "
"Try instantiating the internal TensorboardLogger by calling something "
"like `logger.load(SummaryWriter(log_path))`",
)
return self.tensorboard_logger.restore_logged_data(log_path)