utils

Distillation utilities shared across families/methods/entrypoints.

Modules

fastvideo.train.utils.builder

Assembly: build method + dataloader from a _target_-based config.

Classes

Functions

fastvideo.train.utils.builder.build_from_config
build_from_config(cfg: RunConfig) -> tuple[TrainingConfig, TrainingMethod, Any, int]

Build method + dataloader from a v3 run config.

  1. Instantiate each model in cfg.models via _target_.
  2. Resolve the method class from cfg.method["_target_"] and construct it with (cfg=cfg, role_models=...).
  3. Return (training_args, method, dataloader, start_step).
Source code in fastvideo/train/utils/builder.py
def build_from_config(cfg: RunConfig) -> tuple[TrainingConfig, TrainingMethod, Any, int]:
    """Build method + dataloader from a v3 run config.

    1. Instantiate each model in ``cfg.models`` via ``_target_``.
    2. Resolve the method class from ``cfg.method["_target_"]``
       and construct it with ``(cfg=cfg, role_models=...)``.
    3. Return ``(training_args, method, dataloader, start_step)``.
    """
    from fastvideo.train.models.base import ModelBase

    # --- 1. Build role model instances ---
    role_models: dict[str, ModelBase] = {}
    for role, model_cfg in cfg.models.items():
        model = instantiate(model_cfg, training_config=cfg.training)
        if not isinstance(model, ModelBase):
            raise TypeError(f"models.{role}._target_ must resolve to a "
                            f"ModelBase subclass, got {type(model).__name__}")
        role_models[role] = model

    # --- 2. Build method ---
    method_cfg = dict(cfg.method)
    method_target = str(method_cfg.pop("_target_"))
    method_cls = resolve_target(method_target)

    # The student model provides the dataloader.
    student = role_models.get("student")

    method = method_cls(
        cfg=cfg,
        role_models=role_models,
    )

    # --- 3. Gather dataloader and start_step ---
    dataloader = getattr(student, "dataloader", None) if student is not None else None
    start_step = int(getattr(student, "start_step", 0) if student is not None else 0)

    return cfg.training, method, dataloader, start_step
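
Example: a usage sketch tying load_run_config to build_from_config; the YAML path is hypothetical and the override tokens mirror the load_run_config docs:

# Sketch: typical entrypoint wiring; the config path is illustrative.
from fastvideo.train.utils.builder import build_from_config
from fastvideo.train.utils.config import load_run_config

cfg = load_run_config(
    "configs/distill/example.yaml",
    overrides=["--training.distributed.num_gpus", "4"],
)
training_args, method, dataloader, start_step = build_from_config(cfg)
# The "student" role supplies the dataloader; without a student role,
# dataloader is None and start_step is 0.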

fastvideo.train.utils.checkpoint

Classes

fastvideo.train.utils.checkpoint.CheckpointManager
CheckpointManager(*, method: Any, dataloader: Any, output_dir: str, config: CheckpointConfig, callbacks: Any | None = None, raw_config: dict[str, Any] | None = None)

Role-based checkpoint manager for training runtime.

  • Checkpoint policy lives in YAML (via TrainingArgs fields).
  • Resume path is typically provided via CLI (--resume-from-checkpoint).
Source code in fastvideo/train/utils/checkpoint.py
def __init__(
    self,
    *,
    method: Any,
    dataloader: Any,
    output_dir: str,
    config: CheckpointConfig,
    callbacks: Any | None = None,
    raw_config: dict[str, Any] | None = None,
) -> None:
    self.method = method
    self.dataloader = dataloader
    self.output_dir = str(output_dir)
    self.config = config
    self._callbacks = callbacks
    self._raw_config = raw_config
    self._last_saved_step: int | None = None
Functions
fastvideo.train.utils.checkpoint.CheckpointManager.load_metadata staticmethod
load_metadata(checkpoint_dir: str | Path) -> dict[str, Any]

Read metadata.json from a checkpoint dir.

Source code in fastvideo/train/utils/checkpoint.py
@staticmethod
def load_metadata(checkpoint_dir: str | Path) -> dict[str, Any]:
    """Read ``metadata.json`` from a checkpoint dir."""
    meta_path = Path(checkpoint_dir) / "metadata.json"
    if not meta_path.is_file():
        raise FileNotFoundError(f"No metadata.json in {checkpoint_dir}")
    with open(meta_path, encoding="utf-8") as f:
        return json.load(f)  # type: ignore[no-any-return]
fastvideo.train.utils.checkpoint.CheckpointManager.load_rng_snapshot
load_rng_snapshot(checkpoint_path: str) -> None

Restore per-rank RNG state from the snapshot file.

Must be called AFTER dcp.load and after iter(dataloader) so no later operation can clobber the restored state.

Source code in fastvideo/train/utils/checkpoint.py
def load_rng_snapshot(
    self,
    checkpoint_path: str,
) -> None:
    """Restore per-rank RNG state from the snapshot file.

    Must be called AFTER ``dcp.load`` **and** after
    ``iter(dataloader)`` so no later operation can
    clobber the restored state.
    """
    resolved = _resolve_resume_checkpoint(
        checkpoint_path,
        output_dir=self.output_dir,
    )
    if resolved is None:
        return
    rank = _rank()
    rng_path = resolved / f"rng_state_rank{rank}.pt"
    if not rng_path.is_file():
        # Fall back to legacy single-file snapshot.
        rng_path = resolved / "rng_state.pt"
    if not rng_path.is_file():
        logger.warning(
            "No rng_state in %s; skipping "
            "RNG snapshot restore.",
            resolved,
        )
        return

    rng = torch.load(
        rng_path,
        map_location="cpu",
        weights_only=False,
    )
    if "torch_rng" in rng:
        torch.set_rng_state(rng["torch_rng"])
    if "python_rng" in rng:
        random.setstate(rng["python_rng"])
    if "numpy_rng" in rng:
        np.random.set_state(rng["numpy_rng"])

    if "cuda_rng" in rng:
        torch.cuda.set_rng_state(rng["cuda_rng"])
    if "gen_cuda" in rng:
        self.method.cuda_generator.set_state(rng["gen_cuda"])
    logger.info(
        "Restored RNG snapshot from %s",
        rng_path,
    )
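
Example: the required resume ordering. Here manager is a CheckpointManager instance, and method.load_checkpoint is a hypothetical stand-in for the method's dcp.load-based state restore:

resume_path = "outputs/run/checkpoint-1000"  # illustrative path

method.load_checkpoint(resume_path)      # 1. dcp.load of model/optimizer state
data_iter = iter(dataloader)             # 2. build the iterator (may consume RNG)
manager.load_rng_snapshot(resume_path)   # 3. restore RNG last so nothing clobbers it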

Functions

fastvideo.train.utils.config

Training run config (_target_ based YAML).

Classes

fastvideo.train.utils.config.RunConfig dataclass
RunConfig(models: dict[str, dict[str, Any]], method: dict[str, Any], training: TrainingConfig, callbacks: dict[str, dict[str, Any]], raw: dict[str, Any])

Parsed run config loaded from YAML.

Functions
fastvideo.train.utils.config.RunConfig.resolved_config
resolved_config() -> dict[str, Any]

Return a fully-resolved config dict with defaults.

Suitable for logging to W&B so that every parameter (including defaults) is visible.

Source code in fastvideo/train/utils/config.py
def resolved_config(self) -> dict[str, Any]:
    """Return a fully-resolved config dict with defaults.

    Suitable for logging to W&B so that every parameter
    (including defaults) is visible.
    """
    import dataclasses

    def _safe_asdict(obj: Any) -> Any:
        if dataclasses.is_dataclass(obj) and not isinstance(obj, type):
            return {
                f.name: _safe_asdict(getattr(obj, f.name))
                for f in dataclasses.fields(obj) if not callable(getattr(obj, f.name))
            }
        if isinstance(obj, dict):
            return {k: _safe_asdict(v) for k, v in obj.items()}
        if isinstance(obj, list | tuple):
            return type(obj)(_safe_asdict(v) for v in obj)
        return obj

    resolved: dict[str, Any] = {}
    resolved["models"] = dict(self.models)
    resolved["method"] = dict(self.method)
    resolved["training"] = _safe_asdict(self.training)
    resolved["callbacks"] = dict(self.callbacks)
    return resolved
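
Example: because every default is materialized, the result can be passed straight to a tracker. A minimal sketch, assuming wandb is installed and cfg is a RunConfig:

import wandb

run = wandb.init(project="fastvideo-distill", config=cfg.resolved_config())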

Functions

fastvideo.train.utils.config.load_run_config
load_run_config(path: str, overrides: list[str] | None = None) -> RunConfig

Load a run config from YAML.

Expected top-level keys: models, method, training (nested), and optionally callbacks and pipeline.

Parameters:

    path (str, required):
        Path to the YAML config file.

    overrides (list[str] | None, default None):
        Optional list of CLI override tokens, e.g.
        ["--training.distributed.num_gpus", "4"]. Dotted keys map to
        nested YAML paths.
Source code in fastvideo/train/utils/config.py
def load_run_config(
    path: str,
    overrides: list[str] | None = None,
) -> RunConfig:
    """Load a run config from YAML.

    Expected top-level keys: ``models``, ``method``,
    ``training`` (nested), and optionally ``callbacks``
    and ``pipeline``.

    Args:
        path: Path to the YAML config file.
        overrides: Optional list of CLI override tokens,
            e.g. ``["--training.distributed.num_gpus", "4"]``.
            Dotted keys map to nested YAML paths.
    """
    path = _resolve_existing_file(path)
    with open(path, encoding="utf-8") as f:
        raw = yaml.safe_load(f)
    cfg = _require_mapping(raw, where=path)

    # Apply CLI overrides before building typed config.
    if overrides:
        parsed = _parse_cli_overrides(overrides)
        _apply_overrides(cfg, parsed)
        logger.info("Applied CLI overrides: %s", parsed)

    # --- models ---
    models_raw = _require_mapping(cfg.get("models"), where="models")
    models: dict[str, dict[str, Any]] = {}
    for role, model_cfg_raw in models_raw.items():
        role_str = _require_str(role, where="models.<role>")
        model_cfg = _require_mapping(model_cfg_raw, where=f"models.{role_str}")
        if "_target_" not in model_cfg:
            raise ValueError(f"models.{role_str} must have a "
                             "'_target_' key")
        models[role_str] = dict(model_cfg)

    # --- method ---
    method_raw = _require_mapping(cfg.get("method"), where="method")
    if "_target_" not in method_raw:
        raise ValueError("method must have a '_target_' key")
    method = dict(method_raw)

    # --- callbacks ---
    callbacks_raw = cfg.get("callbacks", None)
    if callbacks_raw is None:
        callbacks: dict[str, dict[str, Any]] = {}
    else:
        callbacks = _require_mapping(callbacks_raw, where="callbacks")

    # --- pipeline config ---
    pipeline_config = _parse_pipeline_config(cfg, models=models)

    # --- training config ---
    training_raw = _require_mapping(cfg.get("training"), where="training")
    t = dict(training_raw)
    training = _build_training_config(t, models=models, pipeline_config=pipeline_config)

    return RunConfig(
        models=models,
        method=method,
        training=training,
        callbacks=callbacks,
        raw=cfg,
    )
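
Example: a minimal YAML shape that satisfies the parser. The model _target_ is the path used in the resolve_target docs, the method class is hypothetical, and training's nested fields depend on TrainingConfig:

models:
  student:
    _target_: fastvideo.train.models.wan.wan.WanModel
method:
  _target_: fastvideo.train.methods.example.ExampleMethod
training:
  distributed:
    num_gpus: 4
callbacks: {}
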
fastvideo.train.utils.config.require_bool
require_bool(mapping: dict[str, Any], key: str, *, default: bool | None = None, where: str | None = None) -> bool

Read a bool value.

Source code in fastvideo/train/utils/config.py
def require_bool(
    mapping: dict[str, Any],
    key: str,
    *,
    default: bool | None = None,
    where: str | None = None,
) -> bool:
    """Read a bool value."""
    loc = where or key
    raw = mapping.get(key)
    if raw is None:
        if default is not None:
            return default
        raise ValueError(f"Missing required key {loc!r}")
    if not isinstance(raw, bool):
        raise ValueError(f"{loc} must be a bool, "
                         f"got {type(raw).__name__}")
    return raw
fastvideo.train.utils.config.require_choice
require_choice(mapping: dict[str, Any], key: str, choices: set[str] | frozenset[str], *, default: str | None = None, where: str | None = None) -> str

Read a string that must be one of choices; the value is stripped and lowercased before the comparison.

Source code in fastvideo/train/utils/config.py
def require_choice(
    mapping: dict[str, Any],
    key: str,
    choices: set[str] | frozenset[str],
    *,
    default: str | None = None,
    where: str | None = None,
) -> str:
    """Read a string that must be one of *choices*."""
    loc = where or key
    raw = mapping.get(key)
    if raw is None:
        if default is not None:
            if default not in choices:
                raise ValueError(f"Default {default!r} not in {choices}")
            return default
        raise ValueError(f"Missing required key {loc!r}")
    if not isinstance(raw, str) or not raw.strip():
        raise ValueError(f"{loc} must be a non-empty string, "
                         f"got {type(raw).__name__}")
    val = raw.strip().lower()
    if val not in choices:
        raise ValueError(f"{loc} must be one of {sorted(choices)}, "
                         f"got {raw!r}")
    return val
fastvideo.train.utils.config.require_non_negative_float
require_non_negative_float(mapping: dict[str, Any], key: str, *, default: float | None = None, where: str | None = None) -> float

Read a float that must be >= 0.

Source code in fastvideo/train/utils/config.py
def require_non_negative_float(
    mapping: dict[str, Any],
    key: str,
    *,
    default: float | None = None,
    where: str | None = None,
) -> float:
    """Read a float that must be >= 0."""
    loc = where or key
    raw = mapping.get(key)
    if raw is None:
        if default is not None:
            return default
        raise ValueError(f"Missing required key {loc!r}")
    val = get_optional_float(mapping, key, where=loc)
    if val is None or val < 0.0:
        raise ValueError(f"{loc} must be a non-negative float, "
                         f"got {raw!r}")
    return val
fastvideo.train.utils.config.require_non_negative_int
require_non_negative_int(mapping: dict[str, Any], key: str, *, default: int | None = None, where: str | None = None) -> int

Read an int that must be >= 0.

Source code in fastvideo/train/utils/config.py
def require_non_negative_int(
    mapping: dict[str, Any],
    key: str,
    *,
    default: int | None = None,
    where: str | None = None,
) -> int:
    """Read an int that must be >= 0."""
    loc = where or key
    raw = mapping.get(key)
    if raw is None:
        if default is not None:
            return default
        raise ValueError(f"Missing required key {loc!r}")
    val = get_optional_int(mapping, key, where=loc)
    if val is None or val < 0:
        raise ValueError(f"{loc} must be a non-negative integer, "
                         f"got {raw!r}")
    return val
fastvideo.train.utils.config.require_positive_int
require_positive_int(mapping: dict[str, Any], key: str, *, default: int | None = None, where: str | None = None) -> int

Read an int that must be > 0.

Source code in fastvideo/train/utils/config.py
def require_positive_int(
    mapping: dict[str, Any],
    key: str,
    *,
    default: int | None = None,
    where: str | None = None,
) -> int:
    """Read an int that must be > 0."""
    loc = where or key
    raw = mapping.get(key)
    if raw is None:
        if default is not None:
            return default
        raise ValueError(f"Missing required key {loc!r}")
    val = get_optional_int(mapping, key, where=loc)
    if val is None or val <= 0:
        raise ValueError(f"{loc} must be a positive integer, got {raw!r}")
    return val
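
Example: these validators share one shape (read the key, fall back to a default, otherwise raise with a where-prefixed message). A short sketch:

from fastvideo.train.utils.config import (
    require_bool, require_choice, require_positive_int)

section = {"enabled": True, "mode": "EMA", "interval": 500}

enabled = require_bool(section, "enabled", default=False, where="ckpt.enabled")
mode = require_choice(section, "mode", {"ema", "raw"}, where="ckpt.mode")
# "EMA" is stripped and lowercased to "ema" before matching choices.
interval = require_positive_int(section, "interval", where="ckpt.interval")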

fastvideo.train.utils.dataloader

Functions

fastvideo.train.utils.dataloader.build_parquet_t2v_train_dataloader
build_parquet_t2v_train_dataloader(data_config: DataConfig, *, text_len: int, parquet_schema: Any) -> Any

Build a parquet dataloader for T2V-style datasets.

Source code in fastvideo/train/utils/dataloader.py
def build_parquet_t2v_train_dataloader(
    data_config: DataConfig,
    *,
    text_len: int,
    parquet_schema: Any,
) -> Any:
    """Build a parquet dataloader for T2V-style datasets."""

    from fastvideo.dataset import build_parquet_map_style_dataloader

    _dataset, dataloader = build_parquet_map_style_dataloader(
        data_config.data_path,
        data_config.train_batch_size,
        num_data_workers=data_config.dataloader_num_workers,
        parquet_schema=parquet_schema,
        cfg_rate=data_config.training_cfg_rate,
        drop_last=True,
        text_padding_length=int(text_len),
        seed=int(data_config.seed or 0),
    )
    return dataloader
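
Example: a call-site sketch; the DataConfig instance and pyarrow schema are assumed to already exist:

# data_config carries data_path, train_batch_size, dataloader_num_workers,
# training_cfg_rate, and seed; schema would be e.g. a pyarrow.Schema.
dataloader = build_parquet_t2v_train_dataloader(
    data_config,
    text_len=512,          # illustrative padding length
    parquet_schema=schema,
)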

fastvideo.train.utils.instantiate

_target_-based instantiation utilities.

These helpers resolve a dotted Python path to a class and instantiate it, filtering constructor kwargs through inspect.signature so that only recognized parameters are forwarded. Unrecognized keys emit a warning rather than raising — this keeps YAML configs forward-compatible when a class drops a parameter in a later version.

Functions

fastvideo.train.utils.instantiate.instantiate
instantiate(cfg: dict[str, Any], **extra: Any) -> Any

Instantiate the class specified by cfg["_target_"].

All remaining keys in cfg (minus _target_) plus any extra keyword arguments are forwarded to the constructor. Keys that do not match an __init__ parameter trigger a warning and are dropped, so callers can safely pass a superset.

Source code in fastvideo/train/utils/instantiate.py
def instantiate(cfg: dict[str, Any], **extra: Any) -> Any:
    """Instantiate the class specified by ``cfg["_target_"]``.

    All remaining keys in *cfg* (minus ``_target_``) plus any *extra*
    keyword arguments are forwarded to the constructor.  Keys that do
    not match an ``__init__`` parameter trigger a warning and are
    dropped, so callers can safely pass a superset.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f"instantiate() expects a dict with '_target_', "
                        f"got {type(cfg).__name__}")
    target_str = cfg.get("_target_")
    if target_str is None:
        raise KeyError("Config dict is missing '_target_' key")

    cls = resolve_target(str(target_str))
    kwargs: dict[str, Any] = {k: v for k, v in cfg.items() if k != "_target_"}
    kwargs.update(extra)

    sig = inspect.signature(cls.__init__)  # type: ignore[misc]
    params = sig.parameters
    has_var_keyword = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values())

    if not has_var_keyword:
        valid_names = {
            name
            for name, p in params.items() if p.kind in (
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
                inspect.Parameter.KEYWORD_ONLY,
            )
        }
        valid_names.discard("self")
        unrecognized = set(kwargs) - valid_names
        if unrecognized:
            warnings.warn(
                f"instantiate({target_str}): dropping unrecognized "
                f"kwargs {sorted(unrecognized)}",
                stacklevel=2,
            )
            for key in unrecognized:
                del kwargs[key]

    return cls(**kwargs)
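
Example: the builder constructs role models through this helper; a sketch of the same pattern. The _target_ path is the one from the resolve_target docs, and the "trainable" key is an illustrative constructor kwarg:

model_cfg = {
    "_target_": "fastvideo.train.models.wan.wan.WanModel",
    "trainable": True,
}
model = instantiate(model_cfg, training_config=cfg.training)
# Any key that __init__ does not accept is warned about and dropped.
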
fastvideo.train.utils.instantiate.resolve_target
resolve_target(target: str) -> type

Import and return the class (or callable) at target.

target must be a fully-qualified dotted path, e.g. "fastvideo.train.models.wan.wan.WanModel".

Source code in fastvideo/train/utils/instantiate.py
def resolve_target(target: str) -> type:
    """Import and return the class (or callable) at *target*.

    *target* must be a fully-qualified dotted path, e.g.
    ``"fastvideo.train.models.wan.wan.WanModel"``.
    """
    if not isinstance(target, str) or not target.strip():
        raise ValueError(f"_target_ must be a non-empty dotted path string, "
                         f"got {target!r}")
    target = target.strip()
    parts = target.rsplit(".", 1)
    if len(parts) != 2:
        raise ValueError(f"_target_ must contain at least one dot "
                         f"(module.ClassName), got {target!r}")
    module_path, attr_name = parts
    try:
        module = importlib.import_module(module_path)
    except ModuleNotFoundError as exc:
        raise ImportError(f"Cannot import module {module_path!r} "
                          f"(from _target_={target!r})") from exc
    try:
        cls = getattr(module, attr_name)
    except AttributeError as exc:
        raise ImportError(f"Module {module_path!r} has no attribute "
                          f"{attr_name!r} (from _target_={target!r})") from exc
    return cls

fastvideo.train.utils.module_state

Functions

fastvideo.train.utils.module_state.apply_trainable
apply_trainable(module: Module, *, trainable: bool) -> Module

Apply train/eval mode + requires_grad based on a role's trainable flag.

Source code in fastvideo/train/utils/module_state.py
def apply_trainable(module: torch.nn.Module, *, trainable: bool) -> torch.nn.Module:
    """Apply train/eval mode + requires_grad based on a role's trainable flag."""

    module.requires_grad_(bool(trainable))
    if trainable:
        module.train()
    else:
        module.eval()
    return module
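
Example: a self-contained sketch of the usual teacher/student split:

import torch

from fastvideo.train.utils.module_state import apply_trainable

teacher = torch.nn.Linear(8, 8)
student = torch.nn.Linear(8, 8)

apply_trainable(teacher, trainable=False)  # eval mode, requires_grad off
apply_trainable(student, trainable=True)   # train mode, requires_grad on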

fastvideo.train.utils.moduleloader

Classes

Functions

fastvideo.train.utils.moduleloader.load_module_from_path
load_module_from_path(*, model_path: str, module_type: str, training_config: TrainingConfig, disable_custom_init_weights: bool = False, override_transformer_cls_name: str | None = None, transformer_override_safetensor: str | None = None) -> Module

Load a single pipeline component module.

Accepts a TrainingConfig and internally builds the TrainingArgs needed by PipelineComponentLoader.

Source code in fastvideo/train/utils/moduleloader.py
def load_module_from_path(
    *,
    model_path: str,
    module_type: str,
    training_config: TrainingConfig,
    disable_custom_init_weights: bool = False,
    override_transformer_cls_name: str | None = None,
    transformer_override_safetensor: str | None = None,
) -> torch.nn.Module:
    """Load a single pipeline component module.

    Accepts a ``TrainingConfig`` and internally builds the
    ``TrainingArgs`` needed by ``PipelineComponentLoader``.
    """
    fastvideo_args: Any = _make_training_args(training_config, model_path=model_path)

    local_model_path = maybe_download_model(model_path)
    config = verify_model_config_and_directory(local_model_path)

    if module_type not in config:
        raise ValueError(f"Module {module_type!r} not found in "
                         f"config at {local_model_path}")

    module_info = config[module_type]
    if module_info is None:
        raise ValueError(f"Module {module_type!r} has null value in "
                         f"config at {local_model_path}")

    transformers_or_diffusers, _architecture = module_info
    component_path = os.path.join(local_model_path, module_type)

    old_override: str | None = None
    if override_transformer_cls_name is not None:
        old_override = getattr(
            fastvideo_args,
            "override_transformer_cls_name",
            None,
        )
        fastvideo_args.override_transformer_cls_name = str(override_transformer_cls_name)

    if transformer_override_safetensor:
        fastvideo_args.init_weights_from_safetensors = str(transformer_override_safetensor)

    if disable_custom_init_weights:
        fastvideo_args._loading_teacher_critic_model = True
    try:
        module = PipelineComponentLoader.load_module(
            module_name=module_type,
            component_model_path=component_path,
            transformers_or_diffusers=transformers_or_diffusers,
            fastvideo_args=fastvideo_args,
        )
    finally:
        if disable_custom_init_weights and hasattr(fastvideo_args, "_loading_teacher_critic_model"):
            del fastvideo_args._loading_teacher_critic_model
        if override_transformer_cls_name is not None:
            if old_override is None:
                if hasattr(fastvideo_args, "override_transformer_cls_name"):
                    fastvideo_args.override_transformer_cls_name = None
            else:
                fastvideo_args.override_transformer_cls_name = old_override

    if not isinstance(module, torch.nn.Module):
        raise TypeError(f"Loaded {module_type!r} is not a "
                        f"torch.nn.Module: {type(module)}")
    return module
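
Example: a call-site sketch; the model path is a hypothetical Diffusers-style model id, and cfg.training is assumed to be a TrainingConfig:

# Load a frozen teacher transformer, skipping custom weight init.
teacher_transformer = load_module_from_path(
    model_path="org/some-diffusers-model",
    module_type="transformer",
    training_config=cfg.training,
    disable_custom_init_weights=True,
)
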
fastvideo.train.utils.moduleloader.make_inference_args
make_inference_args(tc: TrainingConfig, *, model_path: str) -> TrainingArgs

Build a TrainingArgs for inference (validation / pipelines).

Source code in fastvideo/train/utils/moduleloader.py
def make_inference_args(
    tc: TrainingConfig,
    *,
    model_path: str,
) -> TrainingArgs:
    """Build a TrainingArgs for inference (validation / pipelines)."""
    args = _make_training_args(tc, model_path=model_path)
    args.inference_mode = True
    args.mode = ExecutionMode.INFERENCE
    args.dit_cpu_offload = True
    args.VSA_sparsity = tc.vsa_sparsity
    return args

fastvideo.train.utils.optimizer

Functions

fastvideo.train.utils.optimizer.build_optimizer_and_scheduler
build_optimizer_and_scheduler(*, params: list[Parameter], optimizer_config: OptimizerConfig, loop_config: TrainingLoopConfig, learning_rate: float, betas: tuple[float, float], scheduler_name: str) -> tuple[Optimizer, object]

Build an AdamW optimizer and LR scheduler.

Returns (optimizer, lr_scheduler) so the caller can store them as method-level attributes.

Source code in fastvideo/train/utils/optimizer.py
def build_optimizer_and_scheduler(
    *,
    params: list[torch.nn.Parameter],
    optimizer_config: OptimizerConfig,
    loop_config: TrainingLoopConfig,
    learning_rate: float,
    betas: tuple[float, float],
    scheduler_name: str,
) -> tuple[torch.optim.Optimizer, object]:
    """Build an AdamW optimizer and LR scheduler.

    Returns ``(optimizer, lr_scheduler)`` so the caller can store them
    as method-level attributes.
    """
    if not params:
        raise ValueError("No trainable parameters passed to "
                         "build_optimizer_and_scheduler")

    optimizer = torch.optim.AdamW(
        params,
        lr=float(learning_rate),
        betas=betas,
        weight_decay=float(optimizer_config.weight_decay),
        eps=1e-8,
    )

    scheduler = get_scheduler(
        str(scheduler_name),
        optimizer=optimizer,
        num_warmup_steps=int(optimizer_config.lr_warmup_steps),
        num_training_steps=int(loop_config.max_train_steps),
        num_cycles=int(optimizer_config.lr_num_cycles),
        power=float(optimizer_config.lr_power),
        min_lr_ratio=float(optimizer_config.min_lr_ratio),
        last_epoch=-1,
    )

    return optimizer, scheduler
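
Example: a call-site sketch; the cfg.training.optimizer and cfg.training.loop attribute paths are assumptions about the surrounding method code:

params = [p for p in student.parameters() if p.requires_grad]
optimizer, lr_scheduler = build_optimizer_and_scheduler(
    params=params,
    optimizer_config=cfg.training.optimizer,
    loop_config=cfg.training.loop,
    learning_rate=1e-5,
    betas=(0.9, 0.999),
    scheduler_name="constant_with_warmup",  # any name get_scheduler accepts
)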

fastvideo.train.utils.tracking

Functions

fastvideo.train.utils.tracking.build_tracker
build_tracker(tracker_config: TrackerConfig, checkpoint_config: CheckpointConfig, *, config: dict[str, Any] | None) -> Any

Build a tracker instance for a distillation run.

Source code in fastvideo/train/utils/tracking.py
def build_tracker(
    tracker_config: TrackerConfig,
    checkpoint_config: CheckpointConfig,
    *,
    config: dict[str, Any] | None,
) -> Any:
    """Build a tracker instance for a distillation run."""

    world_group = get_world_group()

    trackers = list(tracker_config.trackers)
    if not trackers and str(tracker_config.project_name):
        trackers.append(Trackers.WANDB.value)
    if world_group.rank != 0:
        trackers = []

    tracker_log_dir = checkpoint_config.output_dir or os.getcwd()
    if trackers:
        tracker_log_dir = os.path.join(tracker_log_dir, "tracker")

    tracker_config_dict = config if trackers else None
    tracker_run_name = tracker_config.run_name or None
    project = tracker_config.project_name or "fastvideo"

    return initialize_trackers(
        trackers,
        experiment_name=project,
        config=tracker_config_dict,
        log_dir=tracker_log_dir,
        run_name=tracker_run_name,
    )
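
Example: a call-site sketch; the config attribute paths and the returned tracker's log method are assumptions:

tracker = build_tracker(
    cfg.training.tracker,
    cfg.training.checkpoint,
    config=cfg.resolved_config(),
)
tracker.log({"loss": 0.123}, step=0)  # illustrative logging call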

fastvideo.train.utils.training_config

Typed training config — replaces TrainingArgs.

Classes