Skip to content

entrypoints

Modules

fastvideo.entrypoints.cli

Modules

fastvideo.entrypoints.cli.bench

Runs benchmark against a running FastVideo OpenAI-compatible server.

Example usage

fastvideo bench --dataset vbench --num-prompts 20 --port 8000

Classes
fastvideo.entrypoints.cli.bench.BenchSubcommand
BenchSubcommand()

Bases: CLISubcommand

The bench subcommand — runs serving benchmarks.

Source code in fastvideo/entrypoints/cli/bench.py
def __init__(self) -> None:
    # Register this subcommand under the CLI name "bench". The name is set
    # before delegating to CLISubcommand.__init__ in case the base class
    # reads self.name during initialization.
    self.name = "bench"
    super().__init__()
Functions
fastvideo.entrypoints.cli.bench_serving

Benchmark online serving for diffusion models (Image/Video Generation).

Example usage
Launch a server (for a T2V, T2I, or any other multimodal generation model) and benchmark against it:

fastvideo serve --model-path Wan-AI/Wan2.1-T2V-1.3B-Diffusers --port 8000

Benchmark it, making sure the --port value matches the server's port:

fastvideo bench --dataset vbench --num-prompts 20 --port 8000

Classes
fastvideo.entrypoints.cli.bench_serving.VBenchDataset
VBenchDataset(args, api_url: str, model: str)

Bases: BaseDataset

Dataset loader for VBench prompts. Supports t2v, i2v.

Source code in fastvideo/entrypoints/cli/bench_serving.py
def __init__(self, args, api_url: str, model: str):
    """Initialize the VBench prompt dataset.

    Args:
        args: Benchmark CLI arguments (forwarded to BaseDataset).
        api_url: URL of the server endpoint to benchmark.
        model: Model identifier used when issuing requests.
    """
    super().__init__(args, api_url, model)
    # Downloaded/derived data is cached under ~/.cache/fastvideo.
    self.cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "fastvideo")
    # Eagerly load the prompt items at construction time.
    self.items = self._load_data()
Functions
fastvideo.entrypoints.cli.cli_types
Classes
fastvideo.entrypoints.cli.cli_types.CLISubcommand

Base class for CLI subcommands

Functions
fastvideo.entrypoints.cli.cli_types.CLISubcommand.cmd
cmd(args: Namespace) -> None

Execute the command with the given arguments

Source code in fastvideo/entrypoints/cli/cli_types.py
def cmd(self, args: argparse.Namespace) -> None:
    """Execute the command with the given arguments.

    Raises:
        NotImplementedError: always — subclasses must override this method.
    """
    raise NotImplementedError
fastvideo.entrypoints.cli.cli_types.CLISubcommand.subparser_init
subparser_init(subparsers: _SubParsersAction) -> FlexibleArgumentParser

Initialize the subparser for this command

Source code in fastvideo/entrypoints/cli/cli_types.py
def subparser_init(self, subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser:
    """Initialize the subparser for this command.

    Raises:
        NotImplementedError: always — subclasses must override this method.
    """
    raise NotImplementedError
fastvideo.entrypoints.cli.cli_types.CLISubcommand.validate
validate(args: Namespace) -> None

Validate the arguments for this command

Source code in fastvideo/entrypoints/cli/cli_types.py
def validate(self, args: argparse.Namespace) -> None:
    """Validate the arguments for this command.

    The default implementation accepts any arguments; subclasses override
    this to raise on invalid input.
    """
    pass
fastvideo.entrypoints.cli.generate
Classes
fastvideo.entrypoints.cli.generate.GenerateSubcommand
GenerateSubcommand()

Bases: CLISubcommand

The generate subcommand for the FastVideo CLI

Source code in fastvideo/entrypoints/cli/generate.py
def __init__(self) -> None:
    # Register this subcommand under the CLI name "generate".
    self.name = "generate"
    super().__init__()
    # NOTE(review): these caches look like they partition the parsed CLI
    # arguments between generator construction and generation-time calls —
    # confirm against their use in cmd().
    self.init_arg_names = self._get_init_arg_names()
    self.generation_arg_names = self._get_generation_arg_names()
Functions
fastvideo.entrypoints.cli.generate.GenerateSubcommand.validate
validate(args: Namespace) -> None

Validate the arguments for this command

Source code in fastvideo/entrypoints/cli/generate.py
def validate(self, args: argparse.Namespace) -> None:
    """Validate the arguments for this command.

    Raises:
        ValueError: if ``num_gpus`` is zero/negative, or if ``config``
            points at a file that does not exist.
    """
    gpu_count = args.num_gpus
    if gpu_count is not None and gpu_count <= 0:
        raise ValueError("Number of gpus must be positive")

    config_path = args.config
    if config_path and not os.path.exists(config_path):
        raise ValueError(f"Config file not found: {config_path}")
Functions
fastvideo.entrypoints.cli.main
Classes
Functions
fastvideo.entrypoints.cli.main.cmd_init
cmd_init() -> list[CLISubcommand]

Initialize all commands from separate modules

Source code in fastvideo/entrypoints/cli/main.py
def cmd_init() -> list[CLISubcommand]:
    """Collect and return the subcommands contributed by each CLI module."""
    # Order determines the order subcommands are registered with argparse.
    return [
        *generate_cmd_init(),
        *serve_cmd_init(),
        *bench_cmd_init(),
    ]
fastvideo.entrypoints.cli.serve
Classes
fastvideo.entrypoints.cli.serve.ServeSubcommand
ServeSubcommand()

Bases: CLISubcommand

Starts an OpenAI-compatible API server.

Source code in fastvideo/entrypoints/cli/serve.py
def __init__(self) -> None:
    # Register this subcommand under the CLI name "serve". The name is set
    # before delegating to CLISubcommand.__init__ in case the base class
    # reads self.name during initialization.
    self.name = "serve"
    super().__init__()
Functions
fastvideo.entrypoints.cli.utils
Functions
fastvideo.entrypoints.cli.utils.launch_distributed
launch_distributed(num_gpus: int, args: list[str], master_port: int | None = None) -> int

Launch a distributed job with the given arguments

Parameters:

Name Type Description Default
num_gpus int

Number of GPUs to use

required
args list[str]

Arguments to pass to v1_fastvideo_inference.py (defaults to sys.argv[1:])

required
master_port int | None

Port for the master process (default: random)

None
Source code in fastvideo/entrypoints/cli/utils.py
def launch_distributed(num_gpus: int, args: list[str], master_port: int | None = None) -> int:
    """
    Launch a distributed job with the given arguments

    Args:
        num_gpus: Number of GPUs to use
        args: Arguments to pass to v1_fastvideo_inference.py (defaults to sys.argv[1:])
        master_port: Port for the master process (default: random)

    Returns:
        The launched process's exit code (0 on success).
    """

    current_env = os.environ.copy()
    python_executable = sys.executable
    # Resolve the repository root relative to this file so the launcher works
    # regardless of the current working directory.
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../.."))
    main_script = os.path.join(project_root, "fastvideo/sample/v1_fastvideo_inference.py")

    # Delegate process spawning to torch.distributed.run (torchrun),
    # one worker per GPU.
    cmd = [python_executable, "-m", "torch.distributed.run", f"--nproc_per_node={num_gpus}"]

    if master_port is not None:
        cmd.append(f"--master_port={master_port}")

    cmd.append(main_script)
    cmd.extend(args)

    logger.info("Running inference with %d GPU(s)", num_gpus)
    logger.info("Launching command: %s", " ".join(cmd))

    # Force UTF-8 in the child so its output decodes cleanly on platforms
    # with a different default encoding (e.g. Windows).
    current_env["PYTHONIOENCODING"] = "utf-8"
    # Merge stderr into stdout and stream text line-by-line (bufsize=1 is
    # line buffering in text mode); errors='replace' avoids decode crashes.
    process = subprocess.Popen(cmd,
                               env=current_env,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True,
                               bufsize=1,
                               encoding='utf-8',
                               errors='replace')

    # Echo child output to our stdout until the pipe reaches EOF.
    if process.stdout:
        for line in iter(process.stdout.readline, ''):
            print(line.strip())

    return process.wait()

fastvideo.entrypoints.openai

Modules

fastvideo.entrypoints.openai.api_server
Classes
Functions
fastvideo.entrypoints.openai.api_server.create_app
create_app(fastvideo_args: FastVideoArgs, output_dir: str = DEFAULT_OUTPUT_DIR) -> FastAPI

Build the FastAPI application with all routers mounted

Source code in fastvideo/entrypoints/openai/api_server.py
def create_app(
    fastvideo_args: FastVideoArgs,
    output_dir: str = DEFAULT_OUTPUT_DIR,
) -> FastAPI:
    """Build the FastAPI application with all routers mounted.

    Args:
        fastvideo_args: Server/inference configuration; stashed on
            ``app.state`` for the lifespan handler and request handlers.
        output_dir: Directory where generated media is written.

    Returns:
        The configured FastAPI application (not yet running).
    """

    app = FastAPI(
        title="FastVideo OpenAI-Compatible API",
        version="0.1.0",
        lifespan=lifespan,  # loads the model on startup, releases it on shutdown
    )
    # State read back by lifespan() when the server starts.
    app.state.fastvideo_args = fastvideo_args
    app.state.output_dir = output_dir

    # Fully permissive CORS: any origin, method, and header is accepted.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Import and mount routers
    # NOTE(review): imported here rather than at module top — presumably to
    # avoid circular imports with modules that import this one; confirm.
    from fastvideo.entrypoints.openai.common_api import router as common_router
    from fastvideo.entrypoints.openai.image_api import router as image_router
    from fastvideo.entrypoints.openai.video_api import router as video_router

    app.include_router(common_router)
    app.include_router(video_router)
    app.include_router(image_router)

    @app.get("/health")
    async def health():
        # Lightweight liveness probe.
        return {"status": "ok"}

    return app
fastvideo.entrypoints.openai.api_server.lifespan async
lifespan(app: FastAPI) -> AsyncIterator[None]

Load model on startup, clean up on shutdown

Source code in fastvideo/entrypoints/openai/api_server.py
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncIterator[None]:
    """Load model on startup, clean up on shutdown"""
    # Both values are placed on app.state by create_app() before startup.
    args: FastVideoArgs = app.state.fastvideo_args
    output_dir: str = app.state.output_dir

    logger.info("Loading model from %s ...", args.model_path)
    generator = VideoGenerator.from_fastvideo_args(args)
    logger.info("Model loaded successfully.")

    # Publish generator/args/output_dir through the shared state module so
    # API handlers in other modules can reach them.
    set_state(generator, args, output_dir)

    yield  # server is running

    # Shutdown path: release model resources, then clear the shared state.
    logger.info("Shutting down — releasing model resources ...")
    generator.shutdown()
    clear_state()
    logger.info("Shutdown complete.")
fastvideo.entrypoints.openai.api_server.run_server
run_server(fastvideo_args: FastVideoArgs, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT, output_dir: str = DEFAULT_OUTPUT_DIR)

Create the app and run it with uvicorn

Source code in fastvideo/entrypoints/openai/api_server.py
def run_server(
    fastvideo_args: FastVideoArgs,
    host: str = DEFAULT_HOST,
    port: int = DEFAULT_PORT,
    output_dir: str = DEFAULT_OUTPUT_DIR,
):
    """Create the app and run it with uvicorn (blocks until shutdown).

    Args:
        fastvideo_args: Server/inference configuration.
        host: Interface to bind.
        port: TCP port to bind.
        output_dir: Directory where generated media is written.
    """
    app = create_app(fastvideo_args, output_dir=output_dir)

    logger.info("Starting FastVideo server on %s:%d", host, port)
    logger.info("Model: %s", fastvideo_args.model_path)

    uvicorn.run(
        app,
        host=host,
        port=port,
        log_level="info",
        # Long keep-alive (300s) since generation requests can be slow.
        timeout_keep_alive=300,
    )
fastvideo.entrypoints.openai.common_api
Classes
fastvideo.entrypoints.openai.common_api.ModelCard

Bases: BaseModel

OpenAI-compatible model card

Functions
fastvideo.entrypoints.openai.common_api.available_models async
available_models()

Show available models

Source code in fastvideo/entrypoints/openai/common_api.py
@router.get("/models", response_class=ORJSONResponse)
async def available_models():
    """Show available models (OpenAI-style ``GET /models`` listing).

    Only the single model this server was started with is reported.
    """
    args = get_server_args()
    card = ModelCard(id=args.model_path, root=args.model_path)
    return {"object": "list", "data": [card.model_dump()]}
fastvideo.entrypoints.openai.common_api.model_info async
model_info()

Get basic model information

Source code in fastvideo/entrypoints/openai/common_api.py
@router.get("/model_info")
async def model_info():
    """Get basic model information (currently just the model path)."""
    args = get_server_args()
    return {"model_path": args.model_path}
fastvideo.entrypoints.openai.common_api.retrieve_model async
retrieve_model(model: str)

Retrieve a model by name

Source code in fastvideo/entrypoints/openai/common_api.py
@router.get("/models/{model:path}", response_class=ORJSONResponse)
async def retrieve_model(model: str):
    """Retrieve a model by name.

    Returns an OpenAI-style 404 error payload when *model* does not match
    the single model this server is serving.
    """
    args = get_server_args()
    if model != args.model_path:
        return ORJSONResponse(
            status_code=404,
            content={
                "error": {
                    "message": f"The model '{model}' does not exist",
                    "type": "invalid_request_error",
                    "param": "model",
                    "code": "model_not_found",
                }
            },
        )
    card = ModelCard(id=model, root=model)
    return card.model_dump()
fastvideo.entrypoints.openai.image_api
Functions
fastvideo.entrypoints.openai.protocol
Functions
fastvideo.entrypoints.openai.protocol.generate_request_id
generate_request_id() -> str

Generate a unique request ID

Source code in fastvideo/entrypoints/openai/protocol.py
def generate_request_id() -> str:
    """Generate a unique request ID: 32 lowercase hex characters."""
    # Format the 128-bit random UUID as zero-padded hex (same output as
    # uuid4().hex).
    return "%032x" % uuid.uuid4().int
fastvideo.entrypoints.openai.state

Global server state shared across API modules.

Keeping state in a dedicated module prevents the classic 'main vs package module' duplication that occurs when api_server.py is run with python -m. All modules that need the generator or server args should import from here.

Classes
Functions
fastvideo.entrypoints.openai.state.clear_state
clear_state() -> None

Clear server state on shutdown.

Source code in fastvideo/entrypoints/openai/state.py
def clear_state() -> None:
    """Reset the module-level server state (called during shutdown)."""
    global _generator, _fastvideo_args
    # Drop both references so the generator can be garbage-collected.
    _generator = _fastvideo_args = None
fastvideo.entrypoints.openai.state.get_generator
get_generator() -> VideoGenerator

Return the global VideoGenerator instance (set during startup).

Source code in fastvideo/entrypoints/openai/state.py
def get_generator() -> VideoGenerator:
    """Return the global VideoGenerator instance (set during startup).

    Raises:
        RuntimeError: if called before the server state was initialized.
    """
    # Explicit raise instead of ``assert``: asserts are stripped under
    # ``python -O``, which would turn this guard into an AttributeError
    # (or worse) further downstream.
    if _generator is None:
        raise RuntimeError("Server not initialized — generator is None")
    return _generator
fastvideo.entrypoints.openai.state.get_output_dir
get_output_dir() -> str

Return the configured output directory.

Source code in fastvideo/entrypoints/openai/state.py
def get_output_dir() -> str:
    """Return the configured output directory (set via ``set_state``)."""
    return _output_dir
fastvideo.entrypoints.openai.state.get_server_args
get_server_args() -> FastVideoArgs

Return the global FastVideoArgs (set during startup).

Source code in fastvideo/entrypoints/openai/state.py
def get_server_args() -> FastVideoArgs:
    """Return the global FastVideoArgs (set during startup).

    Raises:
        RuntimeError: if called before the server state was initialized.
    """
    # Explicit raise instead of ``assert``: asserts are stripped under
    # ``python -O``, so the guard must not rely on assertion semantics.
    if _fastvideo_args is None:
        raise RuntimeError("Server not initialized — args is None")
    return _fastvideo_args
fastvideo.entrypoints.openai.state.set_state
set_state(generator: VideoGenerator, fastvideo_args: FastVideoArgs, output_dir: str) -> None

Set all server state at once (called from lifespan).

Source code in fastvideo/entrypoints/openai/state.py
def set_state(
    generator: VideoGenerator,
    fastvideo_args: FastVideoArgs,
    output_dir: str,
) -> None:
    """Set all server state at once (called from lifespan).

    Args:
        generator: The loaded VideoGenerator instance.
        fastvideo_args: The server's inference configuration.
        output_dir: Directory where generated media is written.
    """
    global _generator, _fastvideo_args, _output_dir
    _generator = generator
    _fastvideo_args = fastvideo_args
    _output_dir = output_dir
fastvideo.entrypoints.openai.stores
Classes
fastvideo.entrypoints.openai.stores.AsyncDictStore
AsyncDictStore()

A small async-safe in-memory key-value store for dict items.

This encapsulates the usual pattern of a module-level dict guarded by an asyncio.Lock and provides simple CRUD methods that are safe to call concurrently from FastAPI request handlers and background tasks.

Source code in fastvideo/entrypoints/openai/stores.py
def __init__(self) -> None:
    # Mapping of item id -> item dict; all access is guarded by _lock so
    # request handlers and background tasks can mutate it concurrently.
    self._items: dict[str, dict[str, Any]] = {}
    self._lock = asyncio.Lock()
fastvideo.entrypoints.openai.utils
Functions
fastvideo.entrypoints.openai.utils.choose_image_ext
choose_image_ext(output_format: str | None, background: str | None) -> str

Pick a file extension for image outputs

Source code in fastvideo/entrypoints/openai/utils.py
def choose_image_ext(output_format: str | None, background: str | None) -> str:
    """Pick a file extension for image outputs"""
    fmt = (output_format or "").lower()
    if fmt in {"png", "webp", "jpeg", "jpg"}:
        return "jpg" if fmt == "jpeg" else fmt
    if (background or "auto").lower() == "transparent":
        return "png"
    return "jpg"
fastvideo.entrypoints.openai.utils.merge_image_input_list
merge_image_input_list(*inputs: list | Any | None) -> list

Merge multiple image input sources into a single flat list

Source code in fastvideo/entrypoints/openai/utils.py
def merge_image_input_list(*inputs: list | Any | None) -> list:
    """Merge multiple image input sources into a single flat list"""
    result = []
    for input_item in inputs:
        if input_item is not None:
            if isinstance(input_item, list):
                result.extend(input_item)
            else:
                result.append(input_item)
    return result
fastvideo.entrypoints.openai.utils.parse_size
parse_size(size: str) -> tuple[int, int] | tuple[None, None]

Parse a 'WIDTHxHEIGHT' string into (width, height)

Source code in fastvideo/entrypoints/openai/utils.py
def parse_size(size: str) -> tuple[int, int] | tuple[None, None]:
    """Parse a 'WIDTHxHEIGHT' string into (width, height)"""
    try:
        parts = size.lower().replace(" ", "").split("x")
        if len(parts) != 2:
            raise ValueError
        w, h = int(parts[0]), int(parts[1])
        return w, h
    except Exception:
        return None, None
fastvideo.entrypoints.openai.utils.save_image_to_path async
save_image_to_path(image: UploadFile | str, target_path: str) -> str

Save an uploaded file or download from URL to target_path

Source code in fastvideo/entrypoints/openai/utils.py
async def save_image_to_path(image: UploadFile | str, target_path: str) -> str:
    """Save an uploaded file or download from URL to *target_path*."""
    # Try the URL path first; a None result means the input was not a URL,
    # so fall back to treating it as an upload.
    saved_path = await _maybe_url_image(image, target_path)
    if saved_path is not None:
        return saved_path
    return await _save_upload_to_path(image, target_path)
fastvideo.entrypoints.openai.video_api
Functions

fastvideo.entrypoints.streaming_generator

Classes

fastvideo.entrypoints.streaming_generator.StreamingVideoGenerator
StreamingVideoGenerator(fastvideo_args: FastVideoArgs, executor_class: type[Executor], log_stats: bool, use_queue_mode: bool = True)

Bases: VideoGenerator

This class extends VideoGenerator with streaming capabilities, allowing incremental video generation with step-by-step control.

Source code in fastvideo/entrypoints/streaming_generator.py
def __init__(self,
             fastvideo_args: FastVideoArgs,
             executor_class: type[Executor],
             log_stats: bool,
             use_queue_mode: bool = True):
    """Initialize the streaming video generator.

    Args:
        fastvideo_args: The inference arguments.
        executor_class: The executor class to use for inference.
        log_stats: Whether to log statistics (forwarded to VideoGenerator).
        use_queue_mode: Request queue-based streaming; only honored when
            the underlying executor is a MultiprocExecutor.
    """
    super().__init__(fastvideo_args, executor_class, log_stats)
    # Frames accumulated across incremental generation steps.
    self.accumulated_frames: list[np.ndarray] = []
    self.sampling_param: SamplingParam | None = None
    self.batch: ForwardBatch | None = None
    # Queue mode requires a multiprocess executor; silently disabled otherwise.
    self._use_queue_mode = use_queue_mode and isinstance(self.executor, MultiprocExecutor)
    # Incremental writer and per-block output state used while streaming.
    self.writer: IncrementalVideoWriter | None = None
    self.block_dir: str | None = None
    self.block_idx: int = 0

Functions

fastvideo.entrypoints.video_generator

VideoGenerator module for FastVideo.

This module provides a consolidated interface for generating videos using diffusion models.

Classes

fastvideo.entrypoints.video_generator.VideoGenerator
VideoGenerator(fastvideo_args: FastVideoArgs, executor_class: type[Executor], log_stats: bool)

A unified class for generating videos using diffusion models.

This class provides a simple interface for video generation with rich customization options, similar to popular frameworks like HF Diffusers.

Initialize the video generator.

Parameters:

Name Type Description Default
fastvideo_args FastVideoArgs

The inference arguments

required
executor_class type[Executor]

The executor class to use for inference

required
Source code in fastvideo/entrypoints/video_generator.py
def __init__(self, fastvideo_args: FastVideoArgs, executor_class: type[Executor], log_stats: bool):
    """
    Initialize the video generator.

    Args:
        fastvideo_args: The inference arguments
        executor_class: The executor class to use for inference
        log_stats: Whether to log statistics (not used in this constructor)
    """
    self.fastvideo_args = fastvideo_args
    # The executor owns the actual inference pipeline/workers.
    self.executor = executor_class(fastvideo_args)
Functions
fastvideo.entrypoints.video_generator.VideoGenerator.from_fastvideo_args classmethod
from_fastvideo_args(fastvideo_args: FastVideoArgs) -> VideoGenerator

Create a video generator with the specified arguments.

Parameters:

Name Type Description Default
fastvideo_args FastVideoArgs

The inference arguments

required

Returns:

Type Description
VideoGenerator

The created video generator

Source code in fastvideo/entrypoints/video_generator.py
@classmethod
def from_fastvideo_args(cls, fastvideo_args: FastVideoArgs) -> "VideoGenerator":
    """
    Create a video generator with the specified arguments.

    Args:
        fastvideo_args: The inference arguments

    Returns:
        The created video generator
    """
    # Pick the executor implementation appropriate for these arguments.
    chosen_executor = Executor.get_class(fastvideo_args)
    return cls(
        fastvideo_args=fastvideo_args,
        executor_class=chosen_executor,
        log_stats=False,  # TODO: implement
    )
fastvideo.entrypoints.video_generator.VideoGenerator.from_pretrained classmethod
from_pretrained(model_path: str, **kwargs) -> VideoGenerator

Create a video generator from a pretrained model.

Parameters:

Name Type Description Default
model_path str

Path or identifier for the pretrained model

required
pipeline_config

Pipeline config to use for inference

required
**kwargs

Additional arguments to customize model loading, set any FastVideoArgs or PipelineConfig attributes here.

{}

Returns:

Type Description
VideoGenerator

The created video generator

Priority level: Default pipeline config < User's pipeline config < User's kwargs

Source code in fastvideo/entrypoints/video_generator.py
@classmethod
def from_pretrained(cls, model_path: str, **kwargs) -> "VideoGenerator":
    """
    Create a video generator from a pretrained model.

    Args:
        model_path: Path or identifier for the pretrained model
        **kwargs: Additional arguments to customize model loading, set any FastVideoArgs or PipelineConfig attributes here.

    Returns:
        The created video generator

    Priority level: Default pipeline config < User's pipeline config < User's kwargs
    """
    # User kwargs override FastVideoArgs/PipelineConfig defaults; the
    # positional model_path always wins.
    merged_kwargs = dict(kwargs, model_path=model_path)
    args = FastVideoArgs.from_kwargs(**merged_kwargs)
    return cls.from_fastvideo_args(args)
fastvideo.entrypoints.video_generator.VideoGenerator.generate_video
generate_video(prompt: str | None = None, sampling_param: SamplingParam | None = None, mouse_cond: Tensor | None = None, keyboard_cond: Tensor | None = None, grid_sizes: tuple[int, int, int] | list[int] | Tensor | None = None, **kwargs) -> dict[str, Any] | list[dict[str, Any]]

Generate a video based on the given prompt.

Parameters:

Name Type Description Default
prompt str | None

The prompt to use for generation (optional if prompt_txt is provided)

None
negative_prompt

The negative prompt to use (overrides the one in fastvideo_args)

required
output_path

Path to save the video (overrides the one in fastvideo_args)

required
prompt_path

Path to prompt file

required
save_video

Whether to save the video to disk

required
return_frames

Whether to include raw frames in the result dict

required
num_inference_steps

Number of denoising steps (overrides fastvideo_args)

required
guidance_scale

Classifier-free guidance scale (overrides fastvideo_args)

required
num_frames

Number of frames to generate (overrides fastvideo_args)

required
height

Height of generated video (overrides fastvideo_args)

required
width

Width of generated video (overrides fastvideo_args)

required
fps

Frames per second for saved video (overrides fastvideo_args)

required
seed

Random seed for generation (overrides fastvideo_args)

required
callback

Callback function called after each step

required
callback_steps

Number of steps between each callback

required

Returns:

Type Description
dict[str, Any] | list[dict[str, Any]]

A metadata dictionary for single-prompt generation, or a list of

dict[str, Any] | list[dict[str, Any]]

metadata dictionaries for prompt-file batch generation.

Source code in fastvideo/entrypoints/video_generator.py
def generate_video(
    self,
    prompt: str | None = None,
    sampling_param: SamplingParam | None = None,
    # Action control inputs (Matrix-Game)
    mouse_cond: torch.Tensor | None = None,
    keyboard_cond: torch.Tensor | None = None,
    grid_sizes: tuple[int, int, int] | list[int] | torch.Tensor
    | None = None,
    **kwargs,
) -> dict[str, Any] | list[dict[str, Any]]:
    """
    Generate a video based on the given prompt.

    Args:
        prompt: The prompt to use for generation (optional if prompt_txt is provided)
        negative_prompt: The negative prompt to use (overrides the one in fastvideo_args)
        output_path: Path to save the video (overrides the one in fastvideo_args)
        prompt_path: Path to prompt file
        save_video: Whether to save the video to disk
        return_frames: Whether to include raw frames in the result dict
        num_inference_steps: Number of denoising steps (overrides fastvideo_args)
        guidance_scale: Classifier-free guidance scale (overrides fastvideo_args)
        num_frames: Number of frames to generate (overrides fastvideo_args)
        height: Height of generated video (overrides fastvideo_args)
        width: Width of generated video (overrides fastvideo_args)
        fps: Frames per second for saved video (overrides fastvideo_args)
        seed: Random seed for generation (overrides fastvideo_args)
        callback: Callback function called after each step
        callback_steps: Number of steps between each callback

    Returns:
        A metadata dictionary for single-prompt generation, or a list of
        metadata dictionaries for prompt-file batch generation.
    """
    # Handle batch processing from text file
    if sampling_param is None:
        # Fall back to the model's default sampling parameters.
        sampling_param = SamplingParam.from_pretrained(self.fastvideo_args.model_path)

    # Add action control inputs to kwargs if provided
    if mouse_cond is not None:
        kwargs['mouse_cond'] = mouse_cond
    if keyboard_cond is not None:
        kwargs['keyboard_cond'] = keyboard_cond
    if grid_sizes is not None:
        kwargs['grid_sizes'] = grid_sizes

    # Fold remaining keyword overrides into the sampling parameters.
    sampling_param.update(kwargs)

    # Batch path: a prompt file (from args or sampling params) takes
    # precedence over the single `prompt` argument.
    if self.fastvideo_args.prompt_txt is not None or sampling_param.prompt_path is not None:
        prompt_txt_path = sampling_param.prompt_path or self.fastvideo_args.prompt_txt
        if not os.path.exists(prompt_txt_path):
            raise FileNotFoundError(f"Prompt text file not found: {prompt_txt_path}")

        # Read prompts from file (one per line, blank lines skipped)
        with open(prompt_txt_path, encoding='utf-8') as f:
            prompts = [line.strip() for line in f if line.strip()]

        if not prompts:
            raise ValueError(f"No prompts found in file: {prompt_txt_path}")

        logger.info("Found %d prompts in %s", len(prompts), prompt_txt_path)

        results = []
        for i, batch_prompt in enumerate(prompts):
            logger.info("Processing prompt %d/%d: %s...", i + 1, len(prompts), batch_prompt[:100])
            try:
                # Generate video for this prompt using the same logic below
                output_path = self._prepare_output_path(sampling_param.output_path, batch_prompt)
                kwargs["output_path"] = output_path
                result = self._generate_single_video(prompt=batch_prompt, sampling_param=sampling_param, **kwargs)

                # Add prompt info to result
                result["prompt_index"] = i
                result["prompt"] = batch_prompt

                results.append(result)
                logger.info("Successfully generated video for prompt %d", i + 1)

            except Exception as e:
                # Best-effort batching: a failing prompt is logged and
                # skipped rather than aborting the whole batch.
                logger.error("Failed to generate video for prompt %d: %s", i + 1, e)
                continue

        logger.info("Completed batch processing. Generated %d videos successfully.", len(results))
        return results

    # Single prompt generation (original behavior)
    if prompt is None:
        raise ValueError("Either prompt or prompt_txt must be provided")
    output_path = self._prepare_output_path(sampling_param.output_path, prompt)
    kwargs["output_path"] = output_path
    return self._generate_single_video(prompt=prompt, sampling_param=sampling_param, **kwargs)
fastvideo.entrypoints.video_generator.VideoGenerator.shutdown
shutdown()

Shutdown the video generator.

Source code in fastvideo/entrypoints/video_generator.py
def shutdown(self):
    """
    Shutdown the video generator.

    Shuts down the executor, then drops the reference so its resources
    can be released. The generator must not be used afterwards.
    """
    self.executor.shutdown()
    del self.executor
fastvideo.entrypoints.video_generator.VideoGenerator.unmerge_lora_weights
unmerge_lora_weights() -> None

Use unmerged weights for inference to produce videos that align with validation videos generated during training.

Source code in fastvideo/entrypoints/video_generator.py
def unmerge_lora_weights(self) -> None:
    """
    Use unmerged weights for inference to produce videos that align with
    validation videos generated during training.
    """
    # Delegates to the executor, which applies the change to its workers.
    self.executor.unmerge_lora_weights()

Functions