Skip to content

openai

Modules

fastvideo.entrypoints.openai.api_server

Classes

Functions

fastvideo.entrypoints.openai.api_server.create_app
create_app(fastvideo_args: FastVideoArgs, output_dir: str = DEFAULT_OUTPUT_DIR) -> FastAPI

Build the FastAPI application with all routers mounted

Source code in fastvideo/entrypoints/openai/api_server.py
def create_app(
    fastvideo_args: FastVideoArgs,
    output_dir: str = DEFAULT_OUTPUT_DIR,
) -> FastAPI:
    """Assemble the FastAPI app: state, CORS, routers, and a health probe."""

    application = FastAPI(
        title="FastVideo OpenAI-Compatible API",
        version="0.1.0",
        lifespan=lifespan,
    )
    # Stash args/output_dir on app.state so `lifespan` can read them at startup.
    application.state.fastvideo_args = fastvideo_args
    application.state.output_dir = output_dir

    # NOTE(review): wildcard origins combined with allow_credentials=True is a
    # very permissive CORS policy — confirm this is intended for deployment.
    application.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Routers are imported here (not at module top) — presumably to avoid
    # import cycles at module load; confirm before hoisting.
    from fastvideo.entrypoints.openai.common_api import router as common_router
    from fastvideo.entrypoints.openai.image_api import router as image_router
    from fastvideo.entrypoints.openai.video_api import router as video_router

    for sub_router in (common_router, video_router, image_router):
        application.include_router(sub_router)

    @application.get("/health")
    async def health():
        # Lightweight liveness probe; does not touch the model.
        return {"status": "ok"}

    return application
fastvideo.entrypoints.openai.api_server.lifespan async
lifespan(app: FastAPI) -> AsyncIterator[None]

Load model on startup, clean up on shutdown

Source code in fastvideo/entrypoints/openai/api_server.py
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncIterator[None]:
    """Initialize the generator before serving; release it on shutdown."""
    server_args: FastVideoArgs = app.state.fastvideo_args
    out_dir: str = app.state.output_dir

    logger.info("Loading model from %s ...", server_args.model_path)
    video_generator = VideoGenerator.from_fastvideo_args(server_args)
    logger.info("Model loaded successfully.")

    # Publish the generator/args/output-dir as module-level server state.
    set_state(video_generator, server_args, out_dir)

    # Control returns to the server loop while requests are handled.
    yield

    logger.info("Shutting down — releasing model resources ...")
    video_generator.shutdown()
    clear_state()
    logger.info("Shutdown complete.")
fastvideo.entrypoints.openai.api_server.run_server
run_server(fastvideo_args: FastVideoArgs, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT, output_dir: str = DEFAULT_OUTPUT_DIR)

Create the app and run it with uvicorn

Source code in fastvideo/entrypoints/openai/api_server.py
def run_server(
    fastvideo_args: FastVideoArgs,
    host: str = DEFAULT_HOST,
    port: int = DEFAULT_PORT,
    output_dir: str = DEFAULT_OUTPUT_DIR,
):
    """Build the application and serve it with uvicorn (blocking call)."""
    application = create_app(fastvideo_args, output_dir=output_dir)

    logger.info("Starting FastVideo server on %s:%d", host, port)
    logger.info("Model: %s", fastvideo_args.model_path)

    # Generous keep-alive (300 s) — presumably for long-running generation
    # requests; confirm against expected request durations.
    uvicorn.run(
        application,
        host=host,
        port=port,
        log_level="info",
        timeout_keep_alive=300,
    )

fastvideo.entrypoints.openai.common_api

Classes

fastvideo.entrypoints.openai.common_api.ModelCard

Bases: BaseModel

OpenAI-compatible model card

Functions

fastvideo.entrypoints.openai.common_api.available_models async
available_models()

Show available models

Source code in fastvideo/entrypoints/openai/common_api.py
@router.get("/models", response_class=ORJSONResponse)
async def available_models():
    """List the single hosted model as an OpenAI-style model list."""
    server_args = get_server_args()
    model_card = ModelCard(id=server_args.model_path, root=server_args.model_path)
    return {"object": "list", "data": [model_card.model_dump()]}
fastvideo.entrypoints.openai.common_api.model_info async
model_info()

Get basic model information

Source code in fastvideo/entrypoints/openai/common_api.py
@router.get("/model_info")
async def model_info():
    """Return the path of the model this server was started with."""
    return {"model_path": get_server_args().model_path}
fastvideo.entrypoints.openai.common_api.retrieve_model async
retrieve_model(model: str)

Retrieve a model by name

Source code in fastvideo/entrypoints/openai/common_api.py
@router.get("/models/{model:path}", response_class=ORJSONResponse)
async def retrieve_model(model: str):
    """Return the model card for *model*, or an OpenAI-style 404 error."""
    server_args = get_server_args()
    # Success path first: the only valid name is the configured model path.
    if model == server_args.model_path:
        return ModelCard(id=model, root=model).model_dump()
    error_payload = {
        "error": {
            "message": f"The model '{model}' does not exist",
            "type": "invalid_request_error",
            "param": "model",
            "code": "model_not_found",
        }
    }
    return ORJSONResponse(status_code=404, content=error_payload)

fastvideo.entrypoints.openai.image_api

Functions

fastvideo.entrypoints.openai.protocol

Functions

fastvideo.entrypoints.openai.protocol.generate_request_id
generate_request_id() -> str

Generate a unique request ID

Source code in fastvideo/entrypoints/openai/protocol.py
def generate_request_id() -> str:
    """Return a fresh 32-character lowercase-hex request identifier."""
    request_uuid = uuid.uuid4()
    return request_uuid.hex

fastvideo.entrypoints.openai.state

Global server state shared across API modules.

Keeping state in a dedicated module prevents the classic 'main vs package module' duplication that occurs when api_server.py is run with python -m. All modules that need the generator or server args should import from here.

Classes

Functions

fastvideo.entrypoints.openai.state.clear_state
clear_state() -> None

Clear server state on shutdown.

Source code in fastvideo/entrypoints/openai/state.py
def clear_state() -> None:
    """Drop the global generator/args references at shutdown."""
    global _generator, _fastvideo_args
    # Release both singletons together so no stale handle survives shutdown.
    _generator, _fastvideo_args = None, None
fastvideo.entrypoints.openai.state.get_generator
get_generator() -> VideoGenerator

Return the global VideoGenerator instance (set during startup).

Source code in fastvideo/entrypoints/openai/state.py
def get_generator() -> VideoGenerator:
    """Return the global VideoGenerator instance (set during startup).

    Raises:
        RuntimeError: if called before the server state was initialized
            via set_state().
    """
    # Explicit raise instead of `assert`: assertions are stripped under
    # `python -O`, which would silently return None here.
    if _generator is None:
        raise RuntimeError("Server not initialized — generator is None")
    return _generator
fastvideo.entrypoints.openai.state.get_output_dir
get_output_dir() -> str

Return the configured output directory.

Source code in fastvideo/entrypoints/openai/state.py
def get_output_dir() -> str:
    """Return the output directory configured at startup.

    NOTE(review): unlike get_generator()/get_server_args(), this has no
    not-initialized guard — presumably a module-level default exists for
    `_output_dir`; confirm in state.py.
    """
    return _output_dir
fastvideo.entrypoints.openai.state.get_server_args
get_server_args() -> FastVideoArgs

Return the global FastVideoArgs (set during startup).

Source code in fastvideo/entrypoints/openai/state.py
def get_server_args() -> FastVideoArgs:
    """Return the global FastVideoArgs (set during startup).

    Raises:
        RuntimeError: if called before the server state was initialized
            via set_state().
    """
    # Explicit raise instead of `assert`: assertions are stripped under
    # `python -O`, which would silently return None here.
    if _fastvideo_args is None:
        raise RuntimeError("Server not initialized — args is None")
    return _fastvideo_args
fastvideo.entrypoints.openai.state.set_state
set_state(generator: VideoGenerator, fastvideo_args: FastVideoArgs, output_dir: str) -> None

Set all server state at once (called from lifespan).

Source code in fastvideo/entrypoints/openai/state.py
def set_state(
    generator: VideoGenerator,
    fastvideo_args: FastVideoArgs,
    output_dir: str,
) -> None:
    """Publish the server-wide singletons; invoked once from lifespan()."""
    global _generator, _fastvideo_args, _output_dir
    # Single tuple assignment: all three pieces of state change together.
    _generator, _fastvideo_args, _output_dir = generator, fastvideo_args, output_dir

fastvideo.entrypoints.openai.stores

Classes

fastvideo.entrypoints.openai.stores.AsyncDictStore
AsyncDictStore()

A small async-safe in-memory key-value store for dict items.

This encapsulates the usual pattern of a module-level dict guarded by an asyncio.Lock and provides simple CRUD methods that are safe to call concurrently from FastAPI request handlers and background tasks.

Source code in fastvideo/entrypoints/openai/stores.py
def __init__(self) -> None:
    # One lock guarding the backing dict so concurrent coroutines cannot
    # interleave mutations of the store.
    self._lock = asyncio.Lock()
    self._items: dict[str, dict[str, Any]] = {}

fastvideo.entrypoints.openai.utils

Functions

fastvideo.entrypoints.openai.utils.choose_image_ext
choose_image_ext(output_format: str | None, background: str | None) -> str

Pick a file extension for image outputs

Source code in fastvideo/entrypoints/openai/utils.py
def choose_image_ext(output_format: str | None, background: str | None) -> str:
    """Pick a file extension for image outputs"""
    fmt = (output_format or "").lower()
    if fmt in {"png", "webp", "jpeg", "jpg"}:
        return "jpg" if fmt == "jpeg" else fmt
    if (background or "auto").lower() == "transparent":
        return "png"
    return "jpg"
fastvideo.entrypoints.openai.utils.merge_image_input_list
merge_image_input_list(*inputs: list | Any | None) -> list

Merge multiple image input sources into a single flat list

Source code in fastvideo/entrypoints/openai/utils.py
def merge_image_input_list(*inputs: list | Any | None) -> list:
    """Merge multiple image input sources into a single flat list"""
    result = []
    for input_item in inputs:
        if input_item is not None:
            if isinstance(input_item, list):
                result.extend(input_item)
            else:
                result.append(input_item)
    return result
fastvideo.entrypoints.openai.utils.parse_size
parse_size(size: str) -> tuple[int, int] | tuple[None, None]

Parse a 'WIDTHxHEIGHT' string into (width, height)

Source code in fastvideo/entrypoints/openai/utils.py
def parse_size(size: str) -> tuple[int, int] | tuple[None, None]:
    """Parse a 'WIDTHxHEIGHT' string into (width, height)"""
    try:
        parts = size.lower().replace(" ", "").split("x")
        if len(parts) != 2:
            raise ValueError
        w, h = int(parts[0]), int(parts[1])
        return w, h
    except Exception:
        return None, None
fastvideo.entrypoints.openai.utils.save_image_to_path async
save_image_to_path(image: UploadFile | str, target_path: str) -> str

Save an uploaded file or download from URL to target_path

Source code in fastvideo/entrypoints/openai/utils.py
async def save_image_to_path(image: UploadFile | str, target_path: str) -> str:
    """Persist *image* (URL or upload) to *target_path*; return the saved path."""
    # Try interpreting the input as a URL first; on None, fall back to
    # treating it as an uploaded file.
    from_url = await _maybe_url_image(image, target_path)
    if from_url is not None:
        return from_url
    return await _save_upload_to_path(image, target_path)

fastvideo.entrypoints.openai.video_api

Functions