# syntax=docker/dockerfile:1.6
#
# This Dockerfile is used to build the Llama Stack container image.
# Supports multi-architecture builds: linux/amd64, linux/arm64, and more
#
# Single-architecture build example:
# docker build \
#   -f containers/Containerfile \
#   --build-arg DISTRO_NAME=starter \
#   --tag llama-stack:starter .
#
# Multi-arch build and push example (creates a container index / manifest list):
# docker buildx build --platform linux/amd64,linux/arm64 \
#   --push \
#   -f containers/Containerfile \
#   --build-arg DISTRO_NAME=starter \
#   --tag docker.io/llamastack/distribution-starter:latest .

# The base image is parameterized so callers can substitute their own.
# Buildx resolves the correct platform for it automatically when
# --platform is passed on the command line.
ARG BASE_IMAGE=python:3.12-slim
FROM ${BASE_IMAGE}

# Build-time knobs.
# INSTALL_MODE selects the install path: pypi | test-pypi | editable.
ARG INSTALL_MODE="pypi"
# Source checkout used for editable installs (must be in the build context).
ARG LLAMA_STACK_DIR="/workspace"
# Optional local llama-stack-client checkout; empty means "use PyPI".
ARG LLAMA_STACK_CLIENT_DIR=""
# Optional exact version pins for PyPI / Test PyPI installs.
ARG PYPI_VERSION=""
ARG TEST_PYPI_VERSION=""
# Non-empty keeps /workspace in the final image even when it could be removed.
ARG KEEP_WORKSPACE=""
ARG DISTRO_NAME="starter"
ARG RUN_CONFIG_PATH=""
ARG UV_HTTP_TIMEOUT=500
# Extra index settings, re-applied only where RC dependencies are needed.
ARG UV_EXTRA_INDEX_URL=""
ARG UV_INDEX_STRATEGY=""

# Environment shared by every subsequent build step and by the runtime.
ENV UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT} \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1
WORKDIR /app

# Install OS-level tooling. Works on both dnf-based (UBI/Fedora) and
# apt-based (Debian/Ubuntu) bases; any other base image is rejected.
# Package lists are alphabetized for easier diffing.
# NOTE(review): `dnf -y update` upgrades every installed package at build
# time (hadolint DL3005 territory) — confirm this is intentional versus
# simply bumping the base image tag/digest.
RUN set -eux; \
    if command -v dnf >/dev/null 2>&1; then \
        dnf -y update && \
        dnf install -y \
            gcc gcc-c++ git iputils libpq-devel make net-tools \
            python3.12 python3.12-devel python3.12-pip \
            python3.12-setuptools python3.12-wheel vim-minimal wget && \
        ln -sf /usr/bin/pip3.12 /usr/local/bin/pip && \
        ln -sf /usr/bin/python3.12 /usr/local/bin/python && \
        dnf clean all; \
    elif command -v apt-get >/dev/null 2>&1; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            bubblewrap curl dnsutils g++ gcc git iproute2 iputils-ping \
            libpq-dev lsof net-tools procps psmisc telnet traceroute wget && \
        rm -rf /var/lib/apt/lists/*; \
    else \
        echo "Unsupported base image: expected dnf or apt-get" >&2; \
        exit 1; \
    fi

# Install uv, which drives all subsequent Python installs.
# `--no-cache-dir` is pip's documented flag; the original `--no-cache`
# only worked through optparse prefix abbreviation and is fragile
# (hadolint DL3042 expects the full spelling).
RUN pip install --no-cache-dir uv
# Let `uv pip` target the system interpreter instead of requiring a venv.
ENV UV_SYSTEM_PYTHON=1

# Re-export the build arguments as a single ENV layer so the entrypoint
# (and anyone inspecting the image) can see how it was built and which
# distro/config it defaults to at runtime.
ENV INSTALL_MODE=${INSTALL_MODE} \
    LLAMA_STACK_DIR=${LLAMA_STACK_DIR} \
    LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR} \
    PYPI_VERSION=${PYPI_VERSION} \
    TEST_PYPI_VERSION=${TEST_PYPI_VERSION} \
    KEEP_WORKSPACE=${KEEP_WORKSPACE} \
    DISTRO_NAME=${DISTRO_NAME} \
    RUN_CONFIG_PATH=${RUN_CONFIG_PATH}

# Bring the whole repository in so editable installs and bundled run
# configurations are available inside the image.
COPY . /workspace

# Optionally install a local llama-stack-client checkout. This must happen
# before llama-stack itself, because llama-stack depends on
# llama-stack-client-python. The UV index variables are cleared so only
# PyPI is consulted for the client's dependencies.
RUN set -eux; \
    unset UV_EXTRA_INDEX_URL UV_INDEX_STRATEGY; \
    if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then \
        [ -d "$LLAMA_STACK_CLIENT_DIR" ] || { \
            echo "LLAMA_STACK_CLIENT_DIR is set but $LLAMA_STACK_CLIENT_DIR does not exist" >&2; \
            exit 1; \
        }; \
        uv pip install --no-cache -e "$LLAMA_STACK_CLIENT_DIR"; \
    fi

# Install llama-stack itself. The UV index overrides are captured and
# cleared up front; they are re-applied only for the editable path, which
# may need an extra index to resolve release-candidate dependencies.
RUN set -eux; \
    saved_index_url="${UV_EXTRA_INDEX_URL:-}"; \
    saved_index_strategy="${UV_INDEX_STRATEGY:-}"; \
    unset UV_EXTRA_INDEX_URL UV_INDEX_STRATEGY; \
    case "$INSTALL_MODE" in \
        editable) \
            if [ ! -d "$LLAMA_STACK_DIR" ]; then \
                echo "INSTALL_MODE=editable requires LLAMA_STACK_DIR to point to a directory inside the build context" >&2; \
                exit 1; \
            fi; \
            if [ -n "$saved_index_url" ] && [ -n "$saved_index_strategy" ]; then \
                UV_EXTRA_INDEX_URL="$saved_index_url" UV_INDEX_STRATEGY="$saved_index_strategy" \
                    uv pip install --no-cache -e "$LLAMA_STACK_DIR"; \
            else \
                uv pip install --no-cache -e "$LLAMA_STACK_DIR"; \
            fi \
            ;; \
        test-pypi) \
            uv pip install --no-cache fastapi libcst; \
            uv pip install --no-cache \
                --extra-index-url https://test.pypi.org/simple/ \
                --index-strategy unsafe-best-match \
                "llama-stack${TEST_PYPI_VERSION:+==$TEST_PYPI_VERSION}" \
            ;; \
        *) \
            uv pip install --no-cache "llama-stack${PYPI_VERSION:+==$PYPI_VERSION}" \
            ;; \
    esac

# Resolve and install the distribution's dependency list via the freshly
# installed `llama` CLI. Index overrides stay unset so only PyPI is used.
# The command substitution keeps `set -e` semantics: a failing `list-deps`
# aborts the build.
RUN set -eux; \
    unset UV_EXTRA_INDEX_URL UV_INDEX_STRATEGY; \
    [ -n "$DISTRO_NAME" ] || { echo "DISTRO_NAME must be provided" >&2; exit 1; }; \
    deps="$(llama stack list-deps "$DISTRO_NAME")"; \
    [ -z "$deps" ] || printf '%s\n' "$deps" | xargs -L1 uv pip install --no-cache

# Install OpenTelemetry auto-instrumentation support; `opentelemetry-bootstrap`
# detects installed packages and installs matching instrumentation libraries.
# `--no-cache-dir` is pip's documented flag; the original `--no-cache` only
# worked through optparse prefix abbreviation (hadolint DL3042).
RUN set -eux; \
    pip install --no-cache-dir opentelemetry-distro opentelemetry-exporter-otlp; \
    opentelemetry-bootstrap -a install

# Trim the image: drop the uv bootstrap tool, and remove /workspace unless
# it is still needed — the caller asked to keep it, the install is editable
# (so the package points into /workspace), or the run config lives there.
RUN set -eux; \
    pip uninstall -y uv; \
    keep_workspace=0; \
    [ -z "$KEEP_WORKSPACE" ] || keep_workspace=1; \
    [ "$INSTALL_MODE" != "editable" ] || keep_workspace=1; \
    case "$RUN_CONFIG_PATH" in \
        /workspace*) keep_workspace=1 ;; \
        *) : ;; \
    esac; \
    if [ "$keep_workspace" -eq 0 ] && [ -d /workspace ]; then \
        rm -rf /workspace; \
    fi

# Generate the entrypoint script. It prefers an explicit run-config file,
# falls back to the baked-in distribution name, and otherwise hands the
# container arguments straight to `llama stack run`.
RUN cat <<'EOF' >/usr/local/bin/llama-stack-entrypoint.sh
#!/bin/sh
set -e

# Wrap the server with OpenTelemetry auto-instrumentation whenever any
# OTEL_* variable is present in the environment.
INSTRUMENT=""
if env | grep -q '^OTEL_'; then
  INSTRUMENT="opentelemetry-instrument"
fi

# Highest priority: an explicit run configuration file that actually exists.
if [ -n "$RUN_CONFIG_PATH" ] && [ -f "$RUN_CONFIG_PATH" ]; then
  exec $INSTRUMENT llama stack run "$RUN_CONFIG_PATH" "$@"
fi

# Next: the distribution this image was built for.
if [ -n "$DISTRO_NAME" ]; then
  exec $INSTRUMENT llama stack run "$DISTRO_NAME" "$@"
fi

# Fallback: whatever the caller passed on the command line.
exec $INSTRUMENT llama stack run "$@"
EOF
RUN chmod +x /usr/local/bin/llama-stack-entrypoint.sh

# Pre-create config/cache dirs at the filesystem root and grant group
# write access — presumably to support running under an arbitrary non-root
# UID whose supplementary group can write here (OpenShift-style); confirm
# against the deployment environment.
RUN mkdir -p /.llama /.cache && chmod -R g+rw /app /.llama /.cache

# NOTE(review): there is no USER directive, so the container starts as root
# unless the runtime overrides it — confirm this is intentional.
ENTRYPOINT ["/usr/local/bin/llama-stack-entrypoint.sh"]
