jenkinsfile

This commit is contained in:
l.gabrysiak 2025-03-01 00:19:51 +01:00
parent f754968ea9
commit ca08125f75
2 changed files with 137 additions and 42 deletions

Dockerfile

@@ -1,18 +1,27 @@
# syntax=docker/dockerfile:1
# Build arguments
# Initialize device type args
# use build args in the docker build command with --build-arg="BUILDARG=true"
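# For example (image tag illustrative, not fixed by this file):
#   docker build --build-arg USE_CUDA=true --build-arg USE_OLLAMA=false -t open-webui .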
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
ARG USE_CUDA_VER=cu121
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
# IMPORTANT: If you change the embedding model (default: sentence-transformers/all-MiniLM-L6-v2), RAG Chat will no longer work with documents previously loaded in the WebUI; you need to re-embed them.
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
ARG USE_RERANKING_MODEL=""
# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
ARG BUILD_HASH=dev-build
# Override at your own risk - non-root configurations are untested
ARG UID=0
ARG GID=0
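# e.g. --build-arg UID=1000 --build-arg GID=1000 for a non-root build (untested, per the note above)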
# Frontend build stage
FROM node:22-alpine3.20 AS build
######## WebUI frontend ########
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
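# $BUILDPLATFORM pins this stage to the builder's native architecture, so the
# frontend compiles without emulation during cross-builds, e.g.:
#   docker buildx build --platform linux/arm64 .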
ARG BUILD_HASH
WORKDIR /app
@@ -24,10 +33,10 @@ COPY . .
ENV APP_BUILD_HASH=${BUILD_HASH}
RUN npm run build
# Backend build stage
######## WebUI backend ########
FROM python:3.11-slim-bookworm AS base
# Use the arguments
# Use args
ARG USE_CUDA
ARG USE_OLLAMA
ARG USE_CUDA_VER
@@ -36,35 +45,53 @@ ARG USE_RERANKING_MODEL
ARG UID
ARG GID
# Environment settings
## Basis ##
ENV ENV=prod \
    PORT=8080 \
    # pass build args to the build
    USE_OLLAMA_DOCKER=${USE_OLLAMA} \
    USE_CUDA_DOCKER=${USE_CUDA} \
    USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
    USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
    USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL} \
    OLLAMA_BASE_URL="/ollama" \
    OPENAI_API_BASE_URL="" \
    OPENAI_API_KEY="" \
    USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL}
## Basis URL Config ##
ENV OLLAMA_BASE_URL="/ollama" \
    OPENAI_API_BASE_URL=""
## API Key and Security Config ##
ENV OPENAI_API_KEY="" \
    WEBUI_SECRET_KEY="" \
    SCARF_NO_ANALYTICS=true \
    DO_NOT_TRACK=true \
    ANONYMIZED_TELEMETRY=false \
    WHISPER_MODEL="base" \
    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models" \
    RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL" \
    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models" \
    TIKTOKEN_ENCODING_NAME="cl100k_base" \
    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken" \
    HF_HOME="/app/backend/data/cache/embedding/models"
    ANONYMIZED_TELEMETRY=false
#### Other models #########################################################
## whisper TTS model settings ##
ENV WHISPER_MODEL="base" \
    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
## RAG Embedding model settings ##
ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
## Tiktoken model settings ##
ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
## Hugging Face download cache ##
ENV HF_HOME="/app/backend/data/cache/embedding/models"
## Torch Extensions ##
# ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"
#### Other models ##########################################################
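# All of the ENV defaults above can be overridden at container start, e.g.
# (values illustrative): docker run -e OLLAMA_BASE_URL=http://host:11434 ...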
WORKDIR /app/backend
ENV HOME=/root
# Create a user and group when not running as root
# Create user and group if not root
RUN if [ $UID -ne 0 ]; then \
    if [ $GID -ne 0 ]; then \
    addgroup --gid $GID app; \
@@ -75,40 +102,65 @@ RUN if [ $UID -ne 0 ]; then \
RUN mkdir -p $HOME/.cache/chroma
RUN echo -n 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id
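# (The fixed all-zero id above prevents Chroma from generating a unique per-install telemetry id.)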
# Make sure the user has access to the directories
# Make sure the user has access to the app and root directory
RUN chown -R $UID:$GID /app $HOME
# Install system dependencies
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    git build-essential pandoc gcc netcat-openbsd curl jq \
    python3-dev ffmpeg libsm6 libxext6 && \
    if [ "$USE_OLLAMA" = "true" ]; then \
    curl -fsSL https://ollama.com/install.sh | sh; \
    fi && \
    rm -rf /var/lib/apt/lists/*
RUN if [ "$USE_OLLAMA" = "true" ]; then \
    apt-get update && \
    # Install pandoc and netcat
    apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
    apt-get install -y --no-install-recommends gcc python3-dev && \
    # for RAG OCR
    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
    # install helper tools
    apt-get install -y --no-install-recommends curl jq && \
    # install ollama
    curl -fsSL https://ollama.com/install.sh | sh && \
    # cleanup
    rm -rf /var/lib/apt/lists/*; \
    else \
    apt-get update && \
    # Install pandoc, netcat and gcc
    apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
    apt-get install -y --no-install-recommends gcc python3-dev && \
    # for RAG OCR
    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
    # cleanup
    rm -rf /var/lib/apt/lists/*; \
    fi
# Install Python dependencies
# install python dependencies
COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt
RUN pip3 install uv && \
    if [ "$USE_CUDA" = "true" ]; then \
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir; \
    else \
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir; \
    fi && \
    # If you use CUDA, the Whisper and embedding models will be downloaded on first use
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    else \
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    fi; \
    chown -R $UID:$GID /app/backend/data/
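# The python -c one-liners above pre-download the embedding, Whisper and
# tiktoken models into the image at build time, so the container can start
# without fetching them on first run.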
# Copy frontend files
# copy embedding weight from build
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx
# copy built frontend files
COPY --chown=$UID:$GID --from=build /app/build /app/build
COPY --chown=$UID:$GID --from=build /app/CHANGELOG.md /app/CHANGELOG.md
COPY --chown=$UID:$GID --from=build /app/package.json /app/package.json
# Copy backend files
# copy backend files
COPY --chown=$UID:$GID ./backend .
EXPOSE 8080
@@ -121,4 +173,4 @@ ARG BUILD_HASH
ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
ENV DOCKER=true
CMD [ "bash", "start.sh"]
CMD [ "bash", "start.sh"]

Jenkinsfile (vendored, new file, 43 lines)

@@ -0,0 +1,43 @@
pipeline {
    agent any

    environment {
        DOCKER_IMAGE = 'docker.cloud.pokash.pl/ably-webui'
        DOCKER_REGISTRY = 'docker.cloud.pokash.pl'
        GIT_REPO = 'https://repo.pokash.pl/POKASH.PL/ably.do.git'
        REGISTRY_CREDENTIALS_ID = '2753fc17-5ad1-4c78-b86a-a3e54c543adc' // Credentials ID for the local registry
    }

    stages {
        stage('Checkout') {
            steps {
                git url: "${GIT_REPO}", branch: 'main/webui'
            }
        }

        stage('Build Docker Image') {
            steps {
                script {
                    docker.build("${DOCKER_IMAGE}:latest")
                }
            }
        }

        stage('Push Docker Image') {
            steps {
                script {
                    // Log in to the local registry
                    docker.withRegistry("http://${DOCKER_REGISTRY}", "${REGISTRY_CREDENTIALS_ID}") {
                        docker.image("${DOCKER_IMAGE}:latest").push('latest')
                    }
                }
            }
        }
    }

    post {
        always {
            cleanWs() // Clean up the workspace after the build finishes
        }
    }
}
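A possible refinement, sketched here only as an illustration (not part of this commit): forward the git commit into the Dockerfile's BUILD_HASH build arg and push a commit-tagged image next to latest, so deployed versions stay traceable:

stage('Build Docker Image') {
    steps {
        script {
            // Hypothetical variant of the build stage above.
            def shortHash = env.GIT_COMMIT.take(7)  // set by the git checkout step
            def image = docker.build("${DOCKER_IMAGE}:${shortHash}",
                "--build-arg BUILD_HASH=${shortHash} .")  // build context stays '.'
            docker.withRegistry("http://${DOCKER_REGISTRY}", "${REGISTRY_CREDENTIALS_ID}") {
                image.push(shortHash)
                image.push('latest')
            }
        }
    }
}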