jenkinsfile

parent f754968ea9
commit ca08125f75

Dockerfile  | 136 changed
Jenkinsfile |  43 added
Dockerfile
@@ -1,18 +1,27 @@
 # syntax=docker/dockerfile:1
-# Argumenty budowania
+# Initialize device type args
+# use build args in the docker build command with --build-arg="BUILDARG=true"
 ARG USE_CUDA=false
 ARG USE_OLLAMA=false
+# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
 ARG USE_CUDA_VER=cu121
+
+# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
+# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
+# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
+# IMPORTANT: If you change the embedding model (sentence-transformers/all-MiniLM-L6-v2), you aren't able to use RAG Chat with documents embedded under the previous model in the WebUI! You need to re-embed them.
 ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
 ARG USE_RERANKING_MODEL=""

+# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
 ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"

 ARG BUILD_HASH=dev-build
+# Override at your own risk - non-root configurations are untested
 ARG UID=0
 ARG GID=0

-# Etap budowania frontendu
-FROM node:22-alpine3.20 AS build
+######## WebUI frontend ########
+FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
 ARG BUILD_HASH

 WORKDIR /app
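These build args are resolved once, at image build time, and the new FROM --platform=$BUILDPLATFORM line pins the frontend stage to the build host's native platform under BuildKit. As a minimal sketch, they might be passed like this (the tag and the chosen values are illustrative, not part of the commit):

    $ docker build \
        --build-arg USE_CUDA=true \
        --build-arg USE_CUDA_VER=cu121 \
        --build-arg USE_EMBEDDING_MODEL=intfloat/multilingual-e5-base \
        -t docker.cloud.pokash.pl/ably-webui:cuda .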
@@ -24,10 +33,10 @@ COPY . .
 ENV APP_BUILD_HASH=${BUILD_HASH}
 RUN npm run build

-# Etap budowania backendu
+######## WebUI backend ########
 FROM python:3.11-slim-bookworm AS base

-# Użyj argumentów
+# Use args
 ARG USE_CUDA
 ARG USE_OLLAMA
 ARG USE_CUDA_VER
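The renamed banners make the two-stage layout explicit: the build stage compiles the frontend under Node, and the base stage receives only its output. One consequence, sketched here with the standard --target flag (the tag is illustrative), is that the frontend stage can be built and debugged in isolation:

    $ docker build --target build -t ably-webui:frontend-only .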
@@ -36,35 +45,53 @@ ARG USE_RERANKING_MODEL
 ARG UID
 ARG GID

-# Ustawienia środowiskowe
+## Basis ##
 ENV ENV=prod \
     PORT=8080 \
+    # pass build args to the build
     USE_OLLAMA_DOCKER=${USE_OLLAMA} \
     USE_CUDA_DOCKER=${USE_CUDA} \
     USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
     USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
-    USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL} \
-    OLLAMA_BASE_URL="/ollama" \
-    OPENAI_API_BASE_URL="" \
-    OPENAI_API_KEY="" \
+    USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL}
+
+## Basis URL Config ##
+ENV OLLAMA_BASE_URL="/ollama" \
+    OPENAI_API_BASE_URL=""
+
+## API Key and Security Config ##
+ENV OPENAI_API_KEY="" \
     WEBUI_SECRET_KEY="" \
     SCARF_NO_ANALYTICS=true \
     DO_NOT_TRACK=true \
-    ANONYMIZED_TELEMETRY=false \
-    WHISPER_MODEL="base" \
-    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models" \
-    RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL" \
-    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL" \
-    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models" \
-    TIKTOKEN_ENCODING_NAME="cl100k_base" \
-    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken" \
-    HF_HOME="/app/backend/data/cache/embedding/models"
+    ANONYMIZED_TELEMETRY=false
+
+#### Other models #########################################################
+## whisper TTS model settings ##
+ENV WHISPER_MODEL="base" \
+    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
+
+## RAG Embedding model settings ##
+ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
+    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
+    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
+
+## Tiktoken model settings ##
+ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
+    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
+
+## Hugging Face download cache ##
+ENV HF_HOME="/app/backend/data/cache/embedding/models"
+
+## Torch Extensions ##
+# ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"
+
+#### Other models ##########################################################
+
 WORKDIR /app/backend

 ENV HOME=/root
-# Tworzenie użytkownika i grupy, jeśli nie root
+# Create user and group if not root
 RUN if [ $UID -ne 0 ]; then \
     if [ $GID -ne 0 ]; then \
     addgroup --gid $GID app; \
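Splitting the single ENV chain into themed blocks changes nothing at runtime; every value remains an image default that docker run -e can override. A hypothetical override of two of them (the host URL and model choice are examples only):

    $ docker run -d -p 3000:8080 \
        -e OLLAMA_BASE_URL=http://host.docker.internal:11434 \
        -e WHISPER_MODEL=small \
        docker.cloud.pokash.pl/ably-webui:latest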
@@ -75,40 +102,65 @@ RUN if [ $UID -ne 0 ]; then \
 RUN mkdir -p $HOME/.cache/chroma
 RUN echo -n 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id

-# Upewnij się, że użytkownik ma dostęp do katalogów
+# Make sure the user has access to the app and root directory
 RUN chown -R $UID:$GID /app $HOME

-# Instalacja zależności systemowych
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    git build-essential pandoc gcc netcat-openbsd curl jq \
-    python3-dev ffmpeg libsm6 libxext6 && \
-    if [ "$USE_OLLAMA" = "true" ]; then \
-    curl -fsSL https://ollama.com/install.sh | sh; \
-    fi && \
-    rm -rf /var/lib/apt/lists/*
+RUN if [ "$USE_OLLAMA" = "true" ]; then \
+    apt-get update && \
+    # Install pandoc and netcat
+    apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
+    apt-get install -y --no-install-recommends gcc python3-dev && \
+    # for RAG OCR
+    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
+    # install helper tools
+    apt-get install -y --no-install-recommends curl jq && \
+    # install ollama
+    curl -fsSL https://ollama.com/install.sh | sh && \
+    # cleanup
+    rm -rf /var/lib/apt/lists/*; \
+    else \
+    apt-get update && \
+    # Install pandoc, netcat and gcc
+    apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
+    apt-get install -y --no-install-recommends gcc python3-dev && \
+    # for RAG OCR
+    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
+    # cleanup
+    rm -rf /var/lib/apt/lists/*; \
+    fi

-# Instalacja zależności Pythona
+# install python dependencies
 COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt

 RUN pip3 install uv && \
     if [ "$USE_CUDA" = "true" ]; then \
-    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir; \
-    else \
-    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir; \
-    fi && \
+    # If you use CUDA the whisper and embedding model will be downloaded on first use
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
     uv pip install --system -r requirements.txt --no-cache-dir && \
     python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
-    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
-    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])" && \
+    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
+    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
+    else \
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
+    uv pip install --system -r requirements.txt --no-cache-dir && \
+    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
+    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
+    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
+    fi; \
     chown -R $UID:$GID /app/backend/data/

-# Kopiowanie plików frontendowych
+
+# copy embedding weight from build
+# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
+# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx
+
+# copy built frontend files
 COPY --chown=$UID:$GID --from=build /app/build /app/build
 COPY --chown=$UID:$GID --from=build /app/CHANGELOG.md /app/CHANGELOG.md
 COPY --chown=$UID:$GID --from=build /app/package.json /app/package.json

-# Kopiowanie plików backendowych
+# copy backend files
 COPY --chown=$UID:$GID ./backend .

 EXPOSE 8080
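The install step now branches once on USE_OLLAMA instead of testing it mid-run, and both branches still pre-download the embedding, Whisper, and tiktoken models at build time. A hypothetical smoke test that a CPU image really has the models baked in (the tag and the HF_HUB_OFFLINE guard are illustrative; huggingface_hub should fall back to the local cache when offline):

    $ docker build --build-arg USE_OLLAMA=false -t ably-webui:cpu .
    $ docker run --rm --network none -e HF_HUB_OFFLINE=1 ably-webui:cpu \
        python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')"

If the cache under SENTENCE_TRANSFORMERS_HOME were missing, this offline run would fail instead of loading the model.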
@@ -121,4 +173,4 @@ ARG BUILD_HASH
 ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
 ENV DOCKER=true

 CMD [ "bash", "start.sh"]
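WEBUI_BUILD_VERSION is stamped from the BUILD_HASH arg, which defaults to dev-build. A sketch of wiring it to the actual commit and verifying the result (the tag is illustrative):

    $ docker build --build-arg BUILD_HASH=$(git rev-parse --short HEAD) -t ably-webui:dev .
    $ docker run --rm ably-webui:dev printenv WEBUI_BUILD_VERSION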
Jenkinsfile (new file)
@@ -0,0 +1,43 @@
+pipeline {
+    agent any
+
+    environment {
+        DOCKER_IMAGE = 'docker.cloud.pokash.pl/ably-webui'
+        DOCKER_REGISTRY = 'docker.cloud.pokash.pl'
+        GIT_REPO = 'https://repo.pokash.pl/POKASH.PL/ably.do.git'
+        REGISTRY_CREDENTIALS_ID = '2753fc17-5ad1-4c78-b86a-a3e54c543adc' // credentials ID for the local registry
+    }
+
+    stages {
+        stage('Checkout') {
+            steps {
+                git url: "${GIT_REPO}", branch: 'main/webui'
+            }
+        }
+
+        stage('Build Docker Image') {
+            steps {
+                script {
+                    docker.build("${DOCKER_IMAGE}:latest")
+                }
+            }
+        }
+
+        stage('Push Docker Image') {
+            steps {
+                script {
+                    // log in to the local registry
+                    docker.withRegistry("http://${DOCKER_REGISTRY}", "${REGISTRY_CREDENTIALS_ID}") {
+                        docker.image("${DOCKER_IMAGE}:latest").push('latest')
+                    }
+                }
+            }
+        }
+    }
+
+    post {
+        always {
+            cleanWs() // clean the workspace after the build finishes
+        }
+    }
+}
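The pipeline amounts to a checkout, an image build, and a push to the private registry; roughly the same steps by hand would look like the sketch below (the login credentials are whatever the Jenkins credentials ID resolves to). Note that withRegistry is given an http:// URL, so the Docker daemon must either serve docker.cloud.pokash.pl over TLS or list it under insecure-registries.

    $ git clone -b main/webui https://repo.pokash.pl/POKASH.PL/ably.do.git
    $ cd ably.do
    $ docker build -t docker.cloud.pokash.pl/ably-webui:latest .
    $ docker login docker.cloud.pokash.pl
    $ docker push docker.cloud.pokash.pl/ably-webui:latest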