mirror of
https://github.com/stellarshenson/stellars-jupyterhub-ds.git
synced 2026-03-09 06:30:29 +00:00
first build
This commit is contained in:
80
Makefile
Normal file
80
Makefile
Normal file
@@ -0,0 +1,80 @@
|
||||
# This makefile helps build, push and run the jupyterhub

#################################################################################
# GLOBALS                                                                       #
#################################################################################

.DEFAULT_GOAL := help

# every target is a command, not a file -- declare all of them phony
# (pull and build_verbose were missing before and would break if files
# with those names ever appeared)
.PHONY: help build build_verbose pull push start clean

#################################################################################
# COMMANDS                                                                      #
#################################################################################

## build docker containers
build:
	@cd ./bin && ./build.sh

## build docker containers and output logs
build_verbose:
	@cd ./bin && ./build_verbose.sh

## pull docker image from dockerhub
pull:
	docker pull stellars/stellars-jupyterhub-ds:latest

## push docker containers to repo
push:
	docker push stellars/stellars-jupyterhub-ds:latest

## start jupyterlab (fg)
start:
	@cd ./bin && ./start.sh

## clean orphaned containers
clean:
	@echo 'removing dangling and unused images, containers, nets and volumes'
	@docker compose --env-file .env -f compose.yml down --remove-orphans
	@docker image prune -f

## prints the list of available commands
# sed collects each `## ` description with the target name that follows it;
# awk word-wraps the description column to the terminal width
help:
	@echo ""
	@echo "$$(tput bold)Available rules:$$(tput sgr0)"
	@sed -n -e "/^## / { \
		h; \
		s/.*//; \
		:doc" \
		-e "H; \
		n; \
		s/^## //; \
		t doc" \
		-e "s/:.*//; \
		G; \
		s/\\n## /---/; \
		s/\\n/ /g; \
		p; \
	}" ${MAKEFILE_LIST} \
	| LC_ALL='C' sort --ignore-case \
	| awk -F '---' \
		-v ncol=$$(tput cols) \
		-v indent=19 \
		-v col_on="$$(tput setaf 6)" \
		-v col_off="$$(tput sgr0)" \
	'{ \
		printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
		n = split($$2, words, " "); \
		line_length = ncol - indent; \
		for (i = 1; i <= n; i++) { \
			line_length -= length(words[i]) + 1; \
			if (line_length <= 0) { \
				line_length = ncol - indent - length(words[i]) - 1; \
				printf "\n%*s ", -indent, " "; \
			} \
			printf "%s ", words[i]; \
		} \
		printf "\n"; \
	}'

# EOF
|
||||
1
bin/build.bat
Executable file
1
bin/build.bat
Executable file
@@ -0,0 +1 @@
|
||||
docker.exe compose -f ..\compose.yml build
|
||||
4
bin/build.sh
Executable file
4
bin/build.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/sh
# Build the jupyterhub images with docker compose.
# Images are always built for linux/amd64, regardless of host architecture,
# and the compose "bake" build path is disabled.
DOCKER_DEFAULT_PLATFORM=linux/amd64
COMPOSE_BAKE=false
export DOCKER_DEFAULT_PLATFORM COMPOSE_BAKE
docker compose -f ../compose.yml build
|
||||
1
bin/build_verbose.bat
Executable file
1
bin/build_verbose.bat
Executable file
@@ -0,0 +1 @@
|
||||
docker.exe compose -f ..\compose.yml build --progress=plain
|
||||
4
bin/build_verbose.sh
Executable file
4
bin/build_verbose.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/sh
# Build the jupyterhub images with docker compose, streaming the full
# (plain, unfolded) build log. Images are always built for linux/amd64
# and the compose "bake" build path is disabled.
DOCKER_DEFAULT_PLATFORM=linux/amd64
COMPOSE_BAKE=false
export DOCKER_DEFAULT_PLATFORM COMPOSE_BAKE
docker compose -f ../compose.yml build --progress=plain
|
||||
23
bin/start.bat
Executable file
23
bin/start.bat
Executable file
@@ -0,0 +1,23 @@
|
||||
@echo off
REM Launch the jupyterhub stack, adding the GPU compose overlay when an
REM NVIDIA video controller is present (only NVIDIA detection is supported).

REM Run from the directory containing this script
cd /d "%~dp0"

REM Probe for an NVIDIA GPU; findstr sets errorlevel 0 on a match
wmic path win32_VideoController get name | findstr /i "NVIDIA" >nul
set GPU_CHECK=%errorlevel%

REM Bring the stack up with or without the GPU overlay
if %GPU_CHECK% equ 0 (
    echo NVIDIA GPU is available
    docker.exe compose --env-file ..\project.env -f ..\compose.yml -f ..\compose-gpu.yml up --no-recreate --no-build -d
) else (
    echo NVIDIA GPU is not available
    docker.exe compose --env-file ..\project.env -f ..\compose.yml up --no-recreate --no-build -d
)

REM EOF
|
||||
|
||||
|
||||
29
bin/start.sh
Executable file
29
bin/start.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/sh
# Start the jupyterhub platform. The GPU compose overlay (compose-gpu.yml)
# is added only when a working NVIDIA GPU is detected via nvidia-smi.

# resolve the directory this script lives in so the relative ../ paths work
CURRENT_FILE=$(readlink -f "$0")
CURRENT_DIR=$(dirname "$CURRENT_FILE")
cd "$CURRENT_DIR" || exit 1

# compose files to use; the GPU overlay is appended below when available
COMPOSE_FILES="-f ../compose.yml"

# Check if nvidia-smi is available.
# NOTE: the previous `&> /dev/null` is a bashism and does not redirect
# under #!/bin/sh -- use POSIX `> /dev/null 2>&1` instead.
if command -v nvidia-smi > /dev/null 2>&1; then
    if nvidia-smi > /dev/null 2>&1; then
        echo "Nvidia GPU found."
        COMPOSE_FILES="$COMPOSE_FILES -f ../compose-gpu.yml"
    else
        echo "Nvidia GPU not found."
    fi
else
    echo "nvidia-smi command not found. Nvidia GPU not available."
fi

# single docker invocation (the three near-identical copies collapsed);
# COMPOSE_FILES is intentionally left unquoted so it word-splits into flags
docker compose --env-file ../project.env \
    $COMPOSE_FILES \
    up --no-recreate --no-build -d

# EOF
|
||||
22
build/Dockerfile.jupyterhub
Normal file
22
build/Dockerfile.jupyterhub
Normal file
@@ -0,0 +1,22 @@
|
||||
##############################################################################################
# JupyterHub image: official base plus the spawner/authenticator used by this platform       #
##############################################################################################

# NOTE(review): pinning a specific jupyterhub version instead of :latest
# would make builds reproducible -- confirm whether :latest is intentional.
FROM jupyterhub/jupyterhub:latest AS target

# File Author / Maintainer
LABEL maintainer="Konrad Jelen <konrad.jelenext@delaval.com>"

# Install dockerspawner, nativeauthenticator
# hadolint ignore=DL3013
RUN python3 -m pip install --no-cache-dir \
    dockerspawner \
    jupyterhub-nativeauthenticator

# expose ports
EXPOSE 8000

# run with the provided config file
CMD ["jupyterhub", "-f", "/srv/jupyterhub/jupyterhub_config.py"]

# EOF
|
||||
|
||||
0
build/conf/.gitkeep
Normal file
0
build/conf/.gitkeep
Normal file
47
build/conf/bin/mkcert.sh
Normal file
47
build/conf/bin/mkcert.sh
Normal file
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
# Generate a self-signed TLS certificate and private key.
#
# Usage:   mkcert.sh <certificate_directory> <certificate_prefix>
# Creates: <dir>/<prefix>.crt and <dir>/<prefix>.key (valid 365 days),
#          then prints JupyterLab configuration hints.

# Validate input arguments
if [ -z "$1" ] || [ -z "$2" ]; then
    echo "Usage: $0 <certificate_directory> <certificate_prefix>"
    echo "Example: $0 /etc/ssl/mycerts mydomain"
    exit 1
fi

# Directory where the certificates will be stored is provided as 1st argument
CERT_DIR="$1"
CERT_PREFIX="$2"
mkdir -p "$CERT_DIR"

# Certificate details: the CN is simply the prefix
COMMON_NAME="${CERT_PREFIX}"

# Certificate file names (all expansions quoted: CERT_DIR may contain spaces)
CERT_FILE="${CERT_DIR}/${CERT_PREFIX}.crt"
KEY_FILE="${CERT_DIR}/${CERT_PREFIX}.key"
CSR_FILE="${CERT_DIR}/${CERT_PREFIX}.csr"

# Generate the private key
openssl genrsa -out "$KEY_FILE" 2048

# Generate the certificate signing request (CSR)
openssl req -new -key "$KEY_FILE" -out "$CSR_FILE" \
    -subj "/CN=$COMMON_NAME"

# Generate the self-signed certificate
openssl x509 -req -days 365 -in "$CSR_FILE" -signkey "$KEY_FILE" -out "$CERT_FILE"

# Clean up the CSR (only needed as an intermediate artifact)
rm "$CSR_FILE"

# Restrict permissions so the private key is not world readable
chmod 600 "$CERT_FILE" "$KEY_FILE"

# Output the paths of the generated certificate and key
echo "Certificate: $CERT_FILE"
echo "Key: $KEY_FILE"

# Instructions for configuring JupyterLab
echo "To configure JupyterLab with these certificates, add the following to your jupyter_notebook_config.py:"
echo "c.ServerApp.certfile = u'$CERT_FILE'"
echo "c.ServerApp.keyfile = u'$KEY_FILE'"

#EOF
|
||||
9
build/conf/bin/mkchksum.sh
Normal file
9
build/conf/bin/mkchksum.sh
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/sh
# Write sha256 checksums of every file under a source tree into a target file.
# Usage: mkchksum.sh <source_dir> <target_file>

# validate arguments (previously missing: empty args produced a broken find)
if [ -z "$1" ] || [ -z "$2" ]; then
    echo "Usage: $0 <source_dir> <target_file>"
    exit 1
fi

SRCDIR="$1"
TGTFILE="$2"
TGTDIR=$(dirname "$TGTFILE")

# create dir holding the checksum file
mkdir -p "$TGTDIR"

# one "<hash>  <path>" line per regular file
find "$SRCDIR" -type f -exec sha256sum {} \; > "$TGTFILE"
|
||||
|
||||
15
build/conf/bin/start-platform.d/00_generate_ssl_cert.sh
Executable file
15
build/conf/bin/start-platform.d/00_generate_ssl_cert.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/bin/bash
# ----------------------------------------------------------------------------------------
# Generates SSL keys used by jupyterlab & traefik
# ----------------------------------------------------------------------------------------

# generate ssl keys if they don't exist yet (happens first time the script is run)
# skip this step if no certificate dir -- the previous version only claimed to:
# `find` on a missing dir returned empty output, so mkcert still ran; the -d
# guard below implements the stated behavior
CERTS_DIR="/mnt/certs"
if [[ -d $CERTS_DIR && -z $(find "$CERTS_DIR" -name '*.crt') ]]; then
    /mkcert.sh "$CERTS_DIR" "stellars-jupyterlab-ds"
fi

# EOF
|
||||
|
||||
14
build/conf/bin/start-platform.d/01_nvidia-smi.sh
Executable file
14
build/conf/bin/start-platform.d/01_nvidia-smi.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
# ----------------------------------------------------------------------------------------
# prints nvidia-smi result (NVIDIA GPU status)
# ----------------------------------------------------------------------------------------

# Only report GPU status when GPU_SUPPORT_ENABLED=1 is exported
# (set by the GPU compose overlay when an NVIDIA GPU is available)
if [ "$GPU_SUPPORT_ENABLED" = "1" ]; then
    /usr/bin/nvidia-smi
fi

# EOF
|
||||
|
||||
15
build/conf/bin/start-platform.sh
Executable file
15
build/conf/bin/start-platform.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/bin/bash
# Platform entrypoint: run every executable hook under /start-platform.d,
# then hand the container over to jupyterhub in the foreground.

# run series of start scripts (services will need to run in background)
START_PLATFORM_DIR='/start-platform.d'
for script in "$START_PLATFORM_DIR"/*; do
    if [ -f "$script" ] && [ -x "$script" ]; then
        "$script"
    fi
done

# run jupyterhub, env params are configured in Dockerfile and docker-compose yml
jupyterhub -f /srv/jupyterhub/jupyterhub_config.py

# EOF
|
||||
|
||||
2
certs/README.md
Normal file
2
certs/README.md
Normal file
@@ -0,0 +1,2 @@
|
||||
This is where certificates for JupyterLab and JupyterHub should be uploaded.
|
||||
This directory will be mounted under /mnt/certs.
|
||||
8
certs/certs.yml
Normal file
8
certs/certs.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
# traefik certificates
|
||||
tls:
|
||||
stores:
|
||||
default:
|
||||
defaultCertificate:
|
||||
certFile: /mnt/certs/stellars-jupyterhub-ds.crt
|
||||
keyFile: /mnt/certs/stellars-jupyterhub-ds.key
|
||||
|
||||
17
certs/stellars-jupyterhub-ds.crt
Normal file
17
certs/stellars-jupyterhub-ds.crt
Normal file
@@ -0,0 +1,17 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICyTCCAbECFBBgOcnAuh/moEXjNnY2+O1NX8q2MA0GCSqGSIb3DQEBCwUAMCEx
|
||||
HzAdBgNVBAMMFnN0ZWxsYXJzLWp1cHl0ZXJsYWItZHMwHhcNMjUwNzA3MjEyMjU2
|
||||
WhcNMjYwNzA3MjEyMjU2WjAhMR8wHQYDVQQDDBZzdGVsbGFycy1qdXB5dGVybGFi
|
||||
LWRzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzpdUVhh7H2CeQbD/
|
||||
wdiDS1GANxWUv7ZBaRNkS5XkYqygtK2cKr67fBiHdA0cLetNeODdFi15TIfR68SM
|
||||
8k4hVVMxBpWNkrwf2WBC8uUi3nhmnge7z33xgD2zYSMGyjwQ2hlXUKA+wP0aqCoz
|
||||
9mm2JiscCydsLbq5DLmziSsA/eR88ZqRSG/6ulwRjBebrpKH4STj7ZgeuO/pXzt9
|
||||
00ShEXDGsXgDNB6tzwEWgnooIUqWXyrd3Xq98S2xOfDkvkfpDvNy/IJnXpNhsgh6
|
||||
AQf1i9iQ8+rqhfdVDnrC55vYNXf7ig2rcBQq9+frNh011RyDnyv89vQTRj1GWhtB
|
||||
PNRNswIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB6suvIGEbfuQTHquZHxQoSgk1O
|
||||
eM9Brrtl9JrkK4ZvPqzqbPsP0yjP4aiZ2MT7G9pvtKxOI0gb0eodT85LKQh0xDxg
|
||||
EsB41HvFWkqaeOiFFY24C4TKMQTF+iXlkTR534mRqFOLxloCdq6NLrDspGyn0jiE
|
||||
O/vUDXCpjQABVKekQNcUPu1X/GaLsaEELJxm6S2pxB88dA/dhZwuNHS9VGrBS3Fq
|
||||
ILOWSkocgTg3maqv9BiY9vspkfEZXccSuPGlsivVp55p2Ps9u9TFZMbtvXMKqUWv
|
||||
g0psGiQMhmWoM0+T9AedVSnx0BZh422nZGJYczEitazMU7xZfD+9CghQKyZc
|
||||
-----END CERTIFICATE-----
|
||||
28
certs/stellars-jupyterhub-ds.key
Normal file
28
certs/stellars-jupyterhub-ds.key
Normal file
@@ -0,0 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDOl1RWGHsfYJ5B
|
||||
sP/B2INLUYA3FZS/tkFpE2RLleRirKC0rZwqvrt8GId0DRwt60144N0WLXlMh9Hr
|
||||
xIzyTiFVUzEGlY2SvB/ZYELy5SLeeGaeB7vPffGAPbNhIwbKPBDaGVdQoD7A/Rqo
|
||||
KjP2abYmKxwLJ2wturkMubOJKwD95HzxmpFIb/q6XBGMF5uukofhJOPtmB647+lf
|
||||
O33TRKERcMaxeAM0Hq3PARaCeighSpZfKt3der3xLbE58OS+R+kO83L8gmdek2Gy
|
||||
CHoBB/WL2JDz6uqF91UOesLnm9g1d/uKDatwFCr35+s2HTXVHIOfK/z29BNGPUZa
|
||||
G0E81E2zAgMBAAECggEAS1Gt3EPJo7twh0e2PyCmYY/hvHOAS1LxzxLUVqBb50PD
|
||||
xYukK4tlRE2XGM7szqsCkEb/I+ZJWocMGDaNIowsp/SQI7PjrTMsM27eibrTdyOT
|
||||
Whs8tF3Y7eXQ3+HKyHGUvUdO56S8rFS64sOcNUL+NzUxYLmrPZLdg4+yrphAoM+E
|
||||
cdtgOeXwqOeNZIKBHteZLJttHVbPnPgxV8JCxQWsR1F5e+n9GQ6+jWw0H6jsmTWY
|
||||
FOec7aOUgTTqFuCK/Wz3BlMZG7I0ltl2kLTab8HizHCASKvqVs9meZAquNda8Thi
|
||||
+P7fLeMaxy/N7m2NsjOPSxzyHcwdIl/hht9KyU9KmQKBgQDzC78+sxAWFJhM2vr2
|
||||
JMQEeLxM8FA6jd7WUoMY4CCS34O9JLIdfjMb2UaFN2kOVSoRkmle8DK32UkxF2f6
|
||||
pRrAdGC0aFLqbpr2F4aEGEyxmFOeXSamprE4bSY3AqBL2I3j9PgjSGPy9BZ42b4e
|
||||
3ufxywM2doq9jikc1dnSHjumaQKBgQDZmixoJ1O/e0k0jYq0/AdPJacP3p924riX
|
||||
1jdLC43E8+jL7aVXkI+6DPhzzGiCiF+ni8hkZrHXVrt3n+v8HVg/ECORtF98EaZj
|
||||
TFfPMS7SrVfodPCUOJGjfClArN245VBCwaxHaAIReQfeSFdlQ4msNI1u1dgX/p73
|
||||
QOhzajHnuwKBgH5bZRobzgksu+F5F3BQvPf2LS9y4MMQbwHN9ApCu6ygN8VmBpP+
|
||||
wGOCGn+3xw6EiFKKY+3Pww6CEONM3+oXXEkiOASTGEh5KGc4495ZI8ZRA2gZ1bZe
|
||||
g1Rqqqt+2geUlnAlM7xkLlJpRCWgTo+OI9g5S3pNNuRFvGoZZB0xSqHBAoGAB/O9
|
||||
abT+N4Iu46/EgOaw8bCDkW3mT/brHgljDegHL0pmmIU/xJpw1jDBJ6Ixs4t+S2Yt
|
||||
VZX3K9JsmWuLuTPtVKinVBzOXH9coEZa8yRt/J7/FmynmhMTJ3FTJJzsoWQn8biY
|
||||
Lifb432Tzwi/wOCd+GAXRxSjiATzFEAX/pfD6R8CgYBvtfZnuDk2lA2gOGK0kMe8
|
||||
biAxM1EA/fyRYrCkkGlDiMJGfpbsipLv3/m4HRcSNot/m5RVkfwDuikKlZ7frOYy
|
||||
tigsQwN8j4KU+GU6wp9aG75JI3SVQenMRNGQxUHOd7juo0eTdDIqo0IktZ0je4J1
|
||||
5Bno9EXbsJYCfq69Fm0Few==
|
||||
-----END PRIVATE KEY-----
|
||||
24
compose-gpu.yml
Normal file
24
compose-gpu.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
#
|
||||
# Stellars Jupyterhub DS Platform
|
||||
# Project Home: https://github.com/stellarshenson/stellars-jupyterhub-ds
|
||||
# This file adds GPU support to compose.yml services
|
||||
#
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
|
||||
services:
|
||||
|
||||
## adds NVIDIA GPU support to Jupyterhub service
|
||||
jupyterhub:
|
||||
environment:
|
||||
- GPU_SUPPORT_ENABLED=1
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: all
|
||||
capabilities: [gpu]
|
||||
|
||||
# EOF
|
||||
|
||||
88
compose.yml
Normal file
88
compose.yml
Normal file
@@ -0,0 +1,88 @@
|
||||
# Copyright (c) Jupyter Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
services:
|
||||
|
||||
# Proxy for smart traffic routing to make it possible to host multiple similar containers
|
||||
# exposing ports 80, 443 & 8080 to force docker to keep only one traefik service
|
||||
# traefik dashboard is available under http://localhost:8080/dashboard
|
||||
traefik:
|
||||
image: traefik:latest
|
||||
container_name: ${COMPOSE_PROJECT_NAME:-stellars-jupyterhub-ds}-traefik
|
||||
command:
|
||||
- "--entrypoints.web.address=:80"
|
||||
- "--entrypoints.websecure.address=:443"
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
- "--api.dashboard=true"
|
||||
- "--api.insecure=true"
|
||||
- "--providers.file.filename=/mnt/certs/certs.yml" # certificates generated by jupyterlab container
|
||||
- "--serverstransport.insecureskipverify=true" # required for https passthrough
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
      - ./certs:/mnt/certs # bind-mounted host directory holding certificates and certs.yml (note: jupyterhub mounts the vol_certs named volume at the same path -- verify both services see the same certs)
|
||||
networks:
|
||||
- jupyterhub-network
|
||||
restart: unless-stopped
|
||||
|
||||
jupyterhub:
|
||||
build:
|
||||
context: build
|
||||
dockerfile: Dockerfile.jupyterhub
|
||||
container_name: ${COMPOSE_PROJECT_NAME:-stellars-jupyterhub-ds}-jupyterhub
|
||||
volumes:
|
||||
# The JupyterHub configuration file
|
||||
- "./conf/jupyterhub_config.py:/srv/jupyterhub/jupyterhub_config.py:ro"
|
||||
# Bind Docker socket on the host so we can connect to the daemon from
|
||||
# within the container
|
||||
- "/var/run/docker.sock:/var/run/docker.sock:rw"
|
||||
# Bind Docker volume on host for JupyterHub database and cookie secrets
|
||||
- "vol_hub_data:/data"
|
||||
# this is where certificates will be generated
|
||||
- "vol_certs:/mnt/certs"
|
||||
ports:
|
||||
- "8000:8000"
|
||||
environment:
|
||||
# This username will be a JupyterHub admin
|
||||
JUPYTERHUB_ADMIN: admin
|
||||
# All containers will join this network
|
||||
DOCKER_NETWORK_NAME: jupyterhub-network
|
||||
# JupyterHub will spawn this Notebook image for users
|
||||
DOCKER_NOTEBOOK_IMAGE: stellars/stellars-jupyterlab-ds:latest
|
||||
# Notebook directory inside user image
|
||||
DOCKER_NOTEBOOK_DIR: /home/lab/workspace
|
||||
labels:
|
||||
# Enable proxy support from Traefik
|
||||
- "traefik.enable=true"
|
||||
|
||||
# ⚙ Jupyterhub Service (8000)
|
||||
- "traefik.http.routers.jupyterhub-rtr.rule=Path(`/hub`) || PathPrefix(`/hub/`)"
|
||||
- "traefik.http.routers.jupyterhub-rtr.entrypoints=websecure"
|
||||
- "traefik.http.routers.jupyterhub-rtr.service=jupyterhub-svc"
|
||||
- "traefik.http.routers.jupyterhub-rtr.tls=true"
|
||||
- "traefik.http.services.jupyterhub-svc.loadbalancer.server.scheme=http"
|
||||
- "traefik.http.services.jupyterhub-svc.loadbalancer.server.port=8000"
|
||||
|
||||
# ⚙ Jupyterlab Service (8000)
|
||||
- "traefik.http.routers.jupyterlab-rtr.rule=Path(`/user`) || PathPrefix(`/user/`)"
|
||||
- "traefik.http.routers.jupyterlab-rtr.entrypoints=websecure"
|
||||
- "traefik.http.routers.jupyterlab-rtr.service=jupyterlab-svc"
|
||||
- "traefik.http.routers.jupyterlab-rtr.tls=true"
|
||||
- "traefik.http.services.jupyterlab-svc.loadbalancer.server.scheme=http"
|
||||
- "traefik.http.services.jupyterlab-svc.loadbalancer.server.port=8000"
|
||||
networks:
|
||||
- jupyterhub-network
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
vol_hub_data:
|
||||
vol_certs:
|
||||
|
||||
networks:
|
||||
jupyterhub-network:
|
||||
name: jupyterhub-network
|
||||
|
||||
84
conf/jupyterhub_config.py
Normal file
84
conf/jupyterhub_config.py
Normal file
@@ -0,0 +1,84 @@
|
||||
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

# Configuration file for JupyterHub
import os

c = get_config()

# We rely on environment variables to configure JupyterHub so that we
# avoid having to rebuild the JupyterHub container every time we change a
# configuration parameter.

# Spawn single-user servers as Docker containers
c.JupyterHub.spawner_class = "dockerspawner.DockerSpawner"

# Environment variables passed into every spawned single-user container
c.DockerSpawner.environment = {
    'JUPYTERLAB_STARTUP_MODE': 'jupyterhub'
}

# Spawn containers from this image
c.DockerSpawner.image = os.environ["DOCKER_NOTEBOOK_IMAGE"]

# Connect containers to this Docker network
network_name = os.environ["DOCKER_NETWORK_NAME"]
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name

# Explicitly set notebook directory because we'll be mounting a volume to it.
# The directory comes from DOCKER_NOTEBOOK_DIR (set in compose.yml).
notebook_dir = os.environ.get("DOCKER_NOTEBOOK_DIR")

# Force container user
c.DockerSpawner.container_user = "lab"

# Notebook directory inside the user container.
# BUGFIX: this was assigned twice ("/home/lab/work", then the env value);
# the first assignment was dead code and has been removed.
c.DockerSpawner.notebook_dir = notebook_dir

# Volumes mounted into every user container:
#  - per-user Docker volume on the notebook directory
#  - shared volume mounted under /mnt/shared
# BUGFIX: c.DockerSpawner.volumes was previously assigned twice, so the
# shared-volume mount was silently discarded; both belong in one dict.
c.DockerSpawner.volumes = {
    "jupyterhub-user-{username}": notebook_dir,
    "jupyterhub-shared-lab": "/mnt/shared",
}

# Remove containers once they are stopped
c.DockerSpawner.remove = True

# For debugging arguments passed to spawned containers
c.DockerSpawner.debug = True

# User containers will access hub by container name on the Docker network
c.JupyterHub.hub_ip = "jupyterhub"
c.JupyterHub.hub_port = 8080

# Custom server options to expose MLflow
# NOTE(review): `server_options` is not a standard DockerSpawner trait --
# confirm something consumes it, otherwise this block is silently ignored.
c.DockerSpawner.server_options = {
    'MLflow': {
        'display_name': 'MLflow UI',
        'port': 5000,
        'path': '/user/{username}/mlflow'
    }
}

# Persist hub data on volume mounted inside container
c.JupyterHub.cookie_secret_file = "/data/jupyterhub_cookie_secret"
c.JupyterHub.db_url = "sqlite:////data/jupyterhub.sqlite"

# Allow all signed-up users to login
c.Authenticator.allow_all = True

# Authenticate users with Native Authenticator
c.JupyterHub.authenticator_class = "nativeauthenticator.NativeAuthenticator"

# Require admin approval for new sign-ups (the previous comment claimed the
# opposite of what open_signup=False does)
c.NativeAuthenticator.open_signup = False

# Allowed admins (admin_users is a set trait)
admin = os.environ.get("JUPYTERHUB_ADMIN")
if admin:
    c.Authenticator.admin_users = {admin}
|
||||
|
||||
Reference in New Issue
Block a user