Compare commits

..

19 Commits

SHA1 Message Date
414b05fb22 Minor tweaks 2025-01-18 10:46:47 -08:00
38966e4a63 Syncthing cleanup 2023-08-25 20:40:18 -07:00
134183029f Added ipnat container 2023-08-25 20:39:55 -07:00
d4666c5a0d Cleaned up jupyter container, fixed PATH 2023-03-07 09:03:10 -08:00
41ee2dc5b7 Improved jupyter container 2023-03-05 10:14:13 -08:00
80feb0690c Module cleanup 2023-02-12 18:56:59 -08:00
2e1beca6b0 removed useless plugin 2023-02-11 19:26:30 -08:00
2166386399 Added server plugins 2023-02-11 15:53:55 -08:00
378c2933b9 Moved qmk container to qmk repo 2022-12-27 13:24:51 -08:00
d6c4dab27f Added standalone build capability 2022-11-19 17:38:39 -08:00
f32cfa057b README 2022-11-18 23:44:02 -08:00
80b80ae74a Added gitignore 2022-11-18 23:29:57 -08:00
23d5acec75 Added QMK container 2022-11-18 23:29:49 -08:00
73c5b50dae Bound ports to loopback for security 2022-11-15 22:06:34 -08:00
2d1ca5de0f Added latex dockerfile 2022-11-13 11:17:04 -08:00
b81253c1e6 Removed manim from build 2022-11-03 11:01:06 -07:00
7e202aae3a Fixed a few errors 2022-10-13 12:45:34 -07:00
eb22f37654 Whitespace and gitignore 2022-10-13 07:59:08 -07:00
9ea5fe3b5f Added radicale container 2022-10-13 07:55:57 -07:00
28 changed files with 476 additions and 168 deletions

View File

@ -13,4 +13,4 @@ insert_final_newline = false
[*.yml]
indent_style = space
indent_size = 4
indent_size = 2

4
.gitignore vendored
View File

@ -1,2 +1,2 @@
jupyter/jupyter
jupyter/notebooks
qmk/kb
qmk/output

0
.gitmodules vendored Normal file
View File

18
README.md Normal file
View File

@ -0,0 +1,18 @@
# Dockerfiles
This repository contains Dockerfiles I've written for various tasks. Most are my own; some are minor modifications of "official" Docker images.
## Tools
- [`latex`](./latex): A LaTeX builder containing all of TeX Live. Built on Arch, because installing TeX on Arch is unreasonably easy.
- [`qmk`](./qmk): A complete QMK build environment. Contains *everything*, including `qmk_firmware` and `qmk_cli`.
## Apps
- [`jupyter`](./jupyter): A JupyterLab container, because Jupyter is a pain to install.
- [`syncthing`](./syncthing): A [Syncthing](https://syncthing.net/) server, and optionally a relay. Makes installation easy and prevents Syncthing from accessing your entire filesystem.
## Servers
- [`radicale`](./radicale): A [Radicale](https://radicale.org/) server.
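A rough usage sketch for the app and server containers (the compose-file location is an assumption based on the other service directories):
```bash
# Sketch: build and start one of the server containers from its directory
cd radicale
docker compose up -d --build
```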

32
ipnat/build/Dockerfile Normal file
View File

@ -0,0 +1,32 @@
FROM debian:bullseye
USER root
# Install all packages
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update --yes && \
apt-get upgrade --yes && \
apt-get install --yes --no-install-recommends \
bash \
locales \
iptables \
dnsutils \
&& \
# Clean up and generate locales
apt-get clean && rm -rf /var/lib/apt/lists/* && \
echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \
locale-gen
# Requires cap-add NET_ADMIN NET_RAW
EXPOSE 33
EXPOSE 993
EXPOSE 587
COPY start.sh /start.sh
COPY iptables.sh /iptables.sh
ENTRYPOINT ["bash"]
CMD ["/start.sh"]

28
ipnat/build/iptables.sh Normal file
View File

@ -0,0 +1,28 @@
nat () {
# All traffic to $THIS_PORT on this container
# will be redirected to $THAT_PORT on $THAT_IP
THAT_IP=$1
THIS_PORT=$2
THAT_PORT=$3
# Accept forward incoming traffic
iptables -I FORWARD -d $THAT_IP -m tcp -p tcp --dport $THAT_PORT -j ACCEPT
# Accept forward return traffic
iptables -I FORWARD -s $THAT_IP -m tcp -p tcp --sport $THAT_PORT -j ACCEPT
# Redirect packets to remote
iptables -t nat -I PREROUTING -m tcp -p tcp --dport $THIS_PORT -j DNAT --to-destination $THAT_IP:$THAT_PORT
}
NAT_IP=$(dig +short betalupi.com)
nat $NAT_IP 33 10013
nat $NAT_IP 993 10015
nat $NAT_IP 587 10016
# Include this line ONCE, at the end.
iptables -t nat -I POSTROUTING -d $NAT_IP -j MASQUERADE
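To make the mapping concrete, this is what `nat $NAT_IP 33 10013` expands to once `dig` has resolved the address (the IP shown is a documentation placeholder):
```bash
# Expansion of `nat 203.0.113.5 33 10013` (placeholder address)
iptables -I FORWARD -d 203.0.113.5 -m tcp -p tcp --dport 10013 -j ACCEPT
iptables -I FORWARD -s 203.0.113.5 -m tcp -p tcp --sport 10013 -j ACCEPT
iptables -t nat -I PREROUTING -m tcp -p tcp --dport 33 -j DNAT --to-destination 203.0.113.5:10013
```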

11
ipnat/build/start.sh Normal file
View File

@ -0,0 +1,11 @@
#!/bin/bash
touch /var/log/iptables.log
bash /iptables.sh
iptables -A INPUT -j LOG --log-prefix "[I]iptables: "
iptables -A OUTPUT -j LOG --log-prefix "[O]iptables: "
iptables -A FORWARD -j LOG --log-prefix "[F]iptables: "
tail -f /var/log/iptables.log

14
ipnat/docker-compose.yml Normal file
View File

@ -0,0 +1,14 @@
version: "2"
services:
ipnat:
build: ./build
container_name: ipnat
restart: unless-stopped
ports:
- "10010:10010"
cap_add:
- NET_ADMIN
- NET_RAW

2
jupyter/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
jupyter/
notebooks/

View File

@ -6,13 +6,26 @@ This directory contains build files for a complete jupyter environment. It is based on:
- https://github.com/rgriffogoes/scraper-notebook
- https://github.com/sharpTrick/sage-notebook
The images these files produce contain everything you could possibly want in a jupyter notebook. Unfortunately, this also means that they take forever to build.
The images these files produce contain everything you could possibly want in a jupyter notebook. Additional packages can be installed in notebooks with `%%bash` magic.
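As an illustration of the `%%bash` note above (the package name is a placeholder):
```bash
%%bash
# Sketch: install an extra package from inside a notebook cell
pip install --quiet tqdm
```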
## Building the Image
To build this image, run `build/build.fish`. Edit the script to customize the image.
## Image contents
- jupyter + all dependencies
- Python + many packages
- Octave
- R
- Julia
## TODO
- Fix sage
- Haskell ([source](https://github.com/IHaskell/ihaskell-notebook))
- Maxima Kernel
## Configuration
Change the permissions on the configuration volume (see `docker-compose.yml`) if you get errors.
@ -34,30 +47,21 @@ The first sets `0.0.0.0` to the allowed local hostname list. This prevents a pos
The second line sets a persistent token. If this config value is unset, jupyter will generate a new one on each run, which makes it hard to create a one-click bookmark.
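For reference, a hedged sketch of what those two settings might look like; the option names are assumptions based on `jupyter_server`, since the config file itself is not shown in this hunk:
```bash
# Sketch: the two settings described above, appended to jupyter_server_config.py
cat >> ~/.jupyter/jupyter_server_config.py <<'EOF'
c.ServerApp.local_hostnames = ["0.0.0.0"]   # assumed option: allowed local hostnames
c.ServerApp.token = "my-persistent-token"   # fixed token instead of a new one per run
EOF
```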
## Image contents
- jupyterlab, jupyterhub, notebook
- latex + pandoc for exporting
- mamba for packages
- Python
- Scrape tools (bs4, requests)
- scipy, scikit-learn, scikit-image
- tensorflow
- sympy
- numpy
- matplotlib
- pandas
- manim
- Octave
- R
- Julia
### As a Server
Add the following lines and remove `c.ServerApp.token`. This allows remote access and enables password authentication. **Warning:** be careful; anyone with access to this server has a shell on your system.
## TODO
- Auto dark theme
- Fix sage
- GPU support ([source](https://github.com/iot-salzburg/gpu-jupyter))
- Haskell ([source](https://github.com/IHaskell/ihaskell-notebook))
- Rust Kernel
- C++ Kernel
- Perl Kernel
- Maxima Kernel
```python
c.NotebookApp.allow_remote_access = True
c.NotebookApp.allow_origin = "*"
c.ServerApp.ip = "*"
c.ServerApp.password = u"pwd_str"
c.ServerApp.open_browser = False
```
Passwords can be generated with
```python
from jupyter_server.auth import passwd
passwd()
```

View File

@ -75,14 +75,15 @@ ENV CONDA_DIR=/opt/conda \
ENV PATH="${CONDA_DIR}/bin:${PATH}" \
HOME="/home/${NB_USER}"
# Copy a script that we will use to correct permissions after running certain commands
# A script that we will use to correct permissions after running certain commands
COPY fix-permissions.sh /usr/local/bin/fix-permissions
RUN chmod a+rx /usr/local/bin/fix-permissions
# Enable prompt color in the skeleton .bashrc before creating the default NB_USER
RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc && \
# Add call to conda init script see https://stackoverflow.com/a/58081608/4413446
echo 'eval "$(command conda shell.bash hook 2> /dev/null)"' >> /etc/skel/.bashrc
echo 'eval "$(command conda shell.bash hook 2> /dev/null)"' >> /etc/skel/.bashrc && \
echo "export PATH=${CONDA_DIR}/bin:\$PATH" >> /etc/skel/.bashrc
# Create user
RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \
@ -103,7 +104,6 @@ RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \
USER ${NB_UID}
# Pin python version here, or set it to "default"
ARG PYTHON_VERSION=3.10
@ -184,17 +184,9 @@ RUN mkdir -p "${HOME}/.jupyter" && \
chmod u+rwx "${HOME}/.jupyter"
VOLUME "${HOME}/.jupyter"
# Legacy for Jupyter Notebook Server, see: [#1205](https://github.com/jupyter/docker-stacks/issues/1205)
#RUN sed -re "s/c.ServerApp/c.NotebookApp/g" \
# /etc/jupyter/jupyter_server_config.py > /etc/jupyter/jupyter_notebook_config.py && \
# fix-permissions /etc/jupyter/
# HEALTHCHECK documentation: https://docs.docker.com/engine/reference/builder/#healthcheck
# This healthcheck works well for `lab`, `notebook`, `nbclassic`, `server` and `retro` jupyter commands
# https://github.com/jupyter/docker-stacks/issues/915#issuecomment-1068528799
HEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \
CMD wget -O- --no-verbose --tries=1 --no-check-certificate \
http${GEN_CERT:+s}://localhost:8888${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1
USER ${NB_UID}
WORKDIR "${HOME}"
WORKDIR "${HOME}"

View File

@ -2,9 +2,8 @@ import os
import stat
import subprocess
from jupyter_core.paths import jupyter_data_dir
c = get_config() # noqa: F821
c.ServerApp.ip = "0.0.0.0"
c.ServerApp.port = 8888
c.ServerApp.open_browser = False
@ -16,42 +15,6 @@ c.InlineBackend.figure_formats = {"png", "jpeg", "svg", "pdf"}
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False
# Generate a self-signed certificate
OPENSSL_CONFIG = """\
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
"""
if "GEN_CERT" in os.environ:
dir_name = jupyter_data_dir()
pem_file = os.path.join(dir_name, "notebook.pem")
os.makedirs(dir_name, exist_ok=True)
# Generate an openssl.cnf file to set the distinguished name
cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
if not os.path.isfile(cnf_file):
with open(cnf_file, "w") as fh:
fh.write(OPENSSL_CONFIG)
# Generate a certificate if one doesn't exist on disk
subprocess.check_call(
[
"openssl",
"req",
"-new",
"-newkey=rsa:2048",
"-days=365",
"-nodes",
"-x509",
"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
f"-keyout={pem_file}",
f"-out={pem_file}",
]
)
# Restrict access to the file
os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
c.ServerApp.certfile = pem_file
# Change default umask for all subprocesses of the notebook server if set in
# the environment
if "NB_UMASK" in os.environ:

View File

@ -5,7 +5,8 @@ set is_quiet true
set root_dir "."
function build_image
# First argument: (optional) remove parent image? (true or false)
# First argument: remove parent image?
# true or false, optional, default is false
# Second argument: file name to build
# Parse arguments
@ -13,7 +14,7 @@ function build_image
set remove_previous $argv[1]
set build_file $argv[2]
else if test (count $argv) -eq 1
set remove_previous true
set remove_previous false
set build_file $argv[1]
end
@ -41,33 +42,33 @@ end
date "+Build started at %Y-%m-%d %T"
echo ""
# Base image MUST be built first
# Base image must be built first
printf "Building base image...\n"
docker build \
--quiet=$is_quiet \
--build-arg PYTHON_VERSION=3.9 \
--build-arg PYTHON_VERSION=3.10 \
-t betalupi/jupyter-inter-0 \
$root_dir/base
printf "Done. \n\n"
build_image false octave
build_image false r
build_image false julia
#build_image false sage (BROKEN)
#build_image octave
#build_image r
#build_image julia
#build_image sage (BROKEN)
build_image false plugins
build_image false pymodules
build_image plugins
build_image pymodules
# Manim will not install under python 3.10.
# 3.9 works (see arguments for base above)
build_image false manim
#build_image manim
# Rename final image
docker image tag betalupi/jupyter-inter-(math $img_idx) git.betalupi.com/mark/jupyter
docker image tag betalupi/jupyter-inter-(math $img_idx) git.betalupi.com/mark/jupyter_small
docker image rm betalupi/jupyter-inter-(math $img_idx)
echo ""

View File

@ -5,12 +5,23 @@ LABEL maintainer="Mark <mark@betalupi.com>"
USER $NB_UID
RUN mamba install --yes \
# Python packages
## Jupyter extensions
"jupyter_server>=2.0.0" \
## Python packages
# Scraping
"beautifulsoup4" \
"requests" \
"schedule" \
# ML and Data
"scikit-image" \
"scikit-learn" \
"scipy" \
"seaborn" \
"pandas" \
"matplotlib-base" \
# Misc
"numpy" \
"sympy" \
"altair" \
"bokeh" \
"bottleneck" \
@ -22,24 +33,15 @@ RUN mamba install --yes \
"h5py" \
"ipympl" \
"ipywidgets" \
"matplotlib-base" \
"numba" \
"numpy" \
"numexpr" \
"pandas" \
"patsy" \
"protobuf" \
"pytables" \
"scikit-image" \
"scikit-learn" \
"scipy" \
"seaborn" \
"sqlalchemy" \
"statsmodels" \
"sympy" \
"widgetsnbextension" \
"xlrd" \
"tensorflow" \
"pytest" \
"ipython" \
&& \
@ -51,34 +53,7 @@ RUN mamba install --yes \
fix-permissions "${CONDA_DIR}" && \
fix-permissions "/home/${NB_USER}"
# Import matplotlib the first time to build the font cache.
# Import matplotlib once to build the font cache.
ENV XDG_CACHE_HOME="/home/${NB_USER}/.cache/"
RUN MPLBACKEND=Agg python -c "import matplotlib.pyplot" && \
fix-permissions "/home/${NB_USER}"
#FROM pymodules as selenium
#USER ${NB_UID}
#
#RUN mamba install --yes \
# "selenium" \
# && \
# # Cleanup
# mamba clean --all -f -y && \
# npm cache clean --force && \
# jupyter lab clean && \
# rm -rf "/home/${NB_USER}/.cache/yarn" && \
# fix-permissions "${CONDA_DIR}" && \
# fix-permissions "/home/${NB_USER}"
#
## Install google chrome
#RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -
#RUN sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/#sources.list.d/google-chrome.list'
#RUN apt-get -y update
#RUN apt-get install -y google-chrome-stable
#
## Install chromedriver
#RUN apt-get install -yqq unzip
#RUN wget -O /tmp/chromedriver.zip http://chromedriver.storage.googleapis.com/`curl -sS #chromedriver.storage.googleapis.com/LATEST_RELEASE`/chromedriver_linux64.zip
#RUN unzip /tmp/chromedriver.zip chromedriver -d /usr/local/bin/
fix-permissions "/home/${NB_USER}"

View File

@ -12,4 +12,14 @@ services:
# host:container
ports:
- "8888:8888"
- "127.0.0.1:8888:8888"
# Allow GPU access.
# Requires nvidia-container-toolkit
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
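A hedged way to confirm the GPU section above works (the service name `jupyter` is an assumption; requires nvidia-container-toolkit on the host):
```bash
# Sketch: check GPU passthrough after starting the stack
docker compose up -d
docker compose exec jupyter nvidia-smi
```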

40
latex/Dockerfile Normal file
View File

@ -0,0 +1,40 @@
FROM archlinux
LABEL maintainer="Mark <mark@betalupi.com>"
RUN pacman -Fyy --noconfirm && \
#pacman -Syu --noconfirm && \
pacman -Sy --noconfirm \
#tectonic \
texlive-most \
texlive-fontsextra \
texlive-lang
RUN pacman -Sy --noconfirm \
# texlive optional deps
python-pygments \
inkscape \
java-environment \
perl-tk \
ghostscript \
java-runtime \
psutils \
wdiff \
# Other tools
curl \
git \
openssh \
python
ENV HOME /work
WORKDIR /work
VOLUME /work
VOLUME /build
# CD_DIR is relative to /work (the mounted work volume).
ENV LATEXMK_CD_DIR .
ENV SKIP_ENTRY false
COPY run.sh /run.sh
# Don't set an entrypoint; run.sh is invoked manually (see latexmk.sh).
#ENTRYPOINT [ "/bin/bash", "/entrypoint.sh" ]

16
latex/README.md Normal file
View File

@ -0,0 +1,16 @@
# LaTeX Image
A Docker image that contains `latexmk` and all of its requirements. It can be used in CI/CD, or as a drop-in replacement for a full LaTeX installation.
## Contents
- `Dockerfile`: Builds the image
- `run.sh`: Copied into the container; runs `latexmk`
- `latexmk.sh`: Run on the host system; can replace `latexmk`.
## Notes
This image has no entrypoint, which keeps it compatible with CI/CD tools. `run.sh` is executed manually (see `latexmk.sh`).
The image takes a few envvars; they are all documented in `latexmk.sh`.
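A hedged usage sketch of the wrapper (project path and latexmk flags are placeholders; the envvars are the ones documented in `latexmk.sh`):
```bash
# Sketch: compile main.tex with the containerized latexmk, output lands in ./build
cd ~/my-paper
LATEX_OUT_DIR=./build /path/to/latexmk.sh -pdf main.tex
```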

44
latex/latexmk.sh Executable file
View File

@ -0,0 +1,44 @@
#!/bin/bash
# This script is a complete replacement for latexmk,
# using this docker container.
# All arguments are passed to latexmk,
# so options are provided via envvars.
# The output-directory options must not be passed here;
# use LATEX_OUT_DIR instead.
if [[ $@ == *"-outdir"* ]] || [[ $@ == *"-output-directory"* ]] ; then
echo "Do not set output dir while using this script." 1>&2
exit 1
fi
# What directory we should bind to /work.
if [[ -z "$LATEXMK_WORK_DIR" ]] ; then
LATEXMK_WORK_DIR="/"
fi
# CD to this path inside LATEXMK_WORK_DIR.
# Must be relative.
if [[ -z "$LATEXMK_CD_DIR" ]] ; then
LATEXMK_CD_DIR="$(realpath --relative-to="$LATEXMK_WORK_DIR" "$(pwd)")"
fi
# Default path for output
if [[ -z "$LATEX_OUT_DIR" ]] ; then
LATEX_OUT_DIR="./build"
fi
# Docker bind mounts need absolute paths.
# mkdir here so we don't hit permission problems.
LATEX_OUT_DIR=$(realpath "$LATEX_OUT_DIR")
mkdir -p "$LATEX_OUT_DIR"
docker run --rm \
--user $(id -u):$(id -g) \
-v "$LATEXMK_WORK_DIR:/work:ro" \
-v "$LATEX_OUT_DIR:/build" \
-e "LATEXMK_CD_DIR=$LATEXMK_CD_DIR" \
git.betalupi.com/mark/latex \
bash /run.sh "$@"

9
latex/run.sh Normal file
View File

@ -0,0 +1,9 @@
#!/bin/bash
cd "/work/$LATEXMK_CD_DIR"
if [ "$SKIP_ENTRY" = false ] ; then
latexmk -outdir=/build "$@"
else
bash
fi

2
radicale/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
data/
config/users

3
radicale/README.md Normal file
View File

@ -0,0 +1,3 @@
# Radicale Docker container
Based on [tomsquest/docker-radicale](https://github.com/tomsquest/docker-radicale).

54
radicale/build/Dockerfile Normal file
View File

@ -0,0 +1,54 @@
FROM alpine:3.14
ARG COMMIT_ID
ENV COMMIT_ID ${COMMIT_ID}
ARG VERSION
ENV VERSION ${VERSION:-3.1.8}
ARG BUILD_UID
ENV BUILD_UID ${BUILD_UID:-2999}
ARG BUILD_GID
ENV BUILD_GID ${BUILD_GID:-2999}
ARG TAKE_FILE_OWNERSHIP
ENV TAKE_FILE_OWNERSHIP ${TAKE_FILE_OWNERSHIP:-true}
LABEL maintainer="Mark <mark@betalupi.com>" \
org.label-schema.name="Radicale Docker Image" \
org.label-schema.description="Docker image for Radicale, the CalDAV/CardDAV server"
RUN apk add --no-cache --virtual=build-dependencies \
gcc \
musl-dev \
libffi-dev \
python3-dev \
&& apk add --no-cache \
curl \
git \
openssh \
shadow \
su-exec \
tzdata \
wget \
python3 \
py3-tz \
py3-pip \
&& python3 -m pip install --upgrade pip \
&& python3 -m pip install radicale==$VERSION passlib[bcrypt] \
&& apk del --purge build-dependencies \
&& addgroup -g $BUILD_GID radicale \
&& adduser -D -s /bin/false -H -u $BUILD_UID -G radicale radicale \
&& mkdir -p /config /data \
&& chmod -R 770 /data \
&& chown -R radicale:radicale /data \
&& rm -fr /root/.cache
HEALTHCHECK --interval=30s --retries=3 CMD curl --fail http://localhost:5232 || exit 1
VOLUME /config /data
EXPOSE 5232
COPY start.sh /usr/local/bin
ENTRYPOINT ["/usr/local/bin/start.sh"]
CMD ["radicale", "--config", "/config/config"]

36
radicale/build/start.sh Executable file
View File

@ -0,0 +1,36 @@
#!/bin/sh
set -e
# Change uid/gid of radicale if vars specified
if [ -n "$UID" ] || [ -n "$GID" ]; then
if [ ! "$UID" = "$(id radicale -u)" ] || [ ! "$GID" = "$(id radicale -g)" ]; then
# Fail on read-only container
if grep -e "\s/\s.*\sro[\s,]" /proc/mounts > /dev/null; then
echo "You specified custom UID/GID (UID: $UID, GID: $GID)."
echo "UID/GID can only be changed when not running the container with --read-only."
echo "Please see the README.md for how to proceed and for explanations."
exit 1
fi
if [ -n "$UID" ]; then
usermod -o -u "$UID" radicale
fi
if [ -n "$GID" ]; then
groupmod -o -g "$GID" radicale
fi
fi
fi
# If requested and running as root, mutate the ownership of bind-mounts
if [ "$(id -u)" = "0" ] && [ "$TAKE_FILE_OWNERSHIP" = "true" ]; then
chown -R radicale:radicale /data
fi
# Run radicale as the "radicale" user or any other command if provided
if [ "$(id -u)" = "0" ] && [ "$1" = "radicale" ]; then
exec su-exec radicale "$@"
else
exec "$@"
fi

20
radicale/config/config Normal file
View File

@ -0,0 +1,20 @@
[auth]
type = htpasswd
htpasswd_filename = /config/users
htpasswd_encryption = bcrypt
delay = 3
[rights]
type = owner_only
[server]
hosts = 0.0.0.0:5232
[web]
# type = none: disable web management
# type = internal: enable web management
type = internal
[storage]
type = multifilesystem
filesystem_folder = /data
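Since `config/users` is gitignored and not shown, here is a hedged sketch of creating it with the bcrypt encryption the config expects (`htpasswd` comes from apache-tools; the username is a placeholder):
```bash
# Sketch: create the htpasswd file referenced by htpasswd_filename above
htpasswd -B -c config/users alice
```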

View File

@ -0,0 +1,37 @@
version: "3.7"
services:
radicale:
build:
context: ./build
args:
BUILD_UID: 1000
BUILD_GID: 1000
TAKE_FILE_OWNERSHIP: "false"
container_name: radicale
restart: unless-stopped
init: true
healthcheck:
test: curl -f http://127.0.0.1:5232 || exit 1
interval: 30s
retries: 3
# Security
read_only: true
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- SETUID
- SETGID
- KILL
ports:
- 5232:5232
volumes:
- ./data:/data
- ./config:/config

View File

@ -8,19 +8,33 @@ LABEL maintainer="Mark <mark@betalupi.com>"
# The package will create its own syncthing user if one does not exist.
# We need to do it manually to control the uid and gid.
RUN groupadd -g 1000 syncthing && \
useradd -g 1000 -u 1000 syncthing -d /stdata && \
ARG ST_GID="1000"
ARG ST_UID="1000"
RUN groupadd -g ${ST_GID} syncthing && \
useradd -g ${ST_GID} -u ${ST_UID} syncthing -d /stdata && \
pacman -Fyy --noconfirm && \
pacman -Syu --noconfirm && \
pacman -S --noconfirm syncthing syncthing-relaysrv
VOLUME /syncthing
RUN mkdir /stdata && \
mkdir /stconfig && \
mkdir /stconfig/config && \
chown -R syncthing:syncthing /stdata && \
chmod -R 774 /stdata && \
chown -R syncthing:syncthing /stconfig && \
chmod -R 774 /stconfig
VOLUME /stdata
VOLUME /stconfig
# Syncthing
EXPOSE 8384
# Relay
EXPOSE 22067
COPY start.sh /start.sh
USER ${ST_UID}
WORKDIR "/stdata"
ENTRYPOINT ["/bin/bash", "start.sh"]

View File

@ -1,22 +1,5 @@
#!/bin/bash
# Files go here
mkdir /stdata
# Configs go here
mkdir /stconfig
mkdir /stconfig/config
chown -R syncthing:syncthing /stdata
chmod -R 774 /stdata
chown -R syncthing:syncthing /stconfig
chmod -R 774 /stconfig
su - syncthing -c '
cd /stconfig
syncthing-relaysrv \
-pools="" \
-keys="/stconfig" \
@ -24,13 +7,11 @@ syncthing-relaysrv \
-ping-interval="30s" \
-protocol="tcp4" \
-provided-by="Betalupi" \
-status-srv=""' &
-status-srv="" &
su - syncthing -c '
syncthing \
-gui-address=0.0.0.0:8384 \
-home="/stconfig/config" \
--no-browser
'

View File

@ -1,15 +1,17 @@
version: "2"
services:
syncserver:
build: ./build
container_name: syncthing
restart: unless-stopped
syncserver:
build: ./build
container_name: syncthing
restart: unless-stopped
volumes:
- "./stdata:/stdata"
- "./stconfig:/stconfig"
# host:container
ports:
- "8384:8384"
- "22067:22067"
volumes:
- "./stdata:/stdata"
- "./stconfig:/stconfig"
ports:
- "127.0.0.1:8384:8384"
# Only uncomment if you need a relay
# TODO: envvar to disable relay
#- "22067:22067"