move services

Osman Faruk Bayram 2025-10-19 14:49:27 +03:00
parent 128005e354
commit feb53bc5fc
23 changed files with 28 additions and 28 deletions

attic.nix

@@ -0,0 +1,51 @@
{
config,
lib,
...
}:
let
atticPort = 7080;
in
{
options = {
osbmModules.enableAttic = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Attic nix cache service";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableAttic {
services.atticd = {
enable = true;
environmentFile = "/persist/attic.env";
settings = {
listen = "[::]:${toString atticPort}";
compression = {
type = "zstd";
level = 9;
};
# jwt = { };
# storage = {
# type = "local";
# # path = "/data/atreus/attic";
# # there is an issue
# };
};
};
networking.firewall.allowedTCPPorts = [ atticPort ];
services.cloudflared.tunnels = {
"fa301a21-b259-4149-b3d0-b1438c7c81f8" = {
default = "http_status:404";
credentialsFile = "/home/osbm/.cloudflared/fa301a21-b259-4149-b3d0-b1438c7c81f8.json";
ingress = {
"cache.osbm.dev" = {
service = "http://localhost:${toString atticPort}";
};
};
};
};
})
];
}
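
On the consumer side, other machines would point Nix at this cache. A minimal sketch, assuming a cache has already been created with the attic client; the cache name "main" and the signing key below are placeholders, not values from this repo:

{
  nix.settings = {
    substituters = [ "https://cache.osbm.dev/main" ]; # hypothetical cache name
    trusted-public-keys = [ "main:AAAAB3...placeholder=" ]; # placeholder signing key reported by attic
  };
}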

caddy.nix

@@ -0,0 +1,67 @@
{
pkgs,
lib,
config,
...
}:
{
options = {
osbmModules.enableCaddy = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Caddy server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableCaddy {
services.caddy = {
enable = true;
# package = pkgs.caddy.withPlugins {
# # update time to time
# # last update: 2025-03-02
# plugins = [ "github.com/caddy-dns/cloudflare@v0.2.1" ];
# hash = "sha256-2D7dnG50CwtCho+U+iHmSj2w14zllQXPjmTHr6lJZ/A=";
# };
# email = "osbm@osbm.dev";
# extraConfig = ''
# (cloudflare) {
# tls {
# dns cloudflare {env.CF_API_TOKEN}
# }
# }
# '';
# globalConfig = ''
# acme_dns cloudflare {env.CF_API_TOKEN}
# '';
};
networking.firewall.allowedTCPPorts = [
80
443
3000
];
environment.systemPackages = with pkgs; [
nssTools
];
age.secrets.cloudflare = {
file = ../../secrets/cloudflare.age;
path = "/etc/caddy/.env";
owner = "caddy";
mode = "0600";
};
systemd.services.caddy.serviceConfig = {
EnvironmentFile = "/etc/caddy/.env";
};
security.acme.acceptTerms = true;
# acceptTerms only exists at the top level; security.acme.defaults has no such option
security.acme.defaults.email = "osbm@osbm.dev";
})
];
}

cloudflare-dyndns.nix

@@ -0,0 +1,53 @@
{
lib,
config,
pkgs,
...
}:
let
# https://github.com/NixOS/nixpkgs/pull/394352
cloudflare-dyndns-5-3 = pkgs.cloudflare-dyndns.overridePythonAttrs rec {
version = lib.warnIfNot (
pkgs.cloudflare-dyndns.version == "5.0"
) "The cloudflare-dyndns package is updated, you should remove this override" "5.3";
src = pkgs.fetchFromGitHub {
owner = "kissgyorgy";
repo = "cloudflare-dyndns";
rev = "v${version}";
hash = "sha256-t0MqH9lDfl+cAnPYSG7P32OGO8Qpo1ep0Hj3Xl76lhU=";
};
build-system = with pkgs.python3Packages; [
hatchling
];
dependencies = with pkgs.python3Packages; [
click
httpx
pydantic
truststore
];
};
in
{
options = {
osbmModules.enableCloudflareDyndns = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable a service to push my public IP address to my Cloudflare domain.";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableCloudflareDyndns {
services.cloudflare-dyndns = {
package = cloudflare-dyndns-5-3;
enable = true;
apiTokenFile = "/persist/cloudflare-dyndns";
proxied = false; # TODO please revert
domains = [
"git.osbm.dev"
"aifred.osbm.dev"
];
};
})
];
}

cloudflared.nix

@@ -0,0 +1,23 @@
{
config,
lib,
...
}:
{
options = {
osbmModules.enableCloudflared = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Cloudflare tunnels";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableCloudflared {
services.cloudflared = {
enable = true;
certificateFile = "/home/osbm/.cloudflared/cert.pem";
};
})
];
}

default.nix

@@ -0,0 +1,23 @@
{
imports = [
./attic.nix
./caddy.nix
./cloudflare-dyndns.nix
./cloudflared.nix
./forgejo.nix
./glance.nix
./hydra.nix
./jellyfin.nix
./nextcloud.nix
./ollama.nix
./openssh.nix
./syncthing.nix
./system-logger
./tailscale.nix
./vaultwarden.nix
./vscode-server.nix
./wanikani-bypass-lessons.nix
./wanikani-fetch-data
./wanikani-stats
];
}
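
Every module imported above follows the same pattern: an osbmModules.enable* flag, false by default (OpenSSH and Tailscale default to true), guarding the actual configuration behind lib.mkIf. A minimal sketch of a host opting in; the import path and flag choices here are illustrative assumptions, not taken from this commit:

{
  imports = [ ./modules/services ]; # assumption: adjust to wherever this directory lives
  osbmModules = {
    enableForgejo = true;
    enableGlance = true;
    enableJellyfin = true;
  };
}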

forgejo.nix

@@ -0,0 +1,57 @@
{
lib,
config,
...
}:
{
options = {
osbmModules.enableForgejo = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Forgejo server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableForgejo {
services.forgejo = {
enable = true;
lfs.enable = true;
dump = {
enable = true;
type = "zip";
interval = "01:01";
};
settings = {
DEFAULT = {
APP_NAME = "osbm's self hosted git service";
};
server = {
DOMAIN = "git.osbm.dev";
ROOT_URL = "https://git.osbm.dev/";
};
"ui.meta" = {
AUTHOR = "osbm";
DESCRIPTION = "\"After all, all devices have their dangers. The discovery of speech introduced communication and lies.\" -Isaac Asimov";
KEYWORDS = "git,self-hosted,gitea,forgejo,osbm,open-source,nix,nixos";
};
service = {
DISABLE_REGISTRATION = true;
LANDING_PAGE = "/osbm";
};
};
};
services.cloudflared.tunnels = {
"eb9052aa-9867-482f-80e3-97a7d7e2ef04" = {
default = "http_status:404";
credentialsFile = "/home/osbm/.cloudflared/eb9052aa-9867-482f-80e3-97a7d7e2ef04.json";
ingress = {
"git.osbm.dev" = {
service = "http://localhost:3000";
};
};
};
};
})
];
}

glance.nix

@@ -0,0 +1,187 @@
{
lib,
config,
...
}:
{
options = {
osbmModules.enableGlance = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Glance server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableGlance {
services.glance = {
enable = true;
openFirewall = true;
settings = {
server = {
port = 3838;
host = "0.0.0.0";
};
branding = {
# stolen from notohh/snowflake but i love it so much
custom-footer = ''<b><p></p></b>'';
};
pages = [
{
columns = [
{
size = "small";
widgets = [
{ type = "calendar"; }
{
type = "bookmarks";
groups = [
{
title = "My Profiles";
same-tab = true;
color = "200 50 50";
links = [
{
title = "GitHub";
url = "https://github.com/osbm";
}
{
title = "Gitlab";
url = "https://gitlab.com/osbm";
}
{
title = "Crates.io";
url = "https://crates.io/users/osbm";
}
{
title = "HuggingFace";
url = "https://huggingface.co/osbm";
}
{
title = "Bluesky";
url = "https://bsky.app/profile/osbm.dev";
}
{
title = "Docker Hub";
url = "https://hub.docker.com/u/osbm";
}
{
title = "Kaggle";
url = "https://www.kaggle.com/osmanf";
}
];
}
{
title = "Documents";
links = [
{
title = "Nixos Search";
url = "https://search.nixos.org";
}
];
}
];
}
];
}
{
size = "full";
widgets = [
{
type = "search";
search-engine = "google";
bangs = [
{
title = "youtube";
shortcut = "!yt";
url = "https://www.youtube.com/results?search_query={QUERY}";
}
{
title = "nixpkgs";
shortcut = "!np";
url = "https://search.nixos.org/packages?channel=unstable&query={QUERY}";
}
{
title = "nixos";
shortcut = "!no";
url = "https://search.nixos.org/options?channel=unstable&query={QUERY}";
}
];
}
{
cache = "1m";
sites = [
{
icon = "sh:forgejo";
title = "Forgejo git server";
url = "https://git.osbm.dev";
}
{
icon = "si:ollama";
title = "Open Webui";
url = "http://ymir.curl-boga.ts.net:7070/";
}
{
icon = "sh:jellyfin";
title = "Jellyfin";
url = "http://ymir.curl-boga.ts.net:8096/";
}
{
icon = "sh:nixos";
title = "Hydra";
url = "http://wallfacer.curl-boga.ts.net:3000";
}
{
icon = "sh:nixos";
title = "Attix Binary Cache";
url = "https://cache.osbm.dev";
}
{
icon = "sh:visual-studio-code";
title = "Ymir Remote VSCode";
url = "http://ymir.curl-boga.ts.net:4444/";
}
{
icon = "sh:visual-studio-code";
title = "Tartarus Remote VSCode";
url = "http://tartarus.curl-boga.ts.net:4444/";
}
{
icon = "sh:visual-studio-code";
title = "Wallfacer Remote VSCode";
url = "http://wallfacer.curl-boga.ts.net:4444/";
}
{
icon = "si:json";
title = "Wanikani Stats";
url = "http://pochita:8501";
}
];
title = "Services";
type = "monitor";
}
];
}
];
name = "Home";
content = "Welcome to Pochita's home page!";
}
];
};
};
networking.firewall.allowedTCPPorts = [ config.services.glance.settings.server.port ];
services.cloudflared.tunnels = {
"91b13f9b-81be-46e1-bca0-db2640bf2d0a" = {
default = "http_status:404";
credentialsFile = "/home/osbm/.cloudflared/91b13f9b-81be-46e1-bca0-db2640bf2d0a.json";
ingress = {
"home.osbm.dev" = {
service = "http://localhost:${toString config.services.glance.settings.server.port}";
};
};
};
};
})
];
}

hydra.nix

@@ -0,0 +1,30 @@
{
config,
lib,
...
}:
{
options = {
osbmModules.enableHydra = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Hydra continuous integration server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableHydra {
services.hydra = {
enable = true;
port = 3000;
hydraURL = "http://${config.networking.hostName}.curl-boga.ts.net/hydra/";
notificationSender = "hydra@localhost";
buildMachinesFiles = [ ];
useSubstitutes = true;
};
networking.firewall.allowedTCPPorts = [
config.services.hydra.port
];
})
];
}

jellyfin.nix

@@ -0,0 +1,28 @@
{
config,
lib,
...
}:
{
options = {
osbmModules.enableJellyfin = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Jellyfin media server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableJellyfin {
services.jellyfin = {
enable = true;
openFirewall = true;
user = "osbm";
group = "users";
dataDir = "/home/osbm/.local/share/jellyfin";
};
networking.firewall.allowedTCPPorts = [ 8096 ];
})
];
}

nextcloud.nix

@@ -0,0 +1,33 @@
{
lib,
config,
pkgs,
...
}:
{
options = {
osbmModules.enableNextcloud = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Nextcloud server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableNextcloud {
environment.etc."nextcloud-admin-pass".text = "m7eJ4KJ1NK33JE%51";
services.nextcloud = {
enable = true;
package = pkgs.nextcloud31;
hostName = "localhost/nextcloud";
config.adminpassFile = "/etc/nextcloud-admin-pass";
config.dbtype = "sqlite";
database.createLocally = true;
settings.trusted_domains = [
"wallfacer.curl-boga.ts.net"
"localhost"
];
};
})
];
}

ollama.nix

@@ -0,0 +1,43 @@
{
lib,
config,
...
}:
{
options = {
osbmModules = {
enableOllama = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Ollama services.";
};
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableOllama {
services.ollama = {
enable = true;
acceleration = "cuda";
# loadModels = [
# "deepseek-r1:7b"
# "deepseek-r1:14b"
# ];
};
services.open-webui = {
enable = false; # TODO gives error fix later
port = 7070;
host = "0.0.0.0";
openFirewall = true;
environment = {
SCARF_NO_ANALYTICS = "True";
DO_NOT_TRACK = "True";
ANONYMIZED_TELEMETRY = "False";
WEBUI_AUTH = "False";
ENABLE_LOGIN_FORM = "False";
};
};
})
];
}

openssh.nix

@@ -0,0 +1,39 @@
{
config,
lib,
...
}:
{
options = {
osbmModules.enableOpenssh = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Enable OpenSSH service";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableOpenssh {
services.openssh = {
enable = true;
startWhenNeeded = true;
settings = {
PermitRootLogin = "no";
# only allow key based logins and not password
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
AuthenticationMethods = "publickey";
PubkeyAuthentication = "yes";
ChallengeResponseAuthentication = "no";
UsePAM = false;
# drop sessions that stop responding (5 missed keepalives, 60s apart)
ClientAliveCountMax = 5;
ClientAliveInterval = 60;
};
};
})
];
}

syncthing.nix

@@ -0,0 +1,24 @@
{
config,
lib,
...
}:
{
options = {
osbmModules.enableSyncthing = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Syncthing file synchronization service";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableSyncthing {
services.syncthing = {
enable = true;
openDefaultPorts = true;
# web GUI stays on 127.0.0.1:8384; openDefaultPorts opens the sync/discovery ports (22000, 21027)
};
})
];
}

system-logger/default.nix

@@ -0,0 +1,68 @@
{
pkgs,
config,
lib,
...
}:
let
system-logger = pkgs.writeShellApplication {
name = "system-logger";
runtimeInputs = with pkgs; [
curl
jq
zip
gawk
systemd
];
text = builtins.readFile ./system-logger.sh;
};
in
{
options.services.system-logger = {
enable = lib.mkEnableOption "System Logger Service";
logDirectory = lib.mkOption {
type = lib.types.path;
default = "/var/lib/system-logger";
description = "Directory to store log archives";
};
maxSizeMB = lib.mkOption {
type = lib.types.int;
default = 1;
description = "Maximum size of daily log archive in megabytes";
};
retentionDays = lib.mkOption {
type = lib.types.int;
default = 30;
description = "Number of days to retain log archives";
};
};
config = lib.mkIf config.services.system-logger.enable {
systemd.timers.system-logger = {
description = "System Logger Timer";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "daily";
Persistent = true;
};
};
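# systemd pairs system-logger.timer with the system-logger.service below by unit name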
systemd.services.system-logger = {
description = "System Logger Service";
serviceConfig = {
Type = "oneshot";
ExecStart = "${lib.getExe system-logger}";
# wire the module options through to the script, which reads them as env vars
Environment = [
"LOG_DIR=${config.services.system-logger.logDirectory}"
"MAX_SIZE_MB=${toString config.services.system-logger.maxSizeMB}"
"RETENTION_DAYS=${toString config.services.system-logger.retentionDays}"
];
Restart = "on-failure";
RestartSec = 60;
User = "root";
Group = "root";
};
};
};
}

system-logger/system-logger.sh

@@ -0,0 +1,136 @@
#!/usr/bin/env bash
set -euo pipefail
# Configuration: defaults below, overridable via the systemd unit's Environment=
LOG_DIR="${LOG_DIR:-/var/lib/system-logger}"
MAX_SIZE_MB="${MAX_SIZE_MB:-1}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"
DATE=$(date +%Y-%m-%d)
HOSTNAME=$(hostname)
TEMP_DIR=$(mktemp -d)
# Create log directory if it doesn't exist
mkdir -p "$LOG_DIR"
# Check if today's log already exists
if [ -f "$LOG_DIR/${DATE}-logs-${HOSTNAME}.zip" ]; then
echo "Logs for today already exist. Exiting..."
exit 0
fi
echo "Starting system log collection for $DATE"
# Function to collect logs with size limit
collect_logs() {
local source="$1"
local output="$2"
local max_lines="$3"
if [ -f "$source" ]; then
# Get the last N lines to stay within size limit
tail -n "$max_lines" "$source" > "$output" 2>/dev/null || true
echo "Collected from $source"
else
echo "Source $source not found, skipping..."
fi
}
# Function to get journal logs with filtering
get_journal_logs() {
local output="$1"
local filter="$2"
local max_lines="$3"
journalctl --since "00:00:00" --until "23:59:59" \
--no-pager --output=short \
| grep -i "$filter" | tail -n "$max_lines" > "$output" 2>/dev/null || true
echo "Collected journal logs for $filter"
}
# Line budget per log type, to stay under the 1 MB target:
# at ~100 bytes per line, 10,000 lines is roughly 1 MB
# (the per-type budgets below sum to exactly 10,000)
SSH_LINES=2000
KERNEL_LINES=2000
LOGIN_LINES=1000
SYSTEM_LINES=2000
AUTH_LINES=1000
FAILED_LOGIN_LINES=500
DISK_LINES=500
NETWORK_LINES=500
MEMORY_LINES=500
# Collect SSH connections
get_journal_logs "$TEMP_DIR/ssh.log" "sshd" "$SSH_LINES"
# Collect kernel warnings and errors
get_journal_logs "$TEMP_DIR/kernel.log" "kernel.*warning\|kernel.*error" "$KERNEL_LINES"
# Collect login/logout events
get_journal_logs "$TEMP_DIR/login.log" "session.*opened\|session.*closed\|login\|logout" "$LOGIN_LINES"
# Collect system messages
get_journal_logs "$TEMP_DIR/system.log" "systemd\|daemon" "$SYSTEM_LINES"
# Collect authentication events
get_journal_logs "$TEMP_DIR/auth.log" "authentication\|auth" "$AUTH_LINES"
# Collect failed login attempts
get_journal_logs "$TEMP_DIR/failed_login.log" "failed\|failure\|denied" "$FAILED_LOGIN_LINES"
# Collect disk usage and errors
get_journal_logs "$TEMP_DIR/disk.log" "disk\|storage\|iostat" "$DISK_LINES"
# Collect network events
get_journal_logs "$TEMP_DIR/network.log" "network\|connection\|interface" "$NETWORK_LINES"
# Collect memory usage
get_journal_logs "$TEMP_DIR/memory.log" "memory\|oom\|swap" "$MEMORY_LINES"
# Collect traditional log files if they exist
collect_logs "/var/log/auth.log" "$TEMP_DIR/auth_traditional.log" 1000
collect_logs "/var/log/syslog" "$TEMP_DIR/syslog_traditional.log" 1000
collect_logs "/var/log/messages" "$TEMP_DIR/messages_traditional.log" 1000
# Create a summary file
{
echo "=== System Log Summary for $DATE ==="
echo "Hostname: $HOSTNAME"
echo "Collection time: $(date)"
echo "Total lines collected:"
wc -l "$TEMP_DIR"/*.log 2>/dev/null || true
echo ""
echo "=== System Information ==="
echo "Uptime: $(uptime)"
echo "Load average: $(cat /proc/loadavg)"
echo "Memory usage:"
free -h
echo ""
echo "Disk usage:"
df -h
echo ""
echo "Active users:"
who
} > "$TEMP_DIR/summary.txt"
# Create the zip file
cd "$TEMP_DIR"
zip -r "$LOG_DIR/${DATE}-logs-${HOSTNAME}.zip" ./* > /dev/null
# Check file size and warn if too large
FILE_SIZE=$(stat -c%s "$LOG_DIR/${DATE}-logs-${HOSTNAME}.zip")
FILE_SIZE_MB=$((FILE_SIZE / 1024 / 1024))
if [ "$FILE_SIZE_MB" -gt "$MAX_SIZE_MB" ]; then
echo "WARNING: Log file size ($FILE_SIZE_MB MB) exceeds limit ($MAX_SIZE_MB MB)"
fi
echo "Log collection completed: $LOG_DIR/${DATE}-logs-${HOSTNAME}.zip ($FILE_SIZE_MB MB)"
# Clean up old logs (older than RETENTION_DAYS)
find "$LOG_DIR" -name "*-logs-*.zip" -type f -mtime +$RETENTION_DAYS -delete 2>/dev/null || true
# Clean up temporary directory
rm -rf "$TEMP_DIR"
echo "System log collection finished successfully"

tailscale.nix

@@ -0,0 +1,33 @@
{
config,
lib,
pkgs,
...
}:
{
options = {
osbmModules.enableTailscale = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Enable Tailscale VPN";
};
};
# i have 4 machines, 2 of them are always at home
# pochita (raspberry pi 5) and ymir (desktop)
# pochita will be on all the time, ymir can be woken via wake-on-lan
# and i have a laptop named tartarus
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableTailscale {
services.tailscale = {
enable = true;
port = 51513;
};
networking.firewall.allowedUDPPorts = [ config.services.tailscale.port ];
environment.systemPackages = [ pkgs.tailscale ];
})
];
}

vaultwarden.nix

@@ -0,0 +1,22 @@
{
config,
lib,
...
}:
{
options = {
osbmModules.enableVaultwarden = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Vaultwarden server";
};
};
config = lib.mkMerge [
(lib.mkIf config.osbmModules.enableVaultwarden {
services.vaultwarden = {
enable = true;
};
})
];
}

vscode-server.nix

@@ -0,0 +1,53 @@
{
config,
pkgs,
...
}:
{
services.code-server = {
# only true if the machine is not pochita
enable = config.networking.hostName != "pochita";
port = 4444;
disableTelemetry = true;
disableUpdateCheck = true;
user = "osbm";
group = "users";
# auth = "none";
host = "${config.networking.hostName}.curl-boga.ts.net";
hashedPassword = "$argon2i$v=19$m=4096,t=3,p=1$dGc0TStGMDNzSS9JRkJYUFp3d091Q2p0bXlzPQ$zvdE9BkclkJmyFaenzPy2E99SEqsyDMt4IQNZfcfFFQ";
package = pkgs.vscode-with-extensions.override {
vscode = pkgs.code-server;
vscodeExtensions =
with pkgs.vscode-extensions;
[
bbenoist.nix
catppuccin.catppuccin-vsc
catppuccin.catppuccin-vsc-icons
charliermarsh.ruff
davidanson.vscode-markdownlint
esbenp.prettier-vscode
foxundermoon.shell-format
github.copilot
github.vscode-github-actions
github.vscode-pull-request-github
jnoortheen.nix-ide
kamadorueda.alejandra
ms-azuretools.vscode-docker
ms-python.python
# ms-vscode-remote.remote-ssh
timonwong.shellcheck
tyriar.sort-lines
]
++ pkgs.vscode-utils.extensionsFromVscodeMarketplace [
{
# Available in nixpkgs, but outdated (0.4.0) at the time of adding
name = "vscode-tailscale";
publisher = "tailscale";
sha256 = "sha256-MKiCZ4Vu+0HS2Kl5+60cWnOtb3udyEriwc+qb/7qgUg=";
version = "1.0.0";
}
];
};
};
networking.firewall.allowedTCPPorts = [ config.services.code-server.port ];
}

wanikani-bypass-lessons.nix

@@ -0,0 +1,109 @@
{
lib,
config,
pkgs,
...
}:
let
waniKani-bypass-lessons = pkgs.writeShellApplication {
name = "wanikani-bypass-lessons";
runtimeInputs = with pkgs; [
curl
jq
];
text = ''
# the token that starts with "2da24" is read-only, so i am keeping it public; i have nothing secret on my wanikani account
# the second part of this script needs a write token, which is read from /persist/wanikani
# with errexit, a bare `[ ! -e ... ] && ...` would abort whenever the file exists, so use ||
[ -e /persist/wanikani ] || { echo "/persist/wanikani doesn't exist here :("; exit 1; }
WANIKANI_TOKEN=$(< /persist/wanikani)
# Maximum number of reviews to maintain
MAX_REVIEWS=200
echo "=== Checking current reviews ==="
# Get current reviews (SRS stages 0-4)
current_reviews=0
for i in {0..4}; do
stage_count=$(curl -s -H "Authorization: Bearer 2da24e4a-ba89-4c4a-9047-d08f21e9dd01" "https://api.wanikani.com/v2/assignments?srs_stages=$i" | jq '.total_count')
current_reviews=$((current_reviews + stage_count))
echo "SRS stage $i: $stage_count items"
done
echo "Current total reviews: $current_reviews"
echo "Maximum reviews target: $MAX_REVIEWS"
if [ "$current_reviews" -ge "$MAX_REVIEWS" ]; then
echo "Reviews ($current_reviews) >= max ($MAX_REVIEWS). No lessons to bypass."
sleep 3600
exit 0
fi
lessons_to_bypass=$((MAX_REVIEWS - current_reviews))
echo "Need to bypass $lessons_to_bypass lessons to reach $MAX_REVIEWS total"
# Get available lessons (limited to what we need)
ASSIGNMENT_IDS=$(curl -s -H "Authorization: Bearer 2da24e4a-ba89-4c4a-9047-d08f21e9dd01" "https://api.wanikani.com/v2/assignments?immediately_available_for_lessons=true" | jq -r ".data[] | .id" | head -n "$lessons_to_bypass")
# guard before counting: `wc -l` reports 1 even for an empty string
if [ -z "$ASSIGNMENT_IDS" ]; then
echo "No lessons available to bypass."
sleep 3600
exit 0
fi
available_lessons=$(echo "$ASSIGNMENT_IDS" | wc -l)
echo "Available lessons: $available_lessons"
# ASSIGNMENT_IDS was already truncated by `head` above, so this equals available_lessons
actual_bypass=$available_lessons
echo "Will bypass $actual_bypass lessons"
# "2017-09-05T23:41:28.980679Z" i need to create this from current time
TIME_STRING=$(date -u +"%Y-%m-%dT%H:%M:%S.%6NZ")
echo "Current time: $TIME_STRING"
echo "=== Starting assignments ==="
for assignment_id in $ASSIGNMENT_IDS; do
echo "Starting assignment $assignment_id"
curl -s "https://api.wanikani.com/v2/assignments/$assignment_id/start" \
-X "PUT" \
-H "Wanikani-Revision: 20170710" \
-H "Content-Type: application/json; charset=utf-8" \
-H "Authorization: Bearer $WANIKANI_TOKEN" \
-d "{\"assignment\": {\"started_at\": \"$TIME_STRING\" }}"
echo
sleep 1
done
echo "Successfully bypassed $actual_bypass lessons"
echo "New total should be approximately: $((current_reviews + actual_bypass))"
sleep 3600
'';
};
in
{
options.services.wanikani-bypass-lessons.enable = lib.mkEnableOption "WaniKani Bypass Lessons";
config = lib.mkIf config.services.wanikani-bypass-lessons.enable {
systemd.services.wanikani-bypass-lessons = {
description = "WaniKani Bypass Lessons";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
ExecStart = "${lib.getExe waniKani-bypass-lessons}";
Restart = "always";
RestartSec = 60 * 60;
};
};
};
}

wanikani-fetch-data/default.nix

@@ -0,0 +1,42 @@
{
pkgs,
config,
lib,
...
}:
let
wanikani-fetcher = pkgs.writeShellApplication {
name = "wanikani-fetcher";
runtimeInputs = with pkgs; [
curl
jq
zip
];
text = builtins.readFile ./wanikani-fetcher.sh;
};
in
{
options.services.wanikani-fetch-data.enable = lib.mkEnableOption "WaniKani Fetch Data";
config = lib.mkIf config.services.wanikani-fetch-data.enable {
systemd.timers.wanikani-fetch-data = {
description = "WaniKani Fetch Data";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "02:00";
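# no Persistent=true: a run missed while the machine is off is skipped until the next 02:00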
};
};
systemd.services.wanikani-fetch-data = {
description = "WaniKani Fetch Data";
serviceConfig = {
Type = "oneshot";
ExecStart = "${lib.getExe wanikani-fetcher}";
Restart = "on-failure";
RestartSec = 60;
};
};
};
}

wanikani-fetch-data/wanikani-fetcher.sh

@@ -0,0 +1,82 @@
#!/usr/bin/env bash
shopt -s nullglob
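# read-only API token, intentionally public (see the note in wanikani-bypass-lessons.nix)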
API_TOKEN="2da24e4a-ba89-4c4a-9047-d08f21e9dd01"
date=$(date +%Y-%m-%d)
# check if todays date is already in the logs folder
if [ -f "/var/lib/wanikani-logs/wanikani_data_$date.zip" ]; then
echo "Data for today already exists. Exiting..."
exit 0
fi
tmp_dir=$(mktemp -d)
echo "Temporary directory created at $tmp_dir"
mkdir "$tmp_dir/data"
mkdir -p "/var/lib/wanikani-logs"
fetch_and_merge() {
local topic="$1"
local counter=0
local url="https://api.wanikani.com/v2/$topic"
local output_file="$tmp_dir/data/$topic.json"
local next_url="$url"
echo "Fetching from $url..."
while [[ -n "$next_url" ]]; do
local resp_file="$tmp_dir/$topic-page-$counter.json"
curl -s "$next_url" \
-H "Wanikani-Revision: 20170710" \
-H "Authorization: Bearer $API_TOKEN" \
-o "$resp_file"
echo -e "\n--- Page $((counter + 1)) (First 20 lines) ---"
# process substitution rather than `jq | head`: head exiting early would trip pipefail
head -n 20 <(jq . "$resp_file")
next_url=$(jq -r '.pages.next_url // empty' "$resp_file")
counter=$((counter + 1))
done
echo "Merging data..."
local meta
meta=$(jq '{object, total_count, data_updated_at}' "$resp_file")
local files=("$tmp_dir/$topic-page-"*.json)
jq -cn \
--argjson meta "$meta" \
--slurpfile data <(jq -s '[.[] | .data[]]' "${files[@]}") \
'$meta + {data: $data[0]}' > "$output_file"
echo "Saved to $output_file"
}
fetch_and_merge assignments
fetch_and_merge level_progressions
fetch_and_merge resets
fetch_and_merge reviews
fetch_and_merge review_statistics
fetch_and_merge spaced_repetition_systems
fetch_and_merge study_materials
fetch_and_merge subjects
curl -s "https://api.wanikani.com/v2/summary" \
-H "Wanikani-Revision: 20170710" \
-H "Authorization: Bearer $API_TOKEN" \
-o "$tmp_dir/data/summary.json"
curl -s "https://api.wanikani.com/v2/user" \
-H "Wanikani-Revision: 20170710" \
-H "Authorization: Bearer $API_TOKEN" \
-o "$tmp_dir/data/user.json"
# get the date as a variable and use it to zip the data folder
zip -j -r "/var/lib/wanikani-logs/wanikani_data_$date.zip" "$tmp_dir/data"
echo "Data zipped to /var/lib/wanikani-logs/wanikani_data_$date.zip"
echo "Cleaning up temporary files..."
rm -r "$tmp_dir"

wanikani-stats/app.py

@@ -0,0 +1,455 @@
import zipfile
import json
from pathlib import Path
from flask import Flask, Response
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
import functools
# Set Plotly dark theme
pio.templates.default = "plotly_dark"
app = Flask(__name__)
DATA_DIR = Path("/var/lib/wanikani-logs")
def get_zip_file_names():
"""Get a list of zip files in the data directory."""
return [f for f in DATA_DIR.glob("*.zip") if f.is_file()]
# this is an expensive function so we will cache the results
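# (cache key is the zip path; each daily archive is written once and never modified, so entries stay valid)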
@functools.lru_cache(maxsize=None)
def load_zip(zip_path):
print(f"Processing {zip_path}")
"""Load a zip file and return its contents as a dictionary."""
with zipfile.ZipFile(zip_path, "r") as z:
data = {}
# just read summary.json
with z.open("summary.json") as f:
summary_data = json.load(f)
reviews = summary_data["data"]["reviews"]
lessons = summary_data["data"]["lessons"]
# guard against empty days: [0] would raise IndexError with nothing queued
data["num_reviews"] = len(reviews[0]["subject_ids"]) if reviews else 0
data["num_lessons"] = len(lessons[0]["subject_ids"]) if lessons else 0
# e.g. wanikani_data_2025-05-18.zip -> "2025-05-18" (stem has already dropped ".zip")
data["date"] = zip_path.stem.split("_")[-1]
# with z.open("subjects.json") as f:
# subjects_data = json.load(f)
# print(f"Found total data subjects: {subjects_data['total_count']}")
# data["total_subjects"] = subjects_data['total_count']
# subjects.json is about 50 MB, so we just hardcode this value; it doesn't change much anyway
data["total_subjects"] = 9300
with z.open("assignments.json") as f:
assignments_data = json.load(f)
print(f"Found total assignments: {assignments_data['total_count']}")
data["total_assignments"] = assignments_data["total_count"]
# now the data key will give us all the srs stages
srs_stages = [0 for _ in range(10)] # 10 SRS stages
for assignment in assignments_data["data"]:
srs_stage = assignment["data"]["srs_stage"]
srs_stages[srs_stage] += 1
# add srs stages to data
for i, count in enumerate(srs_stages):
data[f"srs_stage_{i}"] = count
print(data)
return data
def get_dataframe(list_of_daily_data):
"""Convert a list of daily data dictionaries into a pandas DataFrame."""
df = pd.DataFrame(list_of_daily_data)
df["progression"] = df.apply(
lambda row: sum(row[f"srs_stage_{i}"] * (i + 1) for i in range(10))
/ (row["total_subjects"] * 10)
* 100,
axis=1,
)
df["apprentice"] = df.apply(
lambda row: row["srs_stage_1"]
+ row["srs_stage_2"]
+ row["srs_stage_3"]
+ row["srs_stage_4"],
axis=1,
)
# Individual apprentice stages for distribution analysis
df["apprentice_1"] = df["srs_stage_1"]
df["apprentice_2"] = df["srs_stage_2"]
df["apprentice_3"] = df["srs_stage_3"]
df["apprentice_4"] = df["srs_stage_4"]
df["unlocked"] = df["srs_stage_0"]
df["guru"] = df.apply(lambda row: row["srs_stage_5"] + row["srs_stage_6"], axis=1)
df["master"] = df["srs_stage_7"]
df["enlightened"] = df["srs_stage_8"]
df["burned"] = df["srs_stage_9"]
return df
def get_plotly_html(df, column, title, ylabel):
"""Generate an interactive Plotly HTML for a given DataFrame column."""
fig = go.Figure()
fig.add_trace(go.Scatter(
x=df["date"],
y=df[column],
mode='lines+markers',
name=column.capitalize(),
line=dict(width=2),
marker=dict(size=6)
))
fig.update_layout(
title=title,
xaxis_title="Date",
yaxis_title=ylabel,
template="plotly_dark",
plot_bgcolor='#151519',
paper_bgcolor='#151519',
width=1200,
height=600,
margin=dict(l=50, r=50, t=50, b=50)
)
# Show every 10th date label for better readability
date_indices = list(range(0, len(df), 10))
fig.update_xaxes(
tickmode='array',
tickvals=[df.iloc[i]["date"] for i in date_indices],
ticktext=[df.iloc[i]["date"] for i in date_indices],
tickangle=45
)
return fig.to_html(include_plotlyjs=True, div_id=f"plot_{column}")
def get_apprentice_distribution_html(df):
"""Generate a stacked area chart showing apprentice stage distribution over time."""
fig = go.Figure()
# Add stacked area traces
fig.add_trace(go.Scatter(
x=df["date"],
y=df["apprentice_1"],
mode='lines',
name='Apprentice I',
stackgroup='one',
fillcolor='rgba(255, 107, 107, 0.8)',
line=dict(width=0.5, color='#ff6b6b')
))
fig.add_trace(go.Scatter(
x=df["date"],
y=df["apprentice_2"],
mode='lines',
name='Apprentice II',
stackgroup='one',
fillcolor='rgba(78, 205, 196, 0.8)',
line=dict(width=0.5, color='#4ecdc4')
))
fig.add_trace(go.Scatter(
x=df["date"],
y=df["apprentice_3"],
mode='lines',
name='Apprentice III',
stackgroup='one',
fillcolor='rgba(69, 183, 209, 0.8)',
line=dict(width=0.5, color='#45b7d1')
))
fig.add_trace(go.Scatter(
x=df["date"],
y=df["apprentice_4"],
mode='lines',
name='Apprentice IV',
stackgroup='one',
fillcolor='rgba(150, 206, 180, 0.8)',
line=dict(width=0.5, color='#96ceb4')
))
fig.update_layout(
title="Apprentice Stage Distribution Over Time",
xaxis_title="Date",
yaxis_title="Number of Items",
template="plotly_dark",
plot_bgcolor='#151519',
paper_bgcolor='#151519',
width=1200,
height=600,
margin=dict(l=50, r=50, t=50, b=50),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
# Show every 10th date label for better readability
date_indices = list(range(0, len(df), 10))
fig.update_xaxes(
tickmode='array',
tickvals=[df.iloc[i]["date"] for i in date_indices],
ticktext=[df.iloc[i]["date"] for i in date_indices],
tickangle=45
)
return fig.to_html(include_plotlyjs=True, div_id="apprentice_distribution")
def generate_standalone_html(df, output_path=None):
"""Generate a completely self-contained HTML file with all charts."""
# Generate all chart HTML
reviews_html = get_plotly_html(df, "num_reviews", "Daily Reviews", "Number of Reviews")
lessons_html = get_plotly_html(df, "num_lessons", "Daily Lessons", "Number of Lessons")
progression_html = get_plotly_html(
df, "progression", "SRS Progression", "Progression (%)"
)
apprentice_distribution_html = get_apprentice_distribution_html(df)
srs_stage_apprentice_html = get_plotly_html(
df, "apprentice", "Apprentice Stage", "Number of Subjects"
)
srs_stage_guru_html = get_plotly_html(df, "guru", "Guru Stage", "Number of Subjects")
srs_stage_master_html = get_plotly_html(
df, "master", "Master Stage", "Number of Subjects"
)
srs_stage_enlightened_html = get_plotly_html(
df, "enlightened", "Enlightened Stage", "Number of Subjects"
)
srs_stage_burned_html = get_plotly_html(
df, "burned", "Burned Stage", "Number of Subjects"
)
# Create complete standalone HTML
html_content = f"""
<!DOCTYPE html>
<html>
<head>
<title>WaniKani Statistics Dashboard</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body {{
background-color: #151519;
color: #8b8b9c;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 20px;
line-height: 1.6;
}}
.chart-container {{
margin: 20px auto;
padding: 15px;
border-radius: 8px;
border: 1px solid #1e1e24;
background-color: #1a1a1f;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
}}
h1 {{
text-align: center;
color: #ffffff;
margin-bottom: 40px;
font-size: 2.5em;
font-weight: 300;
}}
.dashboard-info {{
text-align: center;
margin-bottom: 30px;
color: #888;
font-size: 0.9em;
}}
</style>
</head>
<body>
<h1>WaniKani Statistics Dashboard</h1>
<div class="dashboard-info">
Interactive dashboard showing your WaniKani learning progress over time
</div>
<div class="chart-container">{reviews_html}</div>
<div class="chart-container">{lessons_html}</div>
<div class="chart-container">{progression_html}</div>
<div class="chart-container">{apprentice_distribution_html}</div>
<div class="chart-container">{srs_stage_apprentice_html}</div>
<div class="chart-container">{srs_stage_guru_html}</div>
<div class="chart-container">{srs_stage_master_html}</div>
<div class="chart-container">{srs_stage_enlightened_html}</div>
<div class="chart-container">{srs_stage_burned_html}</div>
</body>
</html>
"""
# Save to file if output_path is provided
if output_path:
with open(output_path, 'w', encoding='utf-8') as f:
f.write(html_content)
print(f"Standalone HTML dashboard saved to: {output_path}")
return html_content
@app.route("/download")
def download_dashboard():
"""Route to download a standalone HTML file."""
file_names = get_zip_file_names()
print(f"Found {len(file_names)} zip files in {DATA_DIR}")
list_of_daily_data = []
for file_name in file_names:
daily_data = load_zip(file_name)
list_of_daily_data.append(daily_data)
df = get_dataframe(list_of_daily_data)
df.sort_values(by="date", inplace=True)
html_content = generate_standalone_html(df)
response = Response(html_content, content_type="text/html")
response.headers["Content-Disposition"] = "attachment; filename=wanikani_dashboard.html"
return response
def render_html(df):
"""Render the DataFrame as HTML with interactive Plotly charts."""
reviews_html = get_plotly_html(df, "num_reviews", "Daily Reviews", "Number of Reviews")
lessons_html = get_plotly_html(df, "num_lessons", "Daily Lessons", "Number of Lessons")
progression_html = get_plotly_html(
df, "progression", "SRS Progression", "Progression (%)"
)
# apprentice distribution chart
apprentice_distribution_html = get_apprentice_distribution_html(df)
# srs stages
srs_stage_apprentice_html = get_plotly_html(
df, "apprentice", "Apprentice Stage", "Number of Subjects"
)
srs_stage_guru_html = get_plotly_html(df, "guru", "Guru Stage", "Number of Subjects")
srs_stage_master_html = get_plotly_html(
df, "master", "Master Stage", "Number of Subjects"
)
srs_stage_enlightened_html = get_plotly_html(
df, "enlightened", "Enlightened Stage", "Number of Subjects"
)
srs_stage_burned_html = get_plotly_html(
df, "burned", "Burned Stage", "Number of Subjects"
)
# Render HTML with embedded Plotly charts
html_content = f"""
<!DOCTYPE html>
<html>
<head>
<title>WaniKani Stats</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body {{
background-color: #151519;
color: #8b8b9c;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 20px;
}}
.chart-container {{
margin: 20px auto;
padding: 10px;
border-radius: 5px;
border: 1px solid #1e1e24;
background-color: #151519;
}}
h1 {{
text-align: center;
color: #8b8b9c;
margin-bottom: 30px;
}}
</style>
</head>
<body>
<h1>WaniKani Statistics Dashboard</h1>
<div class="chart-container">{reviews_html}</div>
<div class="chart-container">{lessons_html}</div>
<div class="chart-container">{progression_html}</div>
<div class="chart-container">{apprentice_distribution_html}</div>
<div class="chart-container">{srs_stage_apprentice_html}</div>
<div class="chart-container">{srs_stage_guru_html}</div>
<div class="chart-container">{srs_stage_master_html}</div>
<div class="chart-container">{srs_stage_enlightened_html}</div>
<div class="chart-container">{srs_stage_burned_html}</div>
</body>
</html>
"""
return html_content
@app.route("/")
def index():
"""Index route"""
file_names = get_zip_file_names()
print(f"Found {len(file_names)} zip files in {DATA_DIR}")
list_of_daily_data = []
for file_name in file_names:
daily_data = load_zip(file_name)
list_of_daily_data.append(daily_data)
df = get_dataframe(list_of_daily_data)
# sort by date string
df.sort_values(by="date", inplace=True)
response = Response(render_html(df), content_type="text/html")
response.headers["Widget-Content-Type"] = "html"
response.headers["Widget-Title"] = "WaniKani Statistics"
return response
@app.route("/health")
def health():
"""Health check endpoint"""
return {"status": "ok", "service": "wanikani-stats"}
if __name__ == "__main__":
import sys
# Check if user wants to generate standalone HTML
if len(sys.argv) > 1 and sys.argv[1] == "generate":
output_file = sys.argv[2] if len(sys.argv) > 2 else "wanikani_dashboard.html"
print("Generating standalone HTML dashboard...")
file_names = get_zip_file_names()
print(f"Found {len(file_names)} zip files in {DATA_DIR}")
list_of_daily_data = []
for file_name in file_names:
daily_data = load_zip(file_name)
list_of_daily_data.append(daily_data)
df = get_dataframe(list_of_daily_data)
df.sort_values(by="date", inplace=True)
generate_standalone_html(df, output_file)
print(f"✅ Standalone HTML dashboard generated: {output_file}")
print("📊 You can now open this file in any web browser to view your interactive WaniKani stats!")
else:
# Start Flask server
port = int(sys.argv[1]) if len(sys.argv) > 1 else 8501
print(f"Starting WaniKani Stats Flask app on port {port}")
print(f"📊 View dashboard at: http://localhost:{port}")
print(f"💾 Download standalone HTML at: http://localhost:{port}/download")
app.run(host="0.0.0.0", port=port, debug=False)

wanikani-stats/default.nix

@@ -0,0 +1,111 @@
{
pkgs,
config,
lib,
...
}:
let
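# interpreter with test-disabling overrides; `self = python` ties the fixpoint so
# python.pkgs and withPackages both see the overridden package set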
python =
let
packageOverrides = self: super: {
imageio = super.imageio.overridePythonAttrs (old: {
disabledTests = [
"test_read_stream"
"test_uri_reading"
"test_trim_filter"
"test_process_termination"
];
});
plotly = super.plotly.overridePythonAttrs (old: {
disabledTestPaths = (old.disabledTestPaths or [ ]) ++ [
"tests/test_optional/test_kaleido/test_kaleido.py"
];
});
};
in
pkgs.python313.override {
inherit packageOverrides;
self = python;
};
wanikani-stats-flask = pkgs.writeShellApplication {
name = "wanikani-stats-flask";
runtimeInputs = [
(python.withPackages (
ppkgs: with ppkgs; [
flask
pandas
numpy
jinja2
matplotlib
seaborn
plotly
]
))
];
text = ''
echo "Starting WaniKani Stats Flask app..."
exec python ${./app.py} ${toString config.services.wanikani-stats.port}
'';
};
in
{
options.services.wanikani-stats = {
enable = lib.mkEnableOption "WaniKani Stats Service";
logDirectory = lib.mkOption {
type = lib.types.path;
default = "/var/lib/wanikani-logs";
description = "Directory to get the log archives";
};
port = lib.mkOption {
type = lib.types.port;
default = 8501;
description = "Port for the WaniKani Stats service";
};
};
config = lib.mkIf config.services.wanikani-stats.enable {
networking.firewall.allowedTCPPorts = [
config.services.wanikani-stats.port
];
systemd.services.wanikani-stats = {
description = "WaniKani Stats Service";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
ExecStart = "${lib.getExe wanikani-stats-flask}";
StateDirectory = "wanikani-stats"; # systemd expects a name relative to /var/lib, not an absolute path
Restart = "on-failure";
User = "root";
Group = "root";
};
};
# Timer to restart the service every 12 hours
systemd.services.wanikani-stats-restart = {
description = "Restart WaniKani Stats Service";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.systemd}/bin/systemctl restart wanikani-stats.service";
User = "root";
};
};
systemd.timers.wanikani-stats-restart = {
description = "Timer to restart WaniKani Stats Service every 12 hours";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "*-*-* 00,12:00:00";
Persistent = true;
RandomizedDelaySec = "5m";
};
};
};
}