Added a status bar for indexing progress on /dashboard

This commit is contained in:
2025-09-06 08:39:53 +02:00
parent 1e176fce01
commit d90faf4cc9
3 changed files with 385 additions and 150 deletions

View File

@@ -1,111 +1,188 @@
from __future__ import annotations

import json
import os
import re
import zipfile
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
from xml.etree import ElementTree as ET

from .meta import read_comicinfo

# Warm index: scan results persisted across restarts so unchanged CBZ
# files do not have to be re-parsed.
WARM_INDEX_PATH = Path("/data/index.json")
# Supported comic archive extensions (both names kept for compatibility
# with existing callers).
VALID_EXTS = {".cbz"}
COMIC_EXTS = {".cbz"}
@dataclass
class Item:
    """One entry of the library index: a folder or a .cbz file.

    ``rel`` is the path relative to the library root using "/" separators;
    it is the stable key used by the warm index and the URL endpoints.
    """
    path: Path       # absolute path on disk
    rel: str         # path relative to the library root
    name: str        # display name (file stem or folder name)
    is_dir: bool     # True for folders
    size: int = 0    # file size in bytes (0 for folders)
    mtime: float = 0.0  # last-modified timestamp (st_mtime)
    meta: Optional[Dict[str, Any]] = None  # parsed ComicInfo fields, if any
def _cache_file() -> Path:
return Path("/data/index.json")
def to_json(self) -> Dict[str, Any]:
    """Serialize this index entry into a JSON-friendly dict.

    ``meta`` is normalized to {} so consumers never see null.
    """
    normalized_meta = self.meta or {}
    return {
        "rel": self.rel,
        "name": self.name,
        "is_dir": self.is_dir,
        "size": self.size,
        "mtime": self.mtime,
        "meta": normalized_meta,
    }
def load_cache() -> Dict[str, dict]:
    """Load the persisted index cache keyed by relative path.

    Returns an empty dict when the cache file is missing or unreadable,
    so callers can always do ``cached.get(rel)``. (Previously a missing
    file fell through and returned None.)
    """
    p = _cache_file()
    if not p.exists():
        return {}
    try:
        return json.loads(p.read_text(encoding="utf-8"))
    except Exception:
        # Corrupt or unreadable cache: start fresh rather than crash.
        return {}
def _relpath(root: Path, p: Path) -> str:
rel = p.relative_to(root).as_posix()
return rel
def _read_comicinfo_from_cbz(cbz_path: Path, prev_meta: Optional[dict] = None) -> Dict[str, Any]:
"""
Read ComicInfo.xml from a CBZ. Returns {} if not present.
"""
meta: Dict[str, Any] = {}
try:
with zipfile.ZipFile(cbz_path, "r") as zf:
# find ComicInfo.xml (case-insensitive)
xml_name = None
for n in zf.namelist():
if n.lower().endswith("comicinfo.xml") and not n.endswith("/"):
xml_name = n
break
if not xml_name:
return meta
with zf.open(xml_name) as fp:
tree = ET.parse(fp)
root = tree.getroot()
for el in root:
key = el.tag.lower()
val = (el.text or "").strip()
if not val:
continue
# normalize common fields
meta[key] = val
# convenience aliases
if "title" not in meta and "booktitle" in meta:
meta["title"] = meta.get("booktitle")
# prefer Number/Year/Month/Day as simple scalars
for k in ("number", "volume", "year", "month", "day"):
if k in meta:
meta[k] = meta[k].strip()
return meta
except Exception:
# return whatever we could parse (or empty)
return meta
def _load_warm_index_map() -> Dict[str, Dict[str, Any]]:
    """Load the persisted warm index, keyed by relative path.

    Accepts either the list payload written by _save_warm_index or a
    legacy dict payload; returns {} when missing or unparseable.
    """
    if not WARM_INDEX_PATH.exists():
        return {}
    try:
        raw = json.loads(WARM_INDEX_PATH.read_text(encoding="utf-8"))
        if isinstance(raw, list):
            # Normalize the list form to a rel-keyed map.
            result: Dict[str, Dict[str, Any]] = {}
            for rec in raw:
                rel = rec.get("rel")
                if rel:
                    result[rel] = {
                        "size": rec.get("size"),
                        "mtime": rec.get("mtime"),
                        "meta": rec.get("meta"),
                    }
            return result
        if isinstance(raw, dict):
            return raw
    except Exception:
        pass
    return {}
def save_cache(items: List[Item]):
    """Persist the index to the cache file as a rel-keyed JSON map."""
    payload: Dict[str, dict] = {}
    for entry in items:
        payload[entry.rel] = {
            "is_dir": entry.is_dir,
            "name": entry.name,
            "size": entry.size,
            "mtime": entry.mtime,
            "meta": entry.meta,
        }
    target = _cache_file()
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(json.dumps(payload), encoding="utf-8")
def scan(root: Path) -> List[Item]:
def _save_warm_index(items: List[Item]) -> None:
    """Write the warm index (compact JSON list) to WARM_INDEX_PATH."""
    WARM_INDEX_PATH.parent.mkdir(parents=True, exist_ok=True)
    records = [entry.to_json() for entry in items]
    text = json.dumps(records, ensure_ascii=False, separators=(",", ":"))
    WARM_INDEX_PATH.write_text(text, encoding="utf-8")
def scan(root: Path, progress_cb=None) -> List[Item]:
    """
    Walk the library and build the index (dirs + files).

    Uses the warm index to avoid re-reading CBZ metadata when a file's
    size and mtime are unchanged. Calls ``progress_cb(dict)`` after each
    file item if provided; callback errors are swallowed so a broken UI
    hook cannot abort indexing.
    """
    root = root.resolve()
    items: List[Item] = []
    prev = _load_warm_index_map()
    for dirpath, _dirnames, filenames in os.walk(root):
        dpath = Path(dirpath)
        # Folders (the root itself is not listed as an item).
        if dpath != root:
            st = dpath.stat()
            items.append(Item(
                path=dpath,
                rel=_relpath(root, dpath),
                name=dpath.name,
                is_dir=True,
                size=0,
                mtime=st.st_mtime,
                meta=None,
            ))
        # Comic files in this folder.
        for fn in filenames:
            p = dpath / fn
            if p.suffix.lower() not in VALID_EXTS:
                continue
            try:
                st = p.stat()
            except FileNotFoundError:
                # File vanished between listing and stat; skip it.
                continue
            rel = _relpath(root, p)
            prev_rec = prev.get(rel)
            if (
                prev_rec
                and prev_rec.get("size") == st.st_size
                and int(prev_rec.get("mtime", 0)) == int(st.st_mtime)
            ):
                # Unchanged since last scan — reuse cached metadata.
                meta = prev_rec.get("meta") or {}
            else:
                meta = _read_comicinfo_from_cbz(p)
            it = Item(
                path=p,
                rel=rel,
                name=p.stem,
                is_dir=False,
                size=st.st_size,
                mtime=st.st_mtime,
                meta=meta or {},
            )
            items.append(it)
            if progress_cb:
                try:
                    progress_cb({"rel": it.rel, "size": it.size, "mtime": it.mtime})
                except Exception:
                    pass
    # Persist the warm index for the next run.
    _save_warm_index(items)
    return items
def children(items: list[Item], rel_folder: str):
    """
    Yield the direct children (files or dirs) of ``rel_folder``.

    Handles names with spaces/parentheses and arbitrarily deep trees by
    comparing rel-path prefixes only.
    """
    base = rel_folder.strip("/")
    if not base:
        # Top level: anything whose rel has no "/" (and is non-empty).
        yield from (it for it in items if it.rel and "/" not in it.rel)
        return
    prefix = base + "/"
    cut = len(prefix)
    for it in items:
        rel = it.rel
        if rel == base or not rel.startswith(prefix):
            continue
        tail = rel[cut:]
        # A direct child has no further "/" in its remainder.
        if tail and "/" not in tail:
            yield it
def children(items: List[Item], rel_path: str) -> Iterable[Item]:
    """
    Return the immediate children of ``rel_path``, directories first.

    ``rel_path`` is "" for the root, otherwise "Folder/Subfolder".
    """
    target = (rel_path or "").strip("/")

    def _parent(rel: str) -> str:
        # Everything before the last "/", or "" for top-level entries.
        head, sep, _tail = rel.rpartition("/")
        return head if sep else ""

    dirs: List[Item] = []
    files: List[Item] = []
    for it in items:
        if _parent(it.rel) != target:
            continue
        (dirs if it.is_dir else files).append(it)
    return dirs + files

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
from fastapi import FastAPI, Query, HTTPException, Request, Response, Depends, Header
from fastapi.responses import (
StreamingResponse,
@@ -11,13 +13,13 @@ from typing import List, Dict, Any, Optional
from jinja2 import Environment, FileSystemLoader, select_autoescape
from urllib.parse import quote
from collections import Counter
import os
import threading
import time
import re
import json
import math
import datetime as dt
from .config import LIBRARY_DIR, PAGE_SIZE, SERVER_BASE, URL_PREFIX, ENABLE_WATCH
from .config import LIBRARY_DIR, PAGE_SIZE, SERVER_BASE, URL_PREFIX
from . import fs_index
from .opds import now_rfc3339, mime_for
from .auth import require_basic
@@ -25,26 +27,80 @@ from .thumbs import have_thumb, generate_thumb
app = FastAPI(title="ComicOPDS")
# IMPORTANT: include ".j2" so everything is auto-escaped (fixes & in titles, etc.)
# Jinja: force UTF-8 + auto-escape .xml/.html/.j2
env = Environment(
loader=FileSystemLoader(str(Path(__file__).parent / "templates")),
loader=FileSystemLoader(str(Path(__file__).parent / "templates"), encoding="utf-8"),
autoescape=select_autoescape(enabled_extensions=("xml", "html", "j2"), default=True),
)
# -------------------- Index state (background) --------------------
INDEX: List[fs_index.Item] = []
_INDEX_LOCK = threading.Lock()
_INDEX_STATUS = {
"running": False,
"phase": "idle", # "counting" | "indexing" | "idle"
"total": 0,
"done": 0,
"current": "",
"started_at": 0.0,
"ended_at": 0.0,
}
def _abs_path(p: str) -> str:
    """Prefix *p* with URL_PREFIX when one is configured."""
    if URL_PREFIX:
        return URL_PREFIX + p
    return p
def _count_target_files(root: Path) -> int:
exts = {".cbz"}
n = 0
for p in root.rglob("*"):
if p.is_file() and p.suffix.lower() in exts:
n += 1
return n
def _set_status(**kw):
    """Merge the given fields into the shared indexing status dict."""
    for key, value in kw.items():
        _INDEX_STATUS[key] = value
def _index_progress_tick(info: dict):
    """Per-file progress callback handed to fs_index.scan()."""
    _INDEX_STATUS["done"] = _INDEX_STATUS["done"] + 1
    _INDEX_STATUS["current"] = info.get("rel") or ""
def _run_indexing():
    """Background worker: count targets, scan the library, swap in the index.

    Status transitions: counting -> indexing -> idle. On failure the
    status still returns to idle so the UI never hangs, and the error
    text is recorded in the status payload instead of being silently
    dropped.
    """
    global INDEX
    try:
        _set_status(running=True, phase="counting", done=0, total=0,
                    current="", started_at=time.time(), ended_at=0.0)
        total = _count_target_files(LIBRARY_DIR)
        _set_status(total=total, phase="indexing")
        items = fs_index.scan(LIBRARY_DIR, progress_cb=_index_progress_tick)
        # Swap the index atomically under the lock.
        with _INDEX_LOCK:
            INDEX = items
        _set_status(phase="idle", running=False, ended_at=time.time(), current="", error="")
    except Exception as exc:
        # Surface the failure for /index/status consumers.
        _set_status(phase="idle", running=False, ended_at=time.time(), error=str(exc))
def _start_indexing_if_needed(force=False):
    """Spawn the background indexer unless one is running or an index exists."""
    if not force:
        if _INDEX_STATUS["running"] or INDEX:
            return
    worker = threading.Thread(target=_run_indexing, daemon=True)
    worker.start()
@app.on_event("startup")
def startup():
    """Validate the library path and kick off the initial background index."""
    if not LIBRARY_DIR.exists():
        raise RuntimeError(f"CONTENT_BASE_DIR does not exist: {LIBRARY_DIR}")
    # Indexing runs in a background thread so startup is not blocked.
    _start_indexing_if_needed(force=True)
# ---------- OPDS helpers ----------
# -------------------- OPDS helpers --------------------
def _display_title(item: fs_index.Item) -> str:
m = item.meta or {}
series, number, volume = m.get("series"), m.get("number"), m.get("volume")
@@ -55,6 +111,7 @@ def _display_title(item: fs_index.Item) -> str:
return f"{series}{vol} #{number}{suffix}"
return title
def _authors_from_meta(meta: dict) -> list[str]:
authors = []
for key in ("writer", "coverartist", "penciller", "inker", "colorist", "letterer"):
@@ -70,6 +127,7 @@ def _authors_from_meta(meta: dict) -> list[str]:
out.append(a)
return out
def _issued_from_meta(meta: dict) -> Optional[str]:
y = meta.get("year")
if not y:
@@ -81,6 +139,7 @@ def _issued_from_meta(meta: dict) -> Optional[str]:
except Exception:
return None
def _categories_from_meta(meta: dict) -> list[str]:
cats = []
for k in ("genre", "tags", "characters", "teams", "locations"):
@@ -97,6 +156,7 @@ def _categories_from_meta(meta: dict) -> list[str]:
out.append(c)
return out
def _feed(entries_xml: List[str], title: str, self_href: str, next_href: Optional[str] = None):
tpl = env.get_template("feed.xml.j2")
base = SERVER_BASE.rstrip("/")
@@ -111,6 +171,7 @@ def _feed(entries_xml: List[str], title: str, self_href: str, next_href: Optiona
entries=entries_xml,
)
def _entry_xml(item: fs_index.Item) -> str:
tpl = env.get_template("entry.xml.j2")
if item.is_dir:
@@ -124,7 +185,7 @@ def _entry_xml(item: fs_index.Item) -> str:
)
else:
download_href = f"/download?path={quote(item.rel)}"
stream_href = f"/stream?path={quote(item.rel)}" # we still provide it; most clients use /download
stream_href = f"/stream?path={quote(item.rel)}"
meta = item.meta or {}
comicvine_issue = meta.get("comicvineissue")
@@ -153,17 +214,18 @@ def _entry_xml(item: fs_index.Item) -> str:
)
# ---------- core routes (OPDS browsing/search) ----------
# -------------------- Core routes --------------------
@app.get("/healthz")
def health():
return PlainTextResponse("ok")
@app.get("/opds", response_class=Response)
def browse(path: str = Query("", description="Relative folder path"), page: int = 1, _=Depends(require_basic)):
path = path.strip("/")
children = list(fs_index.children(INDEX, path))
# Sort: dirs first by name; files by series + number when present
# Sort: dirs first; files by series + number
def sort_key(it: fs_index.Item):
if it.is_dir:
return (0, it.name.lower(), 0)
@@ -182,7 +244,7 @@ def browse(path: str = Query("", description="Relative folder path"), page: int
page_items = children[start:end]
entries_xml = [_entry_xml(it) for it in page_items]
# Inject "Smart Lists" virtual folder at root page 1
# "Smart Lists" virtual folder at root page 1
if path == "" and page == 1:
tpl = env.get_template("entry.xml.j2")
smart_href = _abs_path("/opds/smart")
@@ -196,22 +258,23 @@ def browse(path: str = Query("", description="Relative folder path"), page: int
entries_xml = [smart_entry] + entries_xml
self_href = f"/opds?path={quote(path)}&page={page}" if path else f"/opds?page={page}"
next_href = None
if end < len(children):
next_href = f"/opds?path={quote(path)}&page={page+1}" if path else f"/opds?page={page+1}"
next_href = f"/opds?path={quote(path)}&page={page+1}" if end < len(children) else None
xml = _feed(entries_xml, title=f"/{path}" if path else "Library", self_href=self_href, next_href=next_href)
return Response(content=xml, media_type="application/atom+xml;profile=opds-catalog")
@app.get("/", response_class=Response)
def root(_=Depends(require_basic)):
return browse(path="", page=1)
@app.get("/opds/search.xml", response_class=Response)
def opensearch_description(_=Depends(require_basic)):
tpl = env.get_template("search-description.xml.j2")
xml = tpl.render(base=SERVER_BASE.rstrip("/"))
return Response(content=xml, media_type="application/opensearchdescription+xml")
@app.get("/opds/search", response_class=Response)
def opds_search(q: str = Query("", alias="q"), page: int = 1, _=Depends(require_basic)):
terms = [t.lower() for t in q.split() if t.strip()]
@@ -235,13 +298,14 @@ def opds_search(q: str = Query("", alias="q"), page: int = 1, _=Depends(require_
return Response(content=xml, media_type="application/atom+xml;profile=opds-catalog")
# ---------- file endpoints (download/stream/thumb) ----------
# -------------------- File endpoints --------------------
def _abspath(rel: str) -> Path:
    """Resolve a client-supplied relative path inside LIBRARY_DIR.

    Raises HTTP 400 for any path that resolves outside the library root,
    which blocks "../" traversal attempts.
    """
    p = (LIBRARY_DIR / rel).resolve()
    if LIBRARY_DIR not in p.parents and p != LIBRARY_DIR:
        raise HTTPException(400, "Invalid path")
    return p
def _common_file_headers(p: Path) -> dict:
return {
"Accept-Ranges": "bytes",
@@ -249,6 +313,7 @@ def _common_file_headers(p: Path) -> dict:
"Content-Disposition": f'inline; filename="{p.name}"',
}
@app.head("/download")
def download_head(path: str, _=Depends(require_basic)):
p = _abspath(path)
@@ -259,6 +324,7 @@ def download_head(path: str, _=Depends(require_basic)):
headers["Content-Length"] = str(st.st_size)
return Response(status_code=200, headers=headers)
@app.get("/download")
def download(path: str, request: Request, range: str | None = Header(default=None), _=Depends(require_basic)):
p = _abspath(path)
@@ -322,6 +388,7 @@ def download(path: str, request: Request, range: str | None = Header(default=Non
})
return StreamingResponse(iter_file(p, start, end), status_code=206, headers=headers)
@app.head("/stream")
def stream_head(path: str, _=Depends(require_basic)):
p = _abspath(path)
@@ -332,11 +399,13 @@ def stream_head(path: str, _=Depends(require_basic)):
headers["Content-Length"] = str(st.st_size)
return Response(status_code=200, headers=headers)
@app.get("/stream")
def stream(path: str, request: Request, range: str | None = Header(default=None), _=Depends(require_basic)):
# Alias of download with Range support (Panels uses /download)
return download(path=path, request=request, range=range)
@app.get("/thumb")
def thumb(path: str, _=Depends(require_basic)):
abs_p = _abspath(path)
@@ -352,12 +421,13 @@ def thumb(path: str, _=Depends(require_basic)):
return FileResponse(p, media_type="image/jpeg")
# ---------- Dashboard & stats ----------
# -------------------- Dashboard & stats --------------------
@app.get("/dashboard", response_class=HTMLResponse)
def dashboard(_=Depends(require_basic)):
tpl = env.get_template("dashboard.html")
return HTMLResponse(tpl.render())
@app.get("/stats.json", response_class=JSONResponse)
def stats(_=Depends(require_basic)):
files = [it for it in INDEX if not it.is_dir]
@@ -366,7 +436,7 @@ def stats(_=Depends(require_basic)):
publishers = Counter()
formats = Counter()
writers = Counter()
timeline = Counter() # year -> count
timeline = Counter()
last_updated = 0.0
for it in files:
@@ -401,7 +471,7 @@ def stats(_=Depends(require_basic)):
pub_labels = [k for k, _ in top]
pub_values = [v for _, v in top]
if other:
pub_labels.append("Other")
#pub_labels.append("Other")
pub_values.append(other)
years = sorted(timeline.keys())
@@ -417,7 +487,7 @@ def stats(_=Depends(require_basic)):
"total_comics": total_comics,
"unique_series": len(series_set),
"unique_publishers": len(publishers),
"formats": dict(formats) or {"cbz": 0},
"formats": dict(formats) or {"cbz": 0},
"publishers": {"labels": pub_labels, "values": pub_values},
"timeline": {"labels": years, "values": year_values},
"top_writers": {"labels": w_labels, "values": w_values},
@@ -425,7 +495,7 @@ def stats(_=Depends(require_basic)):
return JSONResponse(payload)
# ---------- Debug helper ----------
# -------------------- Debug --------------------
@app.get("/debug/children", response_class=JSONResponse)
def debug_children(path: str = ""):
ch = list(fs_index.children(INDEX, path.strip("/")))
@@ -434,13 +504,15 @@ def debug_children(path: str = ""):
)
# ---------- Smart Lists (advanced) ----------
# -------------------- Smart Lists (advanced) --------------------
SMARTLISTS_PATH = Path("/data/smartlists.json")
def _slugify(name: str) -> str:
slug = re.sub(r"[^a-z0-9]+", "-", (name or "").lower()).strip("-")
return slug or "list"
def _load_smartlists() -> list[dict]:
if SMARTLISTS_PATH.exists():
try:
@@ -449,10 +521,12 @@ def _load_smartlists() -> list[dict]:
return []
return []
def _save_smartlists(lists: list[dict]) -> None:
    """Persist smart-list definitions as JSON, creating /data if needed."""
    SMARTLISTS_PATH.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(lists, ensure_ascii=False, indent=0)
    SMARTLISTS_PATH.write_text(payload, encoding="utf-8")
def _issued_tuple(meta: dict) -> Optional[tuple[int, int, int]]:
y = meta.get("year")
if not y:
@@ -462,6 +536,7 @@ def _issued_tuple(meta: dict) -> Optional[tuple[int, int, int]]:
except Exception:
return None
def _get_field_value(it: fs_index.Item, field: str):
f = (field or "").lower()
m = it.meta or {}
@@ -492,12 +567,14 @@ def _get_field_value(it: fs_index.Item, field: str):
if f == "has_meta": return bool(m)
return None
def _to_float(x) -> Optional[float]:
try:
return float(str(x))
except Exception:
return None
def _to_date(s: str) -> Optional[dt.date]:
s = (s or "").strip()
if not s:
@@ -514,6 +591,7 @@ def _to_date(s: str) -> Optional[dt.date]:
return None
return None
def _val_to_date(val) -> Optional[dt.date]:
if isinstance(val, (int, float)):
try:
@@ -524,6 +602,7 @@ def _val_to_date(val) -> Optional[dt.date]:
return _to_date(val)
return None
def _rule_true(it: fs_index.Item, r: dict) -> bool:
field = (r.get("field") or "").lower()
op = (r.get("op") or "contains").lower()
@@ -608,6 +687,7 @@ def _rule_true(it: fs_index.Item, r: dict) -> bool:
)
return (not result) if negate else result
def _matches_groups(it: fs_index.Item, groups: list[dict]) -> bool:
valid_groups = [g for g in (groups or []) if g.get("rules")]
if not valid_groups:
@@ -618,6 +698,7 @@ def _matches_groups(it: fs_index.Item, groups: list[dict]) -> bool:
return True
return False
def _sort_key(item: fs_index.Item, name: str):
n = (name or "").lower()
m = item.meta or {}
@@ -637,8 +718,10 @@ def _sort_key(item: fs_index.Item, name: str):
return ((m.get("publisher") or "").lower(), (m.get("series") or "").lower())
return ((item.name or "").lower(),)
def _distinct_latest_by_series(items: list[fs_index.Item]) -> list[fs_index.Item]:
best: Dict[str, fs_index.Item] = {}
def rank(x: fs_index.Item):
m = x.meta or {}
t = _issued_tuple(m) or (0, 0, 0)
@@ -647,6 +730,7 @@ def _distinct_latest_by_series(items: list[fs_index.Item]) -> list[fs_index.Item
except ValueError:
num = -1
return (t[0], t[1], t[2], num)
for it in items:
series = (it.meta or {}).get("series")
if not series:
@@ -659,7 +743,6 @@ def _distinct_latest_by_series(items: list[fs_index.Item]) -> list[fs_index.Item
return list(best.values()) + no_series
# OPDS Smart Lists navigation & feeds
@app.get("/opds/smart", response_class=Response)
def opds_smart_lists(_=Depends(require_basic)):
lists = _load_smartlists()
@@ -679,6 +762,7 @@ def opds_smart_lists(_=Depends(require_basic)):
xml = _feed(entries, title="Smart Lists", self_href="/opds/smart")
return Response(content=xml, media_type="application/atom+xml;profile=opds-catalog")
@app.get("/opds/smart/{slug}", response_class=Response)
def opds_smart_list(slug: str, page: int = 1, _=Depends(require_basic)):
lists = _load_smartlists()
@@ -711,16 +795,17 @@ def opds_smart_list(slug: str, page: int = 1, _=Depends(require_basic)):
return Response(content=xml, media_type="application/atom+xml;profile=opds-catalog")
# Smart Lists HTML & JSON API
@app.get("/search", response_class=HTMLResponse)
def smartlists_page(_=Depends(require_basic)):
tpl = env.get_template("smartlists.html")
return HTMLResponse(tpl.render())
@app.get("/smartlists.json", response_class=JSONResponse)
def smartlists_get(_=Depends(require_basic)):
return JSONResponse(_load_smartlists())
@app.post("/smartlists.json", response_class=JSONResponse)
def smartlists_post(payload: list[dict], _=Depends(require_basic)):
lists: list[dict] = []
@@ -763,15 +848,16 @@ def smartlists_post(payload: list[dict], _=Depends(require_basic)):
)
_save_smartlists(lists)
return JSONResponse({"ok": True, "count": len(lists)})
# ---------- Admin: reindex ----------
# -------------------- Index status + Reindex --------------------
@app.get("/index/status", response_class=JSONResponse)
def index_status(_=Depends(require_basic)):
usable = bool(INDEX)
return JSONResponse({**_INDEX_STATUS, "usable": usable})
@app.post("/admin/reindex", response_class=JSONResponse)
def admin_reindex(_=Depends(require_basic)):
"""
Rescan the CONTENT_BASE_DIR and rebuild the in-memory index.
Also refreshes the warm index file on disk (handled by fs_index.scan).
"""
global INDEX
INDEX = fs_index.scan(LIBRARY_DIR)
files = sum(1 for it in INDEX if not it.is_dir)
return JSONResponse({"ok": True, "total_items": len(INDEX), "total_files": files})
_start_indexing_if_needed(force=True)
return JSONResponse({"ok": True, "started": True})

View File

@@ -41,6 +41,26 @@
</nav>
<main class="container my-4">
<!-- Indexing progress (shown while running / first run) -->
<div id="indexProgress" class="alert alert-secondary d-none" role="alert">
<div class="d-flex justify-content-between">
<div>
<strong>Indexing your library…</strong>
<div class="small text-secondary">
<span id="idxPhase">indexing</span>
<span id="idxCounts" class="ms-2">(0 / 0)</span>
</div>
<div class="small text-secondary" id="idxCurrent"></div>
</div>
<div class="text-end">
<span class="badge text-bg-light" id="idxEta"></span>
</div>
</div>
<div class="progress mt-2" role="progressbar" aria-label="Index progress" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100">
<div id="idxBar" class="progress-bar progress-bar-striped progress-bar-animated" style="width:0%"></div>
</div>
</div>
<!-- KPIs -->
<div class="row g-3 kpis">
<div class="col-12 col-md-6 col-xl-3">
@@ -159,7 +179,7 @@
}
};
// Chart registry to safely re-render on reindex
// Chart registry to safely re-render
const charts = {};
function upsertChart(canvasId, config) {
const existing = Chart.getChart(canvasId) || charts[canvasId];
@@ -176,25 +196,22 @@
// KPIs
document.getElementById("lastUpdated").textContent =
new Date(data.last_updated * 1000).toLocaleString();
document.getElementById("covers").textContent = data.total_covers;
document.getElementById("totalComics").textContent = data.total_comics;
document.getElementById("uniqueSeries").textContent = data.unique_series;
document.getElementById("uniquePublishers").textContent = data.unique_publishers;
data.last_updated ? new Date(data.last_updated * 1000).toLocaleString() : "—";
document.getElementById("covers").textContent = data.total_covers ?? "0";
document.getElementById("totalComics").textContent = data.total_comics ?? "0";
document.getElementById("uniqueSeries").textContent = data.unique_series ?? "0";
document.getElementById("uniquePublishers").textContent = data.unique_publishers ?? "0";
const fmt = data.formats || {};
document.getElementById("formats").textContent =
Object.entries(data.formats).map(([k,v]) => `${k.toUpperCase()}: ${v}`).join(" ");
Object.keys(fmt).length ? Object.entries(fmt).map(([k,v]) => `${k.toUpperCase()}: ${v}`).join(" ") : "—";
// Charts
// 1) Publishers doughnut (sorted)
const pubs = data.publishers;
const pubsSorted = (pubs.labels || []).map((l,i)=>({l, v: pubs.values[i]}))
.sort((a,b)=>b.v-a.v);
const pubs = data.publishers || {labels:[], values:[]};
const pubsSorted = (pubs.labels || []).map((l,i)=>({l, v: pubs.values[i]})).sort((a,b)=>b.v-a.v);
upsertChart("publishersChart", {
type: "doughnut",
data: {
labels: pubsSorted.map(x=>x.l),
datasets: [{ data: pubsSorted.map(x=>x.v) }]
},
data: { labels: pubsSorted.map(x=>x.l), datasets: [{ data: pubsSorted.map(x=>x.v) }] },
options: { ...baseOptions, cutout: "60%", scales: {} }
});
@@ -202,21 +219,15 @@
upsertChart("timelineChart", {
type: "line",
data: {
labels: data.timeline.labels,
datasets: [{
label: "Issues per year",
data: data.timeline.values,
fill: true,
tension: 0.25,
pointRadius: 2
}]
labels: (data.timeline && data.timeline.labels) || [],
datasets: [{ label: "Issues per year", data: (data.timeline && data.timeline.values) || [], fill: true, tension: 0.25, pointRadius: 2 }]
},
options: { ...baseOptions }
});
// 3) Formats bar
const fmtLabels = Object.keys(data.formats || {});
const fmtValues = Object.values(data.formats || {});
const fmtLabels = Object.keys(fmt);
const fmtValues = Object.values(fmt);
upsertChart("formatsChart", {
type: "bar",
data: { labels: fmtLabels, datasets: [{ label: "Files", data: fmtValues }] },
@@ -224,16 +235,80 @@
});
// 4) Top writers (horizontal bar)
const tw = data.top_writers || {labels:[], values:[]};
upsertChart("writersChart", {
type: "bar",
data: {
labels: data.top_writers.labels,
datasets: [{ label: "Issues", data: data.top_writers.values }]
},
data: { labels: tw.labels, datasets: [{ label: "Issues", data: tw.values }] },
options: { ...baseOptions, indexAxis: "y" }
});
}
// Progress bar UI
// Progress bar UI: show/update the banner while indexing is active,
// hide it once the index is usable and idle.
function showProgressUI(s) {
  const box = document.getElementById("indexProgress");
  const active = s.running || (!s.usable && s.phase !== "idle");
  if (!active) {
    box.classList.add("d-none");
    return;
  }
  box.classList.remove("d-none");
  const done = s.done || 0;
  const total = Math.max(s.total || 0, 1);
  const pct = Math.min(100, Math.floor((done / total) * 100));
  document.getElementById("idxPhase").textContent = s.phase || "indexing";
  document.getElementById("idxCounts").textContent = `(${done} / ${s.total || 0})`;
  document.getElementById("idxCurrent").textContent = s.current || "";
  const bar = document.getElementById("idxBar");
  bar.style.width = pct + "%";
  bar.setAttribute("aria-valuenow", pct);
  // Naive ETA: average seconds per file so far, projected onto the remainder.
  let eta = "—";
  if (s.started_at && done > 5 && total > done) {
    const elapsed = (Date.now()/1000 - s.started_at);
    const secs = Math.round((elapsed / done) * (total - done));
    eta = `~${Math.floor(secs/60)}m ${secs%60}s left`;
  }
  document.getElementById("idxEta").textContent = eta;
}
let lastIdxStatus = null; // most recent /index/status payload seen by pollIndex()
// Decide whether /stats.json needs re-fetching by comparing consecutive
// /index/status payloads (avoids hammering the stats endpoint every poll).
function shouldRefreshCharts(newStatus, oldStatus) {
  if (!newStatus) return false;
  // First poll: refresh only if indexing already finished and index is usable.
  if (!oldStatus) return (!newStatus.running && newStatus.usable);
  const finishedNow = (oldStatus.running && !newStatus.running);
  const endedChanged = (newStatus.ended_at && newStatus.ended_at !== oldStatus.ended_at);
  const becameUsable = (!oldStatus.usable && newStatus.usable);
  return finishedNow || endedChanged || becameUsable;
}
// Poll /index/status forever, adapting the interval to activity:
// 800 ms while an indexing run is active, 5 s when idle or on fetch error.
// Re-schedules itself in `finally` so polling never stops.
async function pollIndex() {
  let nextDelay = 5000; // idle default
  try {
    const r = await fetch("/index/status", { credentials: "include", cache: "no-store" });
    const s = await r.json();
    showProgressUI(s);
    if (shouldRefreshCharts(s, lastIdxStatus)) {
      await load(); // fetch /stats.json once on transition
    }
    // While running, poll faster; when idle, slower
    nextDelay = s.running ? 800 : 5000;
    lastIdxStatus = s;
  } catch (_) {
    nextDelay = 5000;
  } finally {
    setTimeout(pollIndex, nextDelay);
  }
}
document.getElementById("reindexBtn").addEventListener("click", reindex);
// Start polling; charts render when indexing first completes (or immediately if already idle & usable)
pollIndex();
async function reindex() {
const btn = document.getElementById("reindexBtn");
const original = btn.innerHTML;
@@ -244,10 +319,7 @@
if (!r.ok) {
const msg = await r.text().catch(()=>r.statusText);
alert("Reindex failed: " + msg);
return;
}
await load(); // refresh KPIs/charts
btn.innerHTML = '<i class="bi bi-check2 me-1"></i> Done';
setTimeout(() => { btn.innerHTML = original; btn.disabled = false; }, 800);
} catch (e) {
alert("Reindex error: " + (e?.message || e));
@@ -258,8 +330,8 @@
document.getElementById("reindexBtn").addEventListener("click", reindex);
// Initial load
load();
// start polling; charts will render when index is ready/idle
pollIndex();
// Clean up charts if the page unloads
window.addEventListener("beforeunload", () => {