initial commit
Commit a25dff10ec
Dockerfile (new file)
@@ -0,0 +1,8 @@
FROM python:3.8
RUN mkdir /app
WORKDIR /app
ADD . /app/
RUN pip install -r requirements.txt
EXPOSE 5000
ENV FLASK_APP=main
CMD ["python", "main.py"]
config.py (new file)
@@ -0,0 +1,16 @@
import os
from werkzeug.security import generate_password_hash

#CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/library")
CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/home/drudoo/ComicsTest/Comics")

TEENYOPDS_ADMIN_PASSWORD = os.getenv("TEENYOPDS_ADMIN_PASSWORD", None)
users = {}
if TEENYOPDS_ADMIN_PASSWORD:
    users = {
        "admin": generate_password_hash(TEENYOPDS_ADMIN_PASSWORD),
    }
else:
    print(
        "WARNING: admin password not configured - catalog will be exposed as public"
    )
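config.py reads both settings from the environment at import time, e.g. supplied with `docker run -e ...`. A small sketch of that behaviour (the path and password below are placeholders, not values from this commit):

import os

# Placeholder values; set them before config is first imported.
os.environ["CONTENT_BASE_DIR"] = "/tmp/comics"
os.environ["TEENYOPDS_ADMIN_PASSWORD"] = "change-me"

import config

print(config.CONTENT_BASE_DIR)   # /tmp/comics
print("admin" in config.users)   # True; the stored value is a werkzeug password hash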
main.py (new file)
@@ -0,0 +1,89 @@
from flask import Flask, send_from_directory, request
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import check_password_hash
from gevent.pywsgi import WSGIServer
import timeit
import sqlite3
import os
import zipfile
from bs4 import BeautifulSoup
import re
import datetime
import sys

from opds import fromdir
import config

app = Flask(__name__, static_url_path="", static_folder="static")
auth = HTTPBasicAuth()


@auth.verify_password
def verify_password(username, password):
    if not config.TEENYOPDS_ADMIN_PASSWORD:
        return True
    elif username in config.users and check_password_hash(
        config.users.get(username), password
    ):
        return username


@app.route("/")
@app.route("/healthz")
def healthz():
    return "ok"


@app.route('/import')
def import2sql():
    conn = sqlite3.connect('app.db')
    list = []

    for root, dirs, files in os.walk(os.path.abspath(config.CONTENT_BASE_DIR)):
        for file in files:
            f = os.path.join(root, file)
            s = zipfile.ZipFile(f)
            Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
            #print(Bs_data.select('Series')[0].text, file=sys.stderr)
            #print(Bs_data.select('Title')[0].text, file=sys.stderr)
            # The ComicVine id is embedded in the Notes tag as "[CVDB<id>]".
            CVDB = re.findall(r'(?<=\[CVDB)(.*)(?=].)', Bs_data.select('Notes')[0].text)
            #list.append('CVDB'+CVDB[0] + ': ' + Bs_data.select('Series')[0].text + "(" + Bs_data.select('Volume')[0].text + ") : " + Bs_data.select('Number')[0].text )
            #print(list, file=sys.stdout)

            ISSUE = Bs_data.select('Number')[0].text
            SERIES = Bs_data.select('Series')[0].text
            VOLUME = Bs_data.select('Volume')[0].text
            PUBLISHER = Bs_data.select('Publisher')[0].text
            TITLE = Bs_data.select('Title')[0].text
            PATH = f
            UPDATED = str(datetime.datetime.now())
            print(UPDATED, file=sys.stdout)
            # Parameterized query so titles or paths containing quotes do not break the insert.
            sql = (
                "INSERT OR REPLACE INTO COMICS "
                "(CVDB, ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, FILE, PATH, UPDATED) "
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
            )
            print(sql, file=sys.stdout)
            conn.execute(sql, (CVDB[0], ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, file, PATH, UPDATED))
            conn.commit()

    conn.close()
    return "yay"


@app.route("/content/<path:path>")
@auth.login_required
def send_content(path):
    return send_from_directory(config.CONTENT_BASE_DIR, path)


@app.route("/catalog")
@app.route("/catalog/<path:path>")
@auth.login_required
def catalog(path=""):
    start_time = timeit.default_timer()
    print(request.root_url)
    c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path)
    elapsed = timeit.default_timer() - start_time
    print(elapsed)

    return c.render()


if __name__ == "__main__":
    #http_server = WSGIServer(("", 5000), app)
    #http_server.serve_forever()
    app.run(debug=True, host='0.0.0.0')
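The /import route above assumes a COMICS table already exists in app.db; the schema is not part of this commit. A minimal sketch that would satisfy the INSERT (column names come from the query, the types and primary key are assumptions):

import sqlite3

conn = sqlite3.connect("app.db")
conn.execute(
    """CREATE TABLE IF NOT EXISTS COMICS (
        CVDB INTEGER PRIMARY KEY,  -- assumed unique id so INSERT OR REPLACE can upsert
        ISSUE TEXT,
        SERIES TEXT,
        VOLUME TEXT,
        PUBLISHER TEXT,
        TITLE TEXT,
        FILE TEXT,
        PATH TEXT,
        UPDATED TEXT
    )"""
)
conn.commit()
conn.close()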
metadata.py (new file)
@@ -0,0 +1,16 @@
import requests


def fromisbn(isbn: str):
    isbn = "".join(filter(str.isnumeric, isbn))
    api = f"https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}"
    resp = requests.get(api)
    return resp.json()["items"][0]


if __name__ == "__main__":
    from pprint import pprint

    pprint(fromisbn("9780316029193"))
    pprint(fromisbn("978-0316029193"))
    pprint(fromisbn("0316029193"))
opds/__init__.py (new file)
@@ -0,0 +1 @@
from .catalog import Catalog, fromdir
opds/catalog.py (new file)
@@ -0,0 +1,136 @@
import os
from uuid import uuid4
from urllib.parse import quote
from jinja2 import Environment, FileSystemLoader, select_autoescape
from .entry import Entry
from .link import Link
import sqlite3


class Catalog(object):
    def __init__(
        self,
        title,
        id=None,
        author_name=None,
        author_uri=None,
        root_url=None,
        url=None,
    ):
        self.title = title
        self.id = id or uuid4()
        self.author_name = author_name
        self.author_uri = author_uri
        self.root_url = root_url
        self.url = url
        self.entries = []

    def add_entry(self, entry):
        self.entries.append(entry)

    def render(self):
        env = Environment(
            loader=FileSystemLoader(
                searchpath=os.path.join(os.path.dirname(__file__), "templates")
            ),
            autoescape=select_autoescape(["html", "xml"]),
        )
        template = env.get_template("catalog.opds.jinja2")
        return template.render(catalog=self)


def fromsearch(root_url, url, content_base_path, content_relative_path):
    c = Catalog(
        title="test"
    )
    return c


def fromdir(root_url, url, content_base_path, content_relative_path):
    path = os.path.join(content_base_path, content_relative_path)
    #print(path)
    c = Catalog(
        title=os.path.basename(os.path.dirname(path)), root_url=root_url, url=url
    )
    #print(c.url)
    if "search" not in c.url:
        onlydirs = [
            f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))
        ]
        #print(onlydirs)
        for dirname in onlydirs:
            link = Link(
                href=quote(f"/catalog/{content_relative_path}/{dirname}"),
                rel="subsection",
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title=dirname, id=uuid4(), links=[link]))

    if c.url.endswith("/catalog"):
        link2 = Link(
            href=quote("/catalog/search"),
            rel="subsection",
            rpath=path,
            type="application/atom+xml;profile=opds-catalog;kind=acquisition",
        )
        c.add_entry(Entry(title="Search", id=uuid4(), links=[link2]))

    if "search" not in c.url:
        onlyfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        #print(onlyfiles)
        for filename in onlyfiles:
            link = Link(
                href=quote(f"/content/{content_relative_path}/{filename}"),
                rel="http://opds-spec.org/acquisition",
                rpath=path,
                type=mimetype(filename),
            )
            c.add_entry(Entry(title=filename.rsplit(".", 1)[0], id=uuid4(), links=[link]))
            #fixed issue with multiple . in filename
            #print(c.render())
    else:
        search = "Man"
        conn = sqlite3.connect('app.db')
        # Parameterized LIKE query so the search term cannot break the SQL.
        sql = "SELECT * from COMICS where SERIES like ? or Title like ?;"

        s = conn.execute(sql, (f"%{search}%", f"%{search}%"))
        list = []
        for r in s:
            #print(r)
            tUrl = r[7].replace("/home/drudoo/ComicsTest/Comics/", "/content/")
            tTitle = r[6]
            link3 = Link(
                #href=quote(f"/content/DC Comics/Earth Cities/Gotham City/Batgirl/Annual/(2012) Batgirl Annual/Batgirl Annual #001 - The Blood That Moves Us [December, 2012].cbz"),
                href=quote(tUrl),
                rel="http://opds-spec.org/acquisition",
                rpath=path,
                type="application/x-cbz",
            )
            c.add_entry(
                Entry(
                    title=tTitle,
                    id=uuid4(),
                    links=[link3]
                )
            )

    return c


def mimetype(path):
    extension = path.split(".")[-1].lower()
    if extension == "pdf":
        return "application/pdf"
    elif extension == "epub":
        return "application/epub"
    elif extension == "mobi":
        return "application/mobi"
    elif extension == "cbz":
        return "application/x-cbz"
    else:
        return "application/unknown"
opds/entry.py (new file)
@@ -0,0 +1,77 @@
import zipfile
from bs4 import BeautifulSoup
import os


class Entry(object):
    valid_keys = (
        "id",
        "url",
        "title",
        "content",
        "downloadsPerMonth",
        "updated",
        "identifier",
        "date",
        "rights",
        "summary",
        "dcterms_source",
        "provider",
        "publishers",
        "contributors",
        "languages",
        "subjects",
        "oai_updatedates",
        "authors",
        "formats",
        "links",
    )

    required_keys = ("id", "title", "links")

    def validate(self, key, value):
        if key not in Entry.valid_keys:
            raise KeyError("invalid key in opds.catalog.Entry: %s" % (key))

    def __init__(self, **kwargs):
        for key, val in kwargs.items():
            self.validate(key, val)

        for req_key in Entry.required_keys:
            if req_key not in kwargs:
                raise KeyError("required key %s not supplied for Entry!" % (req_key))
        self.id = kwargs["id"]
        self.title = kwargs["title"]
        self.links = kwargs["links"]
        self._data = kwargs

        #print(">>entry.py")
        #print(kwargs)
        #print(kwargs["links"][0].get("rpath"))
        #print("--end entry.py")

        if kwargs["links"][0].get("type") == 'application/x-cbz':
            f = self.links[0].get("rpath") + "/" + self.title + ".cbz"
            if os.path.exists(f):
                s = zipfile.ZipFile(f)
                data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
                #print(data)
                #print(kwargs["links"][0])
                #print(data.select('Series')[0].text)
                #print(kwargs["links"][0].get("rpath"))
                if data.select('Series')[0].text in kwargs["links"][0].get("rpath"):
                    releasedate = data.select('Year')[0].text + "-" + data.select('Month')[0].text.zfill(2) + "-" + data.select('Day')[0].text.zfill(2)
                    self.title = "#" + data.select('Number')[0].text.zfill(2) + ": " + data.select('Title')[0].text + " (" + releasedate + ")"
                    #print(self.title)
                else:
                    self.title = kwargs["title"]
            else:
                self.title = kwargs["title"]
                #self.title = data.select('Title')[0].text

    def get(self, key):
        return self._data.get(key, None)

    def set(self, key, value):
        self.validate(key, value)
        self._data[key] = value
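Both the /import route in main.py and the cbz branch above pull their metadata from a ComicInfo.xml inside the archive. A sketch of only the tags the code actually selects, parsed the same way the code does (all values are invented):

from bs4 import BeautifulSoup

sample = """<?xml version="1.0"?>
<ComicInfo>
  <Title>Example Story</Title>
  <Series>Example Series</Series>
  <Number>1</Number>
  <Volume>2012</Volume>
  <Publisher>Example Publisher</Publisher>
  <Year>2012</Year><Month>12</Month><Day>1</Day>
  <Notes>Scraped metadata [CVDB123456].</Notes>
</ComicInfo>"""

data = BeautifulSoup(sample, "xml")   # the "xml" parser needs lxml, already in requirements.txt
print(data.select("Series")[0].text)  # Example Series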
opds/link.py (new file)
@@ -0,0 +1,31 @@
class Link(object):
    valid_keys = ("href", "type", "rel", "rpath", "price", "currencycode", "formats")
    required_keys = ("href", "type", "rel")

    def validate(self, key, value):
        if key not in Link.valid_keys:
            raise KeyError("invalid key in opds.Link: %s" % (key))

    def __init__(self, **kwargs):
        for key, val in kwargs.items():
            self.validate(key, val)

        for req_key in Link.required_keys:
            if req_key not in kwargs:
                raise KeyError("required key %s not supplied for Link!" % (req_key))

        self.href = kwargs["href"]
        self.type = kwargs["type"]
        self.rel = kwargs["rel"]
        self._data = kwargs

        #print(">>link.py")
        #print(kwargs)
        #print("--end link.py")

    def get(self, key):
        return self._data.get(key, None)

    def set(self, key, value):
        self.validate(key, value)
        self._data[key] = value
opds/templates/catalog.opds.jinja2 (new file)
@@ -0,0 +1,39 @@
<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
      xmlns:dc="http://purl.org/dc/terms/"
      xmlns:ov="http://open.vocab.org/terms/"
      xmlns:oz="http://openzim.org/terms/"
      xmlns:opds="http://opds-spec.org/2010/catalog">
  <id>urn:uuid:{{ catalog.id }}</id>
  <title>{{ catalog.title }}</title>
  {% if catalog.author_name or catalog.author_uri %}
  <author>
    {% if catalog.author_name %}
    <name>{{ catalog.author_name }}</name>
    {% endif %}
    {% if catalog.author_uri %}
    <uri>{{ catalog.author_uri }}</uri>
    {% endif %}
  </author>
  {% endif %}
  <link rel="start"
        href="{{ catalog.root_url }}"
        type="application/atom+xml;profile=opds-catalog;kind=acquisition"/>
  <link rel="self"
        href="{{ catalog.url }}"
        type="application/atom+xml;profile=opds-catalog;kind=acquisition"/>

  {% for entry in catalog.entries %}
  <entry>
    <title>{{ entry.title }}</title>
    <id>{{ entry.id }}</id>
    {% if entry.updated %} <updated>{{ entry.updated }}</updated> {% endif %}
    {% for link in entry.links %}
    <link rel="{{ link.rel }}"
          href="{{ link.href }}"
          type="{{ link.type }}"/>
    {% endfor %}
  </entry>
  {% endfor %}

</feed>
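Catalog, Entry and Link from the opds package are what this template renders into an OPDS acquisition feed. A minimal sketch of how the pieces compose (titles, id and paths below are made-up illustration values):

from opds import Catalog
from opds.entry import Entry
from opds.link import Link

c = Catalog(title="Example Shelf",
            root_url="http://localhost:5000/catalog",
            url="http://localhost:5000/catalog")
link = Link(href="/content/Example%20Comic.cbz",
            rel="http://opds-spec.org/acquisition",
            rpath="/tmp",                  # made-up path; Entry skips ComicInfo.xml if the file is absent
            type="application/x-cbz")
c.add_entry(Entry(title="Example Comic", id="example-id", links=[link]))
print(c.render())                          # Atom/OPDS XML produced by catalog.opds.jinja2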
requirements.txt (new file)
@@ -0,0 +1,7 @@
Flask==2.0.2
Jinja2==3.0.2
requests==2.26.0
Flask-HTTPAuth==4.5.0
gevent==21.8.0
bs4
lxml
static/favicon.ico (new executable file; binary, 15 KiB, not shown)
static/robots.txt (new file)
@@ -0,0 +1,2 @@
User-agent: *
Disallow: /