Compare commits


53 Commits

Author SHA1 Message Date
41382cf1cd Started working on #15. Added status page for Generate 2024-02-03 18:09:31 -05:00
ace4e9bcba Started work on search 2024-02-02 09:06:52 -05:00
18e9575799 Fixed showing non-cbz files 2024-01-29 14:16:28 -05:00
c85185d975 Fixed xml escape issue 2024-01-29 13:59:33 -05:00
0ef53ab48c Fixed read-only issue 2024-01-29 05:35:44 -05:00
c55e0e97ad Fixed compose thumbnails folder and docker config and requirements updated 2024-01-29 05:34:17 -05:00
04b2354d51 Fixed random 2023-05-17 22:42:53 +02:00
d7f38882f2 Fixed png/jpg thumbnail generation 2023-05-17 22:17:20 +02:00
f1aba58732 Fixed ampersand 2023-05-17 21:47:14 +02:00
11d1418c09 Updated gitignore 2023-05-17 21:01:38 +02:00
8c69a958de Trying to fix issues with summary and Ampersand in title 2023-05-17 20:29:52 +02:00
c9dc8307b7 Added check for empty db 2023-05-17 19:47:49 +02:00
e239eb7159 Added graphs 2023-02-03 21:48:57 +01:00
ae8c7b7024 testing fixes for special signs (eg & in url/path) 2023-01-21 12:29:40 +01:00
892bfc21bd forgot to scale on forced generate. Now fixed 2023-01-20 15:25:38 +01:00
3ba034ea02 Added thumbnail scaling and opds thumbnail preview 2023-01-20 15:17:50 +01:00
18377b474a Cleanup of files 2023-01-20 11:45:09 +01:00
6cea6852ea Fixed #7. Added thumbs generation using /generate?force= 2023-01-20 11:38:38 +01:00
b7aa41025a Fixed #6. Imported files should now generate covers on import instead of being skipped 2023-01-20 10:07:05 +01:00
dd3cce602c added supported readers section 2023-01-20 09:42:03 +01:00
35587b5eb6 added supported readers section 2023-01-20 09:41:28 +01:00
92dd16328e added supported readers section 2023-01-20 09:40:55 +01:00
2215e5e925 added summary section 2023-01-20 09:26:53 +01:00
0e9a90e92a added platform test 2023-01-20 09:26:32 +01:00
663ab4a095 added summary section 2023-01-20 09:26:12 +01:00
36ff9c7d01 added debug to catalog 2023-01-20 09:08:36 +01:00
7f33098f5e moved cvdb extractor to extras.py 2023-01-19 22:33:11 +01:00
e8e2ef20d5 Added new json search cases 2023-01-19 22:22:04 +01:00
34f734d399 added file cover support, where thumbs are cached locally. Added limit to search results 2023-01-19 22:21:46 +01:00
f6acbfa3e4 added thumbnails dir and default search limit 2023-01-19 22:20:51 +01:00
89e68e3f8e added windows fixes 2023-01-19 15:41:27 +01:00
59256c35ac Added windows library location 2023-01-17 16:17:22 +01:00
fc3ddd47d4 Added .py file for support functions 2023-01-17 15:01:32 +01:00
8a10fba79f Added Writer credits and fixed feed entry 2023-01-17 15:01:16 +01:00
be7791443a Fixed path issue on linux 2023-01-17 15:00:37 +01:00
a1d3be4fed Updated /import with file mod check. 2023-01-17 14:59:50 +01:00
0d38808e16 Updated docker-compose 2023-01-15 10:37:05 +01:00
8ef613b155 Fixed #5. If the opds title is empty it should show 'Comics', else it will populate with the base url name 2022-07-13 19:15:35 +02:00
40f521543a Trying to start a webui 2022-07-13 13:53:16 +02:00
2b19398465 Fixed #2. Comics are now sorted by series, then issue. Might need testing if the issue number contains non-integer values. Git issue can be reopened if needed 2022-07-13 13:52:30 +02:00
2c7895561b Fixed #3. Title will now only show if there is one 2022-07-13 13:32:16 +02:00
bf2a3bcf1d added ignore files 2022-06-23 09:16:57 +02:00
a04b119ce5 working on comicdb.xml import 2022-06-09 15:43:50 +02:00
9c797eeddb trying to import ComicRack database 2022-06-08 21:36:41 +02:00
c232276faa updated note in readme with correct timed import of comics. The previous number was with lots of skipped comics 2022-06-07 22:03:40 +02:00
8c971b044d fixed zip issue (#1) 2022-06-07 21:59:03 +02:00
4f3a51c911 removed gzip requirement 2022-06-07 21:46:09 +02:00
f54414c300 pushed tentative fix for import 2022-06-07 21:44:57 +02:00
d921c92aed added manual import test 2022-06-07 21:41:36 +02:00
cab1dc2dcb Testing zip errors 2022-06-07 21:07:28 +02:00
e847d27809 added timer to import 2022-06-07 20:10:59 +02:00
5467aeeb0a changed config to docker usage 2022-06-07 20:09:52 +02:00
d9e66d2bb9 added docker-compose file 2022-06-07 20:09:14 +02:00
19 changed files with 1221 additions and 178 deletions

8
.dockerignore Normal file

@@ -0,0 +1,8 @@
.venv/
__pycache__/
.env
.git
.gitignore
deploy.sh
Dockerfile
env

7
.gitignore vendored Normal file

@@ -0,0 +1,7 @@
.venv/
__pycache__/
.env
deploy.sh
env
thumbnails
*.db

README.md

@@ -65,3 +65,20 @@ In the `config.py` file you need to change line 4 from `"/library"` to your comics directory
python3 main.py
## Supported Readers
Any reader that supports OPDS should work; however, the following have been verified to work or not work.
| App | Supported |
| ---------------------------------------------------------------------------- | --- |
| KyBook 3 (iOS) | ✔️ |
| Aldiko Next (iOS) | ❌ |
| PocketBook (iOS) | ✔️ |
| Moon+ Reader (Android) | ✔️ |
| Panels (iOS) | ✔️ |
| Marvin (iOS) | ✔️ |
| Chunky (iOS) | ✔️ |
# Notes
5865 files in 359 seconds
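
For clarity, the line-4 change the README describes amounts to the following (a sketch; the home-directory path is just an example, and `/library` remains the Docker default):

```python
# config.py, line 4 — point CONTENT_BASE_DIR at your comic library.
# The second argument is only a fallback: a CONTENT_BASE_DIR environment
# variable still wins, because the lookup goes through os.getenv().
CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/home/user/Comics")
```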

config.py

@@ -1,8 +1,39 @@
import os
from werkzeug.security import generate_password_hash
from sys import platform
import sys

CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/library") #docker
#if platform == "linux" or platform == "linux2":
#    CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/home/drudoo/ComicsTest/Comics") #linux
#elif platform == "win32":
#    CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/Comics/ComicRack") #windows
#CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "testlibrary") #windows test library

# Added folder for thumbnails. These are loaded as covers for the files.
THUMBNAIL_DIR = os.getenv("THUMBNAIL_DIR",'/thumbnails')

# If using Windows, insert the drive letter of your comics here.
# Both the script and comics need to be on the same drive.
WIN_DRIVE_LETTER = 'B'

# If using custom searches, insert the default amount of results here.
# It is also possible to override this in the json file.
DEFAULT_SEARCH_NUMBER = 10

# Debug output
# False: no print out in terminal
# True: logs are printed to terminal
DEBUG = True

# Max thumbnail size
MAXSIZE = (500,500)

def _print(arg):
    if DEBUG:
        print(arg,file=sys.stderr)

TEENYOPDS_ADMIN_PASSWORD = os.getenv("TEENYOPDS_ADMIN_PASSWORD", None)
users = {}
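
`_print` is the debug hook used throughout the app: it only emits when `DEBUG` is true, and it writes to stderr so output shows up in docker logs. A usage sketch:

```python
import config

config.DEBUG = True
config._print("thumbnail pass started")   # written to stderr

config.DEBUG = False
config._print("this line is swallowed")   # no output
```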

142
db.py Normal file

@@ -0,0 +1,142 @@
import sqlite3
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
import re
import datetime

def createdb():
    conn = sqlite3.connect('../test_database.db')
    c = conn.cursor()
    c.execute('''
        CREATE TABLE IF NOT EXISTS comics
        (
            [book_id] TEXT PRIMARY KEY,
            [book_path] TEXT,
            [series] TEXT,
            [seriesgroup] TEXT,
            [number] TEXT,
            [count] INTEGER,
            [volume] TEXT,
            [notes] TEXT,
            [year] INTEGER,
            [month] INTEGER,
            [day] INTEGER,
            [writer] TEXT,
            [penciller] TEXT,
            [inker] TEXT,
            [letterer] TEXT,
            [colorist] TEXT,
            [coverartist] TEXT,
            [publisher] TEXT,
            [genre] TEXT,
            [pagecount] INTEGER,
            [languageiso] TEXT,
            [scaninformation] TEXT,
            [pages] INTEGER,
            [added] TEXT,
            [filesize] INTEGER,
            [filemodifiedtime] TEXT,
            [filecreationtime] TEXT
        )
        ''')
    conn.commit()

def dropdb():
    conn = sqlite3.connect('../test_database.db')
    c = conn.cursor()
    c.execute('DROP TABLE COMICS')
    conn.commit()

def checkempty(v,t):
    r=""
    try:
        r=v.find(t).text
    except:
        pass
    return r

def loaddata():
    conn = sqlite3.connect('../test_database.db')
    c = conn.cursor()
    book_id,book_path,series,seriesgroup,number="","","","",""
    count=0
    volume,seriesgroup,notes="","",""
    year,month,day=0,0,0
    writer,penciller,inker,letterer,colorist,coverartist,publiser,genre="","","","","","","",""
    pagecount=0
    languageiso,scaninformation="",""
    pages=0
    added=""
    filesize=0
    filemodificationtime,filecreationtime="",""
    tree = ET.parse('../ComicDb_small.xml')
    root = tree.getroot()
    for child in root:
        #print("child: ", child.tag,child.attrib)
        if child.tag == 'Books':
            for grandchild in child:
                #print("grandchild: ",grandchild.tag,grandchild.attrib)
                #print(grandchild.attrib)
                #print(type(grandchild.attrib))
                book_id=grandchild.attrib['Id']
                book_path=grandchild.attrib['File']
                #for i,j in grandchild.attrib.items():
                #    print(i,j)
                #    #print(i,i["Id"])
                #series=grandchild.attrib['Series'].text
                #print(series)
                #print(grandchild[0].tag)
                #series=grandchild.find('Series').text
                series=checkempty(grandchild,'Series')
                number=checkempty(grandchild,'Number')
                count=checkempty(grandchild,'Count')
                seriesgroup=checkempty(grandchild,'SeriesGroup')
                notes=checkempty(grandchild,'Notes')
                year=checkempty(grandchild,'Year')
                month=checkempty(grandchild,'Month')
                day=checkempty(grandchild,'Day')
                writer=checkempty(grandchild,'Writer')
                penciller=checkempty(grandchild,'Penciller')
                inker=checkempty(grandchild,'Inker')
                letterer=checkempty(grandchild,'Letterer')
                c.execute("INSERT OR REPLACE INTO COMICS (book_id,book_path,series,number,count,seriesgroup,notes,year,month,day,writer,penciller, inker,letterer) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)",(book_id,book_path,series,number,count,seriesgroup,notes,year,month,day,writer,penciller,inker,letterer))
                conn.commit()
                #for ggchild in grandchild:
                #    print(ggchild.tag)
                #    print(ggchild.text)
                #print("----")
        #for books in child.findall('Book'):
        #    print(books,type(books))
        #    print(books.tag, books.attrib)

#with open('ComicDb_small.xml', 'r') as f:
#    contents = f.read()
#    Bs_data = BeautifulSoup(contents, 'xml')
#    for i in Bs_data.find_all('Book'):
#        #print(i)
#        try:
#            book_id = i.find('Book',{"Id"}).text
#            print(book_id)
#        except:
#            pass
#        try:
#            series=i.select('Series')[0].text
#        except:
#            pass

#dropdb()
#createdb()
loaddata()
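
`checkempty` is the null-guard for optional ComicInfo tags: it returns the child element's text, or an empty string when the tag is missing. A quick sketch of the behavior:

```python
import xml.etree.ElementTree as ET

book = ET.fromstring("<Book Id='1' File='a.cbz'><Series>Batgirl</Series></Book>")
checkempty(book, 'Series')   # -> 'Batgirl'
checkempty(book, 'Number')   # -> '' (missing tag is swallowed by the bare except)
```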

14
docker-compose.yml Normal file

@@ -0,0 +1,14 @@
version: '3.3'
services:
  comicopds:
    image: comicopds
    container_name: comicopds
    restart: unless-stopped
    ports:
      - '5000:5000'
    volumes:
      #- '/opt/data/Comics/ComicRack:/library:ro'
      #- '/home/drudoo/Pi1/Comics/ComicRack:/library:ro'
      - '${PWD}/CT/:/library:ro'
      - '${PWD}/thumbnails:/thumbnails'
      - '${PWD}/:/app'

24
extras.py Normal file

@@ -0,0 +1,24 @@
import os,re

table = str.maketrans({
    "<": "&lt;",
    ">": "&gt;",
    "&": "&amp;",
    "'": "&apos;",
    '"': "&quot;",
})

def xmlesc(txt):
    return txt.translate(table)

def get_size(file_path, unit='bytes'):
    file_size = os.path.getsize(file_path)
    exponents_map = {'bytes': 0, 'kb': 1, 'mb': 2, 'gb': 3}
    if unit not in exponents_map:
        raise ValueError("Must select from \
        ['bytes', 'kb', 'mb', 'gb']")
    else:
        size = file_size / 1024 ** exponents_map[unit]
        return round(size, 1)

def get_cvdb(string):
    return re.findall('(?<=\[CVDB)(.*)(?=].)', string[0].text)[0]
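
`get_cvdb` expects the element list returned by `select('Notes')` and pulls the ComicVine id out of the `[CVDB...]` marker via the lookbehind/lookahead pair. A sketch with a made-up Notes value:

```python
from bs4 import BeautifulSoup

xml = "<ComicInfo><Notes>Tagged with ComicTagger [CVDB123456].</Notes></ComicInfo>"
notes = BeautifulSoup(xml, "xml").select("Notes")
get_cvdb(notes)   # -> '123456'
```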

51
import.py Normal file

@@ -0,0 +1,51 @@
import zipfile
from bs4 import BeautifulSoup
import time
import config
import os,sys
import time
import sqlite3
import timeit
import re
import datetime

conn = sqlite3.connect('app.db')
list = []
start_time = timeit.default_timer()

for root, dirs, files in os.walk(os.path.abspath(config.CONTENT_BASE_DIR)):
    for file in files:
        f = os.path.join(root, file)
        #try:
        if f.endswith(".cbz"):
            print("CBZ: " + f)
            s = zipfile.ZipFile(f)
            #s = gzip.GzipFile(f)
            Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
            #print(Bs_data.select('Series')[0].text, file=sys.stderr)
            #print(Bs_data.select('Title')[0].text, file=sys.stderr)
            CVDB=re.findall('(?<=\[CVDB)(.*)(?=].)', Bs_data.select('Notes')[0].text)
            #list.append('CVDB'+CVDB[0] + ': ' + Bs_data.select('Series')[0].text + "(" + Bs_data.select('Volume')[0].text + ") : " + Bs_data.select('Number')[0].text )
            #print(list, file=sys.stdout)
            ISSUE=Bs_data.select('Number')[0].text
            SERIES=Bs_data.select('Series')[0].text
            VOLUME=Bs_data.select('Volume')[0].text
            PUBLISHER=Bs_data.select('Publisher')[0].text
            try:
                TITLE=Bs_data.select('Title')[0].text
            except:
                TITLE=""
            PATH=f
            UPDATED=str(datetime.datetime.now())
            #print(UPDATED,file=sys.stdout)
            #sql="INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES ("+CVDB[0]+",'"+ISSUE+"','"+SERIES+"','"+VOLUME+"','"+PUBLISHER+"','"+TITLE+"','"+file+"','" + f + "','" + UPDATED + "')"
            #print(sql,file=sys.stdout)
            conn.execute("INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES (?,?,?,?,?,?,?,?,?)", (CVDB[0], ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, file, f, UPDATED))
            conn.commit()
        else:
            print("NOT CBZ: " + f)

conn.close()
elapsed = timeit.default_timer() - start_time
print(elapsed)
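
import.py assumes `app.db` already contains a `COMICS` table (the start page in main.py creates it via its Create button). A minimal bootstrap sketch matching that schema:

```python
import sqlite3

conn = sqlite3.connect('app.db')
# Same column list as the create-table statement in main.py's startpage().
conn.execute("create table if not exists COMICS "
             "(CVDB,ISSUE,SERIES,VOLUME,YEAR,PUBLISHER,TITLE,FILE,PATH,UPDATED,"
             "PRIMARY KEY(CVDB))")
conn.commit()
conn.close()
```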

303
main.py

@@ -1,23 +1,36 @@
from flask import Flask, redirect,url_for, render_template, send_from_directory, request
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import check_password_hash
from gevent.pywsgi import WSGIServer
import timeit
import sqlite3
import os
from PIL import Image
import zipfile
from bs4 import BeautifulSoup
import re
import datetime
import sys
import time
import json
import numpy as np
from pathlib import Path
from io import BytesIO
from threading import Thread

# for debugging
from pprint import pprint
####

generated = None

from opds import fromdir
import config,extras

app = Flask(__name__, static_url_path="", static_folder="static")
auth = HTTPBasicAuth()

@auth.verify_password
def verify_password(username, password):
    if not config.TEENYOPDS_ADMIN_PASSWORD:
@ -27,58 +40,304 @@ def verify_password(username, password):
): ):
return username return username
@app.route("/", methods=['POST','GET'])
def startpage():
#result = "Hello, World!"
config._print(request.method)
if request.method == 'POST':
if request.form.get('Create') == 'Create':
# pass
config._print("open")
conn = sqlite3.connect('app.db')
cursor = conn.cursor()
cursor.execute("create table COMICS (CVDB,ISSUE,SERIES,VOLUME, YEAR, PUBLISHER, TITLE, FILE,PATH,UPDATED,PRIMARY KEY(CVDB))")
result = cursor.fetchall()
conn.close()
config._print("Encrypted")
elif request.form.get('Import') == 'Import':
# pass # do something else
config._print("Decrypted")
return redirect(url_for('import2sql'))
elif request.form.get('Generate') == 'Generate':
config._print("Generate Covers from Start page")
return redirect(url_for('generate2'))
else:
# pass # unknown
return render_template("first.html")
elif request.method == 'GET':
# return render_template("index.html")
config._print("No Post Back Call")
conn = sqlite3.connect('app.db')
cursor = conn.cursor()
try:
cursor.execute("select * from comics where CVDB in (SELECT CVDB from comics order by RANDOM() LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ");")
result = cursor.fetchall()
pub_list = ["Marvel", "DC Comics","Dark Horse Comics", "Dynamite Entertainment", "Oni Press"]
count = []
for i in pub_list:
cursor.execute("select count(*) from comics where Publisher = '" + i + "';")
count.append(cursor.fetchone()[0])
#cursor.execute("SELECT volume, COUNT(volume) FROM comics GROUP BY volume ORDER BY volume;")
cursor.execute("SELECT year, COUNT(year) FROM comics GROUP BY year ORDER BY year;")
volume = cursor.fetchall()
x = []
y = []
for i in volume:
x.append(i[0])
y.append(i[1])
conn.close()
try:
total = np.sum(np.array(volume).astype('int')[:,1],axis=0)
dir_path = r'thumbnails'
covers = 0
for path in os.listdir(dir_path):
if os.path.isfile(os.path.join(dir_path,path)):
covers += 1
config._print("covers: " + str(covers))
except Exception as e:
config._print(e)
return render_template("start.html", first=False,result=result,pub_list=pub_list,count=count,x=x,y=y,total=total,covers=covers)
except:
conn.close()
config._print('first')
return render_template("start.html",first=True)
#@app.route("/first", methods=['GET', 'POST'])
#def first():
# return render_template('first.html',result=result)
@app.route("/")
@app.route("/healthz") @app.route("/healthz")
def healthz(): def healthz():
return "ok" return "ok"
@app.route('/search')
def search():
args = request.args.get('q')
print(args)
conn = sqlite3.connect('app.db')
cursor = conn.cursor()
result = 'no good'
try:
cursor.execute("select TITLE, PATH from comics where TITLE like '%" + str(args) + "%';")
result = cursor.fetchall()
cursor.close()
for i in result:
print(i)
except Exception as e:
config._print(e)
return str(result)
total = None

#@app.route("/generate")
def generate():
    config._print('GENERATES NOW!!!')
    force = 'True' #request.args.get('force')
    global generated
    global total
    total = 0
    generated = 0
    comiccount = 0
    files_without_comicinfo = 0
    errorcount = 0
    skippedcount = 0
    errormsg = ""

    for root, dirs, files in os.walk(os.path.abspath(config.CONTENT_BASE_DIR)):
        for file in files:
            f = os.path.join(root,file)
            if f.endswith('.cbz'):
                total = total + 1

    for root, dirs, files in os.walk(os.path.abspath(config.CONTENT_BASE_DIR)):
        for file in files:
            f = os.path.join(root, file)
            if f.endswith('.cbz'):
                config._print(generated)
                try:
                    comiccount = comiccount + 1
                    s = zipfile.ZipFile(f)
                    filelist = zipfile.ZipFile.namelist(s)
                    if 'ComicInfo.xml' in filelist:
                        Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
                        CVDB=extras.get_cvdb(Bs_data.select('Notes'))
                        if force == 'True':
                            ext = [i for i, x in enumerate(filelist) if re.search("(?i)\.jpg|png|jpeg$", x)]
                            cover = s.open(filelist[ext[0]]).read()
                            image = Image.open(BytesIO(cover))
                            rgb_im = image.convert("RGB")
                            image.thumbnail(config.MAXSIZE,Image.LANCZOS)
                            image.save(config.THUMBNAIL_DIR + "/" + str(CVDB) + ".jpg")
                            # Old way of saving without resize
                            #c = open(config.THUMBNAIL_DIR + "/" + str(CVDB) + ".jpg", 'wb+')
                            #c.write(cover)
                            #c.close()
                            generated = generated + 1
                        if Path(config.THUMBNAIL_DIR + "/" + str(CVDB) + ".jpg").exists() == False:
                            config._print("generating for " + str(CVDB))
                            try:
                                ext = [i for i, x in enumerate(filelist) if re.search("(?i)\.jpg|png|jpeg$", x)]
                                #config._print(filelist)
                                #config._print(ext)
                                #config._print(filelist[ext[0]])
                                cover = s.open(filelist[ext[0]]).read()
                                #xyz = [i for i, x in enumerate(filelist) if re.match('*\.py$',x)]
                                #config._print(xyz)
                                image = Image.open(BytesIO(cover))
                                image.thumbnail(config.MAXSIZE,Image.LANCZOS)
                                image.save(config.THUMBNAIL_DIR + "/" + str(CVDB) + ".jpg")
                                generated = generated + 1
                            except Exception as e:
                                errormsg = str(e)
                                config._print(e)
                        else:
                            if not force:
                                skippedcount = skippedcount + 1
                    else:
                        print("Error at: " + str(CVDB) + " " + str(f))
                        files_without_comicinfo = files_without_comicinfo + 1
                except Exception as e:
                    errorcount = errorcount + 1
                    config._print("Error (/generate): " + str(e))
                    config._print(f)
                    errormsg = str(e)

    return "Forced generation: " + str(force) + "<br>Comics: " + str(comiccount) + "<br>Generated: " + str(generated) + "<br>CBZ files without ComicInfo.xml: " + str(files_without_comicinfo) + "<br>Errors: " + str(errorcount) + "<br>Skipped: " + str(skippedcount) + "<br>" + errormsg
    config._print( "Forced generation: " + str(force) + "<br>Comics: " + str(comiccount) + "<br>Generated: " + str(generated) + "<br>CBZ files without ComicInfo.xml: " + str(files_without_comicinfo) + "<br>Errors: " + str(errorcount) + "<br>Skipped: " + str(skippedcount) + "<br>" + errormsg)

@app.route("/generate2")
def generate2():
    t1 = Thread(target=generate)
    t1.start()
    return render_template('status.html')

@app.route("/t2")
def index():
    t1 = Thread(target=generate)
    t1.start()
    return render_template('status.html')

@app.route('/status',methods=['GET'])
def getStatus():
    statusList = {'status':generated,'total':total}
    return json.dumps(statusList)
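
`/generate2` returns immediately while the worker thread updates the module-level `generated`/`total` counters, so progress is read by polling `/status` — exactly what `templates/status.html` does in JavaScript. A hypothetical Python client doing the same:

```python
import time
import requests

base = "http://localhost:5000"               # host/port assumed from docker-compose
requests.get(f"{base}/generate2")            # kicks off the background thread

while True:
    s = requests.get(f"{base}/status").json()   # -> {'status': done, 'total': all}
    print(f"{s['status']}/{s['total']} thumbnails")
    if s["status"] == s["total"]:
        break
    time.sleep(1)
```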
@app.route('/import')
def import2sql():
    conn = sqlite3.connect('app.db')
    list = []
    comiccount = 0
    importcount = 0
    coverscount = 0
    skippedcount = 0
    errorcount = 0
    comics_with_errors = []
    start_time = timeit.default_timer()
    for root, dirs, files in os.walk(os.path.abspath(config.CONTENT_BASE_DIR)):
        for file in files:
            f = os.path.join(root, file)
            if f.endswith('.cbz'):
                try:
                    comiccount = comiccount + 1
                    s = zipfile.ZipFile(f)
                    filelist = zipfile.ZipFile.namelist(s)
                    if filelist[0] == 'ComicInfo.xml':
                        filemodtime = os.path.getmtime(f)
                        Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
                        CVDB=extras.get_cvdb(Bs_data.select('Notes'))
                        ISSUE=Bs_data.select('Number')[0].text
                        SERIES=Bs_data.select('Series')[0].text
                        VOLUME=Bs_data.select('Volume')[0].text
                        YEAR=Bs_data.select('Year')[0].text
                        PUBLISHER=Bs_data.select('Publisher')[0].text
                        try:
                            TITLE=Bs_data.select('Title')[0].text
                        except:
                            TITLE="" #sometimes title is blank.
                        PATH=f
                        UPDATED=filemodtime
                        #print(UPDATED,file=sys.stdout)
                        #sql="INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES ("+CVDB+",'"+ISSUE+"','"+SERIES+"','"+VOLUME+"','"+PUBLISHER+"','"+TITLE+"','"+file+"','" + f + "','" + UPDATED + "')"
                        #print(sql,file=sys.stdout)
                        #conn.execute(sql);

                        # CREATE TABLE IF MISSING
                        # create table COMICS (CVDB, ISSUE, SERIES,VOLUME,PUBLISHER,TITLE,FILE,PATH,UPDATED,PRIMARY KEY(CVDB))
                        try:
                            query = "SELECT UPDATED FROM COMICS WHERE CVDB = '" + str(CVDB) + "';"
                            savedmodtime = conn.execute(query).fetchone()[0]
                        except:
                            savedmodtime = 0
                        if savedmodtime < filemodtime:
                            conn.execute("INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, YEAR, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES (?,?,?,?,?,?,?,?,?,?)", (CVDB, ISSUE, SERIES, VOLUME, YEAR, PUBLISHER, TITLE, file, f, UPDATED))
                            conn.commit()
                            config._print("Adding: " + str(CVDB))
                            importcount = importcount + 1
                        elif Path(config.THUMBNAIL_DIR + "/" + str(CVDB) + ".jpg").exists() == False:
                            cover = s.open(filelist[1]).read()
                            c = open(config.THUMBNAIL_DIR + "/" + str(CVDB) + ".jpg", 'wb+')
                            c.write(cover)
                            c.close()
                            coverscount = coverscount + 1
                        else:
                            config._print("Skipping: " + f)
                            skippedcount = skippedcount + 1
                except Exception as e:
                    errorcount = errorcount + 1
                    comics_with_errors.append(f)
                    config._print(e)
    config._print(comics_with_errors)
    conn.close()
    elapsed = timeit.default_timer() - start_time
    elapsed_time = "IMPORTED IN: " + str(round(elapsed,2)) + "s"
    import_stats = elapsed_time + "<br>Comics: " + str(comiccount) + "<br>Imported: " + str(importcount) + "<br>Covers: " + str(coverscount) + "<br>Skipped: " + str(skippedcount) + "<br>Errors: " + str(errorcount)
    return import_stats #+ "<br>" + ['<li>' + x + '</li>' for x in comics_with_errors]
@app.route("/content/<path:path>") @app.route("/content/<path:path>")
@auth.login_required @auth.login_required
def send_content(path): def send_content(path):
#print('content')
return send_from_directory(config.CONTENT_BASE_DIR, path) return send_from_directory(config.CONTENT_BASE_DIR, path)
@app.route("/image/<path:path>")
def image(path):
return send_from_directory(config.THUMBNAIL_DIR,path)
@app.route("/catalog") @app.route("/catalog")
@app.route("/catalog/")
@app.route("/catalog/<path:path>") @app.route("/catalog/<path:path>")
@auth.login_required @auth.login_required
def catalog(path=""): def catalog(path=""):
start_time = timeit.default_timer() config._print("path: " + path)
print(request.root_url) config._print("root_url: " + request.root_url)
config._print("url: " + request.url)
config._print("CONTENT_BASE_DIR: " + config.CONTENT_BASE_DIR)
#print("PRESSED ON")
#start_time = timeit.default_timer()
#print(request.root_url)
c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path) c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path)
elapsed = timeit.default_timer() - start_time #print("c: ")
print(elapsed) #pprint(vars(c))
#for x in c.entries:
# for y in x.links:
# pprint(y.href)
#print("------")
#elapsed = timeit.default_timer() - start_time
#print("-----------------------------------------------------------------------------------------------------------------------")
#print("RENDERED IN: " + str(round(elapsed,2))+"s")
return c.render() return c.render()

opds/catalog.py

@@ -5,7 +5,8 @@ from jinja2 import Environment, FileSystemLoader, select_autoescape
from .entry import Entry
from .link import Link
import sqlite3,json
import config
import extras

class Catalog(object):
    def __init__(
@@ -48,14 +49,21 @@ def fromsearch(root_url, url, content_base_path, content_relative_path):
def fromdir(root_url, url, content_base_path, content_relative_path):
    path = os.path.join(content_base_path, content_relative_path)
    if os.path.basename(content_relative_path) == "":
        c = Catalog(
            title="Comics",
            root_url=root_url,
            url=url
        )
    else:
        c = Catalog(
            title=extras.xmlesc(os.path.basename(content_relative_path)),
            root_url=root_url,
            url=url
        )
        #title=os.path.basename(os.path.dirname(path)), root_url=root_url, url=url

    ##########WORKING AREA###########
    searchArr=[]
@@ -72,19 +80,23 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
    ######################
    if not "search" in c.url:
        onlydirs = [
            f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))
        ]
        onlydirs.sort()
        print(onlydirs)
        for dirname in onlydirs:
            print(dirname)
            link = Link(
                href=quote(f"/catalog/{content_relative_path}/{dirname}").replace('//','/'), #windows fix
                rel="subsection",
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title=extras.xmlesc(dirname), id=uuid4(), links=[link]))

    if c.url.endswith("/catalog"):
@@ -97,86 +109,120 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title="["+i+"]",id=uuid4(),links=[link2]))

    if not "search" in c.url:
        onlyfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        onlyfiles.sort()
        for filename in onlyfiles:
            if not filename.endswith('cbz'):
                continue
            link = Link(
                href=quote(f"/content/{content_relative_path}/{filename}"),
                rel="http://opds-spec.org/acquisition",
                rpath=path,
                type=mimetype(filename),
            )
            #c.add_entry(Entry(title=filename.rsplit(".",1)[0], id=uuid4(), links=[link]))
            c.add_entry(Entry(title=extras.xmlesc(filename).rsplit(".",1)[0], id=uuid4(), links=[link]))
            #fixed issue with multiple . in filename
        #print(c.render())
    else:
        with open('test.json') as fi:
            data=json.load(fi)
            config._print("--> LOADED 2 FILE") # try and get this as low as possible.
            for e in data:
                for key, value in e.items():
                    config._print(key)
                    searchArr.append(key)
        for i in searchArr:
            config._print("i (in searchArr): " + i)
            config._print("quote i: " + quote(f""+i))
            if quote(f""+i) in c.url:
                conn = sqlite3.connect('app.db')
                for e in data:
                    config._print("e (in data): " + str(e))
                    for key, value in e.items():
                        config._print("key: " + key)
                        if key == i:
                            config._print("key <" + str(key) + "> matches <" + str(i) + ">")
                            query="SELECT * FROM COMICS where "
                            for h in value:
                                first=True
                                for j,k in h.items():
                                    if j == 'SQL':
                                        query = query + k
                                    if k != '' and j != "SQL":
                                        config._print(j)
                                        config._print(k)
                                        config._print(query)
                                        if not first and j != 'limit':
                                            query = query + "and "
                                            config._print(query)
                                        if type(k) == list:
                                            config._print(k)
                                            if j == "series" or j == "title":
                                                firstS = True
                                                query = query + "("
                                                config._print(query)
                                                for l in k:
                                                    if not firstS:
                                                        query = query + "or "
                                                        config._print(query)
                                                    query = query + j + " like '%" + l + "%' "
                                                    config._print(query)
                                                    if firstS:
                                                        firstS = False
                                                query = query + ") "
                                                config._print(query)
                                            else:
                                                query = query + j + " in ("
                                                config._print(query)
                                                firstL = True
                                                for l in k:
                                                    if not firstL:
                                                        query = query + ","
                                                        config._print(query)
                                                    query = query + "'" + str(l) + "'"
                                                    config._print(query)
                                                    if firstL:
                                                        firstL = False
                                                query = query + ") "
                                                config._print(query)
                                        elif j != 'limit':
                                            query = query + j + " like '%" + str(k) + "%' "
                                            config._print(query)
                                        elif j == 'limit':
                                            config.DEFAULT_SEARCH_NUMBER = k
                                        else:
                                            print(">>>>>>>>>>>ERROR THIS SHOULD NOT HAPPEN<<<<<<<<<<<")
                                        if first:
                                            first = False
                            query = query + " order by series asc, cast(issue as unsigned) asc "
                            if config.DEFAULT_SEARCH_NUMBER != 0:
                                query = query + "LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ";"
                            else:
                                query = query + ";"
                            break
                        else:
                            config._print("key <" + str(key) + "> DOES NOT match <" + str(i) + ">")
                config._print("----> " + query)
                sql = query
                #sql="SELECT * from COMICS where SERIES like '%" + i+ "%' or Title like '%" + i+ "%';"
                #config._print(sql)
                s = conn.execute(sql)
                #list=[]
                for r in s:
                    #config._print(r)
                    tUrl=f""+r[7].replace('\\','/').replace(config.WIN_DRIVE_LETTER + ':','').replace(config.CONTENT_BASE_DIR,"/content")
                    #config._print(tUrl)
                    tTitle=r[6]
                    link3 = Link(
                        #href=quote(f"/content/DC Comics/Earth Cities/Gotham City/Batgirl/Annual/(2012) Batgirl Annual/Batgirl Annual #001 - The Blood That Moves Us [December, 2012].cbz"),
@@ -185,6 +231,7 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
                        href=quote(tUrl),
                        rel="http://opds-spec.org/acquisition",
                        rpath=path,
                        type="application/x-cbz",
                    )
                    #config._print(link3.href)
                    c.add_entry(
                        Entry(
                            title=tTitle,
@@ -192,12 +239,12 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
                            id=uuid4(),
                            links=[link3]
                        )
                    )
    #print(c.title)
    return c

def mimetype(path):
    extension = path.split(".")[-1].lower()
    if extension == "pdf":
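
To make the string-building above concrete: for the "Girl" entry in test.json (a title list plus a series filter), the builder assembles roughly this statement (a sketch; whitespace differs, and the limit comes from `DEFAULT_SEARCH_NUMBER`):

```python
query = ("SELECT * FROM COMICS where "
         "(title like '%girl%' or title like '%man%' or title like '%World%' ) "
         "and series like '%girl%' "
         "order by series asc, cast(issue as unsigned) asc LIMIT 10;")
```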

opds/entry.py

@@ -1,6 +1,10 @@
import zipfile
from bs4 import BeautifulSoup
import os
import re
import extras
import config

class Entry(object):
    valid_keys = (
@@ -23,7 +27,10 @@ class Entry(object):
        "oai_updatedates",
        "authors",
        "formats",
        "size",
        "links",
        "cover",
        "covertype"
    )

    required_keys = ("id", "title", "links")
@@ -46,29 +53,57 @@ class Entry(object):
        #print(">>entry.py")
        #print(kwargs)
        #print(kwargs["links"][0].get("rpath"))
        #print("--end entry.py")
        try:
            if kwargs["links"][0].get("type") == 'application/x-cbz':
                f=self.links[0].get("rpath")+"/"+self.title+".cbz"
                if os.path.exists(f):
                    s = zipfile.ZipFile(f)
                    self.size = extras.get_size(f, 'mb')
                    data=BeautifulSoup(s.open('ComicInfo.xml').read(), features="xml")
                    #self.cover=s.open('P00001.jpg').read()
                    if data.select('Writer') != []:
                        self.authors = data.select('Writer')[0].text.split(",")
                    else:
                        config._print("No Writer found: " + str(data.select('Writer')))
                    self.cover = "/image/" + extras.get_cvdb(data.select('Notes')) + ".jpg"
                    #if data.select('Title') != []:
                    #    self.title = data.select('Title')[0]
                    #    print(data.select('Title')[0])
                    title = data.select('Title')[0].text.replace("&","&amp;")
                    kwargs["title"] = title
                    print(title)
                    if data.select('Summary') != []:
                        #print(data.select('Summary')[0].text)
                        self.summary = data.select('Summary')[0]
                    else:
                        config._print("No Summary found: " + str(data.select('Summary')))
                    #print(data)
                    #print(kwargs["links"][0])
                    #print(data.select('Series')[0].text)
                    #print(kwargs["links"][0].get("rpath"))
                    if data.select('Series')[0].text in kwargs["links"][0].get("rpath"):
                        releasedate=data.select('Year')[0].text+"-"+data.select('Month')[0].text.zfill(2)+"-"+data.select('Day')[0].text.zfill(2)
                        try:
                            self.title = "#"+data.select('Number')[0].text.zfill(2) + ": " + title + " (" + releasedate + ") [" + str(self.size) + "MB]"
                        except:
                            self.title = "#"+data.select('Number')[0].text.zfill(2) + " (" + releasedate + ") [" + str(self.size) + "MB]"
                        #print(self.title)
                    else:
                        self.title = title
                else:
                    self.title = kwargs["title"]
            #self.title = data.select('Title')[0].text
        except Exception as e:
            config._print(e)

    def get(self, key):
        return self._data.get(key, None)

237
opds/search.py Normal file

@@ -0,0 +1,237 @@
import os
from uuid import uuid4
from urllib.parse import quote
from jinja2 import Environment, FileSystemLoader, select_autoescape
from .entry import Entry
from .link import Link
import sqlite3,json
import config
import extras

class Search(object):
    def __init__(
        self,
        title,
    ):
        self.title = title

    def render(self):
        env = Environment(
            loader=FileSystemLoader(
                searchpath=os.path.join(os.path.dirname(__file__), "templates")
            ),
            autoescape=select_autoescape(["html", "xml"]),
        )
        template = env.get_template("catalog.opds.jinja2")
        return template.render(catalog=self)

def fromdir(root_url, url, content_base_path, content_relative_path):
    path = os.path.join(content_base_path, content_relative_path)
    if os.path.basename(content_relative_path) == "":
        c = Catalog(
            title="Comics",
            root_url=root_url,
            url=url
        )
    else:
        c = Catalog(
            title=extras.xmlesc(os.path.basename(content_relative_path)),
            root_url=root_url,
            url=url
        )
        #title=os.path.basename(os.path.dirname(path)), root_url=root_url, url=url

    ##########WORKING AREA###########
    searchArr=[]
    if c.url.endswith("/catalog"):
        with open('test.json') as fi:
            data=json.load(fi)
            print("--> LOADED FILE") # try and get this as low as possible.
            #searchArr=["Girl","Bat","Part One"]
            for e in data:
                for key, value in e.items():
                    searchArr.append(key)
            print(searchArr)
    ######################

    if not "search" in c.url:
        onlydirs = [
            f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))
        ]
        onlydirs.sort()
        print(onlydirs)
        for dirname in onlydirs:
            print(dirname)
            link = Link(
                href=quote(f"/catalog/{content_relative_path}/{dirname}").replace('//','/'), #windows fix
                rel="subsection",
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title=extras.xmlesc(dirname), id=uuid4(), links=[link]))

    if c.url.endswith("/catalog"):
        for i in searchArr:
            link2 = Link(
                href=quote(f"/catalog/search["+i+"]"),
                rel="subsection",
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title="["+i+"]",id=uuid4(),links=[link2]))

    if not "search" in c.url:
        onlyfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        onlyfiles.sort()
        for filename in onlyfiles:
            if not filename.endswith('cbz'):
                continue
            link = Link(
                href=quote(f"/content/{content_relative_path}/{filename}"),
                rel="http://opds-spec.org/acquisition",
                rpath=path,
                type=mimetype(filename),
            )
            #c.add_entry(Entry(title=filename.rsplit(".",1)[0], id=uuid4(), links=[link]))
            c.add_entry(Entry(title=extras.xmlesc(filename).rsplit(".",1)[0], id=uuid4(), links=[link]))
            #fixed issue with multiple . in filename
        #print(c.render())
    else:
        with open('test.json') as fi:
            data=json.load(fi)
            config._print("--> LOADED 2 FILE") # try and get this as low as possible.
            for e in data:
                for key, value in e.items():
                    config._print(key)
                    searchArr.append(key)
        for i in searchArr:
            config._print("i (in searchArr): " + i)
            config._print("quote i: " + quote(f""+i))
            if quote(f""+i) in c.url:
                conn = sqlite3.connect('app.db')
                for e in data:
                    config._print("e (in data): " + str(e))
                    for key, value in e.items():
                        config._print("key: " + key)
                        if key == i:
                            config._print("key <" + str(key) + "> matches <" + str(i) + ">")
                            query="SELECT * FROM COMICS where "
                            for h in value:
                                first=True
                                for j,k in h.items():
                                    if j == 'SQL':
                                        query = query + k
                                    if k != '' and j != "SQL":
                                        config._print(j)
                                        config._print(k)
                                        config._print(query)
                                        if not first and j != 'limit':
                                            query = query + "and "
                                            config._print(query)
                                        if type(k) == list:
                                            config._print(k)
                                            if j == "series" or j == "title":
                                                firstS = True
                                                query = query + "("
                                                config._print(query)
                                                for l in k:
                                                    if not firstS:
                                                        query = query + "or "
                                                        config._print(query)
                                                    query = query + j + " like '%" + l + "%' "
                                                    config._print(query)
                                                    if firstS:
                                                        firstS = False
                                                query = query + ") "
                                                config._print(query)
                                            else:
                                                query = query + j + " in ("
                                                config._print(query)
                                                firstL = True
                                                for l in k:
                                                    if not firstL:
                                                        query = query + ","
                                                        config._print(query)
                                                    query = query + "'" + str(l) + "'"
                                                    config._print(query)
                                                    if firstL:
                                                        firstL = False
                                                query = query + ") "
                                                config._print(query)
                                        elif j != 'limit':
                                            query = query + j + " like '%" + str(k) + "%' "
                                            config._print(query)
                                        elif j == 'limit':
                                            config.DEFAULT_SEARCH_NUMBER = k
                                        else:
                                            print(">>>>>>>>>>>ERROR THIS SHOULD NOT HAPPEN<<<<<<<<<<<")
                                        if first:
                                            first = False
                            query = query + " order by series asc, cast(issue as unsigned) asc "
                            if config.DEFAULT_SEARCH_NUMBER != 0:
                                query = query + "LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ";"
                            else:
                                query = query + ";"
                            break
                        else:
                            config._print("key <" + str(key) + "> DOES NOT match <" + str(i) + ">")
                config._print("----> " + query)
                sql = query
                #sql="SELECT * from COMICS where SERIES like '%" + i+ "%' or Title like '%" + i+ "%';"
                #config._print(sql)
                s = conn.execute(sql)
                #list=[]
                for r in s:
                    #config._print(r)
                    tUrl=f""+r[7].replace('\\','/').replace(config.WIN_DRIVE_LETTER + ':','').replace(config.CONTENT_BASE_DIR,"/content")
                    #config._print(tUrl)
                    tTitle=r[6]
                    link3 = Link(
                        #href=quote(f"/content/DC Comics/Earth Cities/Gotham City/Batgirl/Annual/(2012) Batgirl Annual/Batgirl Annual #001 - The Blood That Moves Us [December, 2012].cbz"),
                        href=quote(tUrl),
                        rel="http://opds-spec.org/acquisition",
                        rpath=path,
                        type="application/x-cbz",
                    )
                    #config._print(link3.href)
                    c.add_entry(
                        Entry(
                            title=tTitle,
                            id=uuid4(),
                            links=[link3]
                        )
                    )
    #print(c.title)
    return c

def mimetype(path):
    extension = path.split(".")[-1].lower()
    if extension == "pdf":
        return "application/pdf"
    elif extension == "epub":
        return "application/epub"
    elif extension == "mobi":
        return "application/mobi"
    elif extension == "cbz":
        return "application/x-cbz"
    else:
        return "application/unknown"

opds/templates/catalog.opds.jinja2

@@ -27,7 +27,19 @@
    <entry>
        <title>{{ entry.title }}</title>
        <id>{{ entry.id }}</id>
        <summary type="text">{{ entry.summary }}</summary>
        {% for author in entry.authors %}
        <author>
            <name>{{ author }}</name>
        </author>
        {% endfor %}
        {% if entry.updated %} <updated>{{ entry.updated }}</updated> {% endif %}
        <link rel="http://opds-spec.org/image"
              href="{{ entry.cover }}"
              type="image/jpg"/>
        <link rel="http://opds-spec.org/image/thumbnail"
              href="{{ entry.cover }}"
              type="image/jpg"/>
        {% for link in entry.links %}
        <link rel="{{ link.rel }}"
              href="{{ link.href }}"


@@ -1,77 +0,0 @@
[
    {
        "SQL TEST": [
            {
                "SQL": "(series like '%Aqua%' or series like '%girl%') and issue in ('1','2','5','10') and title not like '%Annual%'"
            }
        ]
    },{
        "Man 2020,2019": [
            {
                "title": "Man",
                "volume": [
                    "2020",
                    "2019"
                ],
                "publisher": "",
                "series": "",
                "issue": ""
            }
        ]
    },
    {
        "DC (BAT)": [
            {
                "title": "",
                "volume": "",
                "publisher": "DC Comics",
                "series": "Bat",
                "issue": ""
            }
        ]
    },{
        "Marvel": [
            {
                "title": "",
                "volume": "",
                "publisher": "marvel",
                "series": "",
                "issue": ""
            }
        ]
    },
    {
        "Girl": [
            {
                "title": ["girl","man","World"],
                "volume": "",
                "publisher": "",
                "series": "girl",
                "issue": ""
            }
        ]
    },
    {
        "Aquaman": [
            {
                "title": "",
                "volume": "",
                "publisher": "",
                "series": "aquaman",
                "issue": ["2","3","5","10","22"]
            }
        ]
    },
    {
        "Girl series": [
            {
                "title": "",
                "volume": "",
                "publisher": "",
                "series": "girl",
                "issue": "2"
            }
        ]
    }
]

requirements.txt

@@ -1,7 +1,10 @@
Flask==2.0.2
Werkzeug==2.2.2
numpy
Jinja2==3.0.2
requests==2.26.0
Flask-HTTPAuth==4.5.0
gevent==21.8.0
bs4
lxml
Pillow

14
templates/first.html Normal file

@@ -0,0 +1,14 @@
<html>
    <body>
        <form method="post" action="/">
            <input type="submit" value="Encrypt" name="Encrypt"/>
            <input type="submit" value="Decrypt" name="Decrypt" />
        </form>
    </body>
</html>
<p>{{ result }}</p>

91
templates/start.html Normal file

@@ -0,0 +1,91 @@
<html>
<script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.5.0/Chart.min.js"></script>
<body>
    {% if first and request.args.get('first') == None %}
    <form method="post">
        <p>DB is missing table. <input type="submit" value="Create" name="Create"/>
    </form>
    {% endif %}
    {% if result == [] %}
    <form method="post">
        <p>No comics imported. <input type="submit" value="Import" name="Import"/>
    </form>
    {% endif %}
    {% if total != covers %}
    <form method="post">
        <p>Some covers missing <input type="submit" value="Generate" name="Generate"/>
    </form>
    {% endif %}

    <h1>Total Comics: {{ total }}</h1>

    <canvas id="myChart" style="width:100%;max-width:600px"></canvas>
    <script>
        var xValues = {{ pub_list | safe }};
        var yValues = {{ count }};
        var barColors = ["red", "green","blue","orange", "purple"];

        new Chart("myChart", {
            type: "bar",
            data: {
                labels: xValues,
                datasets: [{
                    backgroundColor: barColors,
                    data: yValues
                }]
            },
            options: {
                legend: {display: false},
                title: {
                    display: true,
                    text: "Publishers"
                }
            }
        });
    </script>

    <canvas id="myChart3" style="width:100%;max-width:600px"></canvas>
    <script>
        var xValues = {{ x | safe }};
        var yValues = {{ y | safe }};

        new Chart("myChart3", {
            type: "line",
            data: {
                labels: xValues,
                datasets: [{
                    fill: false,
                    backgroundColor: "rgba(0,0,255,1.0)",
                    borderColor: "rgba(0,0,255,0.1)",
                    data: yValues
                }]
            },
            options: {
                legend: {display: false},
            }
        });
    </script>

    <table id="comics">
        {% for i in result %}
        <tr>
            {% for j in range(0,9) %}
            <td>{{ i[j] }}</td>
            {% endfor %}
        </tr>
        {% endfor %}
    </table>
</body>
</html>

75
templates/status.html Normal file

@@ -0,0 +1,75 @@
<!doctype html>
<html>
<head>
    <meta charset="UTF-8">
    <style>
        body {
            background-color: #D64F2A;
        }
        .progress {
            display: flex;
            position: absolute;
            height: 100%;
            width: 100%;
        }
        .status {
            color: white;
            margin: auto;
        }
        .status h2 {
            padding: 50px;
            font-size: 80px;
            font-weight: bold;
        }
    </style>
    <title>Status Update</title>
</head>
<body>
    <div class="progress">
        <div class="status">
            <h2 id="innerStatus">Loading...</h2>
        </div>
    </div>
</body>
<script>
    var timeout;

    async function getStatus() {
        let get;
        try {
            const res = await fetch("/status");
            get = await res.json();
        } catch (e) {
            console.error("Error: ", e);
        }

        document.getElementById("innerStatus").innerHTML = Math.round(get.status / get.total * 100,0) + "&percnt;";

        if (get.status == get.total){
            document.getElementById("innerStatus").innerHTML += " Done.";
            clearTimeout(timeout);
            // Simulate a mouse click:
            window.location.href = "/";
            return false;
        }

        timeout = setTimeout(getStatus, 1000);
    }

    getStatus();
</script>
</html>

test.json

@@ -1,12 +1,24 @@
[
    {
        "Amazons": [
            {
                "SQL": "(series = 'Nubia & the Amazons' and issue in ('1','2','3','4','5','6')) or (series like 'Trial of the Amazons%' and issue in ('1','2')) or (series = 'Wonder Woman' and issue in ('785','786','787'))"
            }
        ]
    },
    {
        "Letter 44": [
            {
                "title": "",
                "volume": "",
                "publisher": "",
                "series": "Letter 44",
                "issue": ""
            }
        ]
    },
    {
        "Man 2020 or 2019": [
            {
                "title": "Man",
                "volume": [
                    "2020",
                    "2019"
                ],
                "publisher": "",
                "series": "",
                "issue": ""
            }
        ]
    },
    {
        "DC BAT": [
            {
                "title": "",
                "volume": "",
                "publisher": "DC Comics",
                "series": "Bat",
                "issue": ""
            }
        ]
    },
    {
        "Marvel": [
            {
                "title": "",
                "volume": "",
                "publisher": "marvel",
                "series": "",
                "issue": ""
            }
        ]
    },
    {
        "Girl": [
            {
                "title": [
                    "girl",
                    "man",
                    "World"
                ],
                "volume": "",
                "publisher": "",
                "series": "girl",
                "issue": ""
            }
        ]
    },
    {
        "number 1": [
            {
                "title": "",
                "volume": "",
                "publisher": "",
                "series": "",
                "issue": [
                    "1"
                ]
            }
        ]
    },
    {
        "Aquaman": [
            {
                "title": [
                    "Tyrant King",
                    "The Deluge Act Three",
                    "Warhead Part One",
                    "Black Mantra"
                ],
                "volume": "",
                "publisher": "",
                "series": "",
                "issue": ""
            }
        ]
    },
    {
        "2020-2022 DC Comics": [
            {
                "title": "",
                "volume": [
                    "2020",
                    "2022"
                ],
                "publisher": "DC Comics",
                "series": [
                    "Batman",
                    "Detective Comics"
                ],
                "issue": "",
                "limit": 50
            }
        ]
    },
    {
        "New Series 2023": [
            {
                "title": "",
                "volume": "2023",
                "publisher": "",
                "series": "",
                "issue": "1",
                "limit": 30
            }
        ]
    }
]