Compare commits
3 Commits: 89e68e3f8e ... e8e2ef20d5

Author | SHA1 | Date
---|---|---
 | e8e2ef20d5 |
 | 34f734d399 |
 | f6acbfa3e4 |
@@ -4,9 +4,18 @@ from werkzeug.security import generate_password_hash
#CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/library") #docker
#CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/home/drudoo/ComicsTest/Comics") #linux
CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "/Comics/ComicRack") #windows
#CONTENT_BASE_DIR = os.getenv("CONTENT_BASE_DIR", "testlibrary") #windows test library

THUMBNAIL_DIR = os.getenv("THUMBNAIL_DIR",'thumbnails')

WIN_DRIVE_LETTER = 'B'
DEFAULT_SEARCH_NUMBER = 10

DEBUG = True

def _print(arg):
    if DEBUG:
        print(arg)

TEENYOPDS_ADMIN_PASSWORD = os.getenv("TEENYOPDS_ADMIN_PASSWORD", None)
users = {}
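The new DEBUG/_print pair above gates diagnostic output behind a single config flag; a minimal usage sketch (assuming config.py is importable as config, which main.py already relies on; the message string is illustrative):

import config

config.DEBUG = True                    # flip to False to silence all _print output
config._print("import2sql: starting")  # printed only while DEBUG is True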
main.py (113 changed lines)
@@ -34,7 +34,7 @@ def startpage():
    #result = "Hello, World!"
    conn = sqlite3.connect('app.db')
    cursor = conn.cursor()
    cursor.execute("select * from comics;")
    cursor.execute("select * from comics LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ";")
    result = cursor.fetchall()
    conn.close()
    return render_template("start.html", result=result)
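The replaced line above builds the LIMIT clause by string concatenation; for reference, the same query can be written with a bound parameter (a sketch only, not what the commit does; sqlite3 accepts a placeholder for the LIMIT value):

import sqlite3
import config

# Equivalent, parameterized form of the new startpage query (illustrative only).
conn = sqlite3.connect('app.db')
rows = conn.execute(
    "SELECT * FROM comics LIMIT ?",
    (config.DEFAULT_SEARCH_NUMBER,),
).fetchall()
conn.close()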
@@ -61,58 +61,72 @@ def import2sql():
try:
comiccount = comiccount + 1
s = zipfile.ZipFile(f)
filemodtime = os.path.getmtime(f)
#s = gzip.GzipFile(f)
Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
#print(Bs_data.select('Series')[0].text, file=sys.stderr)
#print(Bs_data.select('Title')[0].text, file=sys.stderr)
CVDB=re.findall('(?<=\[CVDB)(.*)(?=].)', Bs_data.select('Notes')[0].text)
#list.append('CVDB'+CVDB[0] + ': ' + Bs_data.select('Series')[0].text + "(" + Bs_data.select('Volume')[0].text + ") : " + Bs_data.select('Number')[0].text )
#print(list, file=sys.stdout)
filelist = zipfile.ZipFile.namelist(s)
if filelist[0] == 'ComicInfo.xml':
filemodtime = os.path.getmtime(f)
#s = gzip.GzipFile(f)
Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")

ISSUE=Bs_data.select('Number')[0].text
SERIES=Bs_data.select('Series')[0].text
VOLUME=Bs_data.select('Volume')[0].text
PUBLISHER=Bs_data.select('Publisher')[0].text
try:
TITLE=Bs_data.select('Title')[0].text
except:
TITLE="" #sometimes title is blank.
PATH=f
UPDATED=filemodtime
#print(UPDATED,file=sys.stdout)
#sql="INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES ("+CVDB[0]+",'"+ISSUE+"','"+SERIES+"','"+VOLUME+"','"+PUBLISHER+"','"+TITLE+"','"+file+"','" + f + "','" + UPDATED + "')"
#print(sql,file=sys.stdout)
#conn.execute(sql);

# CREATE TABLE IF MISSING
# create table COMICS (CVDB, ISSUE, SERIES,VOLUME,PUBLISHER,TITLE,FILE,PATH,UPDATED,PRIMARY KEY(CVDB))
try:
query = "SELECT UPDATED FROM COMICS WHERE CVDB = '" + str(CVDB[0]) + "';"
savedmodtime = conn.execute(query).fetchone()[0]
except:
savedmodtime = 0
#print(savedmodtime)
#print(float(savedmodtime))
#print(type(savedmodtime))
#print(type(filemodtime))
if savedmodtime < filemodtime:
#print(str(savedmodtime) + " is less than " + str(filemodtime))

#print(str(CVDB[0]) + " - s: " + str(savedmodtime))
#print(str(CVDB[0]) + " - f: " + str(filemodtime))
conn.execute("INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES (?,?,?,?,?,?,?,?,?)", (CVDB[0], ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, file, f, UPDATED))
conn.commit()
#print("Adding: " + str(CVDB[0]))
importcount = importcount + 1
else:
# print("Skipping: " + str(CVDB[0]))
skippedcount = skippedcount + 1
except:
#print(Bs_data.select('Series')[0].text, file=sys.stderr)
#print(Bs_data.select('Title')[0].text, file=sys.stderr)
CVDB=re.findall('(?<=\[CVDB)(.*)(?=].)', Bs_data.select('Notes')[0].text)


#list.append('CVDB'+CVDB[0] + ': ' + Bs_data.select('Series')[0].text + "(" + Bs_data.select('Volume')[0].text + ") : " + Bs_data.select('Number')[0].text )
#print(list, file=sys.stdout)

ISSUE=Bs_data.select('Number')[0].text
SERIES=Bs_data.select('Series')[0].text
VOLUME=Bs_data.select('Volume')[0].text
PUBLISHER=Bs_data.select('Publisher')[0].text
try:
TITLE=Bs_data.select('Title')[0].text
except:
TITLE="" #sometimes title is blank.
PATH=f
UPDATED=filemodtime
#print(UPDATED,file=sys.stdout)
#sql="INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES ("+CVDB[0]+",'"+ISSUE+"','"+SERIES+"','"+VOLUME+"','"+PUBLISHER+"','"+TITLE+"','"+file+"','" + f + "','" + UPDATED + "')"
#print(sql,file=sys.stdout)
#conn.execute(sql);

# CREATE TABLE IF MISSING
# create table COMICS (CVDB, ISSUE, SERIES,VOLUME,PUBLISHER,TITLE,FILE,PATH,UPDATED,PRIMARY KEY(CVDB))
try:
query = "SELECT UPDATED FROM COMICS WHERE CVDB = '" + str(CVDB[0]) + "';"
savedmodtime = conn.execute(query).fetchone()[0]
except:
savedmodtime = 0
#print(savedmodtime)
#print(float(savedmodtime))
#print(type(savedmodtime))
#print(type(filemodtime))
if savedmodtime < filemodtime:
#print(str(savedmodtime) + " is less than " + str(filemodtime))

#print(str(CVDB[0]) + " - s: " + str(savedmodtime))
#print(str(CVDB[0]) + " - f: " + str(filemodtime))

cover = s.open(filelist[1]).read()
c = open(config.THUMBNAIL_DIR + "/" + str(CVDB[0]) + ".jpg", 'wb+')
c.write(cover)
c.close()

conn.execute("INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES (?,?,?,?,?,?,?,?,?)", (CVDB[0], ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, file, f, UPDATED))
conn.commit()
#print("Adding: " + str(CVDB[0]))
importcount = importcount + 1
else:
# print("Skipping: " + str(CVDB[0]))
skippedcount = skippedcount + 1
except Exception as e:
errorcount = errorcount + 1
comics_with_errors.append(f)
print(e)
#print(f,file=sys.stdout)

print(comics_with_errors)
conn.close()
elapsed = timeit.default_timer() - start_time
elapsed_time = "IMPORTED IN: " + str(round(elapsed,2)) + "s"
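Both the old and the reworked import paths key the COMICS rows on the CVDB id parsed out of the Notes element in ComicInfo.xml; a small self-contained check of that lookbehind/lookahead pattern (the sample Notes string below is made up for illustration):

import re

# Hypothetical Notes value in the style the ComicVine scraper writes.
notes = "Scraped metadata from ComicVine [CVDB123456]."

cvdb = re.findall(r'(?<=\[CVDB)(.*)(?=].)', notes)
print(cvdb)  # ['123456'] -> cvdb[0] becomes the primary key of the COMICS table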
@@ -125,6 +139,10 @@ def send_content(path):
    print('content')
    return send_from_directory(config.CONTENT_BASE_DIR, path)

@app.route("/image/<path:path>")
def image(path):
    return send_from_directory(config.THUMBNAIL_DIR,path)

@app.route("/catalog")
@app.route("/catalog/")
@app.route("/catalog/<path:path>")
@@ -135,6 +153,7 @@ def catalog(path=""):
    #print(request.root_url)
    c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path)
    elapsed = timeit.default_timer() - start_time
    print("-----------------------------------------------------------------------------------------------------------------------")
    print("RENDERED IN: " + str(round(elapsed,2))+"s")

    return c.render()
@@ -125,73 +125,97 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
else:
with open('test.json') as fi:
data=json.load(fi)
print("--> LOADED 2 FILE") # try and get this as low as possible.
config._print("--> LOADED 2 FILE") # try and get this as low as possible.
for e in data:
for key, value in e.items():
#print(key)
config._print(key)
searchArr.append(key)
for i in searchArr:
#print(i)
config._print("i (in searchArr): " + i)
config._print("quote i: " + quote(f""+i))
if quote(f""+i) in c.url:
conn = sqlite3.connect('app.db')
#print(data)
for e in data:
config._print("e (in data): " + str(e))
for key, value in e.items():
print(key)
config._print("key: " + key)
if key == i:
config._print("key <" + str(key) + "> matches <" + str(i) + ">")
query="SELECT * FROM COMICS where "
for i in value:
for h in value:
first=True
for j,k in i.items():
for j,k in h.items():

if j == 'SQL':
query = query + k
if k != '' and j != "SQL":
print(j,k)
if not first:
config._print(j)
config._print(k)
config._print(query)
if not first and j != 'limit':
query = query + "and "
config._print(query)
if type(k) == list:
print(k)
config._print(k)
if j == "series" or j == "title":
firstS = True
query = query + "("
config._print(query)
for l in k:
if not firstS:
query = query + "or "
config._print(query)
query = query + j + " like '%" + l + "%' "
config._print(query)
if firstS:
firstS = False
query = query + ") "
config._print(query)
else:
query = query + j + " in ("
config._print(query)
firstL = True
for l in k:
if not firstL:
query = query + ","
query = query + "'" + l + "'"
config._print(query)
query = query + "'" + str(l) + "'"
config._print(query)
if firstL:
firstL = False
query = query + ") "
config._print(query)

elif j != 'limit':
query = query + j + " like '%" + str(k) + "%' "
config._print(query)
elif j == 'limit':
config.DEFAULT_SEARCH_NUMBER = k
else:
query = query + j + " like '%" + k + "%' "
print(">>>>>>>>>>>ERROR THIS SHOULD NOT HAPPEN<<<<<<<<<<<")
if first:
first = False
query = query + " order by series asc, cast(issue as unsigned) asc "
if config.DEFAULT_SEARCH_NUMBER != 0:
query = query + "LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ";"
else:
query = query + ";"
print("----> " + query)

query = query + " order by series asc, cast(issue as unsigned) asc "
if config.DEFAULT_SEARCH_NUMBER != 0:
query = query + "LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ";"
else:
query = query + ";"
break
else:
config._print("key <" + str(key) + "> DOES NOT match <" + str(i) + ">")

config._print("----> " + query)

sql = query
#sql="SELECT * from COMICS where SERIES like '%" + i+ "%' or Title like '%" + i+ "%';"
#print(sql)
#config._print(sql)
s = conn.execute(sql)
#list=[]
for r in s:
#print(r)
#config._print(r)
tUrl=f""+r[7].replace('\\','/').replace(config.WIN_DRIVE_LETTER + ':','').replace(config.CONTENT_BASE_DIR,"/content")
print(tUrl)
#config._print(tUrl)
tTitle=r[6]
link3 = Link(
#href=quote(f"/content/DC Comics/Earth Cities/Gotham City/Batgirl/Annual/(2012) Batgirl Annual/Batgirl Annual #001 - The Blood That Moves Us [December, 2012].cbz"),
@@ -200,7 +224,7 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
rpath=path,
type="application/x-cbz",
)
print(link3.href)
#config._print(link3.href)
c.add_entry(
Entry(
title=tTitle,
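To make the string-building above easier to follow, here is a hand-traced sketch of roughly the query it assembles for the reworked "Girl" entry in test.json: list values expand into an OR group of LIKE terms, scalar values become LIKE terms joined with "and", and the default limit from config.DEFAULT_SEARCH_NUMBER is appended. This is an illustration, not output captured from the app:

# Roughly what fromdir() builds for the "Girl" entry in test.json
# (whitespace simplified; DEFAULT_SEARCH_NUMBER assumed at its default of 10).
expected_query = (
    "SELECT * FROM COMICS where "
    "(title like '%girl%' or title like '%man%' or title like '%World%') "
    "and series like '%girl%' "
    "order by series asc, cast(issue as unsigned) asc LIMIT 10;"
)
print(expected_query)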
@@ -1,8 +1,10 @@
import zipfile
from bs4 import BeautifulSoup
import os
import re

from extras import get_size
import config

class Entry(object):
    valid_keys = (
@@ -62,7 +64,8 @@ class Entry(object):
        data=BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
        #self.cover=s.open('P00001.jpg').read()
        self.authors = data.select('Writer')[0].text.split(",")
        print(self.authors)
        self.cover = "/image/" + re.findall('(?<=\[CVDB)(.*)(?=].)', data.select('Notes')[0].text)[0] + ".jpg"

        #print(data)
        #print(kwargs["links"][0])
        #print(data.select('Series')[0].text)
@@ -34,6 +34,9 @@
    </author>
    {% endfor %}
    {% if entry.updated %} <updated>{{ entry.updated }}</updated> {% endif %}
    <link rel="http://opds-spec.org/image"
          href="{{ entry.cover }}"
          type="image/jpg"/>
    {% for link in entry.links %}
    <link rel="{{ link.rel }}"
          href="{{ link.href }}"
test.json (78 changed lines)
@@ -1,11 +1,12 @@
[
    {
        "SQL TEST": [
        "Amazons": [
            {
                "SQL": "(series like '%Aqua%' or series like '%girl%') and issue in ('1','2','5','10') and title not like '%Annual%'"
                "SQL": "(series = 'Nubia & the Amazons' and issue in ('1','2','3','4','5','6')) or (series like 'Trial of the Amazons%' and issue in ('1','2')) or (series = 'Wonder Woman' and issue in ('785','786','787'))"
            }
        ]
    },{
    },
    {
        "Letter 44": [
            {
                "title": "",
@@ -15,8 +16,9 @@
                "issue": ""
            }
        ]
    },{
        "Man 2020,2019": [
    },
    {
        "Man 2020 or 2019": [
            {
                "title": "Man",
                "volume": [
@@ -28,17 +30,19 @@
                "issue": ""
            }
        ]
    },{
        "DC (BAT)": [
    },
    {
        "DC BAT": [
            {
                "title": "",
                "volume": "",
                "publisher": "DC Comics",
                "series": "%bat%",
                "series": "Bat",
                "issue": ""
            }
        ]
    },{
    },
    {
        "Marvel": [
            {
                "title": "",
@@ -48,48 +52,78 @@
                "issue": ""
            }
        ]
    },{
    },
    {
        "Girl": [
            {
                "title": ["girl","man","World"],
                "title": [
                    "girl",
                    "man",
                    "World"
                ],
                "volume": "",
                "publisher": "",
                "series": "girl",
                "issue": ""
            }
        ]
    },{
    },
    {
        "number 1": [
            {
                "title": "",
                "volume": "",
                "publisher": "",
                "series": "",
                "issue": ["1"]
                "issue": [
                    "1"
                ]
            }
        ]
    }
    ,
    },
    {
        "Aquaman": [
            {
                "title": ["Tyrant King", "The Deluge Act Three", "Warhead Part One"],
                "title": [
                    "Tyrant King",
                    "The Deluge Act Three",
                    "Warhead Part One",
                    "Black Mantra"
                ],
                "volume": "",
                "publisher": "",
                "series": "",
                "issue": ""
            }
        ]
    }
    ,
    },
    {
        "Girl series": [
        "2020-2022 DC Comics": [
            {
                "title": "",
                "volume": "",
                "volume": [
                    "2020",
                    "2022"
                ],
                "publisher": "DC Comics",
                "series": [
                    "Batman",
                    "Detective Comics"
                ],
                "issue": "",
                "limit": 50
            }
        ]
    },
    {
        "New Series 2023": [
            {
                "title": "",
                "volume": "2023",
                "publisher": "",
                "series": "girl",
                "issue": "2"
                "series": "",
                "issue": "1",
                "limit": 30
            }
        ]
    }
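Since the catalog shelves are driven entirely by this file, a quick shape check can catch the trailing-comma and brace slips this commit cleans up; a sketch only (the script is not part of the repo, it just mirrors the filter keys fromdir() reads):

import json

# Illustrative check: load test.json and list each search shelf with the
# filter keys fromdir() will read from its first filter object.
with open('test.json') as fh:
    shelves = json.load(fh)

for entry in shelves:
    for name, filters in entry.items():
        print(name, '->', sorted(filters[0].keys()))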