Compare commits
4 Commits: 0d38808e16...fc3ddd47d4

Author | SHA1 | Date
---|---|---
 | fc3ddd47d4 |
 | 8a10fba79f |
 | be7791443a |
 | a1d3be4fed |
extras.py | 11 | Normal file
@@ -0,0 +1,11 @@
import os

def get_size(file_path, unit='bytes'):
    file_size = os.path.getsize(file_path)
    exponents_map = {'bytes': 0, 'kb': 1, 'mb': 2, 'gb': 3}
    if unit not in exponents_map:
        raise ValueError("Must select from \
        ['bytes', 'kb', 'mb', 'gb']")
    else:
        size = file_size / 1024 ** exponents_map[unit]
        return round(size, 1)
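For reference, get_size converts the byte count from os.path.getsize into the requested unit by dividing by 1024 per step and rounding to one decimal. A minimal usage sketch; the file path is illustrative and assumes the file exists:

```python
from extras import get_size

path = "app.db"                 # illustrative; any existing file works
print(get_size(path))           # size in bytes
print(get_size(path, 'mb'))     # same size in megabytes, rounded to one decimal

try:
    get_size(path, 'tb')        # unsupported unit
except ValueError as err:
    print(err)                  # Must select from ['bytes', 'kb', 'mb', 'gb']
```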
main.py | 39
@@ -19,7 +19,6 @@ import config
app = Flask(__name__, static_url_path="", static_folder="static")
auth = HTTPBasicAuth()


@auth.verify_password
def verify_password(username, password):
    if not config.TEENYOPDS_ADMIN_PASSWORD:
@@ -47,6 +46,10 @@ def healthz():
def import2sql():
    conn = sqlite3.connect('app.db')
    list = []
    comiccount = 0
    importcount = 0
    skippedcount = 0
    errorcount = 0

    start_time = timeit.default_timer()
    for root, dirs, files in os.walk(os.path.abspath(config.CONTENT_BASE_DIR)):
@@ -55,7 +58,9 @@ def import2sql():
            #try:
            if f.endswith('.cbz'):
                try:
                    comiccount = comiccount + 1
                    s = zipfile.ZipFile(f)
                    filemodtime = os.path.getmtime(f)
                    #s = gzip.GzipFile(f)
                    Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
                    #print(Bs_data.select('Series')[0].text, file=sys.stderr)
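The hunk above treats each .cbz as a zip archive, records its modification time, and parses the embedded ComicInfo.xml with BeautifulSoup. A self-contained sketch of that pattern; the path is made up and the field access mirrors the calls above:

```python
import os
import zipfile
from bs4 import BeautifulSoup

# Illustrative path; in main.py the os.walk over config.CONTENT_BASE_DIR supplies it.
f = "/comics/Example Series/Example Series 001.cbz"

if f.endswith('.cbz') and os.path.exists(f):
    archive = zipfile.ZipFile(f)                 # a .cbz is just a zip file
    filemodtime = os.path.getmtime(f)            # later stored in the UPDATED column
    info = BeautifulSoup(archive.open('ComicInfo.xml').read(), "xml")
    print(info.select('Series')[0].text, filemodtime)
```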
@@ -73,25 +78,43 @@ def import2sql():
                    except:
                        TITLE="" #sometimes title is blank.
                    PATH=f
                    UPDATED=str(datetime.datetime.now())
                    UPDATED=filemodtime
                    #print(UPDATED,file=sys.stdout)
                    #sql="INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES ("+CVDB[0]+",'"+ISSUE+"','"+SERIES+"','"+VOLUME+"','"+PUBLISHER+"','"+TITLE+"','"+file+"','" + f + "','" + UPDATED + "')"
                    #print(sql,file=sys.stdout)
                    #conn.execute(sql);
                    conn.execute("INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES (?,?,?,?,?,?,?,?,?)", (CVDB[0], ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, file, f, UPDATED))
                    conn.commit()
                    query = "SELECT UPDATED FROM COMICS WHERE CVDB = '" + str(CVDB[0]) + "';"
                    savedmodtime = conn.execute(query).fetchone()[0]
                    #print(savedmodtime)
                    #print(float(savedmodtime))
                    #print(type(savedmodtime))
                    #print(type(filemodtime))
                    if savedmodtime < filemodtime:
                        #print(str(savedmodtime) + " is less than " + str(filemodtime))

                        print(str(CVDB[0]) + " - s: " + str(savedmodtime))
                        print(str(CVDB[0]) + " - f: " + str(filemodtime))
                        conn.execute("INSERT OR REPLACE INTO COMICS (CVDB,ISSUE,SERIES,VOLUME, PUBLISHER, TITLE, FILE,PATH,UPDATED) VALUES (?,?,?,?,?,?,?,?,?)", (CVDB[0], ISSUE, SERIES, VOLUME, PUBLISHER, TITLE, file, f, UPDATED))
                        conn.commit()
                        print("Adding: " + str(CVDB[0]))
                        importcount = importcount + 1
                    else:
                        # print("Skipping: " + str(CVDB[0]))
                        skippedcount = skippedcount + 1
                except:
                    errorcount = errorcount + 1
                    print(f,file=sys.stdout)

    conn.close()
    elapsed = timeit.default_timer() - start_time
    print(elapsed)

    return str(elapsed)
    elapsed_time = "IMPORTED IN: " + str(round(elapsed,2)) + "s"
    print(elapsed_time)
    return elapsed_time + "<br>Comics: " + str(comiccount) + "<br>Imported: " + str(importcount) + "<br>Skipped: " + str(skippedcount) + "<br>Errors: " + str(errorcount)

@app.route("/content/<path:path>")
@auth.login_required
def send_content(path):
    print('content')
    return send_from_directory(config.CONTENT_BASE_DIR, path)

@app.route("/catalog")
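The main change in this hunk is that UPDATED now stores the file's mtime, and a comic is re-imported only when the file on disk is newer than the stored value; otherwise it is counted as skipped. A minimal sketch of that decision in isolation; the helper name is hypothetical, and the SELECT uses a placeholder where the diff concatenates the CVDB value into the string:

```python
import os
import sqlite3

def needs_reimport(conn, cvdb, file_path):
    # Hypothetical helper mirroring the inline check in import2sql():
    # re-import when the archive on disk is newer than the stored UPDATED value.
    filemodtime = os.path.getmtime(file_path)
    row = conn.execute("SELECT UPDATED FROM COMICS WHERE CVDB = ?", (cvdb,)).fetchone()
    if row is None:
        return True                        # never imported before
    return float(row[0]) < filemodtime     # file changed since the last import
```

Storing the raw mtime (a float) rather than a datetime string keeps this comparison a plain numeric one.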
@@ -103,7 +126,7 @@ def catalog(path=""):
    #print(request.root_url)
    c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path)
    elapsed = timeit.default_timer() - start_time
    #print("RENDERED IN: " + str(elapsed))
    print("RENDERED IN: " + str(round(elapsed,2))+"s")

    return c.render()
@@ -126,16 +126,16 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
    print("--> LOADED 2 FILE") # try and get this as low as possible.
    for e in data:
        for key, value in e.items():
            print(key)
            #print(key)
            searchArr.append(key)
    for i in searchArr:
        print(i)
        #print(i)
        if quote(f""+i) in c.url:
            conn = sqlite3.connect('app.db')
            print(data)
            #print(data)
            for e in data:
                for key, value in e.items():
                    print(key)
                    #print(key)
                    if key == i:
                        query="SELECT * FROM COMICS where "
                        for i in value:
@@ -179,7 +179,7 @@ def fromdir(root_url, url, content_base_path, content_relative_path):

                        sql = query
                        #sql="SELECT * from COMICS where SERIES like '%" + i+ "%' or Title like '%" + i+ "%';"
                        print(sql)
                        #print(sql)
                        s = conn.execute(sql)
                        #list=[]
                        for r in s:
@@ -200,12 +200,12 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
                                links=[link3]
                            )
                        )


    #print(c.title)
    return c


def mimetype(path):
    extension = path.split(".")[-1].lower()
    if extension == "pdf":
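The search query in fromdir is assembled by string concatenation (query = "SELECT * FROM COMICS where " plus per-term LIKE conditions). A hedged alternative sketch that builds the same kind of WHERE clause with placeholders, assuming the COMICS table created by import2sql(); the search terms are made up:

```python
import sqlite3

terms = ["Example Series", "Another Title"]    # illustrative; fromdir() reads its terms from the loaded search data

conditions = " OR ".join("SERIES LIKE ? OR TITLE LIKE ?" for _ in terms)
params = []
for term in terms:
    params.extend(["%" + term + "%", "%" + term + "%"])

conn = sqlite3.connect('app.db')
sql = "SELECT * FROM COMICS WHERE " + conditions
for row in conn.execute(sql, params):          # placeholders keep quoting out of the SQL text
    print(row)
```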
@@ -2,6 +2,8 @@ import zipfile
from bs4 import BeautifulSoup
import os

from extras import get_size

class Entry(object):
    valid_keys = (
        "id",
@@ -23,7 +25,10 @@ class Entry(object):
        "oai_updatedates",
        "authors",
        "formats",
        "size",
        "links",
        "cover",
        "covertype"
    )

    required_keys = ("id", "title", "links")
@@ -53,7 +58,11 @@ class Entry(object):
        f=self.links[0].get("rpath")+"/"+self.title+".cbz"
        if os.path.exists(f):
            s = zipfile.ZipFile(f)
            self.size = get_size(f, 'mb')
            data=BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
            #self.cover=s.open('P00001.jpg').read()
            self.authors = data.select('Writer')[0].text.split(",")
            print(self.authors)
            #print(data)
            #print(kwargs["links"][0])
            #print(data.select('Series')[0].text)
@@ -61,9 +70,9 @@ class Entry(object):
            if data.select('Series')[0].text in kwargs["links"][0].get("rpath"):
                releasedate=data.select('Year')[0].text+"-"+data.select('Month')[0].text.zfill(2)+"-"+data.select('Day')[0].text.zfill(2)
                try:
                    self.title = "#"+data.select('Number')[0].text.zfill(2) + ": " + data.select('Title')[0].text + " (" + releasedate + ")"
                    self.title = "#"+data.select('Number')[0].text.zfill(2) + ": " + data.select('Title')[0].text + " (" + releasedate + ") [" + str(self.size) + "MB]"
                except:
                    self.title = "#"+data.select('Number')[0].text.zfill(2) + " (" + releasedate + ")"
                    self.title = "#"+data.select('Number')[0].text.zfill(2) + " (" + releasedate + ") [" + str(self.size) + "MB]"
                #print(self.title)
            else:
                self.title = kwargs["title"]
@@ -71,8 +80,6 @@ class Entry(object):
        else:
            self.title = kwargs["title"]
            #self.title = data.select('Title')[0].text


    def get(self, key):
        return self._data.get(key, None)
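With self.size filled in from get_size(f, 'mb'), the entry title now carries a size suffix. A small illustration of the resulting strings, using made-up metadata values in place of the ComicInfo.xml fields:

```python
# Stand-ins for data.select(...)[0].text and self.size above.
number, title, releasedate, size = "3", "Example Title", "2021-04-01", 24.3

with_title = "#" + number.zfill(2) + ": " + title + " (" + releasedate + ") [" + str(size) + "MB]"
number_only = "#" + number.zfill(2) + " (" + releasedate + ") [" + str(size) + "MB]"

print(with_title)    # #03: Example Title (2021-04-01) [24.3MB]
print(number_only)   # #03 (2021-04-01) [24.3MB]
```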
@@ -27,6 +27,12 @@
        <entry>
            <title>{{ entry.title }}</title>
            <id>{{ entry.id }}</id>
            {% for author in entry.authors %}
            <author>
                <name>{{ author }}</name>
                <uri></uri>
            </author>
            {% endfor %}
            {% if entry.updated %} <updated>{{ entry.updated }}</updated> {% endif %}
            {% for link in entry.links %}
            <link rel="{{ link.rel }}"
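The template change emits one <author> element per name parsed from the Writer field. A quick sketch of how that fragment renders with jinja2 (already a Flask dependency), using made-up author names; a dict stands in for the Entry object because Jinja2 falls back to item lookup:

```python
from jinja2 import Template

fragment = (
    "{% for author in entry.authors %}"
    "<author><name>{{ author }}</name><uri></uri></author>"
    "{% endfor %}"
)

print(Template(fragment).render(entry={"authors": ["Writer One", "Writer Two"]}))
# -> <author><name>Writer One</name><uri></uri></author><author><name>Writer Two</name><uri></uri></author>
```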