Fixed xml escape issue

This commit is contained in:
parent 0ef53ab48c
commit c85185d975
extras.py (10 changed lines)
@@ -1,5 +1,15 @@
 import os,re
+
+table = str.maketrans({
+    "<": "&lt;",
+    ">": "&gt;",
+    "&": "&amp;",
+    "'": "&apos;",
+    '"': "&quot;",
+})
+def xmlesc(txt):
+    return txt.translate(table)
 
 def get_size(file_path, unit='bytes'):
     file_size = os.path.getsize(file_path)
     exponents_map = {'bytes': 0, 'kb': 1, 'mb': 2, 'gb': 3}
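Note: a quick sketch of the new helper's behavior (assuming extras is importable from the project root; the sample strings are made up):

    >>> import extras
    >>> extras.xmlesc("Tom & Jerry <Vol. 1>")
    'Tom &amp; Jerry &lt;Vol. 1&gt;'
    >>> extras.xmlesc('it\'s "quoted"')
    'it&apos;s &quot;quoted&quot;'

str.maketrans builds the character-to-entity mapping once at import time, so translate() performs the whole substitution in a single pass, avoiding the double-escaping risk of chained str.replace calls.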
main.py (24 changed lines)
@@ -267,23 +267,23 @@ def image(path):
 @app.route("/catalog/<path:path>")
 @auth.login_required
 def catalog(path=""):
-    #config._print("path: " + path)
-    #config._print("root_url: " + request.root_url)
-    #config._print("url: " + request.url)
-    #config._print("CONTENT_BASE_DIR: " + config.CONTENT_BASE_DIR)
+    config._print("path: " + path)
+    config._print("root_url: " + request.root_url)
+    config._print("url: " + request.url)
+    config._print("CONTENT_BASE_DIR: " + config.CONTENT_BASE_DIR)
     #print("PRESSED ON")
-    start_time = timeit.default_timer()
+    #start_time = timeit.default_timer()
     #print(request.root_url)
     c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path)
-    #print("c: ")
+    print("c: ")
     #pprint(vars(c))
-    #for x in c.entries:
-    #    for y in x.links:
-    #        pprint(y.href)
+    for x in c.entries:
+        for y in x.links:
+            pprint(y.href)
     #print("------")
-    elapsed = timeit.default_timer() - start_time
-    print("-----------------------------------------------------------------------------------------------------------------------")
-    print("RENDERED IN: " + str(round(elapsed,2))+"s")
+    #elapsed = timeit.default_timer() - start_time
+    #print("-----------------------------------------------------------------------------------------------------------------------")
+    #print("RENDERED IN: " + str(round(elapsed,2))+"s")
 
     return c.render()
 
@@ -6,6 +6,7 @@ from .entry import Entry
 from .link import Link
 import sqlite3,json
 import config
+import extras
 
 class Catalog(object):
     def __init__(
@@ -47,8 +48,7 @@ def fromsearch(root_url, url, content_base_path, content_relative_path):
     return c
 
 def fromdir(root_url, url, content_base_path, content_relative_path):
-
 
     path = os.path.join(content_base_path, content_relative_path)
 
     if os.path.basename(content_relative_path) == "":
@@ -59,7 +59,7 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
         )
     else:
         c = Catalog(
-            title=os.path.basename(content_relative_path),
+            title=extras.xmlesc(os.path.basename(content_relative_path)),
             root_url=root_url,
             url=url
         )
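Note: a small illustration of why the title is escaped before it is rendered into the Atom/OPDS feed — a directory name containing a raw & or < would otherwise produce ill-formed XML (the path below is made up):

    >>> import os, extras
    >>> extras.xmlesc(os.path.basename("/library/Mickey & Friends"))
    'Mickey &amp; Friends'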
@@ -94,7 +94,7 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
             rpath=path,
             type="application/atom+xml;profile=opds-catalog;kind=acquisition",
         )
-        c.add_entry(Entry(title=dirname, id=uuid4(), links=[link]))
+        c.add_entry(Entry(title=extras.xmlesc(dirname), id=uuid4(), links=[link]))
 
 
     if c.url.endswith("/catalog"):
@@ -119,7 +119,10 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
             rpath=path,
             type=mimetype(filename),
         )
-        c.add_entry(Entry(title=filename.rsplit(".",1)[0], id=uuid4(), links=[link]))
+
+        #c.add_entry(Entry(title=filename.rsplit(".",1)[0], id=uuid4(), links=[link]))
+        c.add_entry(Entry(title=extras.xmlesc(filename).rsplit(".",1)[0], id=uuid4(), links=[link]))
+
+        #fixed issue with multiple . in filename
         #print(c.render())
     else:
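Note: the new line escapes the filename first, then strips the extension with rsplit(".", 1), which splits from the right at most once — so names containing several dots keep everything before the last one (the sample names are made up):

    >>> "Vol. 1. No. 3.cbz".rsplit(".", 1)[0]
    'Vol. 1. No. 3'
    >>> extras.xmlesc("B&W Comics.cbz").rsplit(".", 1)[0]
    'B&amp;W Comics'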
@@ -62,7 +62,7 @@ class Entry(object):
         if os.path.exists(f):
             s = zipfile.ZipFile(f)
             self.size = extras.get_size(f, 'mb')
-            data=BeautifulSoup(s.open('ComicInfo.xml').read(), features="html.parser")
+            data=BeautifulSoup(s.open('ComicInfo.xml').read(), features="lxml")
             #self.cover=s.open('P00001.jpg').read()
 
             if data.select('Writer') != []:
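Note: a minimal sketch of the parser switch, assuming bs4 and lxml are installed; the ComicInfo payload below is made up. lxml's HTML parser lowercases tag names, but CSS type selectors in select() match case-insensitively, so the 'Writer' lookup still finds the tag:

    from bs4 import BeautifulSoup

    doc = b"<ComicInfo><Writer>Alan Moore</Writer></ComicInfo>"
    data = BeautifulSoup(doc, features="lxml")
    print(data.select('Writer'))   # [<writer>Alan Moore</writer>]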