From 18e9575799e94aa046d3d55faebfa2d907821248 Mon Sep 17 00:00:00 2001
From: Frederik Baerentsen
Date: Mon, 29 Jan 2024 14:16:28 -0500
Subject: [PATCH] Fixed showing non-cbz files

---
 main.py         | 8 ++++----
 opds/catalog.py | 6 +++++-
 opds/entry.py   | 2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/main.py b/main.py
index a111a10..1d968f6 100644
--- a/main.py
+++ b/main.py
@@ -275,11 +275,11 @@ def catalog(path=""):
     #start_time = timeit.default_timer()
     #print(request.root_url)
     c = fromdir(request.root_url, request.url, config.CONTENT_BASE_DIR, path)
-    print("c: ")
+    #print("c: ")
     #pprint(vars(c))
-    for x in c.entries:
-        for y in x.links:
-            pprint(y.href)
+    #for x in c.entries:
+    #    for y in x.links:
+    #        pprint(y.href)
     #print("------")
     #elapsed = timeit.default_timer() - start_time
     #print("-----------------------------------------------------------------------------------------------------------------------")
diff --git a/opds/catalog.py b/opds/catalog.py
index 33e46ff..75a0b53 100644
--- a/opds/catalog.py
+++ b/opds/catalog.py
@@ -86,8 +86,10 @@ def fromdir(root_url, url, content_base_path, content_relative_path):
     onlydirs = [
         f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))
     ]
-    #print(onlydirs)
+    onlydirs.sort()
+    print(onlydirs)
     for dirname in onlydirs:
+        print(dirname)
         link = Link(
             href=quote(f"/catalog/{content_relative_path}/{dirname}").replace('//','/'), #windows fix
             rel="subsection",
@@ -113,6 +115,8 @@
     onlyfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
     onlyfiles.sort()
     for filename in onlyfiles:
+        if not filename.endswith('cbz'):
+            continue
         link = Link(
             href=quote(f"/content/{content_relative_path}/{filename}"),
             rel="http://opds-spec.org/acquisition",
diff --git a/opds/entry.py b/opds/entry.py
index c4e60c4..7e35001 100644
--- a/opds/entry.py
+++ b/opds/entry.py
@@ -62,7 +62,7 @@ class Entry(object):
         if os.path.exists(f):
             s = zipfile.ZipFile(f)
             self.size = extras.get_size(f, 'mb')
-            data=BeautifulSoup(s.open('ComicInfo.xml').read(), features="lxml")
+            data=BeautifulSoup(s.open('ComicInfo.xml').read(), features="xml")
             #self.cover=s.open('P00001.jpg').read()

             if data.select('Writer') != []:
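
A minimal sketch of the filtering idea introduced in opds/catalog.py, assuming files are listed with os.listdir as in the repository; the helper name list_cbz_files is illustrative only and is not part of the patch:

    import os

    def list_cbz_files(path):
        # Keep only regular files whose name ends in "cbz", mirroring the
        # patch's filename.endswith('cbz') check, and sort them so the
        # catalog order is stable.
        files = [f for f in os.listdir(path)
                 if os.path.isfile(os.path.join(path, f))]
        return sorted(f for f in files if f.endswith('cbz'))

In opds/entry.py, features="xml" selects BeautifulSoup's XML parser (backed by lxml), which preserves the original tag casing of ComicInfo.xml, whereas features="lxml" parses the document as HTML and lowercases tag names.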