Started work on search

parent 18e9575799
commit ace4e9bcba
@@ -8,6 +8,7 @@ services:
       - '5000:5000'
     volumes:
       #- '/opt/data/Comics/ComicRack:/library:ro'
-      - '/home/drudoo/Pi1/Comics/ComicRack:/library:ro'
+      #- '/home/drudoo/Pi1/Comics/ComicRack:/library:ro'
+      - '${PWD}/CT/:/library:ro'
       - '${PWD}/thumbnails:/thumbnails'
       - '${PWD}/:/app'
main.py (27 changed lines)
@@ -118,6 +118,24 @@ def startpage():
 def healthz():
     return "ok"
 
+@app.route('/search')
+def search():
+    args = request.args.get('q')
+    print(args)
+    conn = sqlite3.connect('app.db')
+    cursor = conn.cursor()
+    result = 'no good'
+    try:
+        cursor.execute("select TITLE, PATH from comics where TITLE like '%" + str(args) + "%';")
+        result = cursor.fetchall()
+
+        cursor.close()
+        for i in result:
+            print(i)
+    except Exception as e:
+        config._print(e)
+    return str(result)
+
 @app.route("/generate")
 def generate():
     force = request.args.get('force')
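Note: the query above splices the raw `q` parameter straight into the SQL text, so a crafted search term can break out of the LIKE pattern (SQL injection). A minimal sketch of the same lookup with a bound parameter (search_comics is a hypothetical helper, not part of this commit; assumes the same app.db schema):

import sqlite3

def search_comics(term):
    # Placeholder binding keeps the user-supplied term out of the SQL text.
    conn = sqlite3.connect('app.db')
    try:
        cur = conn.execute(
            "SELECT TITLE, PATH FROM comics WHERE TITLE LIKE ?",
            ('%' + str(term) + '%',),
        )
        return cur.fetchall()
    finally:
        conn.close()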
@@ -135,7 +153,7 @@ def generate():
             comiccount = comiccount + 1
             s = zipfile.ZipFile(f)
             filelist = zipfile.ZipFile.namelist(s)
-            if filelist[0] == 'ComicInfo.xml':
+            if 'ComicInfo.xml' in filelist:
                 Bs_data = BeautifulSoup(s.open('ComicInfo.xml').read(), "xml")
                 CVDB=extras.get_cvdb(Bs_data.select('Notes'))
                 if force == 'True':
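Note: the switch from filelist[0] == 'ComicInfo.xml' to 'ComicInfo.xml' in filelist matters because ZIP entries come back in whatever order they were written, so the metadata file is not guaranteed to be first. A small illustration (example.cbz is a hypothetical archive):

import zipfile

with zipfile.ZipFile("example.cbz") as z:          # hypothetical file
    names = z.namelist()                           # arbitrary write order
    has_info = "ComicInfo.xml" in names            # robust membership test
    first_is_info = names[0] == "ComicInfo.xml"    # brittle: depends on order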
@@ -168,10 +186,12 @@ def generate():
                         generated = generated + 1
                     except Exception as e:
-                        print(e)
+                        errormsg = str(e)
+                        config._print(e)
                 else:
-                    skippedcount = skippedcount + 1
+                    if not force:
+                        skippedcount = skippedcount + 1
         else:
             print("Error at: " + str(CVDB) + " " + str(f))
             files_withtout_comicinfo = files_without_comicinfo + 1
     except Exception as e:
         errorcount = errorcount + 1
@@ -181,6 +201,7 @@ def generate():
     return "Forced generation: " + str(force) + "<br>Comics: " + str(comiccount) + "<br>Generated: " + str(generated) + "<br>CBZ files without ComicInfo.xml: " + str(files_without_comicinfo) + "<br>Errors: " + str(errorcount) + "<br>Skipped: " + str(skippedcount) + "<br>" + errormsg
 
 
+
 @app.route('/import')
 def import2sql():
     conn = sqlite3.connect('app.db')
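Note: the long concatenated report above is easier to maintain as an f-string; an equivalent sketch (generation_report is a hypothetical helper, same fields as the return statement above):

def generation_report(force, comiccount, generated, files_without_comicinfo,
                      errorcount, skippedcount, errormsg=""):
    # Same HTML fragment as the return statement in generate().
    return (f"Forced generation: {force}<br>Comics: {comiccount}<br>"
            f"Generated: {generated}<br>"
            f"CBZ files without ComicInfo.xml: {files_without_comicinfo}<br>"
            f"Errors: {errorcount}<br>Skipped: {skippedcount}<br>{errormsg}")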
opds/search.py (new file, 237 lines)
@@ -0,0 +1,237 @@
import os
from uuid import uuid4
from urllib.parse import quote
from jinja2 import Environment, FileSystemLoader, select_autoescape
from .entry import Entry
from .link import Link
import sqlite3,json
import config
import extras
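Note: fromdir() below builds Catalog objects, but only Entry and Link are imported here, so the module would raise NameError the first time a catalog is requested. Assuming the class sits next to entry.py and link.py (the module name is a guess), the missing import would be:

from .catalog import Catalog  # assumed module path; fromdir() references Catalog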
class Search(object):
    def __init__(
        self,
        title,
    ):
        self.title = title

    def render(self):
        env = Environment(
            loader=FileSystemLoader(
                searchpath=os.path.join(os.path.dirname(__file__), "templates")
            ),
            autoescape=select_autoescape(["html", "xml"]),
        )
        template = env.get_template("catalog.opds.jinja2")
        return template.render(catalog=self)
def fromdir(root_url, url, content_base_path, content_relative_path):

    path = os.path.join(content_base_path, content_relative_path)

    if os.path.basename(content_relative_path) == "":
        c = Catalog(
            title="Comics",
            root_url=root_url,
            url=url
        )
    else:
        c = Catalog(
            title=extras.xmlesc(os.path.basename(content_relative_path)),
            root_url=root_url,
            url=url
        )
        #title=os.path.basename(os.path.dirname(path)), root_url=root_url, url=url

    ##########WORKING AREA###########
    searchArr=[]
    if c.url.endswith("/catalog"):
        with open('test.json') as fi:
            data=json.load(fi)
            print("--> LOADED FILE") # try and get this as low as possible.
        #searchArr=["Girl","Bat","Part One"]

        for e in data:
            for key, value in e.items():
                searchArr.append(key)
        print(searchArr)
    ######################
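Note: test.json is not part of this commit, so its shape has to be inferred from the parsing code: a list of one-key objects, each mapping a saved-search name to a list of filter dicts whose keys ('series', 'title', 'limit', raw 'SQL') drive the query builder below. A hypothetical example with invented values:

data = [
    {"Bat": [{"series": ["Batman", "Batgirl"], "limit": 50}]},
    {"Part One": [{"title": "Part One"}]},
]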
    if not "search" in c.url:
        onlydirs = [
            f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))
        ]
        onlydirs.sort()
        print(onlydirs)
        for dirname in onlydirs:
            print(dirname)
            link = Link(
                href=quote(f"/catalog/{content_relative_path}/{dirname}").replace('//','/'), #windows fix
                rel="subsection",
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title=extras.xmlesc(dirname), id=uuid4(), links=[link]))
    if c.url.endswith("/catalog"):

        for i in searchArr:

            link2 = Link(
                href=quote(f"/catalog/search["+i+"]"),
                rel="subsection",
                rpath=path,
                type="application/atom+xml;profile=opds-catalog;kind=acquisition",
            )
            c.add_entry(Entry(title="["+i+"]",id=uuid4(),links=[link2]))
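Note: these saved-search entries round-trip through URL quoting: the href embeds the search name, and the else branch further down matches it again with quote(i) in c.url. Illustration with an invented name:

from urllib.parse import quote

href = quote("/catalog/search[Part One]")  # '/catalog/search%5BPart%20One%5D'
needle = quote("Part One")                 # 'Part%20One'
print(needle in href)                      # True -- the match test used below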
    if not "search" in c.url:
        onlyfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        onlyfiles.sort()
        for filename in onlyfiles:
            if not filename.endswith('cbz'):
                continue
            link = Link(
                href=quote(f"/content/{content_relative_path}/{filename}"),
                rel="http://opds-spec.org/acquisition",
                rpath=path,
                type=mimetype(filename),
            )

            #c.add_entry(Entry(title=filename.rsplit(".",1)[0], id=uuid4(), links=[link]))
            c.add_entry(Entry(title=extras.xmlesc(filename).rsplit(".",1)[0], id=uuid4(), links=[link]))
            #fixed issue with multiple . in filename
        #print(c.render())
    else:
        with open('test.json') as fi:
            data=json.load(fi)
            config._print("--> LOADED 2 FILE") # try and get this as low as possible.
            for e in data:
                for key, value in e.items():
                    config._print(key)
                    searchArr.append(key)
        for i in searchArr:
            config._print("i (in searchArr): " + i)
            config._print("quote i: " + quote(f""+i))
            if quote(f""+i) in c.url:
                conn = sqlite3.connect('app.db')
                for e in data:
                    config._print("e (in data): " + str(e))
                    for key, value in e.items():
                        config._print("key: " + key)
                        if key == i:
                            config._print("key <" + str(key) + "> matches <" + str(i) + ">")
                            query="SELECT * FROM COMICS where "
                            for h in value:
                                first=True
                                for j,k in h.items():

                                    if j == 'SQL':
                                        query = query + k
                                    if k != '' and j != "SQL":
                                        config._print(j)
                                        config._print(k)
                                        config._print(query)
                                        if not first and j != 'limit':
                                            query = query + "and "
                                            config._print(query)
                                        if type(k) == list:
                                            config._print(k)
                                            if j == "series" or j == "title":
                                                firstS = True
                                                query = query + "("
                                                config._print(query)
                                                for l in k:
                                                    if not firstS:
                                                        query = query + "or "
                                                        config._print(query)
                                                    query = query + j + " like '%" + l + "%' "
                                                    config._print(query)
                                                    if firstS:
                                                        firstS = False
                                                query = query + ") "
                                                config._print(query)
                                            else:
                                                query = query + j + " in ("
                                                config._print(query)
                                                firstL = True
                                                for l in k:
                                                    if not firstL:
                                                        query = query + ","
                                                        config._print(query)
                                                    query = query + "'" + str(l) + "'"
                                                    config._print(query)
                                                    if firstL:
                                                        firstL = False
                                                query = query + ") "
                                                config._print(query)
                                        elif j != 'limit':
                                            query = query + j + " like '%" + str(k) + "%' "
                                            config._print(query)
                                        elif j == 'limit':
                                            config.DEFAULT_SEARCH_NUMBER = k
                                        else:
                                            print(">>>>>>>>>>>ERROR THIS SHOULD NOT HAPPEN<<<<<<<<<<<")
                                        if first:
                                            first = False

                            query = query + " order by series asc, cast(issue as unsigned) asc "
                            if config.DEFAULT_SEARCH_NUMBER != 0:
                                query = query + "LIMIT " + str(config.DEFAULT_SEARCH_NUMBER) + ";"
                            else:
                                query = query + ";"
                            break
                        else:
                            config._print("key <" + str(key) + "> DOES NOT match <" + str(i) + ">")

                config._print("----> " + query)
                sql = query
                #sql="SELECT * from COMICS where SERIES like '%" + i+ "%' or Title like '%" + i+ "%';"
                #config._print(sql)
                s = conn.execute(sql)
                #list=[]
                for r in s:
                    #config._print(r)
                    tUrl=f""+r[7].replace('\\','/').replace(config.WIN_DRIVE_LETTER + ':','').replace(config.CONTENT_BASE_DIR,"/content")
                    #config._print(tUrl)
                    tTitle=r[6]
                    link3 = Link(
                        #href=quote(f"/content/DC Comics/Earth Cities/Gotham City/Batgirl/Annual/(2012) Batgirl Annual/Batgirl Annual #001 - The Blood That Moves Us [December, 2012].cbz"),
                        href=quote(tUrl),
                        rel="http://opds-spec.org/acquisition",
                        rpath=path,
                        type="application/x-cbz",
                    )
                    #config._print(link3.href)
                    c.add_entry(
                        Entry(
                            title=tTitle,
                            id=uuid4(),
                            links=[link3]
                        )
                    )
    #print(c.title)
    return c
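Note: as in /search, every filter value above is concatenated into the SQL string that conn.execute(sql) runs. A compact sketch of the same series/title filtering with bound parameters (run_saved_search is a hypothetical helper; same COMICS table and ordering as above):

import sqlite3

def run_saved_search(db_path, series=(), titles=(), limit=0):
    # Build "(col LIKE ? OR col LIKE ?)" groups and bind the values separately.
    clauses, params = [], []
    for col, values in (("series", series), ("title", titles)):
        if values:
            clauses.append("(" + " OR ".join([col + " LIKE ?"] * len(values)) + ")")
            params.extend('%' + v + '%' for v in values)
    sql = "SELECT * FROM COMICS"
    if clauses:
        sql += " WHERE " + " AND ".join(clauses)
    sql += " ORDER BY series ASC, CAST(issue AS UNSIGNED) ASC"
    if limit:
        sql += " LIMIT ?"
        params.append(limit)
    conn = sqlite3.connect(db_path)
    try:
        return conn.execute(sql, params).fetchall()
    finally:
        conn.close()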
def mimetype(path):
    extension = path.split(".")[-1].lower()
    if extension == "pdf":
        return "application/pdf"
    elif extension == "epub":
        return "application/epub"
    elif extension == "mobi":
        return "application/mobi"
    elif extension == "cbz":
        return "application/x-cbz"
    else:
        return "application/unknown"
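Note: the extension-to-MIME mapping in mimetype() can be table-driven; an equivalent sketch with the same mappings (for reference, the registered EPUB type is application/epub+zip, and MOBI is usually served as application/x-mobipocket-ebook):

MIMETYPES = {
    "pdf": "application/pdf",
    "epub": "application/epub",
    "mobi": "application/mobi",
    "cbz": "application/x-cbz",
}

def mimetype(path):
    # Same behavior as the if/elif chain above, including the unknown fallback.
    return MIMETYPES.get(path.split(".")[-1].lower(), "application/unknown")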