Working on retirement date on wishlist

FrederikBaerentsen 2024-12-27 09:46:54 +01:00
parent 1dc797a826
commit 00ccc57ee7
2 changed files with 26 additions and 3 deletions

app.py

@@ -12,7 +12,7 @@ import rebrick #rebrickable api
 import requests # request img from web
 import shutil # save img locally
 import eventlet
-from downloadRB import download_and_unzip,get_nil_images
+from downloadRB import download_and_unzip,get_nil_images,get_retired_sets
 from db import initialize_database,get_rows,delete_tables
 from werkzeug.middleware.proxy_fix import ProxyFix
@@ -395,7 +395,7 @@ def get_file_creation_dates(file_list):
 @app.route('/config',methods=['POST','GET'])
 def config():
-    file_list = ['themes.csv', 'colors.csv', 'sets.csv','static/nil.png','static/nil_mf.jpg']
+    file_list = ['themes.csv', 'colors.csv', 'sets.csv','static/nil.png','static/nil_mf.jpg','retired_sets.csv']
     creation_dates = get_file_creation_dates(file_list)
     row_counts = [0]
@@ -417,6 +417,7 @@ def config():
         for i in urls:
             download_and_unzip("https://cdn.rebrickable.com/media/downloads/"+i+".csv.gz")
         get_nil_images()
+        get_retired_sets()
         return redirect(url_for('config'))
     elif request.form.get('deletedb') == 'Delete Database':
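The config page calls get_file_creation_dates(file_list), which is not part of this diff. As a rough illustration only (not the actual helper in app.py), such a function could stat each file and return None for files that have not been downloaded yet, which is how the new retired_sets.csv entry would show up before its first download:

import os
import datetime

def get_file_creation_dates(file_list):
    # Hypothetical sketch: one modification timestamp per file,
    # or None when the file does not exist yet.
    dates = []
    for path in file_list:
        if os.path.exists(path):
            dates.append(datetime.datetime.fromtimestamp(os.path.getmtime(path)))
        else:
            dates.append(None)
    return dates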

downloadRB.py

@@ -8,7 +8,8 @@ from urllib.parse import urlparse
 def get_nil_images():
     image_urls = [
         "https://rebrickable.com/static/img/nil_mf.jpg",
-        "https://rebrickable.com/static/img/nil.png"
+        "https://rebrickable.com/static/img/nil.png",
+        "https://docs.google.com/spreadsheets/d/1rlYfEXtNKxUOZt2Mfv0H17DvK7bj6Pe0CuYwq6ay8WA/gviz/tq?tqx=out:csv&sheet=Sorted%20by%20Retirement"
     ]
     static_folder = "static"
@@ -31,6 +32,27 @@ def get_nil_images():
         print(f"Downloaded {output_file}")
 
+def get_retired_sets():
+    urls = [
+        "https://docs.google.com/spreadsheets/d/1rlYfEXtNKxUOZt2Mfv0H17DvK7bj6Pe0CuYwq6ay8WA/gviz/tq?tqx=out:csv&sheet=Sorted%20by%20Retirement%20Date"
+    ]
+
+    for url in urls:
+        # Extract the output filename from the URL
+        parsed_url = urlparse(url)
+        output_file = os.path.basename(parsed_url.path)
+
+        # Download the CSV export
+        response = requests.get(url, stream=True)
+        response.raise_for_status()  # Check for any request errors
+
+        # Save the CSV as retired_sets.csv in the working directory
+        with open('retired_sets.csv', 'wb') as f:
+            f.write(response.content)
+
+        print(f"Downloaded {output_file}")
+
 def download_and_unzip(url: str):
     # Extract the output filename from the URL
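The new get_retired_sets() saves the Google Sheets gviz CSV export of the "Sorted by Retirement Date" tab to retired_sets.csv, but this commit does not yet show how the wishlist consumes it. A minimal sketch of reading that file with Python's csv module, assuming header columns such as "Number" and "Retirement date" (the real column names in the sheet may differ):

import csv

def load_retirement_dates(path='retired_sets.csv'):
    # Hypothetical sketch: map set number -> retirement date string.
    # Column names are assumptions; adjust to the actual CSV header.
    retirement = {}
    with open(path, newline='', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            set_number = row.get('Number', '').strip()
            if set_number:
                retirement[set_number] = row.get('Retirement date', '').strip()
    return retirement

A lookup such as load_retirement_dates().get('75192') could then supply the retirement date shown next to a wishlist entry.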