mirror of https://github.com/krateng/maloja.git synced 2023-08-10 21:12:55 +03:00

Removed last unnecessary cross-server HTTP call

Krateng 2019-03-28 14:18:12 +01:00
parent d3e46ac50b
commit ef765352e0
2 changed files with 92 additions and 90 deletions

View File

@@ -4,6 +4,7 @@
 from bottle import Bottle, route, get, post, error, run, template, static_file, request, response, FormsDict, redirect, template
 import waitress
 # rest of the project
+import database
 from htmlgenerators import removeIdentical
 from utilities import *
 from htmlgenerators import KeySplit
@@ -71,7 +72,8 @@ def database_post(pth):
 def graceful_exit(sig=None,frame=None):
-    urllib.request.urlopen("http://[::1]:" + str(DATABASE_PORT) + "/sync")
+    #urllib.request.urlopen("http://[::1]:" + str(DATABASE_PORT) + "/sync")
+    database.sync()
     log("Server shutting down...")
     os._exit(42)
@@ -172,7 +174,8 @@ signal.signal(signal.SIGTERM, graceful_exit)
 setproctitle.setproctitle("Maloja")
 ## start database server
-_thread.start_new_thread(SourceFileLoader("database","database.py").load_module().runserver,(DATABASE_PORT,))
+#_thread.start_new_thread(SourceFileLoader("database","database.py").load_module().runserver,(DATABASE_PORT,))
+_thread.start_new_thread(database.runserver,(DATABASE_PORT,))
 log("Starting up Maloja server...")
 run(webserver, host='::', port=MAIN_PORT, server='waitress')
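
In plain terms, the server no longer talks to its own database process over HTTP: the database module is imported directly, graceful_exit() calls database.sync() in-process, and the database API is started via database.runserver() from the imported module instead of a copy reloaded through SourceFileLoader. Below is a minimal sketch of that pattern, assuming database.py exposes sync() and runserver(port) as shown in the diff; the port value and surrounding scaffolding are placeholders for illustration, not taken from the project.

# Sketch only: perform the sync as a direct in-process call instead of an
# HTTP request to localhost. database.sync() and database.runserver(port)
# are the functions visible in the diff above; the port value is assumed.
import _thread
import database

DATABASE_PORT = 12349  # placeholder port for illustration

def graceful_exit(sig=None, frame=None):
    # before: urllib.request.urlopen("http://[::1]:" + str(DATABASE_PORT) + "/sync")
    database.sync()  # same effect, no cross-server HTTP round trip

# the database API server still runs for external clients,
# but it is launched from the already-imported module
_thread.start_new_thread(database.runserver, (DATABASE_PORT,))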

View File

@@ -11,10 +11,10 @@ import datetime
 def parseTSV(filename,*args,escape=True):
     f = open(filename)
     result = []
     for l in [l for l in f if (not l.startswith("#")) and (not l.strip()=="")]:
         l = l.replace("\n","")
         if escape:
             l = l.split("#")[0]
@@ -42,28 +42,28 @@ def parseTSV(filename,*args,escape=True):
                     entry.append((data[i].lower() in ["true","yes","1","y"]))
                 except:
                     entry.append(False)
         result.append(entry)
     f.close()
     return result

 def checksumTSV(folder):
     sums = ""
     for f in os.listdir(folder + "/"):
         if (f.endswith(".tsv")):
             f = open(folder + "/" + f,"rb")
             sums += hashlib.md5(f.read()).hexdigest() + "\n"
             f.close()
     return sums

 # returns whether checksums match and sets the checksum to invalid if they don't (or sets the new one if no previous one exists)
 def combineChecksums(filename,checksums):
     import os
     if os.path.exists(filename + ".rulestate"):
         f = open(filename + ".rulestate","r")
         oldchecksums = f.read()
@@ -86,41 +86,41 @@ def combineChecksums(filename,checksums):
         f.write(checksums)
         f.close()
     return True

 # checks ALL files for their rule state. if they are all the same as the current loaded one, the entire database can be assumed to be consistent with the current ruleset
 # in any other case, get out
 def consistentRulestate(folder,checksums):
     result = []
     for scrobblefile in os.listdir(folder + "/"):
         if (scrobblefile.endswith(".tsv")):
             try:
                 f = open(folder + "/" + scrobblefile + ".rulestate","r")
                 if f.read() != checksums:
                     return False
             except:
                 return False
             finally:
                 f.close()
     return True

 def parseAllTSV(path,*args,escape=True):
     result = []
     for f in os.listdir(path + "/"):
         if (f.endswith(".tsv")):
             result += parseTSV(path + "/" + f,*args,escape=escape)
     return result

 def createTSV(filename):
     if not os.path.exists(filename):
@@ -129,14 +129,14 @@ def createTSV(filename):
 def addEntry(filename,a,escape=True):
     createTSV(filename)
     line = "\t".join(a)
     if escape: line = line.replace("#",r"\num")
     with open(filename,"a") as f:
         f.write(line + "\n")

 def addEntries(filename,al,escape=True):
     with open(filename,"a") as f:
         for a in al:
             line = "\t".join(a)
@@ -147,23 +147,23 @@ def addEntries(filename,al,escape=True):
 ### Useful functions

-def int_or_none(input_):
-    try:
-        return int(input_)
-    except:
-        return None
+#def int_or_none(input_):
+#    try:
+#        return int(input_)
+#    except:
+#        return None

-def cleandict(d):
-    newdict = {k:d[k] for k in d if d[k] is not None}
-    d.clear()
-    d.update(newdict)
+#def cleandict(d):
+#    newdict = {k:d[k] for k in d if d[k] is not None}
+#    d.clear()
+#    d.update(newdict)

 ### Logging

 def log(msg,module=None):
     now = datetime.datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S")
     if module is None:
@@ -173,8 +173,8 @@ def log(msg,module=None):
     print("[" + module + "] " + msg)
     with open("logs/" + module + ".log","a") as logfile:
         logfile.write(now + " " + msg + "\n")

 ### not meant to be precise, just for a rough idea
 measurement = 0

 def clock(*args):
@@ -191,19 +191,19 @@ def clock(*args):
 ### Media info

 def apirequest(artists=None,artist=None,title=None):
     import urllib.parse, urllib.request
     import json

     try:
         with open("apikey","r") as keyfile:
             apikey = keyfile.read().replace("\n","")
         if apikey == "NONE": return None
     except:
         return None

     sites = [
         {
             "name":"lastfm",
@@ -215,8 +215,8 @@ def apirequest(artists=None,artist=None,title=None):
             #"result_track_desc":lambda data:None
         }
     ]

     # TRACKS
     if title is not None:
         for s in sites:
@@ -230,20 +230,20 @@ def apirequest(artists=None,artist=None,title=None):
                     return s["result_track_imgurl"](data)
             except:
                 pass

         if len(artists) == 1:
             #return {"image":apirequest(artist=artists[0])["image"]}
             return None

         # try the same track with every single artist
         for a in artists:
             rec = apirequest(artists=[a],title=title)
             if rec is not None:
                 return rec
         return None

     # ARTISTS
     else:
         for s in sites:
             try:
@@ -254,7 +254,7 @@ def apirequest(artists=None,artist=None,title=None):
                 return s["result_artist_imgurl"](data)
             except:
                 pass
     return None

 # I think I've only just understood modules
@@ -266,20 +266,20 @@ def saveCache():
     stream = pickle.dumps((cachedTracks,cachedArtists))
     fl.write(stream)
     fl.close()

 def loadCache():
     try:
         fl = open("images/cache","rb")
     except:
         return

     try:
         ob = pickle.loads(fl.read())
         global cachedTracks, cachedArtists
         (cachedTracks, cachedArtists) = ob
     finally:
         fl.close()

     # remove corrupt caching from previous versions
     toremove = []
     for k in cachedTracks:
@@ -288,7 +288,7 @@ def loadCache():
     for k in toremove:
         del cachedTracks[k]
         log("Removed invalid cache key: " + str(k))

     toremove = []
     for k in cachedArtists:
         if cachedArtists[k] == "":
@@ -303,7 +303,7 @@ def getTrackImage(artists,title,fast=False):
     filename = "-".join([re.sub("[^a-zA-Z0-9]","",artist) for artist in artists]) + "_" + re.sub("[^a-zA-Z0-9]","",title)
     if filename == "": filename = str(hash(obj))
     filepath = "images/tracks/" + filename

     # check if custom image exists
     if os.path.exists(filepath + ".png"):
         imgurl = "/" + filepath + ".png"
@@ -314,7 +314,7 @@ def getTrackImage(artists,title,fast=False):
     elif os.path.exists(filepath + ".jpeg"):
         imgurl = "/" + filepath + ".jpeg"
         return imgurl

     try:
         # check our cache
         # if we have cached the nonexistence of that image, we immediately return the redirect to the artist and let the resolver handle it
@@ -328,17 +328,17 @@ def getTrackImage(artists,title,fast=False):
             return ""
     except:
         pass

     # fast request only retuns cached and local results, generates redirect link for rest
     if fast: return "/image?title=" + urllib.parse.quote(title) + "&" + "&".join(["artist=" + urllib.parse.quote(a) for a in artists])

     # non-fast lookup (esentially only the resolver lookup)
     result = apirequest(artists=artists,title=title)

     # cache results (even negative ones)
     cachedTracks[(frozenset(artists),title)] = result

     # return either result or redirect to artist
     if result is not None: return result
     else:
@@ -347,16 +347,16 @@ def getTrackImage(artists,title,fast=False):
         if res != "": return res
         return ""

 def getArtistImage(artist,fast=False):
     obj = artist
     filename = re.sub("[^a-zA-Z0-9]","",artist)
     if filename == "": filename = str(hash(obj))
     filepath = "images/artists/" + filename
     #filepath_cache = "info/artists_cache/" + filename

     # check if custom image exists
     if os.path.exists(filepath + ".png"):
         imgurl = "/" + filepath + ".png"
@@ -367,7 +367,7 @@ def getArtistImage(artist,fast=False):
     elif os.path.exists(filepath + ".jpeg"):
         imgurl = "/" + filepath + ".jpeg"
         return imgurl

     try:
         result = cachedArtists[artist]
@@ -375,49 +375,49 @@ def getArtistImage(artist,fast=False):
         else: return ""
     except:
         pass

     # fast request only retuns cached and local results, generates redirect link for rest
     if fast: return "/image?artist=" + urllib.parse.quote(artist)

     # non-fast lookup (esentially only the resolver lookup)
     result = apirequest(artist=artist)

     # cache results (even negative ones)
     cachedArtists[artist] = result

     if result is not None: return result
     else: return ""

 def getTrackImages(trackobjectlist,fast=False):
     threads = []
     for track in trackobjectlist:
         t = Thread(target=getTrackImage,args=(track["artists"],track["title"],),kwargs={"fast":fast})
         t.start()
         threads.append(t)
     for t in threads:
         t.join()

     return [getTrackImage(t["artists"],t["title"]) for t in trackobjectlist]

 def getArtistImages(artistlist,fast=False):
     threads = []
     for artist in artistlist:
         t = Thread(target=getArtistImage,args=(artist,),kwargs={"fast":fast})
         t.start()
         threads.append(t)
     for t in threads:
         t.join()

     # async calls only cached results, now we need to get them
     return [getArtistImage(a) for a in artistlist]
@@ -431,4 +431,3 @@ def resolveImage(artist=None,track=None):
         return getTrackImage(track["artists"],track["title"])
     elif artist is not None:
         return getArtistImage(artist)
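
Stepping back, the image helpers above all follow the same two-tier lookup that the inline comments describe: a fast path that only consults local files and the in-memory cache and otherwise returns a /image redirect URL, and a slow path that performs the external API request and caches even negative results. The following is a stripped-down sketch of that pattern, with an assumed resolve_remote() standing in for the real apirequest(); names other than cachedArtists are illustrative, not taken from the repository.

# Illustrative sketch of the cached two-tier image lookup used above.
# resolve_remote() is an assumed stand-in for apirequest(artist=artist).
import urllib.parse

cachedArtists = {}

def resolve_remote(artist):
    # placeholder for the external API lookup; returns an image URL or None
    return None

def get_artist_image(artist, fast=False):
    # cached result (even a cached "no image") short-circuits everything
    if artist in cachedArtists:
        result = cachedArtists[artist]
        return result if result is not None else ""
    # fast path: defer the expensive lookup to the /image resolver endpoint
    if fast:
        return "/image?artist=" + urllib.parse.quote(artist)
    # slow path: external lookup, cached even when it finds nothing
    result = resolve_remote(artist)
    cachedArtists[artist] = result
    return result if result is not None else ""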