Mirror of https://github.com/krateng/maloja.git

Commit: 8a3a03b9f3
Parent: 7b151d60ef

Expanding the code
@@ -27,6 +27,12 @@ def cleanup(artiststr):
     artists = flatten(artistsnew)
     artistsnew = []
 
     for a in artists:
         artistsnew.append(a.split(" & "))
 
+    artists = flatten(artistsnew)
+    artistsnew = []
+
+
+    for a in artists:
+        artistsnew.append(a.split(" ft. "))
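For context, the pattern this hunk extends is split-then-flatten: every artist entry is split on a separator, and the resulting list of lists is flattened before the next separator is applied. The flatten() helper itself is not part of the hunk, so the standalone sketch below (with made-up artist names) is an assumption about how it behaves:

# Standalone sketch of the split-and-flatten pattern; flatten() is assumed,
# since the helper is not shown in this hunk.
def flatten(lst):
    # collapse a list that may contain sub-lists into one flat list
    result = []
    for element in lst:
        if isinstance(element, list):
            result += element
        else:
            result.append(element)
    return result

def split_artists(artiststr):
    artists = [artiststr]
    # apply each separator in turn, flattening after every pass
    for separator in [" & ", " ft. "]:
        artists = flatten([a.split(separator) for a in artists])
    return artists

print(split_artists("Artist A & Artist B ft. Artist C"))
# ['Artist A', 'Artist B', 'Artist C']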
@@ -126,7 +132,7 @@ def findartistsintitle(title):
     newtitle = re.sub(r"(.*) \(Featuring (.*?)\)",r"\1",title)
     if (title != newtitle):
-        artists = re.sub(r"(.*) \(Featuring. (.*?)\).*",r"\2",title)
+        artists = re.sub(r"(.*) \(Featuring (.*?)\).*",r"\2",title)
         truetitle = newtitle
 
     newtitle = re.sub(r"(.*) \(featuring (.*?)\)",r"\1",title)
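The fix in this second hunk removes a stray dot from the extraction pattern, so the artist capture now matches the same titles as the detection line above it. A quick illustration with a made-up title:

# Made-up title, just to show what the corrected pattern extracts.
import re

title = "Some Song (Featuring Artist B)"
newtitle = re.sub(r"(.*) \(Featuring (.*?)\)", r"\1", title)
if title != newtitle:
    artists = re.sub(r"(.*) \(Featuring (.*?)\).*", r"\2", title)
    print(newtitle)   # Some Song
    print(artists)    # Artist B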
database.py (41 lines changed)
@@ -15,8 +15,9 @@ def get_scrobbles():
     keys = request.query
     r = db_query(artist=keys.get("artist"))
     #print(r)
-    response.content_type = "application/json"
-    return {"object":r} ##json can't be a list apparently???
+    response.content_type = "application/json; charset=UTF-8"
+    #response.charset = 'UTF-8'
+    return {"list":r} ##json can't be a list apparently???
 
     #r = db_query(artist=keys.get("artist"))
     #text = ""
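The "json can't be a list" comment refers to Bottle's auto-serialization: a dict return value is converted to JSON, a bare list is not, so the result is wrapped in {"list": ...}. A minimal standalone route (not the project's code, sample data made up) showing the same pattern:

# Minimal standalone Bottle route illustrating the dict wrapper; Bottle turns a
# returned dict into JSON automatically, but it will not serialize a bare list.
from bottle import Bottle, response

app = Bottle()

@app.route("/scrobbles")
def get_scrobbles():
    response.content_type = "application/json; charset=UTF-8"
    scrobbles = [{"artists": ["Artist A"], "title": "Some Song"}]
    return {"list": scrobbles}   # wrap the list instead of returning it directly

if __name__ == "__main__":
    app.run(host="localhost", port=8080)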
@@ -32,8 +33,23 @@ def get_tracks():
 def get_tracks():
     artist = request.query.get("artist")
 
-    ls = [t for t in TRACKS if (artist in t["artists"])]
-    return {"object":ls}
+    # turn the tupel of frozensets into a jsonable object
+    tracklist = [{"artists":list(a[0]),"title":a[1]} for a in TRACKS]
+    ls = [t for t in tracklist if (artist in t["artists"]) or (artist==None)]
+    return {"list":ls}
 
+@route("/artists")
+def get_artists():
+    response.content_type = "application/json; charset=UTF-8"
+    #response.charset = "utf-8"
+    return {"list":ARTISTS}
+
+@route("/charts")
+def get_charts():
+    since = request.query.get("since")
+    to = request.query.get("to")
+    results = db_query(since=since,to=to)
+    return {"list":results}
+
 # Starts the server
 def runserver(DATABASE_PORT):
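Since TRACKS now holds (frozenset of artists, title) tuples (see the buildh() change further down), get_tracks() has to convert them back into plain dicts before they can go out as JSON. A standalone sketch of that conversion and the artist filter, with made-up data:

# TRACKS as (frozenset(artists), title) tuples, converted to dicts for JSON output.
TRACKS = [
    (frozenset(["Artist A", "Artist B"]), "Some Song"),
    (frozenset(["Artist C"]), "Another Song"),
]

def tracks_for(artist=None):
    tracklist = [{"artists": list(a[0]), "title": a[1]} for a in TRACKS]
    # no artist given -> return everything, otherwise filter by membership
    return [t for t in tracklist if (artist is None) or (artist in t["artists"])]

print(tracks_for())            # both tracks
print(tracks_for("Artist C"))  # only the second track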
@@ -76,7 +92,7 @@ def build():
     #           break
     #       #else:
     #           #print("NO MATCH!")
-
+    #
     #if not foundexisting:
     #   tracklist.append({"artists":t["artists"],"title":t["title"]})
     else:
@@ -97,18 +113,21 @@ def buildh():
     trackset = set()
     for t in DATABASE:
         for a in t["artists"]:
-            if a not in artistset:
-                artistset.add(a)
+            #if a not in artistset:
+            artistset.add(a)
 
         # we list the tracks as tupels of frozenset(artists) and track
         # this way they're hashable and easily comparable, but we need to change them back after we have the list
-        if ((frozenset(t["artists"]),t["title"])) not in trackset:
-            trackset.add((frozenset(t["artists"]),t["title"]))
+        #if ((frozenset(t["artists"]),t["title"])) not in trackset:
+        trackset.add((frozenset(t["artists"]),t["title"]))
 
     print("Done, now converting back!")
 
     ARTISTS = list(artistset)
-    TRACKS = [{"artists":list(a[0]),"title":a[1]} for a in trackset]
+    #TRACKS = [{"artists":list(a[0]),"title":a[1]} for a in trackset]
+    #actually lets only convert this once we need it, kinda makes sense to store it in the tuple frozenset form
+    TRACKS = list(trackset)
+
 
 # Rebuilds the database from disk, keeps cached entries
 def reload():
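The idea behind the simplification in buildh() above: a set keyed on (frozenset(artists), title) is hashable and independent of artist order, so set.add() already deduplicates and the explicit membership checks can go. A small self-contained sketch with made-up scrobbles:

# Sketch of the deduplication idea: a frozenset of artists plus the title compares
# independently of artist order, so duplicates collapse without an "in" check.
DATABASE = [
    {"artists": ["Artist A", "Artist B"], "title": "Some Song"},
    {"artists": ["Artist B", "Artist A"], "title": "Some Song"},   # same track, different order
    {"artists": ["Artist C"], "title": "Another Song"},
]

artistset = set()
trackset = set()
for t in DATABASE:
    for a in t["artists"]:
        artistset.add(a)          # set.add() is already idempotent
    trackset.add((frozenset(t["artists"]), t["title"]))

print(len(trackset))  # 2 -- the reordered duplicate collapsed into one entry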
@@ -127,6 +146,8 @@ def reload():
         data = l.split(",")
         #print(l)
 
+
+        ## saving album in the scrobbles is supported, but for now we don't use it. It shouldn't be a defining part of the track (same song from Album or EP), but derived information
         artists = data[1].split("/")
         #album = data[3]
         title = data[2]
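For reference, this is how a single scrobble line is taken apart in reload(). The column layout (time, artists, title, album) is only inferred from the indices used in the hunk, and the sample line is made up:

# Rough sketch of parsing one scrobble line; column meanings are assumptions
# based on the indices used above.
line = "1234567890,Artist A/Artist B,Some Song,Some Album"

data = line.split(",")
timestamp = data[0]              # assumption: first column is the scrobble time
artists = data[1].split("/")     # several artists are packed into one column
title = data[2]
#album = data[3]                 # stored in the file, but not used as part of the track

print(timestamp, artists, title)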
rules/.gitignore (new vendored file, 2 lines)
@@ -0,0 +1,2 @@
+*.csv
+!examplerules.csv
rules/examplerules.csv (new file, 14 lines)
@@ -0,0 +1,14 @@
+### This folder can have any number of csv files to group your rules
+### the first column defines the type of the rule:
+### notanartist: Defines strings that can appear behind "feat" in a song title, but denote additional information about the track instead of another artist. Second column is the string
+### belongtogether: defines an artist with an ampersand in their name. Otherwise, ampersands are interpreted as denoting two different artists (except when there are no spaces). Second column is the full name of the artist
+### replacetitle: defines an alternative spelling of a track title that should be replaced. Second column is the 'wrong' spelling, third column the correct spelling
+### replaceartist: defines an alternative spelling of an artist that should be replaced. Second column is the 'wrong' spelling, third column the correct spelling
+### countas: defines an artist that should be counted together with another artist for chart statistics etc. This will not change the separation in the database and all effects of this rule will disappear as soon as it is no longer active. Second column is the artist, third column the replacement artist
+###
+### THE RULES IN THIS EXAMPLE FILE ARE IGNORED
+notanartist,In Dreams,
+belongtogether,Darth & Vader,
+replacetitle,첫 사랑니 (Rum Pum Pum Pum),Rum Pum Pum Pum
+replaceartist,Dal Shabet,Dal★Shabet
+countas,Trouble Maker,HyunA
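The commit only adds the example file; the code that reads the rules is not part of this diff. A minimal sketch, under the assumptions documented in the header above (any number of csv files, rule type in the first column, the example file ignored), of how such files could be consumed:

# Hypothetical rule loader; maloja's actual parser is not in this commit.
import csv, glob

rules = []
for path in glob.glob("rules/*.csv"):
    if path.endswith("examplerules.csv"):
        continue   # the example file is documented as ignored
    with open(path, newline="", encoding="utf-8") as f:
        for row in csv.reader(f):
            if not row or row[0].startswith("#"):
                continue   # skip blank lines and ### comment lines
            rules.append((row[0], row[1:]))   # (ruletype, arguments)

print(rules)
# e.g. [('replaceartist', ['Dal Shabet', 'Dal★Shabet'])]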
server.py (30 lines changed)
@@ -1,9 +1,10 @@
-from bottle import route, run, template, static_file, request
+from bottle import route, run, template, static_file, request, response
 #import os
 from importlib.machinery import SourceFileLoader
 #from serverutil import log, db_remove, createVideoFile
 import _thread
 import waitress
+import urllib.request
 
 
 MAIN_PORT = 12345
@@ -16,7 +17,7 @@ DATABASE_PORT = 12349
 #@route("/<pth:path>/<file:re:.*\\.png>")
 #@route("/<pth:path>/<file:re:.*\\.mp4>")
 #@route("/<pth:path>/<file:re:.*\\.mkv>")
-@route("/<pth:path>")
+#@route("/<pth:path>")
 def static(pth):
 
     return static_file(pth,root="")
@@ -25,21 +26,24 @@ def static(pth):
 @route("")
 @route("/")
 def mainpage():
-    keys = request.query
-
-    return SourceFileLoader("mainpage","mainpage.py").load_module().GET(keys)
-
-@route("/xhttp")
-def xhttp():
-    keys = request.query
-
-    return SourceFileLoader("download","download.py").load_module().GET(keys)
-
-
+    return static_file("main.html",root="")
 
+# this is the fallback option. If you run this service behind a reverse proxy, it is recommended to rewrite /db/ requests to the port of the db server
+# e.g. location /db { rewrite ^/db(.*)$ $1 break; proxy_pass http://yoururl:12349; }
+
+@route("/db/<pth:path>")
+def database(pth):
+    contents = urllib.request.urlopen("http://localhost:" + str(DATABASE_PORT) + "/" + pth).read()
+    response.content_type = "application/json"
+    #print("Returning " + "http://localhost:" + str(DATABASE_PORT) + "/" + pth)
+    return contents
+
+
+
 
 ## other programs to always run with the server
 #_thread.start_new_thread(SourceFileLoader("downloader","downloader.py").load_module().loop,())
 _thread.start_new_thread(SourceFileLoader("database","database.py").load_module().runserver,(DATABASE_PORT,))
 
+print("wat")
 run(host='0.0.0.0', port=MAIN_PORT, server='waitress')
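Once both servers are up (MAIN_PORT 12345 and DATABASE_PORT 12349 as defined above), the /db/ route proxies plain paths to the database server. A small client-side sketch, not part of the commit, of reading the wrapped JSON it returns:

# Hypothetical client; assumes the main server is running locally on port 12345.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:12345/db/tracks") as resp:
    payload = json.loads(resp.read().decode("utf-8"))

for track in payload["list"]:
    print(track["artists"], "-", track["title"])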