Mirror of https://github.com/krateng/maloja.git (synced 2023-08-10 21:12:55 +03:00)
Feels good man
commit df07dd7b00 (parent 7021099e7b)
@@ -10,7 +10,6 @@ from ..thirdparty import proxy_scrobble_all
from ..globalconf import data_dir, malojaconfig, apikeystore
#db
from . import sqldb
from .cache import db_query, db_aggregate

# doreah toolkit
from doreah.logging import log
@@ -470,92 +469,6 @@ def start_db():


####
## Database queries
####


# Queries the database
def db_query_full(artist=None,artists=None,title=None,track=None,timerange=None,associated=False,max_=None):

    if not dbstatus['healthy']: raise DatabaseNotBuilt()
    (since, to) = time_stamps(range=timerange)

    if artists is not None and title is not None:
        print(col['red']("THIS SHOULD NO LONGER HAPPEN"))
        track = {'artists':artists,'title':title}

    if track is not None:
        return list(reversed(sqldb.get_scrobbles_of_track(track=track,since=since,to=to)))

    if artist is not None:
        return list(reversed(sqldb.get_scrobbles_of_artist(artist=artist,since=since,to=to)))

    return list(reversed(sqldb.get_scrobbles(since=since,to=to)))
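# Illustrative call shapes for db_query_full (argument values below are invented;
# the timerange argument, normally a malojatime range object, is omitted here):
#   db_query_full(track={'artists':['Some Artist'],'title':'Some Song'})  # scrobbles of one track
#   db_query_full(artist='Some Artist')                                   # scrobbles of one artist
#   db_query_full()   # all scrobbles, reversed (newest first, assuming sqldb returns chronological order)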


# Queries that... well... aggregate
def db_aggregate_full(by=None,timerange=None,artist=None):

    if not dbstatus['healthy']: raise DatabaseNotBuilt()
    (since, to) = time_stamps(range=timerange)

    if (by=="ARTIST"):

        trackcharts = {}
        charts = {}
        scrobbles = sqldb.get_scrobbles(since=since,to=to,resolve_references=False)

        for s in scrobbles:
            trackcharts[s['track']] = trackcharts.setdefault(s['track'],0) + 1

        for t in trackcharts:
            artists = sqldb.get_artists_of_track(t,resolve_references=False)
            for a in coa.getCreditedList(artists):
                charts[a] = charts.setdefault(a,0) + trackcharts[t]

        ls = [{"artist":sqldb.get_artist(a),"scrobbles":charts[a],"counting":[]} for a in charts]
        ls.sort(key=lambda k:k["scrobbles"],reverse=True)
        # add ranks
        for rnk in range(len(ls)):
            if rnk == 0 or ls[rnk]["scrobbles"] < ls[rnk-1]["scrobbles"]:
                ls[rnk]["rank"] = rnk + 1
            else:
                ls[rnk]["rank"] = ls[rnk-1]["rank"]
        return ls

    elif (by=="TRACK"):
        charts = {}
        if artist is None:
            scrobbles = sqldb.get_scrobbles(since=since,to=to,resolve_references=False)
        else:
            scrobbles = sqldb.get_scrobbles_of_artist(since=since,to=to,artist=artist,resolve_references=False)

        for s in scrobbles:
            charts[s['track']] = charts.setdefault(s['track'],0) + 1

        ls = [{"track":sqldb.get_track(t),"scrobbles":charts[t]} for t in charts]
        ls.sort(key=lambda k:k["scrobbles"],reverse=True)
        # add ranks
        for rnk in range(len(ls)):
            if rnk == 0 or ls[rnk]["scrobbles"] < ls[rnk-1]["scrobbles"]:
                ls[rnk]["rank"] = rnk + 1
            else:
                ls[rnk]["rank"] = ls[rnk-1]["rank"]
        return ls

    else:
        return len(sqldb.get_scrobbles(since=since,to=to,resolve_references=False))
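# Both rank loops above implement standard competition ranking: equal scrobble counts
# share a rank, and the next lower count resumes at its list position (1, 1, 3, ...).
# A minimal standalone sketch of that rule (the sample data is invented, not taken
# from the Maloja database):
def assign_ranks(entries):
    # entries: list of dicts with a "scrobbles" key, already sorted descending
    for i, entry in enumerate(entries):
        if i == 0 or entry["scrobbles"] < entries[i-1]["scrobbles"]:
            entry["rank"] = i + 1
        else:
            entry["rank"] = entries[i-1]["rank"]
    return entries

# assign_ranks([{"scrobbles":30},{"scrobbles":30},{"scrobbles":12}])
# -> ranks 1, 1, 3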


# Search for strings
def db_search(query,type=None):
    results = []

@@ -1,158 +0,0 @@

###
## Caches in front of DB
## the volatile caches are intended mainly for excessive site navigation during one session
## the permanent caches are there to save data that is hard to calculate and never changes (old charts)
###

import psutil
import copy
import lru

from doreah.logging import log

from ..globalconf import malojaconfig
from .. import utilities
from .. import database as dbmain

# the cached code paths below are hard-disabled (the condition is a literal False),
# so db_query and db_aggregate always delegate straight to the database module
if False:
    def db_query(**kwargs):
        return db_query_cached(**kwargs)
    def db_aggregate(**kwargs):
        return db_aggregate_cached(**kwargs)
else:
    def db_query(**kwargs):
        return dbmain.db_query_full(**kwargs)
    def db_aggregate(**kwargs):
        return dbmain.db_aggregate_full(**kwargs)


csz = malojaconfig["DB_CACHE_ENTRIES"]
cmp = malojaconfig["DB_MAX_MEMORY"]

cache_query = lru.LRU(csz)
cache_query_perm = lru.LRU(csz)
cache_aggregate = lru.LRU(csz)
cache_aggregate_perm = lru.LRU(csz)

perm_caching = malojaconfig["CACHE_DATABASE_PERM"]
temp_caching = malojaconfig["CACHE_DATABASE_SHORT"]

cachestats = {
    "cache_query":{
        "hits_perm":0,
        "hits_tmp":0,
        "misses":0,
        "objperm":cache_query_perm,
        "objtmp":cache_query,
        "name":"Query Cache"
    },
    "cache_aggregate":{
        "hits_perm":0,
        "hits_tmp":0,
        "misses":0,
        "objperm":cache_aggregate_perm,
        "objtmp":cache_aggregate,
        "name":"Aggregate Cache"
    }
}

from doreah.regular import runhourly

@runhourly
def log_stats():
    logstr = "{name}: {hitsperm} Perm Hits, {hitstmp} Tmp Hits, {misses} Misses; Current Size: {sizeperm}/{sizetmp}"
    for s in (cachestats["cache_query"],cachestats["cache_aggregate"]):
        log(logstr.format(name=s["name"],hitsperm=s["hits_perm"],hitstmp=s["hits_tmp"],misses=s["misses"],
            sizeperm=len(s["objperm"]),sizetmp=len(s["objtmp"])),module="debug")
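# With the format string above, an hourly debug entry would look roughly like this
# (the counts are invented for illustration):
#   Query Cache: 12 Perm Hits, 40 Tmp Hits, 7 Misses; Current Size: 5/31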


def db_query_cached(**kwargs):
    global cache_query, cache_query_perm
    key = utilities.serialize(kwargs)

    eligible_permanent_caching = (
        "timerange" in kwargs and
        not kwargs["timerange"].active() and
        perm_caching
    )
    eligible_temporary_caching = (
        not eligible_permanent_caching and
        temp_caching
    )

    # hit permanent cache for past timeranges
    if eligible_permanent_caching and key in cache_query_perm:
        cachestats["cache_query"]["hits_perm"] += 1
        return copy.copy(cache_query_perm.get(key))

    # hit short term cache
    elif eligible_temporary_caching and key in cache_query:
        cachestats["cache_query"]["hits_tmp"] += 1
        return copy.copy(cache_query.get(key))

    else:
        cachestats["cache_query"]["misses"] += 1
        result = dbmain.db_query_full(**kwargs)
        if eligible_permanent_caching: cache_query_perm[key] = result
        elif eligible_temporary_caching: cache_query[key] = result

        reduce_caches_if_low_ram()

        return result
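# Why two tiers (applies to both cached wrappers): a timerange whose .active() is
# False lies entirely in the past, so its result can never change and may be kept
# permanently; anything that still covers "now" only goes into the volatile LRU.
# A minimal sketch of just that routing decision (the range classes below are
# stand-ins, not malojatime objects):
class ClosedRange:
    def active(self): return False   # e.g. "March 2021" queried in 2022

class OpenRange:
    def active(self): return True    # still includes the present

def choose_cache(timerange=None, perm_enabled=True, temp_enabled=True):
    if perm_enabled and timerange is not None and not timerange.active():
        return "permanent"
    if temp_enabled:
        return "volatile"
    return "uncached"

# choose_cache(ClosedRange()) -> "permanent"
# choose_cache(OpenRange())   -> "volatile"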


def db_aggregate_cached(**kwargs):
    global cache_aggregate, cache_aggregate_perm
    key = utilities.serialize(kwargs)

    eligible_permanent_caching = (
        "timerange" in kwargs and
        not kwargs["timerange"].active() and
        perm_caching
    )
    eligible_temporary_caching = (
        not eligible_permanent_caching and
        temp_caching
    )

    # hit permanent cache for past timeranges
    if eligible_permanent_caching and key in cache_aggregate_perm:
        cachestats["cache_aggregate"]["hits_perm"] += 1
        return copy.copy(cache_aggregate_perm.get(key))

    # hit short term cache
    elif eligible_temporary_caching and key in cache_aggregate:
        cachestats["cache_aggregate"]["hits_tmp"] += 1
        return copy.copy(cache_aggregate.get(key))

    else:
        cachestats["cache_aggregate"]["misses"] += 1
        result = dbmain.db_aggregate_full(**kwargs)
        if eligible_permanent_caching: cache_aggregate_perm[key] = result
        elif eligible_temporary_caching: cache_aggregate[key] = result

        reduce_caches_if_low_ram()

        return result

def invalidate_caches():
    global cache_query, cache_aggregate
    cache_query.clear()
    cache_aggregate.clear()
    log("Database caches invalidated.")

def reduce_caches(to=0.75):
    global cache_query, cache_aggregate, cache_query_perm, cache_aggregate_perm
    for c in cache_query, cache_aggregate, cache_query_perm, cache_aggregate_perm:
        currentsize = len(c)
        if currentsize > 100:
            targetsize = max(int(currentsize * to),10)
            c.set_size(targetsize)
            # restore the nominal capacity; the temporary shrink above only evicts entries
            c.set_size(csz)

def reduce_caches_if_low_ram():
    ramprct = psutil.virtual_memory().percent
    if ramprct > cmp:
        log("{prct}% RAM usage, reducing caches!".format(prct=ramprct),module="debug")
        ratio = (cmp / ramprct) ** 3
        reduce_caches(to=ratio)
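# Worked example for the shrink factor above (numbers are illustrative): with the
# default DB_MAX_MEMORY threshold of 75 and current RAM usage of 90%, the ratio is
# (75/90)**3 ≈ 0.58, so reduce_caches() resizes every cache holding more than 100
# entries to about 58% of its current length (never below 10 entries). Cubing the
# quotient makes the cut progressively deeper the further usage overshoots the threshold.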

@@ -148,12 +148,7 @@ malojaconfig = Configuration(
    },
    "Technical":{
        "cache_expire_positive":(tp.Integer(), "Image Cache Expiration", 300, "Days until images are refetched"),
        "cache_expire_negative":(tp.Integer(), "Image Cache Negative Expiration", 30, "Days until failed image fetches are reattempted"),
        "use_db_cache":(tp.Boolean(), "Use DB Cache", True),
        "cache_database_short":(tp.Boolean(), "Use volatile Database Cache", True),
        "cache_database_perm":(tp.Boolean(), "Use permanent Database Cache", True),
        "db_cache_entries":(tp.Integer(), "Maximal Cache entries", 10000),
        "db_max_memory":(tp.Integer(max=100,min=20), "RAM Percentage Threshold", 75, "Maximal percentage of RAM that should be used by the whole system before Maloja discards cache entries. Use a higher number if your Maloja runs on a dedicated instance (e.g. a container)")
        "cache_expire_negative":(tp.Integer(), "Image Cache Negative Expiration", 30, "Days until failed image fetches are reattempted")
    },
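# Note on naming: the settings are declared with lowercase keys here but read back in
# uppercase elsewhere in this commit (e.g. "db_cache_entries" corresponds to the
# malojaconfig["DB_CACHE_ENTRIES"] used by the cache module); this assumes doreah's
# Configuration treats the two spellings as the same setting.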
    "Fluff":{
        "scrobbles_gold":(tp.Integer(), "Scrobbles for Gold", 250, "How many scrobbles a track needs to be considered 'Gold' status"),