Introduce flag to recompute summaries from raw events instead of using aggregations.

Ferdinand Mütsch 2019-11-07 12:56:05 +01:00
parent f3fe8124c8
commit 9e726028c3
4 changed files with 22 additions and 14 deletions
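
The flag is exposed to clients as a "recompute" query parameter on the summary endpoint (see the handler hunk below): any non-empty value other than "false" skips pre-aggregated summaries and the response cache. A minimal usage sketch follows; the host, port and route path are assumptions, not taken from this commit.

```go
// Illustrative request against a running Wakapi instance.
// Host, port and route path are assumptions, not part of this diff.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// recompute=true forces the summary to be rebuilt from raw events
	// instead of previously stored aggregations.
	resp, err := http.Get("http://localhost:3000/api/summary?recompute=true")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```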

@@ -170,12 +170,7 @@ func migrateLanguages(db *gorm.DB, cfg *models.Config) {
 func addDefaultUser(db *gorm.DB, cfg *models.Config) {
 	pw := md5.Sum([]byte(models.DefaultPassword))
 	pwString := hex.EncodeToString(pw[:])
-	var err error
-	apiKey := uuid.Must(uuid.NewV4(), err).String()
-	if err != nil {
-		log.Println("Unable to create api key.")
-		log.Fatal(err)
-	}
+	apiKey := uuid.NewV4().String()
 	u := &models.User{ID: models.DefaultUser, Password: pwString, ApiKey: apiKey}
 	result := db.FirstOrCreate(u, &models.User{ID: u.ID})
 	if result.Error != nil {
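
For context, the new line only compiles because the satori/go.uuid version referenced here has NewV4() return a UUID directly (no error), which is what makes the uuid.Must(..., err) wrapper and its unreachable error check removable. A minimal sketch of the simplified bootstrap under that assumption; the password literal is a placeholder for models.DefaultPassword.

```go
// Sketch of the simplified default-user setup above, assuming a
// satori/go.uuid version in which NewV4() returns only a UUID (no error).
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	pw := md5.Sum([]byte("changeme"))     // placeholder for models.DefaultPassword
	pwString := hex.EncodeToString(pw[:]) // hex-encoded MD5 digest stored as the password
	apiKey := uuid.NewV4().String()       // random v4 UUID used as the user's API key

	fmt.Println(pwString, apiKey)
}
```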

@@ -10,6 +10,7 @@ import (
 	"github.com/n1try/wakapi/services"
 	"github.com/n1try/wakapi/utils"
 	cache "github.com/patrickmn/go-cache"
+	uuid "github.com/satori/go.uuid"
 )

 const (
@@ -67,21 +68,27 @@ func (h *SummaryHandler) Get(w http.ResponseWriter, r *http.Request) {
 	}

 	live := (params.Get("live") != "" && params.Get("live") != "false") || interval == IntervalToday
+	recompute := (params.Get("recompute") != "" && params.Get("recompute") != "false")
 	to := utils.StartOfDay()
 	if live {
 		to = time.Now()
 	}

 	var summary *models.Summary
-	cacheKey := getHash([]time.Time{from, to}, user)
+	var cacheKey string
+	if !recompute {
+		cacheKey = getHash([]time.Time{from, to}, user)
+	} else {
+		cacheKey = uuid.NewV4().String()
+	}
 	if cachedSummary, ok := h.Cache.Get(cacheKey); !ok {
 		// Cache Miss
-		summary, err = h.SummarySrvc.Construct(from, to, user) // 'to' is always constant
+		summary, err = h.SummarySrvc.Construct(from, to, user, recompute) // 'to' is always constant
 		if err != nil {
 			w.WriteHeader(http.StatusInternalServerError)
 			return
 		}
-		if !live {
+		if !live && !recompute {
 			h.Cache.Set(cacheKey, summary, cache.DefaultExpiration)
 		}
 	} else {
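
The interesting detail above is how recomputation bypasses the cache: instead of the deterministic hash over (from, to, user), a recompute request is keyed by a freshly generated UUID, which can never match an existing entry, and the result is also kept out of the cache via the !live && !recompute guard. A self-contained sketch of that key selection; getHash below is an illustrative stand-in, not the repository's implementation.

```go
// Sketch of the cache-key selection above: a recompute request gets a random
// UUID as its key, guaranteeing a cache miss. getHash is a stand-in only.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"

	uuid "github.com/satori/go.uuid"
)

func getHash(times []time.Time, user string) string {
	h := sha256.New()
	for _, t := range times {
		h.Write([]byte(t.Format(time.RFC3339)))
	}
	h.Write([]byte(user))
	return hex.EncodeToString(h.Sum(nil))
}

func cacheKey(from, to time.Time, user string, recompute bool) string {
	if recompute {
		return uuid.NewV4().String() // throwaway key: never hits, never stored
	}
	return getHash([]time.Time{from, to}, user)
}

func main() {
	from := time.Now().Add(-24 * time.Hour)
	to := time.Now()
	fmt.Println(cacheKey(from, to, "default", false)) // deterministic
	fmt.Println(cacheKey(from, to, "default", true))  // random per request
}
```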

@@ -53,7 +53,7 @@ func (srv *AggregationService) Schedule() {
 func (srv *AggregationService) summaryWorker(jobs <-chan *AggregationJob, summaries chan<- *models.Summary) {
 	for job := range jobs {
-		if summary, err := srv.SummaryService.Construct(job.From, job.To, &models.User{ID: job.UserID}); err != nil {
+		if summary, err := srv.SummaryService.Construct(job.From, job.To, &models.User{ID: job.UserID}, true); err != nil {
 			log.Printf("Failed to generate summary (%v, %v, %s) %v.\n", job.From, job.To, job.UserID, err)
 		} else {
 			log.Printf("Successfully generated summary (%v, %v, %s).\n", job.From, job.To, job.UserID)

@@ -22,10 +22,16 @@ type Interval struct {
 	End time.Time
 }

-func (srv *SummaryService) Construct(from, to time.Time, user *models.User) (*models.Summary, error) {
-	existingSummaries, err := srv.GetByUserWithin(user, from, to)
-	if err != nil {
-		return nil, err
+func (srv *SummaryService) Construct(from, to time.Time, user *models.User, recompute bool) (*models.Summary, error) {
+	var existingSummaries []*models.Summary
+	if recompute {
+		existingSummaries = make([]*models.Summary, 0)
+	} else {
+		summaries, err := srv.GetByUserWithin(user, from, to)
+		if err != nil {
+			return nil, err
+		}
+		existingSummaries = summaries
 	}

 	missingIntervals := getMissingIntervals(from, to, existingSummaries)
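
Threading recompute through to Construct works because an empty slice of existing summaries makes the entire requested range count as missing, so everything downstream is rebuilt from raw events. A hedged stand-in for that behaviour follows; the real getMissingIntervals lives in the repository and also computes gaps between partially covering summaries, and Interval's Start field is assumed (only End appears in the diff context).

```go
// Stand-in showing why recompute (empty existingSummaries) forces a full
// rebuild: with nothing pre-aggregated, the whole range counts as missing.
// Types and the gap logic for partially covering summaries are illustrative.
package main

import (
	"fmt"
	"time"
)

type Summary struct {
	FromTime time.Time
	ToTime   time.Time
}

type Interval struct {
	Start time.Time
	End   time.Time
}

func getMissingIntervals(from, to time.Time, existing []*Summary) []Interval {
	if len(existing) == 0 {
		// Nothing pre-aggregated: the full range must be computed from raw events.
		return []Interval{{Start: from, End: to}}
	}
	// ... gap computation against existing summaries omitted in this sketch ...
	return nil
}

func main() {
	from := time.Now().Add(-7 * 24 * time.Hour)
	to := time.Now()
	fmt.Println(getMissingIntervals(from, to, nil)) // the entire week is "missing"
}
```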