2019-10-10 00:26:28 +03:00
|
|
|
package services
|
|
|
|
|
|
|
|
import (
|
2021-04-19 21:36:37 +03:00
|
|
|
"errors"
|
2022-03-20 18:27:21 +03:00
|
|
|
datastructure "github.com/duke-git/lancet/v2/datastructure/set"
|
2021-01-30 13:17:37 +03:00
|
|
|
"github.com/emvi/logbuch"
|
2020-09-29 19:55:07 +03:00
|
|
|
"github.com/muety/wakapi/config"
|
2019-10-11 10:06:34 +03:00
|
|
|
"runtime"
|
2021-04-19 21:36:37 +03:00
|
|
|
"sync"
|
2019-10-10 00:26:28 +03:00
|
|
|
"time"
|
|
|
|
|
2020-12-11 12:05:17 +03:00
|
|
|
"github.com/go-co-op/gocron"
|
2020-03-31 13:22:17 +03:00
|
|
|
"github.com/muety/wakapi/models"
|
2019-10-10 00:26:28 +03:00
|
|
|
)
|
|
|
|
|
2019-10-10 17:47:19 +03:00
|
|
|
const (
	// aggregateIntervalDays is the width, in days, of each aggregation window:
	// summaries are generated one day at a time (see generateUserJobs).
	aggregateIntervalDays int = 1
)
|
|
|
|
|
2021-04-30 15:07:14 +03:00
|
|
|
// aggregationLock guards AggregationService.inProgress against concurrent
// lockUsers / unlockUsers calls from overlapping Run invocations.
var aggregationLock = sync.Mutex{}
|
2021-04-19 21:36:37 +03:00
|
|
|
|
2019-10-10 00:26:28 +03:00
|
|
|
// AggregationService periodically (re-)generates per-day summaries from raw
// heartbeats for all (or a selected set of) users.
type AggregationService struct {
	config           *config.Config
	userService      IUserService
	summaryService   ISummaryService
	heartbeatService IHeartbeatService
	// inProgress holds the ids of users whose aggregation is currently
	// running; access is serialized via the package-level aggregationLock.
	inProgress datastructure.Set[string]
}
|
|
|
|
|
2020-11-08 12:12:49 +03:00
|
|
|
func NewAggregationService(userService IUserService, summaryService ISummaryService, heartbeatService IHeartbeatService) *AggregationService {
|
2020-05-24 18:32:26 +03:00
|
|
|
return &AggregationService{
|
2020-11-01 18:56:36 +03:00
|
|
|
config: config.Get(),
|
|
|
|
userService: userService,
|
|
|
|
summaryService: summaryService,
|
|
|
|
heartbeatService: heartbeatService,
|
2022-03-20 18:27:21 +03:00
|
|
|
inProgress: datastructure.NewSet[string](),
|
2020-05-24 18:32:26 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-10 00:26:28 +03:00
|
|
|
// AggregationJob describes one summary-generation task: build the summary for
// user UserID covering the time span From..To (boundary semantics are defined
// by SummaryService.Summarize — presumably [From, To); confirm there).
type AggregationJob struct {
	UserID string
	From   time.Time
	To     time.Time
}
|
|
|
|
|
2019-10-11 10:06:34 +03:00
|
|
|
// Schedule a job to (re-)generate summaries every day shortly after midnight
|
|
|
|
func (srv *AggregationService) Schedule() {
|
2021-04-13 00:36:22 +03:00
|
|
|
// Run once initially
|
2022-03-20 18:27:21 +03:00
|
|
|
if err := srv.Run(datastructure.NewSet[string]()); err != nil {
|
2021-04-13 00:36:22 +03:00
|
|
|
logbuch.Fatal("failed to run AggregationJob: %v", err)
|
|
|
|
}
|
|
|
|
|
2020-12-11 12:05:17 +03:00
|
|
|
s := gocron.NewScheduler(time.Local)
|
2022-03-20 18:27:21 +03:00
|
|
|
s.Every(1).Day().At(srv.config.App.AggregationTime).Do(srv.Run, datastructure.NewSet[string]())
|
2020-12-11 12:05:17 +03:00
|
|
|
s.StartBlocking()
|
2020-11-06 19:09:41 +03:00
|
|
|
}
|
|
|
|
|
2022-03-20 18:27:21 +03:00
|
|
|
// Run aggregates summaries for the given users (an empty set means all
// users). It refuses to start, returning an error, if any of the requested
// users is already being aggregated by a concurrent Run.
func (srv *AggregationService) Run(userIds datastructure.Set[string]) error {
	if err := srv.lockUsers(userIds); err != nil {
		return err
	}
	defer srv.unlockUsers(userIds)

	jobs := make(chan *AggregationJob)
	summaries := make(chan *models.Summary)

	// One summarization worker per CPU core.
	for i := 0; i < runtime.NumCPU(); i++ {
		go srv.summaryWorker(jobs, summaries)
	}

	// Cap persistence workers at the configured max db connection count.
	for i := 0; i < int(srv.config.Db.MaxConn); i++ {
		go srv.persistWorker(summaries)
	}

	// don't leak open channels
	// NOTE(review): channels are force-closed after a fixed hour rather than
	// after all work has drained — presumably an upper bound on a full run;
	// any jobs still pending then would be dropped. TODO confirm the timeout
	// is generous enough for large instances.
	go func(c1 chan *AggregationJob, c2 chan *models.Summary) {
		defer close(c1)
		defer close(c2)
		time.Sleep(1 * time.Hour)
	}(jobs, summaries)

	return srv.trigger(jobs, userIds)
}
|
|
|
|
|
2019-10-10 17:47:19 +03:00
|
|
|
func (srv *AggregationService) summaryWorker(jobs <-chan *AggregationJob, summaries chan<- *models.Summary) {
|
|
|
|
for job := range jobs {
|
2021-12-26 19:02:14 +03:00
|
|
|
if summary, err := srv.summaryService.Summarize(job.From, job.To, &models.User{ID: job.UserID}, nil); err != nil {
|
2022-02-17 14:20:22 +03:00
|
|
|
config.Log().Error("failed to generate summary (%v, %v, %s) - %v", job.From, job.To, job.UserID, err)
|
2019-10-10 17:47:19 +03:00
|
|
|
} else {
|
2021-01-30 13:17:37 +03:00
|
|
|
logbuch.Info("successfully generated summary (%v, %v, %s)", job.From, job.To, job.UserID)
|
2019-10-10 17:47:19 +03:00
|
|
|
summaries <- summary
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-10-10 00:26:28 +03:00
|
|
|
|
2019-10-10 17:47:19 +03:00
|
|
|
func (srv *AggregationService) persistWorker(summaries <-chan *models.Summary) {
|
|
|
|
for summary := range summaries {
|
2020-11-01 18:56:36 +03:00
|
|
|
if err := srv.summaryService.Insert(summary); err != nil {
|
2022-02-17 14:20:22 +03:00
|
|
|
config.Log().Error("failed to save summary (%v, %v, %s) - %v", summary.UserID, summary.FromTime, summary.ToTime, err)
|
2019-10-10 17:47:19 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-20 18:27:21 +03:00
|
|
|
func (srv *AggregationService) trigger(jobs chan<- *AggregationJob, userIds datastructure.Set[string]) error {
|
2021-01-30 13:17:37 +03:00
|
|
|
logbuch.Info("generating summaries")
|
2019-10-11 10:06:34 +03:00
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
// Get a map from user ids to the time of their latest summary or nil if none exists yet
|
|
|
|
lastUserSummaryTimes, err := srv.summaryService.GetLatestByUser()
|
2019-10-10 00:26:28 +03:00
|
|
|
if err != nil {
|
2021-04-16 16:59:39 +03:00
|
|
|
config.Log().Error(err.Error())
|
2019-10-10 17:47:19 +03:00
|
|
|
return err
|
2019-10-10 00:26:28 +03:00
|
|
|
}
|
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
// Get a map from user ids to the time of their earliest heartbeats or nil if none exists yet
|
|
|
|
firstUserHeartbeatTimes, err := srv.heartbeatService.GetFirstByUsers()
|
2019-10-10 00:26:28 +03:00
|
|
|
if err != nil {
|
2021-04-16 16:59:39 +03:00
|
|
|
config.Log().Error(err.Error())
|
2019-10-10 17:47:19 +03:00
|
|
|
return err
|
2019-10-10 00:26:28 +03:00
|
|
|
}
|
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
// Build actual lookup table from it
|
|
|
|
firstUserHeartbeatLookup := make(map[string]models.CustomTime)
|
|
|
|
for _, e := range firstUserHeartbeatTimes {
|
|
|
|
firstUserHeartbeatLookup[e.User] = e.Time
|
2019-10-10 00:26:28 +03:00
|
|
|
}
|
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
// Generate summary aggregation jobs
|
|
|
|
for _, e := range lastUserSummaryTimes {
|
2022-03-20 18:27:21 +03:00
|
|
|
if userIds != nil && !userIds.IsEmpty() && !userIds.Contain(e.User) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
if e.Time.Valid() {
|
|
|
|
// Case 1: User has aggregated summaries already
|
|
|
|
// -> Spawn jobs to create summaries from their latest aggregation to now
|
|
|
|
generateUserJobs(e.User, e.Time.T(), jobs)
|
|
|
|
} else if t := firstUserHeartbeatLookup[e.User]; t.Valid() {
|
|
|
|
// Case 2: User has no aggregated summaries, yet, but has heartbeats
|
|
|
|
// -> Spawn jobs to create summaries from their first heartbeat to now
|
|
|
|
generateUserJobs(e.User, t.T(), jobs)
|
|
|
|
}
|
|
|
|
// Case 3: User doesn't have heartbeats at all
|
|
|
|
// -> Nothing to do
|
2019-10-10 17:47:19 +03:00
|
|
|
}
|
2019-10-10 00:26:28 +03:00
|
|
|
|
2019-10-10 17:47:19 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-03-20 18:27:21 +03:00
|
|
|
func (srv *AggregationService) lockUsers(userIds datastructure.Set[string]) error {
|
2021-04-30 15:07:14 +03:00
|
|
|
aggregationLock.Lock()
|
|
|
|
defer aggregationLock.Unlock()
|
2021-04-19 21:36:37 +03:00
|
|
|
for uid := range userIds {
|
2022-03-20 18:27:21 +03:00
|
|
|
if srv.inProgress.Contain(uid) {
|
2021-04-19 21:36:37 +03:00
|
|
|
return errors.New("aggregation already in progress for at least of the request users")
|
|
|
|
}
|
|
|
|
}
|
2022-03-20 18:27:21 +03:00
|
|
|
srv.inProgress = srv.inProgress.Union(userIds)
|
2021-04-19 21:36:37 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-03-20 18:27:21 +03:00
|
|
|
func (srv *AggregationService) unlockUsers(userIds datastructure.Set[string]) {
|
2021-04-30 15:07:14 +03:00
|
|
|
aggregationLock.Lock()
|
|
|
|
defer aggregationLock.Unlock()
|
2021-04-19 21:36:37 +03:00
|
|
|
for uid := range userIds {
|
2022-03-20 18:27:21 +03:00
|
|
|
srv.inProgress.Delete(uid)
|
2021-04-19 21:36:37 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
func generateUserJobs(userId string, from time.Time, jobs chan<- *AggregationJob) {
|
|
|
|
var to time.Time
|
2019-10-10 17:47:19 +03:00
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
// Go to next day of either user's first heartbeat or latest aggregation
|
2021-04-13 00:36:22 +03:00
|
|
|
from = from.Add(-1 * time.Second)
|
2020-11-07 14:01:35 +03:00
|
|
|
from = time.Date(
|
|
|
|
from.Year(),
|
|
|
|
from.Month(),
|
|
|
|
from.Day()+aggregateIntervalDays,
|
|
|
|
0, 0, 0, 0,
|
|
|
|
from.Location(),
|
|
|
|
)
|
2019-10-10 00:26:28 +03:00
|
|
|
|
2020-11-07 14:01:35 +03:00
|
|
|
// Iteratively aggregate per-day summaries until end of yesterday is reached
|
|
|
|
end := getStartOfToday().Add(-1 * time.Second)
|
2019-10-10 17:47:19 +03:00
|
|
|
for from.Before(end) && to.Before(end) {
|
2019-11-08 00:56:52 +03:00
|
|
|
to = time.Date(
|
|
|
|
from.Year(),
|
|
|
|
from.Month(),
|
2020-03-09 19:30:23 +03:00
|
|
|
from.Day()+aggregateIntervalDays,
|
2019-11-08 00:56:52 +03:00
|
|
|
0, 0, 0, 0,
|
|
|
|
from.Location(),
|
|
|
|
)
|
2019-10-10 17:47:19 +03:00
|
|
|
jobs <- &AggregationJob{userId, from, to}
|
|
|
|
from = to
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func getStartOfToday() time.Time {
|
|
|
|
now := time.Now()
|
|
|
|
return time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 1, now.Location())
|
2019-10-10 00:26:28 +03:00
|
|
|
}
|