mirror of https://github.com/muety/wakapi.git
synced 2023-08-10 21:12:56 +03:00
refactor: move more background jobs to using job queue
This commit is contained in:
parent e2ef54152d
commit fcca881cfc
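In essence, the commit swaps the per-service gocron schedulers for named artifex job queues: the config package initializes the dispatchers once at startup, services fetch the shared queues in their constructors, and periodic triggers are registered via DispatchCron/DispatchEvery while the actual work is fanned out to a worker queue. A minimal sketch of that pattern, assuming the GetDefaultQueue/GetQueue accessors whose call sites appear in the diff (their definitions are not shown here) and an illustrative cron expression:

    package services

    import (
        "github.com/muety/artifex"

        "github.com/muety/wakapi/config"
    )

    // ExampleService is illustrative only; the aggregation, leaderboard, misc and
    // report services touched by this commit follow the same shape.
    type ExampleService struct {
        queueDefault *artifex.Dispatcher // single-worker queue holding the cron trigger
        queueWorkers *artifex.Dispatcher // worker pool running the individual jobs
    }

    func NewExampleService() *ExampleService {
        return &ExampleService{
            queueDefault: config.GetDefaultQueue(),                // accessor as called throughout the diff
            queueWorkers: config.GetQueue(config.QueueProcessing), // CPU-sized processing queue
        }
    }

    func (srv *ExampleService) Schedule() {
        // register the recurring trigger; the cron expression is illustrative,
        // the real services read theirs from the application config
        if _, err := srv.queueDefault.DispatchCron(func() {
            // fan the actual work out to the worker queue
            if err := srv.queueWorkers.Dispatch(func() {
                // one unit of work
            }); err != nil {
                config.Log().Error("failed to dispatch job, %v", err)
            }
        }, "0 15 2 * * *"); err != nil {
            config.Log().Error("failed to schedule job, %v", err)
        }
    }

The diff below shows this in full, starting with the queue registry in the config package.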
@@ -2,25 +2,33 @@ package config

 import (
     "fmt"
+    "github.com/emvi/logbuch"
     "github.com/muety/artifex"
+    "math"
+    "runtime"
 )

 var jobqueues map[string]*artifex.Dispatcher

 const (
-    QueueDefault = ""
-    QueueProcessing = "processing"
-    QueueMails = "mails"
+    QueueDefault = "wakapi.default"
+    QueueProcessing = "wakapi.processing"
+    QueueReports = "wakapi.reports"
 )

 func init() {
     jobqueues = make(map[string]*artifex.Dispatcher)

+    InitQueue(QueueDefault, 1)
+    InitQueue(QueueProcessing, int(math.Ceil(float64(runtime.NumCPU())/2.0)))
+    InitQueue(QueueReports, 1)
 }

 func InitQueue(name string, workers int) error {
     if _, ok := jobqueues[name]; ok {
         return fmt.Errorf("queue '%s' already existing", name)
     }
+    logbuch.Info("creating job queue '%s' (%d workers)", name, workers)
     jobqueues[name] = artifex.NewDispatcher(workers, 4096)
     jobqueues[name].Start()
     return nil
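Besides DispatchCron, the services below also rely on the dispatcher's one-off, interval and delayed dispatch flavors. A hedged sketch of those call shapes, matching the call sites in this diff (the durations are illustrative, not the configured values):

    package services

    import (
        "time"

        "github.com/muety/wakapi/config"
    )

    // scheduleExamples is an illustrative helper, not part of the commit.
    func scheduleExamples() {
        // one-off: run a closure on one of the processing queue's workers
        if err := config.GetQueue(config.QueueProcessing).Dispatch(func() {}); err != nil {
            config.Log().Error("failed to dispatch job, %v", err)
        }

        // recurring: run a closure at a fixed interval on the default queue
        if _, err := config.GetDefaultQueue().DispatchEvery(func() {}, 1*time.Hour); err != nil {
            config.Log().Error("failed to schedule recurring job, %v", err)
        }

        // delayed: run a closure once after a delay (used below to throttle report mails)
        if err := config.GetQueue(config.QueueReports).DispatchIn(func() {}, 5*time.Second); err != nil {
            config.Log().Error("failed to schedule delayed job, %v", err)
        }
    }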
main.go (4 changes)

@@ -186,9 +186,9 @@ func main() {

     // Schedule background tasks
     go aggregationService.Schedule()
-    go leaderboardService.ScheduleDefault()
-    go miscService.ScheduleCountTotalTime()
+    go leaderboardService.Schedule()
     go reportService.Schedule()
+    go miscService.ScheduleCountTotalTime()

     routes.Init()

@@ -661,7 +661,7 @@ func (h *SettingsHandler) regenerateSummaries(user *models.User) error {
         return err
     }

-    if err := h.aggregationSrvc.Run(datastructure.NewSet(user.ID)); err != nil {
+    if err := h.aggregationSrvc.AggregateSummaries(datastructure.NewSet(user.ID)); err != nil {
         logbuch.Error("failed to regenerate summaries: %v", err)
         return err
     }
@@ -4,12 +4,11 @@ import (
     "errors"
     datastructure "github.com/duke-git/lancet/v2/datastructure/set"
     "github.com/emvi/logbuch"
+    "github.com/muety/artifex"
     "github.com/muety/wakapi/config"
-    "runtime"
     "sync"
     "time"

-    "github.com/go-co-op/gocron"
     "github.com/muety/wakapi/models"
 )

@@ -25,6 +24,8 @@ type AggregationService struct {
     summaryService ISummaryService
     heartbeatService IHeartbeatService
     inProgress datastructure.Set[string]
+    queueDefault *artifex.Dispatcher
+    queueWorkers *artifex.Dispatcher
 }

 func NewAggregationService(userService IUserService, summaryService ISummaryService, heartbeatService IHeartbeatService) *AggregationService {

@@ -34,6 +35,8 @@ func NewAggregationService(userService IUserService, summaryService ISummaryServ
         summaryService: summaryService,
         heartbeatService: heartbeatService,
         inProgress: datastructure.NewSet[string](),
+        queueDefault: config.GetDefaultQueue(),
+        queueWorkers: config.GetQueue(config.QueueProcessing),
     }
 }

@@ -45,58 +48,23 @@ type AggregationJob struct {

 // Schedule a job to (re-)generate summaries every day shortly after midnight
 func (srv *AggregationService) Schedule() {
-    s := gocron.NewScheduler(time.Local)
-    s.Every(1).Day().At(srv.config.App.AggregationTime).WaitForSchedule().Do(srv.Run, datastructure.NewSet[string]())
-    s.StartBlocking()
+    logbuch.Info("scheduling summary aggregation")
+
+    if _, err := srv.queueDefault.DispatchCron(func() {
+        if err := srv.AggregateSummaries(datastructure.NewSet[string]()); err != nil {
+            config.Log().Error("failed to generate summaries, %v", err)
+        }
+    }, srv.config.App.GetAggregationTimeCron()); err != nil {
+        config.Log().Error("failed to schedule summary generation, %v", err)
+    }
 }

-func (srv *AggregationService) Run(userIds datastructure.Set[string]) error {
+func (srv *AggregationService) AggregateSummaries(userIds datastructure.Set[string]) error {
     if err := srv.lockUsers(userIds); err != nil {
         return err
     }
     defer srv.unlockUsers(userIds)

-    jobs := make(chan *AggregationJob)
-    summaries := make(chan *models.Summary)
-
-    for i := 0; i < runtime.NumCPU(); i++ {
-        go srv.summaryWorker(jobs, summaries)
-    }
-
-    for i := 0; i < int(srv.config.Db.MaxConn); i++ {
-        go srv.persistWorker(summaries)
-    }
-
-    // don't leak open channels
-    go func(c1 chan *AggregationJob, c2 chan *models.Summary) {
-        defer close(c1)
-        defer close(c2)
-        time.Sleep(1 * time.Hour)
-    }(jobs, summaries)
-
-    return srv.trigger(jobs, userIds)
-}
-
-func (srv *AggregationService) summaryWorker(jobs <-chan *AggregationJob, summaries chan<- *models.Summary) {
-    for job := range jobs {
-        if summary, err := srv.summaryService.Summarize(job.From, job.To, &models.User{ID: job.UserID}, nil); err != nil {
-            config.Log().Error("failed to generate summary (%v, %v, %s) - %v", job.From, job.To, job.UserID, err)
-        } else {
-            logbuch.Info("successfully generated summary (%v, %v, %s)", job.From, job.To, job.UserID)
-            summaries <- summary
-        }
-    }
-}
-
-func (srv *AggregationService) persistWorker(summaries <-chan *models.Summary) {
-    for summary := range summaries {
-        if err := srv.summaryService.Insert(summary); err != nil {
-            config.Log().Error("failed to save summary (%v, %v, %s) - %v", summary.UserID, summary.FromTime, summary.ToTime, err)
-        }
-    }
-}
-
-func (srv *AggregationService) trigger(jobs chan<- *AggregationJob, userIds datastructure.Set[string]) error {
     logbuch.Info("generating summaries")

     // Get a map from user ids to the time of their latest summary or nil if none exists yet

@@ -119,6 +87,19 @@ func (srv *AggregationService) trigger(jobs chan<- *AggregationJob, userIds data
         firstUserHeartbeatLookup[e.User] = e.Time
     }

+    // Dispatch summary generation jobs
+    jobs := make(chan *AggregationJob)
+    defer close(jobs)
+    go func() {
+        for job := range jobs {
+            if err := srv.queueWorkers.Dispatch(func() {
+                srv.process(job)
+            }); err != nil {
+                config.Log().Error("failed to dispatch summary generation job for user '%s'", job.UserID)
+            }
+        }
+    }()
+
     // Generate summary aggregation jobs
     for _, e := range lastUserSummaryTimes {
         if userIds != nil && !userIds.IsEmpty() && !userIds.Contain(e.User) {

@@ -141,24 +122,15 @@ func (srv *AggregationService) trigger(jobs chan<- *AggregationJob, userIds data
     return nil
 }

-func (srv *AggregationService) lockUsers(userIds datastructure.Set[string]) error {
-    aggregationLock.Lock()
-    defer aggregationLock.Unlock()
-    for uid := range userIds {
-        if srv.inProgress.Contain(uid) {
-            return errors.New("aggregation already in progress for at least of the request users")
-        }
-    }
-    srv.inProgress = srv.inProgress.Union(userIds)
-    return nil
-}
-
-func (srv *AggregationService) unlockUsers(userIds datastructure.Set[string]) {
-    aggregationLock.Lock()
-    defer aggregationLock.Unlock()
-    for uid := range userIds {
-        srv.inProgress.Delete(uid)
+func (srv *AggregationService) process(job *AggregationJob) {
+    if summary, err := srv.summaryService.Summarize(job.From, job.To, &models.User{ID: job.UserID}, nil); err != nil {
+        config.Log().Error("failed to generate summary (%v, %v, %s) - %v", job.From, job.To, job.UserID, err)
+    } else {
+        logbuch.Info("successfully generated summary (%v, %v, %s)", job.From, job.To, job.UserID)
+        if err := srv.summaryService.Insert(summary); err != nil {
+            config.Log().Error("failed to save summary (%v, %v, %s) - %v", summary.UserID, summary.FromTime, summary.ToTime, err)
+        }
     }
 }

 func generateUserJobs(userId string, from time.Time, jobs chan<- *AggregationJob) {

@@ -189,6 +161,26 @@ func generateUserJobs(userId string, from time.Time, jobs chan<- *AggregationJob
     }
 }

+func (srv *AggregationService) lockUsers(userIds datastructure.Set[string]) error {
+    aggregationLock.Lock()
+    defer aggregationLock.Unlock()
+    for uid := range userIds {
+        if srv.inProgress.Contain(uid) {
+            return errors.New("aggregation already in progress for at least of the request users")
+        }
+    }
+    srv.inProgress = srv.inProgress.Union(userIds)
+    return nil
+}
+
+func (srv *AggregationService) unlockUsers(userIds datastructure.Set[string]) {
+    aggregationLock.Lock()
+    defer aggregationLock.Unlock()
+    for uid := range userIds {
+        srv.inProgress.Delete(uid)
+    }
+}
+
 func getStartOfToday() time.Time {
     now := time.Now()
     return time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 1, now.Location())
@@ -2,8 +2,8 @@ package services

 import (
     "github.com/emvi/logbuch"
-    "github.com/go-co-op/gocron"
     "github.com/leandro-lugaresi/hub"
+    "github.com/muety/artifex"
     "github.com/muety/wakapi/config"
     "github.com/muety/wakapi/models"
     "github.com/muety/wakapi/repositories"

@@ -22,6 +22,8 @@ type LeaderboardService struct {
     repository repositories.ILeaderboardRepository
     summaryService ISummaryService
     userService IUserService
+    queueDefault *artifex.Dispatcher
+    queueWorkers *artifex.Dispatcher
 }

 func NewLeaderboardService(leaderboardRepo repositories.ILeaderboardRepository, summaryService ISummaryService, userService IUserService) *LeaderboardService {

@@ -32,6 +34,8 @@ func NewLeaderboardService(leaderboardRepo repositories.ILeaderboardRepository,
         repository: leaderboardRepo,
         summaryService: summaryService,
         userService: userService,
+        queueDefault: config.GetDefaultQueue(),
+        queueWorkers: config.GetQueue(config.QueueProcessing),
     }

     onUserUpdate := srv.eventBus.Subscribe(0, config.EventUserUpdate)

@@ -48,7 +52,7 @@ func NewLeaderboardService(leaderboardRepo repositories.ILeaderboardRepository,

         if user.PublicLeaderboard && !exists {
             logbuch.Info("generating leaderboard for '%s' after settings update", user.ID)
-            srv.Run([]*models.User{user}, models.IntervalPast7Days, []uint8{models.SummaryLanguage})
+            srv.ComputeLeaderboard([]*models.User{user}, models.IntervalPast7Days, []uint8{models.SummaryLanguage})
         } else if !user.PublicLeaderboard && exists {
             logbuch.Info("clearing leaderboard for '%s' after settings update", user.ID)
             if err := srv.repository.DeleteByUser(user.ID); err != nil {

@@ -62,23 +66,26 @@ func NewLeaderboardService(leaderboardRepo repositories.ILeaderboardRepository,
     return srv
 }

-func (srv *LeaderboardService) ScheduleDefault() {
-    runAllUsers := func(interval *models.IntervalKey, by []uint8) {
+func (srv *LeaderboardService) Schedule() {
+    logbuch.Info("scheduling leaderboard generation")

+    generate := func() {
         users, err := srv.userService.GetAllByLeaderboard(true)
         if err != nil {
             config.Log().Error("failed to get users for leaderboard generation - %v", err)
             return
         }
-        srv.Run(users, interval, by)
+        srv.ComputeLeaderboard(users, models.IntervalPast7Days, []uint8{models.SummaryLanguage})
     }

-    s := gocron.NewScheduler(time.Local)
-    s.Every(1).Day().At(srv.config.App.LeaderboardGenerationTime).Do(runAllUsers, models.IntervalPast7Days, []uint8{models.SummaryLanguage})
-    s.StartBlocking()
+    for _, cronExp := range srv.config.App.GetLeaderboardGenerationTimeCron() {
+        if _, err := srv.queueDefault.DispatchCron(generate, cronExp); err != nil {
+            config.Log().Error("failed to schedule leaderboard generation (%s), %v", cronExp, err)
+        }
+    }
 }

-func (srv *LeaderboardService) Run(users []*models.User, interval *models.IntervalKey, by []uint8) error {
+func (srv *LeaderboardService) ComputeLeaderboard(users []*models.User, interval *models.IntervalKey, by []uint8) error {
     logbuch.Info("generating leaderboard (%s) for %d users (%d aggregations)", (*interval)[0], len(users), len(by))

     for _, user := range users {
@@ -2,12 +2,13 @@ package services

 import (
     "github.com/emvi/logbuch"
+    "github.com/muety/artifex"
     "github.com/muety/wakapi/config"
-    "runtime"
+    "github.com/muety/wakapi/utils"
     "strconv"
+    "sync"
     "time"

-    "github.com/go-co-op/gocron"
     "github.com/muety/wakapi/models"
 )

@@ -16,6 +17,8 @@ type MiscService struct {
     userService IUserService
     summaryService ISummaryService
     keyValueService IKeyValueService
+    queueDefault *artifex.Dispatcher
+    queueWorkers *artifex.Dispatcher
 }

 func NewMiscService(userService IUserService, summaryService ISummaryService, keyValueService IKeyValueService) *MiscService {

@@ -24,81 +27,64 @@ func NewMiscService(userService IUserService, summaryService ISummaryService, ke
         userService: userService,
         summaryService: summaryService,
         keyValueService: keyValueService,
+        queueDefault: config.GetDefaultQueue(),
+        queueWorkers: config.GetQueue(config.QueueProcessing),
     }
 }

-type CountTotalTimeJob struct {
-    UserID string
-    NumJobs int
-}
-
-type CountTotalTimeResult struct {
-    UserId string
-    Total time.Duration
-}
-
 func (srv *MiscService) ScheduleCountTotalTime() {
-    s := gocron.NewScheduler(time.Local)
-    s.Every(1).Hour().WaitForSchedule().Do(srv.runCountTotalTime)
-    s.StartBlocking()
+    if _, err := srv.queueDefault.DispatchEvery(srv.CountTotalTime, 1*time.Hour); err != nil {
+        config.Log().Error("failed to schedule user counting jobs, %v", err)
+    }
 }

-func (srv *MiscService) runCountTotalTime() error {
+func (srv *MiscService) CountTotalTime() {
     users, err := srv.userService.GetAll()
     if err != nil {
-        return err
+        logbuch.Error("failed to fetch users for time counting, %v", err)
     }

-    jobs := make(chan *CountTotalTimeJob, len(users))
-    results := make(chan *CountTotalTimeResult, len(users))
+    var totalTime time.Duration = 0
+    var pendingJobs sync.WaitGroup
+    pendingJobs.Add(len(users))

     for _, u := range users {
-        jobs <- &CountTotalTimeJob{
-            UserID: u.ID,
-            NumJobs: len(users),
+        if err := srv.queueWorkers.Dispatch(func() {
+            defer pendingJobs.Done()
+            totalTime += srv.countUserTotalTime(u.ID)
+        }); err != nil {
+            config.Log().Error("failed to enqueue counting job for user '%s'", u.ID)
+            pendingJobs.Done()
         }
     }
-    close(jobs)
-
-    for i := 0; i < runtime.NumCPU(); i++ {
-        go srv.countTotalTimeWorker(jobs, results)
-    }

     // persist
-    var i int
-    var total time.Duration
-    for i = 0; i < len(users); i++ {
-        result := <-results
-        total += result.Total
-    }
-    close(results)
-
-    if err := srv.keyValueService.PutString(&models.KeyStringValue{
-        Key: config.KeyLatestTotalTime,
-        Value: total.String(),
-    }); err != nil {
-        logbuch.Error("failed to save total time count: %v", err)
-    }
-
-    if err := srv.keyValueService.PutString(&models.KeyStringValue{
-        Key: config.KeyLatestTotalUsers,
-        Value: strconv.Itoa(i),
-    }); err != nil {
-        logbuch.Error("failed to save total users count: %v", err)
-    }
-
-    return nil
-}
-
-func (srv *MiscService) countTotalTimeWorker(jobs <-chan *CountTotalTimeJob, results chan<- *CountTotalTimeResult) {
-    for job := range jobs {
-        if result, err := srv.summaryService.Aliased(time.Time{}, time.Now(), &models.User{ID: job.UserID}, srv.summaryService.Retrieve, nil, false); err != nil {
-            config.Log().Error("failed to count total for user %s: %v", job.UserID, err)
-        } else {
-            results <- &CountTotalTimeResult{
-                UserId: job.UserID,
-                Total: result.TotalTime(),
-            }
-        }
-    }
-}
+    go func(wg *sync.WaitGroup) {
+        if utils.WaitTimeout(&pendingJobs, 10*time.Minute) {
+            if err := srv.keyValueService.PutString(&models.KeyStringValue{
+                Key: config.KeyLatestTotalTime,
+                Value: totalTime.String(),
+            }); err != nil {
+                logbuch.Error("failed to save total time count: %v", err)
+            }
+
+            if err := srv.keyValueService.PutString(&models.KeyStringValue{
+                Key: config.KeyLatestTotalUsers,
+                Value: strconv.Itoa(len(users)),
+            }); err != nil {
+                logbuch.Error("failed to save total users count: %v", err)
+            }
+        } else {
+            config.Log().Error("waiting for user counting jobs timed out")
+        }
+    }(&pendingJobs)
+}
+
+func (srv *MiscService) countUserTotalTime(userId string) time.Duration {
+    result, err := srv.summaryService.Aliased(time.Time{}, time.Now(), &models.User{ID: userId}, srv.summaryService.Retrieve, nil, false)
+    if err != nil {
+        config.Log().Error("failed to count total for user %s: %v", userId, err)
+        return 0
+    }
+    return result.TotalTime()
+}
@@ -4,6 +4,7 @@ import (
     "github.com/duke-git/lancet/v2/slice"
     "github.com/emvi/logbuch"
     "github.com/leandro-lugaresi/hub"
+    "github.com/muety/artifex"
     "github.com/muety/wakapi/config"
     "github.com/muety/wakapi/models"
     "math/rand"

@@ -11,7 +12,7 @@ import (
 )

 // delay between evey report generation task (to throttle email sending frequency)
-const reportDelay = 15 * time.Second
+const reportDelay = 5 * time.Second

 // past time range to cover in the report
 const reportRange = 7 * 24 * time.Hour

@@ -23,6 +24,8 @@ type ReportService struct {
     userService IUserService
     mailService IMailService
     rand *rand.Rand
+    queueDefault *artifex.Dispatcher
+    queueWorkers *artifex.Dispatcher
 }

 func NewReportService(summaryService ISummaryService, userService IUserService, mailService IMailService) *ReportService {

@@ -33,15 +36,27 @@ func NewReportService(summaryService ISummaryService, userService IUserService,
         userService: userService,
         mailService: mailService,
         rand: rand.New(rand.NewSource(time.Now().Unix())),
+        queueDefault: config.GetDefaultQueue(),
+        queueWorkers: config.GetQueue(config.QueueReports),
     }

     return srv
 }

 func (srv *ReportService) Schedule() {
-    logbuch.Info("initializing report service")
+    logbuch.Info("scheduling report generation")

-    _, err := config.GetDefaultQueue().DispatchCron(func() {
+    scheduleUserReport := func(u *models.User, index int) {
+        if err := srv.queueWorkers.DispatchIn(func() {
+            if err := srv.SendReport(u, reportRange); err != nil {
+                config.Log().Error("failed to generate report for '%s', %v", u.ID, err)
+            }
+        }, time.Duration(index)*reportDelay); err != nil {
+            config.Log().Error("failed to dispatch report generation job for user '%s', %v", u.ID, err)
+        }
+    }
+
+    _, err := srv.queueDefault.DispatchCron(func() {
         // fetch all users with reports enabled
         users, err := srv.userService.GetAllByReports(true)
         if err != nil {

@@ -54,18 +69,10 @@ func (srv *ReportService) Schedule() {
             return u.Email != ""
         })

-        // schedule jobs, throttled by one job per 15 seconds
+        // schedule jobs, throttled by one job per x seconds
         logbuch.Info("scheduling report generation for %d users", len(users))
         for i, u := range users {
-            err := config.GetQueue(config.QueueMails).DispatchIn(func() {
-                if err := srv.SendReport(u, reportRange); err != nil {
-                    config.Log().Error("failed to generate report for '%s', %v", u.ID, err)
-                }
-            }, time.Duration(i)*reportDelay)
-
-            if err != nil {
-                config.Log().Error("failed to dispatch report generation job for user '%s', %v", u.ID, err)
-            }
+            scheduleUserReport(u, i)
         }
     }, srv.config.App.GetWeeklyReportCron())

@@ -8,7 +8,7 @@ import (

 type IAggregationService interface {
     Schedule()
-    Run(set datastructure.Set[string]) error
+    AggregateSummaries(set datastructure.Set[string]) error
 }

 type IMiscService interface {

@@ -97,8 +97,8 @@ type IReportService interface {
 }

 type ILeaderboardService interface {
-    ScheduleDefault()
-    Run([]*models.User, *models.IntervalKey, []uint8) error
+    Schedule()
+    ComputeLeaderboard([]*models.User, *models.IntervalKey, []uint8) error
     ExistsAnyByUser(string) (bool, error)
     CountUsers() (int64, error)
     GetByInterval(*models.IntervalKey, *models.PageParams, bool) (models.Leaderboard, error)
utils/sync.go (new file, 23 lines)

@@ -0,0 +1,23 @@
+package utils
+
+import (
+    "sync"
+    "time"
+)
+
+// WaitTimeout waits for the waitgroup for the specified max timeout.
+// Returns true if waiting timed out.
+// See // https://stackoverflow.com/a/32843750/3112139.
+func WaitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
+    c := make(chan struct{})
+    go func() {
+        defer close(c)
+        wg.Wait()
+    }()
+    select {
+    case <-c:
+        return false // completed normally
+    case <-time.After(timeout):
+        return true // timed out
+    }
+}
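The WaitTimeout helper above is what lets CountTotalTime hand its per-user jobs to the worker queue and still bound how long it waits for them to finish. A small standalone usage sketch (worker count and timeout are illustrative):

    package main

    import (
        "fmt"
        "sync"
        "time"

        "github.com/muety/wakapi/utils"
    )

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func(n int) {
                defer wg.Done()
                time.Sleep(time.Duration(n) * 100 * time.Millisecond) // simulated work
            }(i)
        }

        // WaitTimeout returns true if the group did not finish within the timeout
        if utils.WaitTimeout(&wg, 1*time.Second) {
            fmt.Println("timed out waiting for workers")
        } else {
            fmt.Println("all workers finished in time")
        }
    }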