Skip to content

Commit 1f031cb

Browse files
committed
subsume workerpool into the queues and create a flushable interface
1 parent 74a30fc commit 1f031cb

File tree

9 files changed

+121
-86
lines changed

9 files changed

+121
-86
lines changed

modules/queue/manager.go

Lines changed: 56 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -37,22 +37,37 @@ type ManagedQueue struct {
3737
Name string
3838
Configuration interface{}
3939
ExemplarType string
40-
Pool ManagedPool
40+
Managed interface{}
4141
counter int64
4242
PoolWorkers map[int64]*PoolWorkers
4343
}
4444

45+
// Flushable represents a pool or queue that is flushable
46+
type Flushable interface {
47+
// Flush will add a flush worker to the pool
48+
Flush(time.Duration) error
49+
// IsEmpty returns whether the managed pool is empty and has no work
50+
IsEmpty() bool
51+
}
52+
4553
// ManagedPool is a simple interface to get certain details from a worker pool
4654
type ManagedPool interface {
55+
// AddWorkers adds a number of workers as a group to the pool with the provided timeout. A CancelFunc is provided to cancel the group
4756
AddWorkers(number int, timeout time.Duration) context.CancelFunc
57+
// NumberOfWorkers returns the total number of workers in the pool
4858
NumberOfWorkers() int
59+
// MaxNumberOfWorkers returns the maximum number of workers the pool can dynamically grow to
4960
MaxNumberOfWorkers() int
61+
// SetMaxNumberOfWorkers sets the maximum number of workers the pool can dynamically grow to
5062
SetMaxNumberOfWorkers(int)
63+
// BoostTimeout returns the current timeout for worker groups created during a boost
5164
BoostTimeout() time.Duration
65+
// BlockTimeout returns the timeout the internal channel can block for before a boost would occur
5266
BlockTimeout() time.Duration
67+
// BoostWorkers sets the number of workers to be created during a boost
5368
BoostWorkers() int
54-
SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
55-
Flush(time.Duration) error
69+
// SetPoolSettings sets the user updatable settings for the pool
70+
SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
5671
}
5772

5873
// ManagedQueueList implements the sort.Interface
@@ -66,7 +81,7 @@ type PoolWorkers struct {
6681
Timeout time.Time
6782
HasTimeout bool
6883
Cancel context.CancelFunc
69-
IsFlush bool
84+
IsFlusher bool
7085
}
7186

7287
// PoolWorkersList implements the sort.Interface for PoolWorkers
@@ -87,26 +102,28 @@ func GetManager() *Manager {
87102
}
88103

89104
// Add adds a queue to this manager
90-
func (m *Manager) Add(name string,
105+
func (m *Manager) Add(managed interface{},
91106
t Type,
92107
configuration,
93-
exemplar interface{},
94-
pool ManagedPool) int64 {
108+
exemplar interface{}) int64 {
95109

96110
cfg, _ := json.Marshal(configuration)
97111
mq := &ManagedQueue{
98112
Type: t,
99113
Configuration: string(cfg),
100114
ExemplarType: reflect.TypeOf(exemplar).String(),
101115
PoolWorkers: make(map[int64]*PoolWorkers),
102-
Pool: pool,
116+
Managed: managed,
103117
}
104118
m.mutex.Lock()
105119
m.counter++
106120
mq.QID = m.counter
107121
mq.Name = fmt.Sprintf("queue-%d", mq.QID)
108-
if len(name) > 0 {
109-
mq.Name = name
122+
if named, ok := managed.(Named); ok {
123+
name := named.Name()
124+
if len(name) > 0 {
125+
mq.Name = name
126+
}
110127
}
111128
m.Queues[mq.QID] = mq
112129
m.mutex.Unlock()
@@ -155,7 +172,7 @@ func (q *ManagedQueue) Workers() []*PoolWorkers {
155172
}
156173

157174
// RegisterWorkers registers workers to this queue
158-
func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc, isFlush bool) int64 {
175+
func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc, isFlusher bool) int64 {
159176
q.mutex.Lock()
160177
defer q.mutex.Unlock()
161178
q.counter++
@@ -166,7 +183,7 @@ func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout b
166183
Timeout: timeout,
167184
HasTimeout: hasTimeout,
168185
Cancel: cancel,
169-
IsFlush: isFlush,
186+
IsFlusher: isFlusher,
170187
}
171188
return q.counter
172189
}
@@ -195,65 +212,74 @@ func (q *ManagedQueue) RemoveWorkers(pid int64) {
195212

196213
// AddWorkers adds workers to the queue if it has registered an add worker function
197214
func (q *ManagedQueue) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
198-
if q.Pool != nil {
215+
if pool, ok := q.Managed.(ManagedPool); ok {
199216
// the cancel will be added to the pool workers description above
200-
return q.Pool.AddWorkers(number, timeout)
217+
return pool.AddWorkers(number, timeout)
201218
}
202219
return nil
203220
}
204221

205222
// Flush flushes the queue with a timeout
206223
func (q *ManagedQueue) Flush(timeout time.Duration) error {
207-
if q.Pool != nil {
208-
return q.Pool.Flush(timeout)
224+
if flushable, ok := q.Managed.(Flushable); ok {
225+
// delegate the flush, with its timeout, to the underlying flushable pool or queue
226+
return flushable.Flush(timeout)
209227
}
210228
return nil
211229
}
212230

231+
// IsEmpty returns whether the queue is empty
232+
func (q *ManagedQueue) IsEmpty() bool {
233+
if flushable, ok := q.Managed.(Flushable); ok {
234+
return flushable.IsEmpty()
235+
}
236+
return true
237+
}
238+
213239
// NumberOfWorkers returns the number of workers in the queue
214240
func (q *ManagedQueue) NumberOfWorkers() int {
215-
if q.Pool != nil {
216-
return q.Pool.NumberOfWorkers()
241+
if pool, ok := q.Managed.(ManagedPool); ok {
242+
return pool.NumberOfWorkers()
217243
}
218244
return -1
219245
}
220246

221247
// MaxNumberOfWorkers returns the maximum number of workers for the pool
222248
func (q *ManagedQueue) MaxNumberOfWorkers() int {
223-
if q.Pool != nil {
224-
return q.Pool.MaxNumberOfWorkers()
249+
if pool, ok := q.Managed.(ManagedPool); ok {
250+
return pool.MaxNumberOfWorkers()
225251
}
226252
return 0
227253
}
228254

229255
// BoostWorkers returns the number of workers for a boost
230256
func (q *ManagedQueue) BoostWorkers() int {
231-
if q.Pool != nil {
232-
return q.Pool.BoostWorkers()
257+
if pool, ok := q.Managed.(ManagedPool); ok {
258+
return pool.BoostWorkers()
233259
}
234260
return -1
235261
}
236262

237263
// BoostTimeout returns the timeout of the next boost
238264
func (q *ManagedQueue) BoostTimeout() time.Duration {
239-
if q.Pool != nil {
240-
return q.Pool.BoostTimeout()
265+
if pool, ok := q.Managed.(ManagedPool); ok {
266+
return pool.BoostTimeout()
241267
}
242268
return 0
243269
}
244270

245271
// BlockTimeout returns the timeout until the next boost
246272
func (q *ManagedQueue) BlockTimeout() time.Duration {
247-
if q.Pool != nil {
248-
return q.Pool.BlockTimeout()
273+
if pool, ok := q.Managed.(ManagedPool); ok {
274+
return pool.BlockTimeout()
249275
}
250276
return 0
251277
}
252278

253-
// SetSettings sets the setable boost values
254-
func (q *ManagedQueue) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
255-
if q.Pool != nil {
256-
q.Pool.SetSettings(maxNumberOfWorkers, boostWorkers, timeout)
279+
// SetPoolSettings sets the settable boost values
280+
func (q *ManagedQueue) SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
281+
if pool, ok := q.Managed.(ManagedPool); ok {
282+
pool.SetPoolSettings(maxNumberOfWorkers, boostWorkers, timeout)
257283
}
258284
}
259285

modules/queue/queue_channel.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro
4848
workers: config.Workers,
4949
name: config.Name,
5050
}
51-
queue.qid = GetManager().Add(config.Name, ChannelQueueType, config, exemplar, queue)
51+
queue.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar)
5252
return queue, nil
5353
}
5454

modules/queue/queue_disk.go

Lines changed: 18 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import (
99
"encoding/json"
1010
"fmt"
1111
"sync"
12+
"sync/atomic"
1213
"time"
1314

1415
"code.gitea.io/gitea/modules/log"
@@ -29,7 +30,7 @@ type LevelQueueConfiguration struct {
2930

3031
// LevelQueue implements a disk library queue
3132
type LevelQueue struct {
32-
pool *WorkerPool
33+
*WorkerPool
3334
queue *levelqueue.Queue
3435
closed chan struct{}
3536
terminated chan struct{}
@@ -53,15 +54,15 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
5354
}
5455

5556
queue := &LevelQueue{
56-
pool: NewWorkerPool(handle, config.WorkerPoolConfiguration),
57+
WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration),
5758
queue: internal,
5859
exemplar: exemplar,
5960
closed: make(chan struct{}),
6061
terminated: make(chan struct{}),
6162
workers: config.Workers,
6263
name: config.Name,
6364
}
64-
queue.pool.qid = GetManager().Add(config.Name, LevelQueueType, config, exemplar, queue.pool)
65+
queue.qid = GetManager().Add(queue, LevelQueueType, config, exemplar)
6566
return queue, nil
6667
}
6768

@@ -72,7 +73,7 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func()))
7273
log.Debug("LevelQueue: %s Starting", l.name)
7374

7475
go func() {
75-
_ = l.pool.AddWorkers(l.workers, 0)
76+
_ = l.AddWorkers(l.workers, 0)
7677
}()
7778

7879
go l.readToChan()
@@ -81,12 +82,12 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func()))
8182
<-l.closed
8283

8384
log.Trace("LevelQueue: %s Waiting til done", l.name)
84-
l.pool.Wait()
85+
l.Wait()
8586

8687
log.Trace("LevelQueue: %s Waiting til cleaned", l.name)
8788
ctx, cancel := context.WithCancel(context.Background())
8889
atTerminate(ctx, cancel)
89-
l.pool.CleanUp(ctx)
90+
l.CleanUp(ctx)
9091
cancel()
9192
log.Trace("LevelQueue: %s Cleaned", l.name)
9293

@@ -97,33 +98,37 @@ func (l *LevelQueue) readToChan() {
9798
select {
9899
case <-l.closed:
99100
// tell the pool to shutdown.
100-
l.pool.cancel()
101+
l.cancel()
101102
return
102103
default:
104+
atomic.AddInt64(&l.numInQueue, 1)
103105
bs, err := l.queue.RPop()
104106
if err != nil {
105107
if err != levelqueue.ErrNotFound {
106108
log.Error("LevelQueue: %s Error on RPop: %v", l.name, err)
107109
}
110+
atomic.AddInt64(&l.numInQueue, -1)
108111
time.Sleep(time.Millisecond * 100)
109112
continue
110113
}
111114

112115
if len(bs) == 0 {
116+
atomic.AddInt64(&l.numInQueue, -1)
113117
time.Sleep(time.Millisecond * 100)
114118
continue
115119
}
116120

117121
data, err := unmarshalAs(bs, l.exemplar)
118122
if err != nil {
119123
log.Error("LevelQueue: %s Failed to unmarshal with error: %v", l.name, err)
124+
atomic.AddInt64(&l.numInQueue, -1)
120125
time.Sleep(time.Millisecond * 100)
121126
continue
122127
}
123128

124129
log.Trace("LevelQueue %s: Task found: %#v", l.name, data)
125-
l.pool.Push(data)
126-
130+
l.WorkerPool.Push(data)
131+
atomic.AddInt64(&l.numInQueue, -1)
127132
}
128133
}
129134
}
@@ -140,14 +145,9 @@ func (l *LevelQueue) Push(data Data) error {
140145
return l.queue.LPush(bs)
141146
}
142147

143-
// Flush flushes the queue and blocks till the queue is empty
144-
func (l *LevelQueue) Flush(timeout time.Duration) error {
145-
return l.pool.Flush(timeout)
146-
}
147-
148148
// IsEmpty checks whether the queue is empty
149149
func (l *LevelQueue) IsEmpty() bool {
150-
if !l.pool.IsEmpty() {
150+
if !l.WorkerPool.IsEmpty() {
151151
return false
152152
}
153153
return l.queue.Len() == 0
@@ -177,6 +177,9 @@ func (l *LevelQueue) Terminate() {
177177
default:
178178
close(l.terminated)
179179
l.lock.Unlock()
180+
if log.IsDebug() {
181+
log.Debug("LevelQueue: %s Closing with %d tasks left in queue", l.name, l.queue.Len())
182+
}
180183
if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" {
181184
log.Error("Error whilst closing internal queue in %s: %v", l.name, err)
182185
}

0 commit comments

Comments
 (0)