	Queue: Make WorkerPools and Queues flushable (#10001)
* Make WorkerPools and Queues flushable

  Adds Flush methods to Queues and the WorkerPool
  Further abstracts the WorkerPool
  Adds a final step to Flush the queues in the defer from PrintCurrentTest
  Fixes an issue with Settings inheritance in queues

  Signed-off-by: Andrew Thornton <art27@cantab.net>

* Change to for loop
* Add IsEmpty and begin just making the queues composed WorkerPools
* subsume workerpool into the queues and create a flushable interface
* Add manager command
* Move flushall to queue.Manager and add to testlogger
* As per @guillep2k
* as per @guillep2k
* Just make queues all implement flushable and clean up the wrapped queue flushes
* cope with no timeout

Co-authored-by: Lauris BH <lauris@nix.lv>
		
							
								
								
									
cmd/manager.go | 92 (new file)
@@ -0,0 +1,92 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+    "fmt"
+    "net/http"
+    "os"
+    "time"
+
+    "code.gitea.io/gitea/modules/private"
+
+    "github.com/urfave/cli"
+)
+
+var (
+    // CmdManager represents the manager command
+    CmdManager = cli.Command{
+        Name:        "manager",
+        Usage:       "Manage the running gitea process",
+        Description: "This is a command for managing the running gitea process",
+        Subcommands: []cli.Command{
+            subcmdShutdown,
+            subcmdRestart,
+            subcmdFlushQueues,
+        },
+    }
+    subcmdShutdown = cli.Command{
+        Name:   "shutdown",
+        Usage:  "Gracefully shutdown the running process",
+        Action: runShutdown,
+    }
+    subcmdRestart = cli.Command{
+        Name:   "restart",
+        Usage:  "Gracefully restart the running process - (not implemented for windows servers)",
+        Action: runRestart,
+    }
+    subcmdFlushQueues = cli.Command{
+        Name:   "flush-queues",
+        Usage:  "Flush queues in the running process",
+        Action: runFlushQueues,
+        Flags: []cli.Flag{
+            cli.DurationFlag{
+                Name:  "timeout",
+                Value: 60 * time.Second,
+                Usage: "Timeout for the flushing process",
+            },
+            cli.BoolFlag{
+                Name:  "non-blocking",
+                Usage: "Set to true to not wait for flush to complete before returning",
+            },
+        },
+    }
+)
+
+func runShutdown(c *cli.Context) error {
+    setup("manager", false)
+    statusCode, msg := private.Shutdown()
+    switch statusCode {
+    case http.StatusInternalServerError:
+        fail("InternalServerError", msg)
+    }
+
+    fmt.Fprintln(os.Stdout, msg)
+    return nil
+}
+
+func runRestart(c *cli.Context) error {
+    setup("manager", false)
+    statusCode, msg := private.Restart()
+    switch statusCode {
+    case http.StatusInternalServerError:
+        fail("InternalServerError", msg)
+    }
+
+    fmt.Fprintln(os.Stdout, msg)
+    return nil
+}
+
+func runFlushQueues(c *cli.Context) error {
+    setup("manager", false)
+    statusCode, msg := private.FlushQueues(c.Duration("timeout"), c.Bool("non-blocking"))
+    switch statusCode {
+    case http.StatusInternalServerError:
+        fail("InternalServerError", msg)
+    }
+
+    fmt.Fprintln(os.Stdout, msg)
+    return nil
+}
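Once registered in main.go below, these subcommands can be invoked against a running instance, e.g. `gitea manager flush-queues --timeout 60s`, or with `--non-blocking` to return before the flush completes; the command, subcommand and flag names all come from the cli.Command definitions above.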
@@ -5,6 +5,7 @@
 package integrations
 
 import (
+    "context"
     "encoding/json"
     "fmt"
    "os"
@@ -12,8 +13,10 @@ import (
     "strings"
     "sync"
     "testing"
+    "time"
 
     "code.gitea.io/gitea/modules/log"
+    "code.gitea.io/gitea/modules/queue"
 )
 
 var prefix string
@@ -98,6 +101,9 @@ func PrintCurrentTest(t testing.TB, skip ...int) func() {
     }
     writerCloser.setT(&t)
     return func() {
+        if err := queue.GetManager().FlushAll(context.Background(), 20*time.Second); err != nil {
+            t.Errorf("Flushing queues failed with error %v", err)
+        }
         _ = writerCloser.Close()
     }
 }
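The deferred FlushAll call above is the same entry point any Go code inside Gitea can use to drain the queues. A minimal standalone sketch, assuming only the GetManager and FlushAll signatures introduced by this commit:

package main

import (
    "context"
    "log"
    "time"

    "code.gitea.io/gitea/modules/queue"
)

func main() {
    // Block until every flushable queue reports IsEmpty,
    // or give up after 20 seconds and return the context error.
    if err := queue.GetManager().FlushAll(context.Background(), 20*time.Second); err != nil {
        log.Printf("flushing queues failed: %v", err)
    }
}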
							
								
								
									
main.go | 1
@@ -69,6 +69,7 @@ arguments - which can alternatively be run by running the subcommand web.`
         cmd.CmdKeys,
         cmd.CmdConvert,
         cmd.CmdDoctor,
+        cmd.CmdManager,
     }
     // Now adjust these commands to add our global configuration options
 
@@ -110,28 +110,19 @@ func (g *Manager) handleSignals(ctx context.Context) {
         case sig := <-signalChannel:
             switch sig {
             case syscall.SIGHUP:
-                if setting.GracefulRestartable {
-                    log.Info("PID: %d. Received SIGHUP. Forking...", pid)
-                    err := g.doFork()
-                    if err != nil && err.Error() != "another process already forked. Ignoring this one" {
-                        log.Error("Error whilst forking from PID: %d : %v", pid, err)
-                    }
-                } else {
-                    log.Info("PID: %d. Received SIGHUP. Not set restartable. Shutting down...", pid)
-
-                    g.doShutdown()
-                }
+                log.Info("PID: %d. Received SIGHUP. Attempting GracefulRestart...", pid)
+                g.DoGracefulRestart()
             case syscall.SIGUSR1:
                 log.Info("PID %d. Received SIGUSR1.", pid)
             case syscall.SIGUSR2:
                 log.Warn("PID %d. Received SIGUSR2. Hammering...", pid)
-                g.doHammerTime(0 * time.Second)
+                g.DoImmediateHammer()
             case syscall.SIGINT:
                 log.Warn("PID %d. Received SIGINT. Shutting down...", pid)
-                g.doShutdown()
+                g.DoGracefulShutdown()
             case syscall.SIGTERM:
                 log.Warn("PID %d. Received SIGTERM. Shutting down...", pid)
-                g.doShutdown()
+                g.DoGracefulShutdown()
             case syscall.SIGTSTP:
                 log.Info("PID %d. Received SIGTSTP.", pid)
             default:
@@ -139,7 +130,7 @@ func (g *Manager) handleSignals(ctx context.Context) {
             }
         case <-ctx.Done():
             log.Warn("PID: %d. Background context for manager closed - %v - Shutting down...", pid, ctx.Err())
-            g.doShutdown()
+            g.DoGracefulShutdown()
         }
     }
 }
@@ -160,6 +151,31 @@ func (g *Manager) doFork() error {
     return err
 }
 
+// DoGracefulRestart causes a graceful restart
+func (g *Manager) DoGracefulRestart() {
+    if setting.GracefulRestartable {
+        log.Info("PID: %d. Forking...", os.Getpid())
+        err := g.doFork()
+        if err != nil && err.Error() != "another process already forked. Ignoring this one" {
+            log.Error("Error whilst forking from PID: %d : %v", os.Getpid(), err)
+        }
+    } else {
+        log.Info("PID: %d. Not set restartable. Shutting down...", os.Getpid())
+
+        g.doShutdown()
+    }
+}
+
+// DoImmediateHammer causes an immediate hammer
+func (g *Manager) DoImmediateHammer() {
+    g.doHammerTime(0 * time.Second)
+}
+
+// DoGracefulShutdown causes a graceful shutdown
+func (g *Manager) DoGracefulShutdown() {
+    g.doShutdown()
+}
+
 // RegisterServer registers the running of a listening server, in the case of unix this means that the parent process can now die.
 // Any call to RegisterServer must be matched by a call to ServerDone
 func (g *Manager) RegisterServer() {
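Behaviour on unix is unchanged by this refactor: SIGHUP still forks a new process when GracefulRestartable is set (now via the exported DoGracefulRestart), SIGUSR2 still hammers immediately, and SIGINT/SIGTERM still shut down gracefully. The signal handler is now just a thin dispatcher over exported methods, which the new manager command can also reach through the private API.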
@@ -43,6 +43,7 @@ type Manager struct {
     runningServerWaitGroup sync.WaitGroup
     createServerWaitGroup  sync.WaitGroup
     terminateWaitGroup     sync.WaitGroup
+    shutdownRequested      chan struct{}
 }
 
 func newGracefulManager(ctx context.Context) *Manager {
@@ -62,6 +63,7 @@ func (g *Manager) start() {
     g.shutdown = make(chan struct{})
     g.hammer = make(chan struct{})
     g.done = make(chan struct{})
+    g.shutdownRequested = make(chan struct{})
 
     // Set the running state
     g.setState(stateRunning)
@@ -107,7 +109,10 @@ loop:
     for {
         select {
         case <-g.ctx.Done():
-            g.doShutdown()
+            g.DoGracefulShutdown()
+            waitTime += setting.GracefulHammerTime
+            break loop
+        case <-g.shutdownRequested:
             waitTime += setting.GracefulHammerTime
             break loop
         case change := <-changes:
@@ -115,12 +120,12 @@ loop:
             case svc.Interrogate:
                 status <- change.CurrentStatus
             case svc.Stop, svc.Shutdown:
-                g.doShutdown()
+                g.DoGracefulShutdown()
                 waitTime += setting.GracefulHammerTime
                 break loop
             case hammerCode:
-                g.doShutdown()
-                g.doHammerTime(0 * time.Second)
+                g.DoGracefulShutdown()
+                g.DoImmediateHammer()
                 break loop
             default:
                 log.Debug("Unexpected control request: %v", change.Cmd)
@@ -140,7 +145,7 @@ hammerLoop:
             case svc.Interrogate:
                 status <- change.CurrentStatus
             case svc.Stop, svc.Shutdown, hammerCmd:
-                g.doHammerTime(0 * time.Second)
+                g.DoImmediateHammer()
                 break hammerLoop
             default:
                 log.Debug("Unexpected control request: %v", change.Cmd)
@@ -152,6 +157,24 @@ hammerLoop:
     return false, 0
 }
 
+// DoImmediateHammer causes an immediate hammer
+func (g *Manager) DoImmediateHammer() {
+    g.doHammerTime(0 * time.Second)
+}
+
+// DoGracefulShutdown causes a graceful shutdown
+func (g *Manager) DoGracefulShutdown() {
+    g.lock.Lock()
+    select {
+    case <-g.shutdownRequested:
+        g.lock.Unlock()
+    default:
+        close(g.shutdownRequested)
+        g.lock.Unlock()
+        g.doShutdown()
+    }
+}
+
 // RegisterServer registers the running of a listening server.
 // Any call to RegisterServer must be matched by a call to ServerDone
 func (g *Manager) RegisterServer() {
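DoGracefulShutdown on the windows side uses the classic close-once channel idiom so that repeated shutdown requests (service control, context cancellation, the private API) are safe. The idiom in isolation, as a sketch with hypothetical names:

var (
    mu   sync.Mutex
    done = make(chan struct{})
)

// requestOnce runs action exactly once, however many goroutines call it.
func requestOnce(action func()) {
    mu.Lock()
    select {
    case <-done:
        mu.Unlock() // already requested: nothing to do
    default:
        close(done) // first caller wins; later callers see the closed channel
        mu.Unlock()
        action()
    }
}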
							
								
								
									
modules/private/manager.go | 83 (new file)
@@ -0,0 +1,83 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package private
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "time"
+
+    "code.gitea.io/gitea/modules/setting"
+)
+
+// Shutdown calls the internal shutdown function
+func Shutdown() (int, string) {
+    reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/shutdown")
+
+    req := newInternalRequest(reqURL, "POST")
+    resp, err := req.Response()
+    if err != nil {
+        return http.StatusInternalServerError, fmt.Sprintf("Unable to contact gitea: %v", err.Error())
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return resp.StatusCode, decodeJSONError(resp).Err
+    }
+
+    return http.StatusOK, "Shutting down"
+}
+
+// Restart calls the internal restart function
+func Restart() (int, string) {
+    reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/restart")
+
+    req := newInternalRequest(reqURL, "POST")
+    resp, err := req.Response()
+    if err != nil {
+        return http.StatusInternalServerError, fmt.Sprintf("Unable to contact gitea: %v", err.Error())
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return resp.StatusCode, decodeJSONError(resp).Err
+    }
+
+    return http.StatusOK, "Restarting"
+}
+
+// FlushOptions represents the options for the flush call
+type FlushOptions struct {
+    Timeout     time.Duration
+    NonBlocking bool
+}
+
+// FlushQueues calls the internal flush-queues function
+func FlushQueues(timeout time.Duration, nonBlocking bool) (int, string) {
+    reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/flush-queues")
+
+    req := newInternalRequest(reqURL, "POST")
+    if timeout > 0 {
+        req.SetTimeout(timeout+10*time.Second, timeout+10*time.Second)
+    }
+    req = req.Header("Content-Type", "application/json")
+    jsonBytes, _ := json.Marshal(FlushOptions{
+        Timeout:     timeout,
+        NonBlocking: nonBlocking,
+    })
+    req.Body(jsonBytes)
+    resp, err := req.Response()
+    if err != nil {
+        return http.StatusInternalServerError, fmt.Sprintf("Unable to contact gitea: %v", err.Error())
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return resp.StatusCode, decodeJSONError(resp).Err
+    }
+
+    return http.StatusOK, "Flushed"
+}
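One subtlety in FlushQueues above: FlushOptions carries a time.Duration, which encoding/json serialises as integer nanoseconds, so the internal endpoint must decode it the same way. A self-contained check (sketch):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type FlushOptions struct {
    Timeout     time.Duration
    NonBlocking bool
}

func main() {
    b, _ := json.Marshal(FlushOptions{Timeout: 60 * time.Second})
    fmt.Println(string(b)) // {"Timeout":60000000000,"NonBlocking":false}
}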
							
								
								
									
modules/queue/helper.go | 63 (new file)
@@ -0,0 +1,63 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+    "encoding/json"
+    "reflect"
+)
+
+// toConfig will attempt to convert a given configuration cfg into the provided exemplar type.
+//
+// It will tolerate the cfg being passed as a []byte or string of a json representation of the
+// exemplar or the correct type of the exemplar itself
+func toConfig(exemplar, cfg interface{}) (interface{}, error) {
+    if reflect.TypeOf(cfg).AssignableTo(reflect.TypeOf(exemplar)) {
+        return cfg, nil
+    }
+
+    configBytes, ok := cfg.([]byte)
+    if !ok {
+        configStr, ok := cfg.(string)
+        if !ok {
+            return nil, ErrInvalidConfiguration{cfg: cfg}
+        }
+        configBytes = []byte(configStr)
+    }
+    newVal := reflect.New(reflect.TypeOf(exemplar))
+    if err := json.Unmarshal(configBytes, newVal.Interface()); err != nil {
+        return nil, ErrInvalidConfiguration{cfg: cfg, err: err}
+    }
+    return newVal.Elem().Interface(), nil
+}
+
+// unmarshalAs will attempt to unmarshal provided bytes as the provided exemplar
+func unmarshalAs(bs []byte, exemplar interface{}) (data Data, err error) {
+    if exemplar != nil {
+        t := reflect.TypeOf(exemplar)
+        n := reflect.New(t)
+        ne := n.Elem()
+        err = json.Unmarshal(bs, ne.Addr().Interface())
+        data = ne.Interface().(Data)
+    } else {
+        err = json.Unmarshal(bs, &data)
+    }
+
+    return
+}
+
+// assignableTo will check if provided data is assignable to the same type as the exemplar
+// if the provided exemplar is nil then it will always return true
+func assignableTo(data Data, exemplar interface{}) bool {
+    if exemplar == nil {
+        return true
+    }
+
+    // Assert data is of same type as exemplar
+    t := reflect.TypeOf(data)
+    exemplarType := reflect.TypeOf(exemplar)
+
+    return t.AssignableTo(exemplarType) && data != nil
+}
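To make the tolerance of toConfig concrete, all three of the following calls yield an equivalent typed configuration (sketch; MyConfig is a hypothetical exemplar, not part of this commit):

type MyConfig struct {
    QueueLength int
}

func example() {
    // cfg passed as the exemplar's own type:
    a, _ := toConfig(MyConfig{}, MyConfig{QueueLength: 20})
    // cfg passed as a JSON string:
    b, _ := toConfig(MyConfig{}, `{"QueueLength":20}`)
    // cfg passed as JSON bytes:
    c, _ := toConfig(MyConfig{}, []byte(`{"QueueLength":20}`))
    // a, b and c all hold MyConfig{QueueLength: 20} as interface{} values.
    _, _, _ = a, b, c
}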
@@ -26,36 +26,57 @@ type Manager struct {
     Queues  map[int64]*ManagedQueue
 }
 
-// ManagedQueue represents a working queue inheriting from Gitea.
+// ManagedQueue represents a working queue with a Pool of workers.
+//
+// Although a ManagedQueue should really represent a Queue this does not
+// necessarily have to be the case. This could be used to describe any queue.WorkerPool.
 type ManagedQueue struct {
     mutex         sync.Mutex
     QID           int64
-    Queue         Queue
     Type          Type
     Name          string
     Configuration interface{}
     ExemplarType  string
-    Pool          ManagedPool
+    Managed       interface{}
     counter       int64
     PoolWorkers   map[int64]*PoolWorkers
 }
 
+// Flushable represents a pool or queue that is flushable
+type Flushable interface {
+    // Flush will add a flush worker to the pool - the worker should be autoregistered with the manager
+    Flush(time.Duration) error
+    // FlushWithContext is very similar to Flush
+    // NB: The worker will not be registered with the manager.
+    FlushWithContext(ctx context.Context) error
+    // IsEmpty will return if the managed pool is empty and has no work
+    IsEmpty() bool
+}
+
 // ManagedPool is a simple interface to get certain details from a worker pool
 type ManagedPool interface {
+    // AddWorkers adds a number of worker as group to the pool with the provided timeout. A CancelFunc is provided to cancel the group
     AddWorkers(number int, timeout time.Duration) context.CancelFunc
+    // NumberOfWorkers returns the total number of workers in the pool
     NumberOfWorkers() int
+    // MaxNumberOfWorkers returns the maximum number of workers the pool can dynamically grow to
     MaxNumberOfWorkers() int
+    // SetMaxNumberOfWorkers sets the maximum number of workers the pool can dynamically grow to
     SetMaxNumberOfWorkers(int)
+    // BoostTimeout returns the current timeout for worker groups created during a boost
     BoostTimeout() time.Duration
+    // BlockTimeout returns the timeout the internal channel can block for before a boost would occur
     BlockTimeout() time.Duration
+    // BoostWorkers returns the number of workers to be created during a boost
     BoostWorkers() int
-    SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
+    // SetPoolSettings sets the user updatable settings for the pool
+    SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
 }
 
 // ManagedQueueList implements the sort.Interface
 type ManagedQueueList []*ManagedQueue
 
-// PoolWorkers represents a working queue inheriting from Gitea.
+// PoolWorkers represents a group of workers working on a queue
 type PoolWorkers struct {
     PID        int64
     Workers    int
@@ -63,9 +84,10 @@ type PoolWorkers struct {
     Timeout    time.Time
     HasTimeout bool
     Cancel     context.CancelFunc
+    IsFlusher  bool
 }
 
-// PoolWorkersList implements the sort.Interface
+// PoolWorkersList implements the sort.Interface for PoolWorkers
 type PoolWorkersList []*PoolWorkers
 
 func init() {
@@ -83,27 +105,28 @@ func GetManager() *Manager {
 }
 
 // Add adds a queue to this manager
-func (m *Manager) Add(queue Queue,
+func (m *Manager) Add(managed interface{},
     t Type,
     configuration,
-    exemplar interface{},
-    pool ManagedPool) int64 {
+    exemplar interface{}) int64 {
 
     cfg, _ := json.Marshal(configuration)
     mq := &ManagedQueue{
-        Queue:         queue,
         Type:          t,
         Configuration: string(cfg),
         ExemplarType:  reflect.TypeOf(exemplar).String(),
         PoolWorkers:   make(map[int64]*PoolWorkers),
+        Managed:       managed,
     }
     m.mutex.Lock()
     m.counter++
     mq.QID = m.counter
     mq.Name = fmt.Sprintf("queue-%d", mq.QID)
-    if named, ok := queue.(Named); ok {
-        mq.Name = named.Name()
+    if named, ok := managed.(Named); ok {
+        name := named.Name()
+        if len(name) > 0 {
+            mq.Name = name
+        }
     }
     m.Queues[mq.QID] = mq
     m.mutex.Unlock()
@@ -127,6 +150,64 @@ func (m *Manager) GetManagedQueue(qid int64) *ManagedQueue {
     return m.Queues[qid]
 }
 
+// FlushAll flushes all the flushable queues attached to this manager
+func (m *Manager) FlushAll(baseCtx context.Context, timeout time.Duration) error {
+    var ctx context.Context
+    var cancel context.CancelFunc
+    start := time.Now()
+    end := start
+    hasTimeout := false
+    if timeout > 0 {
+        ctx, cancel = context.WithTimeout(baseCtx, timeout)
+        end = start.Add(timeout)
+        hasTimeout = true
+    } else {
+        ctx, cancel = context.WithCancel(baseCtx)
+    }
+    defer cancel()
+
+    for {
+        select {
+        case <-ctx.Done():
+            return ctx.Err()
+        default:
+        }
+        mqs := m.ManagedQueues()
+        wg := sync.WaitGroup{}
+        wg.Add(len(mqs))
+        allEmpty := true
+        for _, mq := range mqs {
+            if mq.IsEmpty() {
+                wg.Done()
+                continue
+            }
+            allEmpty = false
+            if flushable, ok := mq.Managed.(Flushable); ok {
+                go func() {
+                    localCtx, localCancel := context.WithCancel(ctx)
+                    pid := mq.RegisterWorkers(1, start, hasTimeout, end, localCancel, true)
+                    err := flushable.FlushWithContext(localCtx)
+                    if err != nil && err != ctx.Err() {
+                        cancel()
+                    }
+                    mq.CancelWorkers(pid)
+                    localCancel()
+                    wg.Done()
+                }()
+            } else {
+                wg.Done()
+            }
+
+        }
+        if allEmpty {
+            break
+        }
+        wg.Wait()
+    }
+    return nil
+
+}
+
 // ManagedQueues returns the managed queues
 func (m *Manager) ManagedQueues() []*ManagedQueue {
     m.mutex.Lock()
@@ -152,7 +233,7 @@ func (q *ManagedQueue) Workers() []*PoolWorkers {
 }
 
 // RegisterWorkers registers workers to this queue
-func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc) int64 {
+func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc, isFlusher bool) int64 {
     q.mutex.Lock()
     defer q.mutex.Unlock()
     q.counter++
@@ -163,6 +244,7 @@ func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout b
         Timeout:    timeout,
         HasTimeout: hasTimeout,
         Cancel:     cancel,
+        IsFlusher:  isFlusher,
     }
     return q.counter
 }
@@ -191,57 +273,74 @@ func (q *ManagedQueue) RemoveWorkers(pid int64) {
 
 // AddWorkers adds workers to the queue if it has registered an add worker function
 func (q *ManagedQueue) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
-    if q.Pool != nil {
+    if pool, ok := q.Managed.(ManagedPool); ok {
         // the cancel will be added to the pool workers description above
-        return q.Pool.AddWorkers(number, timeout)
+        return pool.AddWorkers(number, timeout)
     }
     return nil
 }
 
+// Flush flushes the queue with a timeout
+func (q *ManagedQueue) Flush(timeout time.Duration) error {
+    if flushable, ok := q.Managed.(Flushable); ok {
+        // the cancel will be added to the pool workers description above
+        return flushable.Flush(timeout)
+    }
+    return nil
+}
+
+// IsEmpty returns if the queue is empty
+func (q *ManagedQueue) IsEmpty() bool {
+    if flushable, ok := q.Managed.(Flushable); ok {
+        return flushable.IsEmpty()
+    }
+    return true
+}
+
 // NumberOfWorkers returns the number of workers in the queue
 func (q *ManagedQueue) NumberOfWorkers() int {
-    if q.Pool != nil {
-        return q.Pool.NumberOfWorkers()
+    if pool, ok := q.Managed.(ManagedPool); ok {
+        return pool.NumberOfWorkers()
     }
     return -1
 }
 
 // MaxNumberOfWorkers returns the maximum number of workers for the pool
 func (q *ManagedQueue) MaxNumberOfWorkers() int {
-    if q.Pool != nil {
-        return q.Pool.MaxNumberOfWorkers()
+    if pool, ok := q.Managed.(ManagedPool); ok {
+        return pool.MaxNumberOfWorkers()
     }
     return 0
 }
 
 // BoostWorkers returns the number of workers for a boost
 func (q *ManagedQueue) BoostWorkers() int {
-    if q.Pool != nil {
-        return q.Pool.BoostWorkers()
+    if pool, ok := q.Managed.(ManagedPool); ok {
+        return pool.BoostWorkers()
     }
     return -1
 }
 
 // BoostTimeout returns the timeout of the next boost
 func (q *ManagedQueue) BoostTimeout() time.Duration {
-    if q.Pool != nil {
-        return q.Pool.BoostTimeout()
+    if pool, ok := q.Managed.(ManagedPool); ok {
+        return pool.BoostTimeout()
    }
     return 0
 }
 
 // BlockTimeout returns the timeout til the next boost
 func (q *ManagedQueue) BlockTimeout() time.Duration {
-    if q.Pool != nil {
-        return q.Pool.BlockTimeout()
+    if pool, ok := q.Managed.(ManagedPool); ok {
+        return pool.BlockTimeout()
     }
     return 0
 }
 
-// SetSettings sets the setable boost values
-func (q *ManagedQueue) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
-    if q.Pool != nil {
-        q.Pool.SetSettings(maxNumberOfWorkers, boostWorkers, timeout)
+// SetPoolSettings sets the setable boost values
+func (q *ManagedQueue) SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
+    if pool, ok := q.Managed.(ManagedPool); ok {
+        pool.SetPoolSettings(maxNumberOfWorkers, boostWorkers, timeout)
     }
 }
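Any managed item that wants to participate in FlushAll only has to satisfy the three-method Flushable interface above. A hedged sketch of a minimal implementor (drainable is hypothetical; only the interface comes from this commit):

type drainable struct {
    items chan Data
}

func (d *drainable) IsEmpty() bool {
    return len(d.items) == 0
}

func (d *drainable) Flush(timeout time.Duration) error {
    ctx := context.Background()
    if timeout > 0 {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }
    return d.FlushWithContext(ctx)
}

func (d *drainable) FlushWithContext(ctx context.Context) error {
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case item := <-d.items:
            _ = item // handle(item) would go here
        default:
            return nil // nothing left: the queue is drained
        }
    }
}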
@@ -6,9 +6,8 @@ package queue
 
 import (
     "context"
-    "encoding/json"
     "fmt"
-    "reflect"
+    "time"
 )
 
 // ErrInvalidConfiguration is called when there is invalid configuration for a queue
@@ -53,8 +52,11 @@ type Named interface {
     Name() string
 }
 
-// Queue defines an interface to save an issue indexer queue
+// Queue defines an interface of a queue-like item
+//
+// Queues will handle their own contents in the Run method
 type Queue interface {
+    Flushable
     Run(atShutdown, atTerminate func(context.Context, func()))
     Push(Data) error
 }
@@ -71,32 +73,27 @@ func NewDummyQueue(handler HandlerFunc, opts, exemplar interface{}) (Queue, erro
 type DummyQueue struct {
 }
 
-// Run starts to run the queue
+// Run does nothing
 func (b *DummyQueue) Run(_, _ func(context.Context, func())) {}
 
-// Push pushes data to the queue
+// Push fakes a push of data to the queue
 func (b *DummyQueue) Push(Data) error {
     return nil
 }
 
-func toConfig(exemplar, cfg interface{}) (interface{}, error) {
-    if reflect.TypeOf(cfg).AssignableTo(reflect.TypeOf(exemplar)) {
-        return cfg, nil
-    }
-    configBytes, ok := cfg.([]byte)
-    if !ok {
-        configStr, ok := cfg.(string)
-        if !ok {
-            return nil, ErrInvalidConfiguration{cfg: cfg}
-        }
-        configBytes = []byte(configStr)
-    }
-    newVal := reflect.New(reflect.TypeOf(exemplar))
-    if err := json.Unmarshal(configBytes, newVal.Interface()); err != nil {
-        return nil, ErrInvalidConfiguration{cfg: cfg, err: err}
-    }
-    return newVal.Elem().Interface(), nil
+// Flush always returns nil
+func (b *DummyQueue) Flush(time.Duration) error {
+    return nil
+}
+
+// FlushWithContext always returns nil
+func (b *DummyQueue) FlushWithContext(context.Context) error {
+    return nil
+}
+
+// IsEmpty asserts that the queue is empty
+func (b *DummyQueue) IsEmpty() bool {
+    return true
 }
 
 var queuesMap = map[Type]NewQueueFunc{DummyQueueType: NewDummyQueue}
@@ -123,7 +120,7 @@ func RegisteredTypesAsString() []string {
     return types
 }
 
-// NewQueue takes a queue Type and HandlerFunc some options and possibly an exemplar and returns a Queue or an error
+// NewQueue takes a queue Type, HandlerFunc, some options and possibly an exemplar and returns a Queue or an error
 func NewQueue(queueType Type, handlerFunc HandlerFunc, opts, exemplar interface{}) (Queue, error) {
     newFn, ok := queuesMap[queueType]
     if !ok {
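Since Queue now embeds Flushable, every queue implementation (including DummyQueue above) must provide the three extra methods; a common way to catch a missing method at compile time is a blank-identifier assertion (sketch, not part of the commit):

var _ Queue = &DummyQueue{}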
@@ -7,8 +7,6 @@ package queue
 | 
				
			|||||||
import (
 | 
					import (
 | 
				
			||||||
	"context"
 | 
						"context"
 | 
				
			||||||
	"fmt"
 | 
						"fmt"
 | 
				
			||||||
	"reflect"
 | 
					 | 
				
			||||||
	"time"
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	"code.gitea.io/gitea/modules/log"
 | 
						"code.gitea.io/gitea/modules/log"
 | 
				
			||||||
)
 | 
					)
 | 
				
			||||||
@@ -18,25 +16,23 @@ const ChannelQueueType Type = "channel"
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
// ChannelQueueConfiguration is the configuration for a ChannelQueue
 | 
					// ChannelQueueConfiguration is the configuration for a ChannelQueue
 | 
				
			||||||
type ChannelQueueConfiguration struct {
 | 
					type ChannelQueueConfiguration struct {
 | 
				
			||||||
	QueueLength  int
 | 
						WorkerPoolConfiguration
 | 
				
			||||||
	BatchLength  int
 | 
					 | 
				
			||||||
	Workers int
 | 
						Workers int
 | 
				
			||||||
	MaxWorkers   int
 | 
					 | 
				
			||||||
	BlockTimeout time.Duration
 | 
					 | 
				
			||||||
	BoostTimeout time.Duration
 | 
					 | 
				
			||||||
	BoostWorkers int
 | 
					 | 
				
			||||||
	Name    string
 | 
						Name    string
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// ChannelQueue implements
 | 
					// ChannelQueue implements Queue
 | 
				
			||||||
 | 
					//
 | 
				
			||||||
 | 
					// A channel queue is not persistable and does not shutdown or terminate cleanly
 | 
				
			||||||
 | 
					// It is basically a very thin wrapper around a WorkerPool
 | 
				
			||||||
type ChannelQueue struct {
 | 
					type ChannelQueue struct {
 | 
				
			||||||
	pool     *WorkerPool
 | 
						*WorkerPool
 | 
				
			||||||
	exemplar interface{}
 | 
						exemplar interface{}
 | 
				
			||||||
	workers  int
 | 
						workers  int
 | 
				
			||||||
	name     string
 | 
						name     string
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// NewChannelQueue create a memory channel queue
 | 
					// NewChannelQueue creates a memory channel queue
 | 
				
			||||||
func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
 | 
					func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
 | 
				
			||||||
	configInterface, err := toConfig(ChannelQueueConfiguration{}, cfg)
 | 
						configInterface, err := toConfig(ChannelQueueConfiguration{}, cfg)
 | 
				
			||||||
	if err != nil {
 | 
 	if err != nil {
@@ -46,26 +42,13 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro
 	if config.BatchLength == 0 {
 		config.BatchLength = 1
 	}
-	dataChan := make(chan Data, config.QueueLength)
-
-	ctx, cancel := context.WithCancel(context.Background())
 	queue := &ChannelQueue{
-		pool: &WorkerPool{
-			baseCtx:            ctx,
-			cancel:             cancel,
-			batchLength:        config.BatchLength,
-			handle:             handle,
-			dataChan:           dataChan,
-			blockTimeout:       config.BlockTimeout,
-			boostTimeout:       config.BoostTimeout,
-			boostWorkers:       config.BoostWorkers,
-			maxNumberOfWorkers: config.MaxWorkers,
-		},
+		WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration),
 		exemplar:   exemplar,
 		workers:    config.Workers,
 		name:       config.Name,
 	}
-	queue.pool.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar, queue.pool)
+	queue.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar)
 	return queue, nil
 }
 
@@ -77,22 +60,18 @@ func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())
 	atTerminate(context.Background(), func() {
 		log.Warn("ChannelQueue: %s is not terminatable!", c.name)
 	})
+	log.Debug("ChannelQueue: %s Starting", c.name)
 	go func() {
-		_ = c.pool.AddWorkers(c.workers, 0)
+		_ = c.AddWorkers(c.workers, 0)
 	}()
 }
 
 // Push will push data into the queue
 func (c *ChannelQueue) Push(data Data) error {
-	if c.exemplar != nil {
-		// Assert data is of same type as r.exemplar
-		t := reflect.TypeOf(data)
-		exemplarType := reflect.TypeOf(c.exemplar)
-		if !t.AssignableTo(exemplarType) || data == nil {
-			return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in queue: %s", data, c.exemplar, c.name)
-		}
+	if !assignableTo(data, c.exemplar) {
+		return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in queue: %s", data, c.exemplar, c.name)
 	}
-	c.pool.Push(data)
+	c.WorkerPool.Push(data)
 	return nil
 }
 
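Every constructor in the hunks above (and in the level, redis and wrapped queues below) now builds its worker pool in one line, NewWorkerPool(handle, config.WorkerPoolConfiguration), instead of filling in a *WorkerPool literal, and each per-queue configuration struct embeds a shared WorkerPoolConfiguration. That struct's definition ships with the WorkerPool code and is not part of this excerpt; a sketch consistent with the fields these hunks move into it:

// Sketch only - the real definition lives with the WorkerPool code and is
// not shown in this diff; field names are taken from the configuration
// literals in the hunks above.
type WorkerPoolConfiguration struct {
	QueueLength  int           // buffer size of the pool's data channel
	BatchLength  int           // how many items are handed to the handler at once
	BlockTimeout time.Duration // how long a push may block before boosting
	BoostTimeout time.Duration // how long boosted workers stay alive
	BoostWorkers int           // number of extra workers added on a boost
	MaxWorkers   int           // hard cap on concurrent workers
}

Sharing one struct keeps the pool knobs identical across the channel, level, redis and wrapped variants instead of copying them field by field into each queue type.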
modules/queue/queue_channel_test.go
@@ -25,12 +25,14 @@ func TestChannelQueue(t *testing.T) {
 
 	queue, err := NewChannelQueue(handle,
 		ChannelQueueConfiguration{
-			QueueLength:  20,
-			Workers:      1,
-			MaxWorkers:   10,
-			BlockTimeout: 1 * time.Second,
-			BoostTimeout: 5 * time.Minute,
-			BoostWorkers: 5,
+			WorkerPoolConfiguration: WorkerPoolConfiguration{
+				QueueLength:  20,
+				MaxWorkers:   10,
+				BlockTimeout: 1 * time.Second,
+				BoostTimeout: 5 * time.Minute,
+				BoostWorkers: 5,
+			},
+			Workers: 1,
 		}, &testData{})
 	assert.NoError(t, err)
 
@@ -60,13 +62,15 @@ func TestChannelQueue_Batch(t *testing.T) {
 
 	queue, err := NewChannelQueue(handle,
 		ChannelQueueConfiguration{
-			QueueLength:  20,
-			BatchLength:  2,
-			Workers:      1,
-			MaxWorkers:   10,
-			BlockTimeout: 1 * time.Second,
-			BoostTimeout: 5 * time.Minute,
-			BoostWorkers: 5,
+			WorkerPoolConfiguration: WorkerPoolConfiguration{
+				QueueLength:  20,
+				BatchLength:  2,
+				BlockTimeout: 1 * time.Second,
+				BoostTimeout: 5 * time.Minute,
+				BoostWorkers: 5,
+				MaxWorkers:   10,
+			},
+			Workers: 1,
 		}, &testData{})
 	assert.NoError(t, err)
 
modules/queue/queue_disk.go
@@ -8,8 +8,8 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"reflect"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"code.gitea.io/gitea/modules/log"
@@ -22,20 +22,15 @@ const LevelQueueType Type = "level"
 
 // LevelQueueConfiguration is the configuration for a LevelQueue
 type LevelQueueConfiguration struct {
+	WorkerPoolConfiguration
 	DataDir string
-	QueueLength  int
-	BatchLength  int
 	Workers int
-	MaxWorkers   int
-	BlockTimeout time.Duration
-	BoostTimeout time.Duration
-	BoostWorkers int
 	Name    string
 }
 
 // LevelQueue implements a disk library queue
 type LevelQueue struct {
-	pool       *WorkerPool
+	*WorkerPool
 	queue      *levelqueue.Queue
 	closed     chan struct{}
 	terminated chan struct{}
@@ -58,21 +53,8 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 		return nil, err
 	}
 
-	dataChan := make(chan Data, config.QueueLength)
-	ctx, cancel := context.WithCancel(context.Background())
-
 	queue := &LevelQueue{
-		pool: &WorkerPool{
-			baseCtx:            ctx,
-			cancel:             cancel,
-			batchLength:        config.BatchLength,
-			handle:             handle,
-			dataChan:           dataChan,
-			blockTimeout:       config.BlockTimeout,
-			boostTimeout:       config.BoostTimeout,
-			boostWorkers:       config.BoostWorkers,
-			maxNumberOfWorkers: config.MaxWorkers,
-		},
+		WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration),
 		queue:      internal,
 		exemplar:   exemplar,
 		closed:     make(chan struct{}),
@@ -80,7 +62,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 		workers:    config.Workers,
 		name:       config.Name,
 	}
-	queue.pool.qid = GetManager().Add(queue, LevelQueueType, config, exemplar, queue.pool)
+	queue.qid = GetManager().Add(queue, LevelQueueType, config, exemplar)
 	return queue, nil
 }
 
@@ -88,9 +70,10 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 	atShutdown(context.Background(), l.Shutdown)
 	atTerminate(context.Background(), l.Terminate)
+	log.Debug("LevelQueue: %s Starting", l.name)
 
 	go func() {
-		_ = l.pool.AddWorkers(l.workers, 0)
+		_ = l.AddWorkers(l.workers, 0)
 	}()
 
 	go l.readToChan()
@@ -99,12 +82,12 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func()))
 	<-l.closed
 
 	log.Trace("LevelQueue: %s Waiting til done", l.name)
-	l.pool.Wait()
+	l.Wait()
 
 	log.Trace("LevelQueue: %s Waiting til cleaned", l.name)
 	ctx, cancel := context.WithCancel(context.Background())
 	atTerminate(ctx, cancel)
-	l.pool.CleanUp(ctx)
+	l.CleanUp(ctx)
 	cancel()
 	log.Trace("LevelQueue: %s Cleaned", l.name)
 
@@ -115,57 +98,46 @@ func (l *LevelQueue) readToChan() {
 		select {
 		case <-l.closed:
 			// tell the pool to shutdown.
-			l.pool.cancel()
+			l.cancel()
 			return
 		default:
+			atomic.AddInt64(&l.numInQueue, 1)
 			bs, err := l.queue.RPop()
 			if err != nil {
 				if err != levelqueue.ErrNotFound {
 					log.Error("LevelQueue: %s Error on RPop: %v", l.name, err)
 				}
+				atomic.AddInt64(&l.numInQueue, -1)
 				time.Sleep(time.Millisecond * 100)
 				continue
 			}
 
 			if len(bs) == 0 {
+				atomic.AddInt64(&l.numInQueue, -1)
 				time.Sleep(time.Millisecond * 100)
 				continue
 			}
 
-			var data Data
-			if l.exemplar != nil {
-				t := reflect.TypeOf(l.exemplar)
-				n := reflect.New(t)
-				ne := n.Elem()
-				err = json.Unmarshal(bs, ne.Addr().Interface())
-				data = ne.Interface().(Data)
-			} else {
-				err = json.Unmarshal(bs, &data)
-			}
+			data, err := unmarshalAs(bs, l.exemplar)
 			if err != nil {
 				log.Error("LevelQueue: %s Failed to unmarshal with error: %v", l.name, err)
+				atomic.AddInt64(&l.numInQueue, -1)
 				time.Sleep(time.Millisecond * 100)
 				continue
 			}
 
 			log.Trace("LevelQueue %s: Task found: %#v", l.name, data)
-			l.pool.Push(data)
+			l.WorkerPool.Push(data)
+			atomic.AddInt64(&l.numInQueue, -1)
 		}
 	}
}
 
 // Push will push the indexer data to queue
 func (l *LevelQueue) Push(data Data) error {
-	if l.exemplar != nil {
-		// Assert data is of same type as r.exemplar
-		value := reflect.ValueOf(data)
-		t := value.Type()
-		exemplarType := reflect.ValueOf(l.exemplar).Type()
-		if !t.AssignableTo(exemplarType) || data == nil {
-			return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, l.exemplar, l.name)
-		}
+	if !assignableTo(data, l.exemplar) {
+		return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, l.exemplar, l.name)
 	}
 	bs, err := json.Marshal(data)
 	if err != nil {
 		return err
@@ -173,16 +145,25 @@ func (l *LevelQueue) Push(data Data) error {
 	return l.queue.LPush(bs)
 }
 
+// IsEmpty checks whether the queue is empty
+func (l *LevelQueue) IsEmpty() bool {
+	if !l.WorkerPool.IsEmpty() {
+		return false
+	}
+	return l.queue.Len() == 0
+}
+
 // Shutdown this queue and stop processing
 func (l *LevelQueue) Shutdown() {
 	l.lock.Lock()
 	defer l.lock.Unlock()
-	log.Trace("LevelQueue: %s Shutdown", l.name)
+	log.Trace("LevelQueue: %s Shutting down", l.name)
 	select {
 	case <-l.closed:
 	default:
 		close(l.closed)
 	}
+	log.Debug("LevelQueue: %s Shutdown", l.name)
 }
 
 // Terminate this queue and close the queue
@@ -196,11 +177,15 @@ func (l *LevelQueue) Terminate() {
 	default:
 		close(l.terminated)
 		l.lock.Unlock()
+		if log.IsDebug() {
+			log.Debug("LevelQueue: %s Closing with %d tasks left in queue", l.name, l.queue.Len())
+		}
 		if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" {
 			log.Error("Error whilst closing internal queue in %s: %v", l.name, err)
 		}
 
 	}
+	log.Debug("LevelQueue: %s Terminated", l.name)
 }
 
 // Name returns the name of this queue
 
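The level queue above (and the redis and wrapped queues later in this diff) replaces its inline reflection with two shared helpers, assignableTo and unmarshalAs. Their definitions sit elsewhere in the package and are not part of this excerpt; a sketch reconstructed from the inline code the hunks delete:

// Sketch only - reconstructed from the deleted inline code; the real
// helpers are defined elsewhere in modules/queue and may differ in detail.
func assignableTo(data Data, exemplar interface{}) bool {
	if exemplar == nil {
		return true // no exemplar configured means no type check
	}
	if data == nil {
		return false
	}
	return reflect.TypeOf(data).AssignableTo(reflect.TypeOf(exemplar))
}

func unmarshalAs(bs []byte, exemplar interface{}) (Data, error) {
	if exemplar == nil {
		var data Data
		err := json.Unmarshal(bs, &data)
		return data, err
	}
	// Build a new value of the exemplar's concrete type and unmarshal
	// into it, exactly as the deleted per-queue code did.
	t := reflect.TypeOf(exemplar)
	n := reflect.New(t)
	if err := json.Unmarshal(bs, n.Interface()); err != nil {
		return nil, err
	}
	return n.Elem().Interface().(Data), nil
}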
modules/queue/queue_disk_channel.go
@@ -6,7 +6,9 @@ package queue
 
 import (
 	"context"
+	"fmt"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"code.gitea.io/gitea/modules/log"
@@ -31,8 +33,10 @@ type PersistableChannelQueueConfiguration struct {
 }
 
 // PersistableChannelQueue wraps a channel queue and level queue together
+// The disk level queue will be used to store data at shutdown and terminate - and will be restored
+// on start up.
 type PersistableChannelQueue struct {
-	*ChannelQueue
+	channelQueue *ChannelQueue
 	delayedStarter
 	lock   sync.Mutex
 	closed chan struct{}
@@ -48,13 +52,15 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 	config := configInterface.(PersistableChannelQueueConfiguration)
 
 	channelQueue, err := NewChannelQueue(handle, ChannelQueueConfiguration{
-		QueueLength:  config.QueueLength,
-		BatchLength:  config.BatchLength,
-		Workers:      config.Workers,
-		MaxWorkers:   config.MaxWorkers,
-		BlockTimeout: config.BlockTimeout,
-		BoostTimeout: config.BoostTimeout,
-		BoostWorkers: config.BoostWorkers,
+		WorkerPoolConfiguration: WorkerPoolConfiguration{
+			QueueLength:  config.QueueLength,
+			BatchLength:  config.BatchLength,
+			BlockTimeout: config.BlockTimeout,
+			BoostTimeout: config.BoostTimeout,
+			BoostWorkers: config.BoostWorkers,
+			MaxWorkers:   config.MaxWorkers,
+		},
+		Workers: config.Workers,
 		Name:    config.Name + "-channel",
 	}, exemplar)
 	if err != nil {
@@ -63,28 +69,30 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 
 	// the level backend only needs temporary workers to catch up with the previously dropped work
 	levelCfg := LevelQueueConfiguration{
-		DataDir:      config.DataDir,
-		QueueLength:  config.QueueLength,
-		BatchLength:  config.BatchLength,
-		Workers:      1,
-		MaxWorkers:   6,
-		BlockTimeout: 1 * time.Second,
-		BoostTimeout: 5 * time.Minute,
-		BoostWorkers: 5,
+		WorkerPoolConfiguration: WorkerPoolConfiguration{
+			QueueLength:  config.QueueLength,
+			BatchLength:  config.BatchLength,
+			BlockTimeout: 1 * time.Second,
+			BoostTimeout: 5 * time.Minute,
+			BoostWorkers: 5,
+			MaxWorkers:   6,
+		},
+		DataDir: config.DataDir,
+		Workers: 1,
 		Name:    config.Name + "-level",
 	}
 
 	levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar)
 	if err == nil {
 		queue := &PersistableChannelQueue{
-			ChannelQueue: channelQueue.(*ChannelQueue),
+			channelQueue: channelQueue.(*ChannelQueue),
 			delayedStarter: delayedStarter{
 				internal: levelQueue.(*LevelQueue),
 				name:     config.Name,
 			},
 			closed: make(chan struct{}),
 		}
-		_ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar, nil)
+		_ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar)
 		return queue, nil
 	}
 	if IsErrInvalidConfiguration(err) {
@@ -93,7 +101,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 	}
 
 	queue := &PersistableChannelQueue{
-		ChannelQueue: channelQueue.(*ChannelQueue),
+		channelQueue: channelQueue.(*ChannelQueue),
 		delayedStarter: delayedStarter{
 			cfg:         levelCfg,
 			underlying:  LevelQueueType,
@@ -103,7 +111,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 		},
 		closed: make(chan struct{}),
 	}
-	_ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar, nil)
+	_ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar)
 	return queue, nil
 }
 
@@ -118,15 +126,17 @@ func (p *PersistableChannelQueue) Push(data Data) error {
 	case <-p.closed:
 		return p.internal.Push(data)
 	default:
-		return p.ChannelQueue.Push(data)
+		return p.channelQueue.Push(data)
 	}
 }
 
 // Run starts to run the queue
 func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+	log.Debug("PersistableChannelQueue: %s Starting", p.delayedStarter.name)
+
 	p.lock.Lock()
 	if p.internal == nil {
-		err := p.setInternal(atShutdown, p.ChannelQueue.pool.handle, p.exemplar)
+		err := p.setInternal(atShutdown, p.channelQueue.handle, p.channelQueue.exemplar)
 		p.lock.Unlock()
 		if err != nil {
 			log.Fatal("Unable to create internal queue for %s Error: %v", p.Name(), err)
@@ -142,31 +152,83 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte
 	go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {})
 
 	go func() {
-		_ = p.ChannelQueue.pool.AddWorkers(p.workers, 0)
+		_ = p.channelQueue.AddWorkers(p.channelQueue.workers, 0)
 	}()
 
 	log.Trace("PersistableChannelQueue: %s Waiting til closed", p.delayedStarter.name)
 	<-p.closed
 	log.Trace("PersistableChannelQueue: %s Cancelling pools", p.delayedStarter.name)
-	p.ChannelQueue.pool.cancel()
-	p.internal.(*LevelQueue).pool.cancel()
+	p.channelQueue.cancel()
+	p.internal.(*LevelQueue).cancel()
 	log.Trace("PersistableChannelQueue: %s Waiting til done", p.delayedStarter.name)
-	p.ChannelQueue.pool.Wait()
-	p.internal.(*LevelQueue).pool.Wait()
+	p.channelQueue.Wait()
+	p.internal.(*LevelQueue).Wait()
 	// Redirect all remaining data in the chan to the internal channel
 	go func() {
 		log.Trace("PersistableChannelQueue: %s Redirecting remaining data", p.delayedStarter.name)
-		for data := range p.ChannelQueue.pool.dataChan {
+		for data := range p.channelQueue.dataChan {
 			_ = p.internal.Push(data)
+			atomic.AddInt64(&p.channelQueue.numInQueue, -1)
 		}
 		log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", p.delayedStarter.name)
 	}()
 	log.Trace("PersistableChannelQueue: %s Done main loop", p.delayedStarter.name)
 }
 
+// Flush flushes the queue and blocks till the queue is empty
+func (p *PersistableChannelQueue) Flush(timeout time.Duration) error {
+	var ctx context.Context
+	var cancel context.CancelFunc
+	if timeout > 0 {
+		ctx, cancel = context.WithTimeout(context.Background(), timeout)
+	} else {
+		ctx, cancel = context.WithCancel(context.Background())
+	}
+	defer cancel()
+	return p.FlushWithContext(ctx)
+}
+
+// FlushWithContext flushes the queue and blocks till the queue is empty
+func (p *PersistableChannelQueue) FlushWithContext(ctx context.Context) error {
+	errChan := make(chan error, 1)
+	go func() {
+		errChan <- p.channelQueue.FlushWithContext(ctx)
+	}()
+	go func() {
+		p.lock.Lock()
+		if p.internal == nil {
+			p.lock.Unlock()
+			errChan <- fmt.Errorf("not ready to flush internal queue %s yet", p.Name())
+			return
+		}
+		p.lock.Unlock()
+		errChan <- p.internal.FlushWithContext(ctx)
+	}()
+	err1 := <-errChan
+	err2 := <-errChan
+
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+// IsEmpty checks if a queue is empty
+func (p *PersistableChannelQueue) IsEmpty() bool {
+	if !p.channelQueue.IsEmpty() {
+		return false
+	}
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	if p.internal == nil {
+		return false
+	}
+	return p.internal.IsEmpty()
+}
+
 // Shutdown processing this queue
 func (p *PersistableChannelQueue) Shutdown() {
-	log.Trace("PersistableChannelQueue: %s Shutdown", p.delayedStarter.name)
+	log.Trace("PersistableChannelQueue: %s Shutting down", p.delayedStarter.name)
 	select {
 	case <-p.closed:
 	default:
@@ -177,6 +239,7 @@ func (p *PersistableChannelQueue) Shutdown() {
 		}
 		close(p.closed)
 	}
+	log.Debug("PersistableChannelQueue: %s Shutdown", p.delayedStarter.name)
 }
 
 // Terminate this queue and close the queue
@@ -188,6 +251,7 @@ func (p *PersistableChannelQueue) Terminate() {
 	if p.internal != nil {
 		p.internal.(*LevelQueue).Terminate()
 	}
+	log.Debug("PersistableChannelQueue: %s Terminated", p.delayedStarter.name)
 }
 
 func init() {
 
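The new Flush methods treat a zero (or negative) timeout as "no deadline": instead of context.WithTimeout they fall back to a plain cancellable context, so the flush simply runs until both the channel queue and the level-backed internal queue report empty. A hedged usage sketch (queue construction elided; q is assumed to be any queue from this package that now implements Flush, e.g. a *PersistableChannelQueue):

// Bounded flush: give up after 30 seconds.
if err := q.Flush(30 * time.Second); err != nil {
	log.Error("Flush failed or timed out: %v", err)
}

// Unbounded flush: a zero timeout means wait without a deadline.
_ = q.Flush(0)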
modules/queue/queue_disk_test.go
@@ -32,14 +32,16 @@ func TestLevelQueue(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	queue, err := NewLevelQueue(handle, LevelQueueConfiguration{
-		DataDir:      tmpDir,
-		BatchLength:  2,
-		Workers:      1,
-		MaxWorkers:   10,
-		QueueLength:  20,
-		BlockTimeout: 1 * time.Second,
-		BoostTimeout: 5 * time.Minute,
-		BoostWorkers: 5,
+		WorkerPoolConfiguration: WorkerPoolConfiguration{
+			QueueLength:  20,
+			BatchLength:  2,
+			BlockTimeout: 1 * time.Second,
+			BoostTimeout: 5 * time.Minute,
+			BoostWorkers: 5,
+			MaxWorkers:   10,
+		},
+		DataDir: tmpDir,
+		Workers: 1,
 	}, &testData{})
 	assert.NoError(t, err)
 
@@ -92,14 +94,16 @@ func TestLevelQueue(t *testing.T) {
 		WrappedQueueConfiguration{
 			Underlying: LevelQueueType,
 			Config: LevelQueueConfiguration{
-				DataDir:      tmpDir,
-				BatchLength:  2,
-				Workers:      1,
-				MaxWorkers:   10,
-				QueueLength:  20,
-				BlockTimeout: 1 * time.Second,
-				BoostTimeout: 5 * time.Minute,
-				BoostWorkers: 5,
+				WorkerPoolConfiguration: WorkerPoolConfiguration{
+					QueueLength:  20,
+					BatchLength:  2,
+					BlockTimeout: 1 * time.Second,
+					BoostTimeout: 5 * time.Minute,
+					BoostWorkers: 5,
+					MaxWorkers:   10,
+				},
+				DataDir: tmpDir,
+				Workers: 1,
 			},
 		}, &testData{})
 	assert.NoError(t, err)
 
@@ -9,9 +9,9 @@ import (
 | 
				
			|||||||
	"encoding/json"
 | 
						"encoding/json"
 | 
				
			||||||
	"errors"
 | 
						"errors"
 | 
				
			||||||
	"fmt"
 | 
						"fmt"
 | 
				
			||||||
	"reflect"
 | 
					 | 
				
			||||||
	"strings"
 | 
						"strings"
 | 
				
			||||||
	"sync"
 | 
						"sync"
 | 
				
			||||||
 | 
						"sync/atomic"
 | 
				
			||||||
	"time"
 | 
						"time"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	"code.gitea.io/gitea/modules/log"
 | 
						"code.gitea.io/gitea/modules/log"
 | 
				
			||||||
@@ -25,13 +25,14 @@ const RedisQueueType Type = "redis"
 | 
				
			|||||||
type redisClient interface {
 | 
					type redisClient interface {
 | 
				
			||||||
	RPush(key string, args ...interface{}) *redis.IntCmd
 | 
						RPush(key string, args ...interface{}) *redis.IntCmd
 | 
				
			||||||
	LPop(key string) *redis.StringCmd
 | 
						LPop(key string) *redis.StringCmd
 | 
				
			||||||
 | 
						LLen(key string) *redis.IntCmd
 | 
				
			||||||
	Ping() *redis.StatusCmd
 | 
						Ping() *redis.StatusCmd
 | 
				
			||||||
	Close() error
 | 
						Close() error
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// RedisQueue redis queue
 | 
					// RedisQueue redis queue
 | 
				
			||||||
type RedisQueue struct {
 | 
					type RedisQueue struct {
 | 
				
			||||||
	pool       *WorkerPool
 | 
						*WorkerPool
 | 
				
			||||||
	client     redisClient
 | 
						client     redisClient
 | 
				
			||||||
	queueName  string
 | 
						queueName  string
 | 
				
			||||||
	closed     chan struct{}
 | 
						closed     chan struct{}
 | 
				
			||||||
@@ -44,18 +45,13 @@ type RedisQueue struct {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
// RedisQueueConfiguration is the configuration for the redis queue
 | 
					// RedisQueueConfiguration is the configuration for the redis queue
 | 
				
			||||||
type RedisQueueConfiguration struct {
 | 
					type RedisQueueConfiguration struct {
 | 
				
			||||||
 | 
						WorkerPoolConfiguration
 | 
				
			||||||
	Network   string
 | 
						Network   string
 | 
				
			||||||
	Addresses string
 | 
						Addresses string
 | 
				
			||||||
	Password  string
 | 
						Password  string
 | 
				
			||||||
	DBIndex   int
 | 
						DBIndex   int
 | 
				
			||||||
	BatchLength  int
 | 
					 | 
				
			||||||
	QueueLength  int
 | 
					 | 
				
			||||||
	QueueName string
 | 
						QueueName string
 | 
				
			||||||
	Workers   int
 | 
						Workers   int
 | 
				
			||||||
	MaxWorkers   int
 | 
					 | 
				
			||||||
	BlockTimeout time.Duration
 | 
					 | 
				
			||||||
	BoostTimeout time.Duration
 | 
					 | 
				
			||||||
	BoostWorkers int
 | 
					 | 
				
			||||||
	Name      string
 | 
						Name      string
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -69,21 +65,8 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	dbs := strings.Split(config.Addresses, ",")
 | 
						dbs := strings.Split(config.Addresses, ",")
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	dataChan := make(chan Data, config.QueueLength)
 | 
					 | 
				
			||||||
	ctx, cancel := context.WithCancel(context.Background())
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	var queue = &RedisQueue{
 | 
						var queue = &RedisQueue{
 | 
				
			||||||
		pool: &WorkerPool{
 | 
							WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration),
 | 
				
			||||||
			baseCtx:            ctx,
 | 
					 | 
				
			||||||
			cancel:             cancel,
 | 
					 | 
				
			||||||
			batchLength:        config.BatchLength,
 | 
					 | 
				
			||||||
			handle:             handle,
 | 
					 | 
				
			||||||
			dataChan:           dataChan,
 | 
					 | 
				
			||||||
			blockTimeout:       config.BlockTimeout,
 | 
					 | 
				
			||||||
			boostTimeout:       config.BoostTimeout,
 | 
					 | 
				
			||||||
			boostWorkers:       config.BoostWorkers,
 | 
					 | 
				
			||||||
			maxNumberOfWorkers: config.MaxWorkers,
 | 
					 | 
				
			||||||
		},
 | 
					 | 
				
			||||||
		queueName:  config.QueueName,
 | 
							queueName:  config.QueueName,
 | 
				
			||||||
		exemplar:   exemplar,
 | 
							exemplar:   exemplar,
 | 
				
			||||||
		closed:     make(chan struct{}),
 | 
							closed:     make(chan struct{}),
 | 
				
			||||||
@@ -108,7 +91,7 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 | 
				
			|||||||
	if err := queue.client.Ping().Err(); err != nil {
 | 
						if err := queue.client.Ping().Err(); err != nil {
 | 
				
			||||||
		return nil, err
 | 
							return nil, err
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	queue.pool.qid = GetManager().Add(queue, RedisQueueType, config, exemplar, queue.pool)
 | 
						queue.qid = GetManager().Add(queue, RedisQueueType, config, exemplar)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return queue, nil
 | 
						return queue, nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -117,9 +100,10 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 | 
				
			|||||||
func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 | 
					func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 | 
				
			||||||
	atShutdown(context.Background(), r.Shutdown)
 | 
						atShutdown(context.Background(), r.Shutdown)
 | 
				
			||||||
	atTerminate(context.Background(), r.Terminate)
 | 
						atTerminate(context.Background(), r.Terminate)
 | 
				
			||||||
 | 
						log.Debug("RedisQueue: %s Starting", r.name)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	go func() {
 | 
						go func() {
 | 
				
			||||||
		_ = r.pool.AddWorkers(r.workers, 0)
 | 
							_ = r.AddWorkers(r.workers, 0)
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	go r.readToChan()
 | 
						go r.readToChan()
 | 
				
			||||||
@@ -127,12 +111,12 @@ func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func()))
 | 
				
			|||||||
	log.Trace("RedisQueue: %s Waiting til closed", r.name)
 | 
						log.Trace("RedisQueue: %s Waiting til closed", r.name)
 | 
				
			||||||
	<-r.closed
 | 
						<-r.closed
 | 
				
			||||||
	log.Trace("RedisQueue: %s Waiting til done", r.name)
 | 
						log.Trace("RedisQueue: %s Waiting til done", r.name)
 | 
				
			||||||
	r.pool.Wait()
 | 
						r.Wait()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	log.Trace("RedisQueue: %s Waiting til cleaned", r.name)
 | 
						log.Trace("RedisQueue: %s Waiting til cleaned", r.name)
 | 
				
			||||||
	ctx, cancel := context.WithCancel(context.Background())
 | 
						ctx, cancel := context.WithCancel(context.Background())
 | 
				
			||||||
	atTerminate(ctx, cancel)
 | 
						atTerminate(ctx, cancel)
 | 
				
			||||||
	r.pool.CleanUp(ctx)
 | 
						r.CleanUp(ctx)
 | 
				
			||||||
	cancel()
 | 
						cancel()
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -141,54 +125,44 @@ func (r *RedisQueue) readToChan() {
 | 
				
			|||||||
		select {
 | 
							select {
 | 
				
			||||||
		case <-r.closed:
 | 
							case <-r.closed:
 | 
				
			||||||
			// tell the pool to shutdown
 | 
								// tell the pool to shutdown
 | 
				
			||||||
			r.pool.cancel()
 | 
								r.cancel()
 | 
				
			||||||
			return
 | 
								return
 | 
				
			||||||
		default:
 | 
							default:
 | 
				
			||||||
 | 
								atomic.AddInt64(&r.numInQueue, 1)
 | 
				
			||||||
			bs, err := r.client.LPop(r.queueName).Bytes()
 | 
								bs, err := r.client.LPop(r.queueName).Bytes()
 | 
				
			||||||
			if err != nil && err != redis.Nil {
 | 
								if err != nil && err != redis.Nil {
 | 
				
			||||||
				log.Error("RedisQueue: %s Error on LPop: %v", r.name, err)
 | 
									log.Error("RedisQueue: %s Error on LPop: %v", r.name, err)
 | 
				
			||||||
 | 
									atomic.AddInt64(&r.numInQueue, -1)
 | 
				
			||||||
				time.Sleep(time.Millisecond * 100)
 | 
									time.Sleep(time.Millisecond * 100)
 | 
				
			||||||
				continue
 | 
									continue
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			if len(bs) == 0 {
 | 
								if len(bs) == 0 {
 | 
				
			||||||
 | 
									atomic.AddInt64(&r.numInQueue, -1)
 | 
				
			||||||
				time.Sleep(time.Millisecond * 100)
 | 
									time.Sleep(time.Millisecond * 100)
 | 
				
			||||||
				continue
 | 
									continue
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			var data Data
 | 
								data, err := unmarshalAs(bs, r.exemplar)
 | 
				
			||||||
			if r.exemplar != nil {
 | 
					 | 
				
			||||||
				t := reflect.TypeOf(r.exemplar)
 | 
					 | 
				
			||||||
				n := reflect.New(t)
 | 
					 | 
				
			||||||
				ne := n.Elem()
 | 
					 | 
				
			||||||
				err = json.Unmarshal(bs, ne.Addr().Interface())
 | 
					 | 
				
			||||||
				data = ne.Interface().(Data)
 | 
					 | 
				
			||||||
			} else {
 | 
					 | 
				
			||||||
				err = json.Unmarshal(bs, &data)
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
			if err != nil {
 | 
								if err != nil {
 | 
				
			||||||
				log.Error("RedisQueue: %s Error on Unmarshal: %v", r.name, err)
 | 
									log.Error("RedisQueue: %s Error on Unmarshal: %v", r.name, err)
 | 
				
			||||||
 | 
									atomic.AddInt64(&r.numInQueue, -1)
 | 
				
			||||||
				time.Sleep(time.Millisecond * 100)
 | 
									time.Sleep(time.Millisecond * 100)
 | 
				
			||||||
				continue
 | 
									continue
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			log.Trace("RedisQueue: %s Task found: %#v", r.name, data)
 | 
								log.Trace("RedisQueue: %s Task found: %#v", r.name, data)
 | 
				
			||||||
			r.pool.Push(data)
 | 
								r.WorkerPool.Push(data)
 | 
				
			||||||
 | 
								atomic.AddInt64(&r.numInQueue, -1)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// Push implements Queue
 | 
					// Push implements Queue
 | 
				
			||||||
func (r *RedisQueue) Push(data Data) error {
 | 
					func (r *RedisQueue) Push(data Data) error {
 | 
				
			||||||
	if r.exemplar != nil {
 | 
						if !assignableTo(data, r.exemplar) {
 | 
				
			||||||
		// Assert data is of same type as r.exemplar
 | 
					 | 
				
			||||||
		value := reflect.ValueOf(data)
 | 
					 | 
				
			||||||
		t := value.Type()
 | 
					 | 
				
			||||||
		exemplarType := reflect.ValueOf(r.exemplar).Type()
 | 
					 | 
				
			||||||
		if !t.AssignableTo(exemplarType) || data == nil {
 | 
					 | 
				
			||||||
		return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, r.exemplar, r.name)
 | 
							return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, r.exemplar, r.name)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	bs, err := json.Marshal(data)
 | 
						bs, err := json.Marshal(data)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		return err
 | 
							return err
 | 
				
			||||||
@@ -196,9 +170,22 @@ func (r *RedisQueue) Push(data Data) error {
 | 
				
			|||||||
	return r.client.RPush(r.queueName, bs).Err()
 | 
						return r.client.RPush(r.queueName, bs).Err()
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					// IsEmpty checks if the queue is empty
 | 
				
			||||||
 | 
					func (r *RedisQueue) IsEmpty() bool {
 | 
				
			||||||
 | 
						if !r.WorkerPool.IsEmpty() {
 | 
				
			||||||
 | 
							return false
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						length, err := r.client.LLen(r.queueName).Result()
 | 
				
			||||||
 | 
						if err != nil {
 | 
				
			||||||
 | 
							log.Error("Error whilst getting queue length for %s: Error: %v", r.name, err)
 | 
				
			||||||
 | 
							return false
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						return length == 0
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// Shutdown processing from this queue
 | 
					// Shutdown processing from this queue
 | 
				
			||||||
func (r *RedisQueue) Shutdown() {
 | 
					func (r *RedisQueue) Shutdown() {
 | 
				
			||||||
	log.Trace("Shutdown: %s", r.name)
 | 
						log.Trace("RedisQueue: %s Shutting down", r.name)
 | 
				
			||||||
	r.lock.Lock()
 | 
						r.lock.Lock()
 | 
				
			||||||
	select {
 | 
						select {
 | 
				
			||||||
	case <-r.closed:
 | 
						case <-r.closed:
 | 
				
			||||||
@@ -206,11 +193,12 @@ func (r *RedisQueue) Shutdown() {
 | 
				
			|||||||
		close(r.closed)
 | 
							close(r.closed)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	r.lock.Unlock()
 | 
						r.lock.Unlock()
 | 
				
			||||||
 | 
						log.Debug("RedisQueue: %s Shutdown", r.name)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// Terminate this queue and close the queue
 | 
					// Terminate this queue and close the queue
 | 
				
			||||||
func (r *RedisQueue) Terminate() {
 | 
					func (r *RedisQueue) Terminate() {
 | 
				
			||||||
	log.Trace("Terminating: %s", r.name)
 | 
						log.Trace("RedisQueue: %s Terminating", r.name)
 | 
				
			||||||
	r.Shutdown()
 | 
						r.Shutdown()
 | 
				
			||||||
	r.lock.Lock()
 | 
						r.lock.Lock()
 | 
				
			||||||
	select {
 | 
						select {
 | 
				
			||||||
@@ -219,10 +207,14 @@ func (r *RedisQueue) Terminate() {
 | 
				
			|||||||
	default:
 | 
						default:
 | 
				
			||||||
		close(r.terminated)
 | 
							close(r.terminated)
 | 
				
			||||||
		r.lock.Unlock()
 | 
							r.lock.Unlock()
 | 
				
			||||||
 | 
							if log.IsDebug() {
 | 
				
			||||||
 | 
								log.Debug("RedisQueue: %s Closing with %d tasks left in queue", r.name, r.client.LLen(r.queueName))
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
		if err := r.client.Close(); err != nil {
 | 
							if err := r.client.Close(); err != nil {
 | 
				
			||||||
			log.Error("Error whilst closing internal redis client in %s: %v", r.name, err)
 | 
								log.Error("Error whilst closing internal redis client in %s: %v", r.name, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
						log.Debug("RedisQueue: %s Terminated", r.name)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// Name returns the name of this queue
 | 
					// Name returns the name of this queue
 | 
				
			||||||
 
 | 
				
			|||||||
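RedisQueue.IsEmpty is only trustworthy because readToChan increments numInQueue before it pops and decrements once the item has been handed to the worker pool (or on every error path); without that, an item in flight between LPop and WorkerPool.Push would make the queue look empty. LevelQueue above uses the same invariant. A minimal standalone illustration of the pattern — hypothetical example code, not code from this commit:

// Claim an in-flight slot before popping; release it on every exit path.
// Assumes: import "sync/atomic".
var numInFlight int64

func consumeOne(pop func() ([]byte, bool), deliver func([]byte)) {
	atomic.AddInt64(&numInFlight, 1)
	defer atomic.AddInt64(&numInFlight, -1)
	if bs, ok := pop(); ok {
		deliver(bs) // isEmpty stays false until delivery completes
	}
}

func isEmpty(storeLen func() int64) bool {
	return atomic.LoadInt64(&numInFlight) == 0 && storeLen() == 0
}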
@@ -7,8 +7,8 @@ package queue
 | 
				
			|||||||
import (
 | 
					import (
 | 
				
			||||||
	"context"
 | 
						"context"
 | 
				
			||||||
	"fmt"
 | 
						"fmt"
 | 
				
			||||||
	"reflect"
 | 
					 | 
				
			||||||
	"sync"
 | 
						"sync"
 | 
				
			||||||
 | 
						"sync/atomic"
 | 
				
			||||||
	"time"
 | 
						"time"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	"code.gitea.io/gitea/modules/log"
 | 
						"code.gitea.io/gitea/modules/log"
 | 
				
			||||||
@@ -56,7 +56,7 @@ func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), h
 | 
				
			|||||||
	for q.internal == nil {
 | 
						for q.internal == nil {
 | 
				
			||||||
		select {
 | 
							select {
 | 
				
			||||||
		case <-ctx.Done():
 | 
							case <-ctx.Done():
 | 
				
			||||||
			return fmt.Errorf("Timedout creating queue %v with cfg %v in %s", q.underlying, q.cfg, q.name)
 | 
								return fmt.Errorf("Timedout creating queue %v with cfg %s in %s", q.underlying, q.cfg, q.name)
 | 
				
			||||||
		default:
 | 
							default:
 | 
				
			||||||
			queue, err := NewQueue(q.underlying, handle, q.cfg, exemplar)
 | 
								queue, err := NewQueue(q.underlying, handle, q.cfg, exemplar)
 | 
				
			||||||
			if err == nil {
 | 
								if err == nil {
 | 
				
			||||||
@@ -64,11 +64,11 @@ func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), h
 | 
				
			|||||||
				break
 | 
									break
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			if err.Error() != "resource temporarily unavailable" {
 | 
								if err.Error() != "resource temporarily unavailable" {
 | 
				
			||||||
				log.Warn("[Attempt: %d] Failed to create queue: %v for %s cfg: %v error: %v", i, q.underlying, q.name, q.cfg, err)
 | 
									log.Warn("[Attempt: %d] Failed to create queue: %v for %s cfg: %s error: %v", i, q.underlying, q.name, q.cfg, err)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			i++
 | 
								i++
 | 
				
			||||||
			if q.maxAttempts > 0 && i > q.maxAttempts {
 | 
								if q.maxAttempts > 0 && i > q.maxAttempts {
 | 
				
			||||||
				return fmt.Errorf("Unable to create queue %v for %s with cfg %v by max attempts: error: %v", q.underlying, q.name, q.cfg, err)
 | 
									return fmt.Errorf("Unable to create queue %v for %s with cfg %s by max attempts: error: %v", q.underlying, q.name, q.cfg, err)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			sleepTime := 100 * time.Millisecond
 | 
								sleepTime := 100 * time.Millisecond
 | 
				
			||||||
			if q.timeout > 0 && q.maxAttempts > 0 {
 | 
								if q.timeout > 0 && q.maxAttempts > 0 {
 | 
				
			||||||
@@ -92,6 +92,7 @@ type WrappedQueue struct {
 | 
				
			|||||||
	handle     HandlerFunc
 | 
						handle     HandlerFunc
 | 
				
			||||||
	exemplar   interface{}
 | 
						exemplar   interface{}
 | 
				
			||||||
	channel    chan Data
 | 
						channel    chan Data
 | 
				
			||||||
 | 
						numInQueue int64
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// NewWrappedQueue will attempt to create a queue of the provided type,
 | 
					// NewWrappedQueue will attempt to create a queue of the provided type,
 | 
				
			||||||
@@ -127,7 +128,7 @@ func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro
 | 
				
			|||||||
			name:        config.Name,
 | 
								name:        config.Name,
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	_ = GetManager().Add(queue, WrappedQueueType, config, exemplar, nil)
 | 
						_ = GetManager().Add(queue, WrappedQueueType, config, exemplar)
 | 
				
			||||||
	return queue, nil
 | 
						return queue, nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -138,21 +139,78 @@ func (q *WrappedQueue) Name() string {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
// Push will push the data to the internal channel checking it against the exemplar
 | 
					// Push will push the data to the internal channel checking it against the exemplar
 | 
				
			||||||
func (q *WrappedQueue) Push(data Data) error {
 | 
					func (q *WrappedQueue) Push(data Data) error {
 | 
				
			||||||
	if q.exemplar != nil {
 | 
						if !assignableTo(data, q.exemplar) {
 | 
				
			||||||
		// Assert data is of same type as r.exemplar
 | 
							return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name)
 | 
				
			||||||
		value := reflect.ValueOf(data)
 | 
					 | 
				
			||||||
		t := value.Type()
 | 
					 | 
				
			||||||
		exemplarType := reflect.ValueOf(q.exemplar).Type()
 | 
					 | 
				
			||||||
		if !t.AssignableTo(exemplarType) || data == nil {
 | 
					 | 
				
			||||||
			return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name)
 | 
					 | 
				
			||||||
		}
 | 
					 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
						atomic.AddInt64(&q.numInQueue, 1)
 | 
				
			||||||
	q.channel <- data
 | 
						q.channel <- data
 | 
				
			||||||
	return nil
 | 
						return nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					func (q *WrappedQueue) flushInternalWithContext(ctx context.Context) error {
 | 
				
			||||||
 | 
						q.lock.Lock()
 | 
				
			||||||
 | 
						if q.internal == nil {
 | 
				
			||||||
 | 
							q.lock.Unlock()
 | 
				
			||||||
 | 
							return fmt.Errorf("not ready to flush wrapped queue %s yet", q.Name())
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						q.lock.Unlock()
 | 
				
			||||||
 | 
						select {
 | 
				
			||||||
 | 
						case <-ctx.Done():
 | 
				
			||||||
 | 
							return ctx.Err()
 | 
				
			||||||
 | 
						default:
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						return q.internal.FlushWithContext(ctx)
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					// Flush flushes the queue and blocks till the queue is empty
 | 
				
			||||||
 | 
					func (q *WrappedQueue) Flush(timeout time.Duration) error {
 | 
				
			||||||
 | 
						var ctx context.Context
 | 
				
			||||||
 | 
						var cancel context.CancelFunc
 | 
				
			||||||
 | 
						if timeout > 0 {
 | 
				
			||||||
 | 
							ctx, cancel = context.WithTimeout(context.Background(), timeout)
 | 
				
			||||||
 | 
						} else {
 | 
				
			||||||
 | 
							ctx, cancel = context.WithCancel(context.Background())
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						defer cancel()
 | 
				
			||||||
 | 
						return q.FlushWithContext(ctx)
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					// FlushWithContext implements the final part of Flushable
 | 
				
			||||||
 | 
					func (q *WrappedQueue) FlushWithContext(ctx context.Context) error {
 | 
				
			||||||
 | 
						log.Trace("WrappedQueue: %s FlushWithContext", q.Name())
 | 
				
			||||||
 | 
						errChan := make(chan error, 1)
 | 
				
			||||||
 | 
						go func() {
 | 
				
			||||||
 | 
							errChan <- q.flushInternalWithContext(ctx)
 | 
				
			||||||
 | 
							close(errChan)
 | 
				
			||||||
 | 
						}()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						select {
 | 
				
			||||||
 | 
						case err := <-errChan:
 | 
				
			||||||
 | 
							return err
 | 
				
			||||||
 | 
						case <-ctx.Done():
 | 
				
			||||||
 | 
							go func() {
 | 
				
			||||||
 | 
								<-errChan
 | 
				
			||||||
 | 
							}()
 | 
				
			||||||
 | 
							return ctx.Err()
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
+// IsEmpty checks whether the queue is empty
+func (q *WrappedQueue) IsEmpty() bool {
+	if atomic.LoadInt64(&q.numInQueue) != 0 {
+		return false
+	}
+	q.lock.Lock()
+	defer q.lock.Unlock()
+	if q.internal == nil {
+		return false
+	}
+	return q.internal.IsEmpty()
+}
+
 // Run starts to run the queue and attempts to create the internal queue
 func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+	log.Debug("WrappedQueue: %s Starting", q.name)
 	q.lock.Lock()
 	if q.internal == nil {
 		err := q.setInternal(atShutdown, q.handle, q.exemplar)
@@ -164,6 +222,7 @@ func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())
 		go func() {
 			for data := range q.channel {
 				_ = q.internal.Push(data)
+				atomic.AddInt64(&q.numInQueue, -1)
 			}
 		}()
 	} else {
@@ -176,7 +235,7 @@ func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())

 // Shutdown this queue and stop processing
 func (q *WrappedQueue) Shutdown() {
-	log.Trace("WrappedQueue: %s Shutdown", q.name)
+	log.Trace("WrappedQueue: %s Shutting down", q.name)
 	q.lock.Lock()
 	defer q.lock.Unlock()
 	if q.internal == nil {
@@ -185,6 +244,7 @@ func (q *WrappedQueue) Shutdown() {
 	if shutdownable, ok := q.internal.(Shutdownable); ok {
 		shutdownable.Shutdown()
 	}
+	log.Debug("WrappedQueue: %s Shutdown", q.name)
 }

 // Terminate this queue and close the queue
@@ -198,6 +258,7 @@ func (q *WrappedQueue) Terminate() {
 	if shutdownable, ok := q.internal.(Shutdownable); ok {
 		shutdownable.Terminate()
 	}
+	log.Debug("WrappedQueue: %s Terminated", q.name)
 }

 func init() {

@@ -24,8 +24,7 @@ func validType(t string) (Type, error) {
 	return PersistableChannelQueueType, fmt.Errorf("Unknown queue type: %s defaulting to %s", t, string(PersistableChannelQueueType))
 }

-// CreateQueue for name with provided handler and exemplar
-func CreateQueue(name string, handle HandlerFunc, exemplar interface{}) Queue {
+func getQueueSettings(name string) (setting.QueueSettings, []byte) {
 	q := setting.GetQueueSettings(name)
 	opts := make(map[string]interface{})
 	opts["Name"] = name
@@ -43,24 +42,33 @@ func CreateQueue(name string, handle HandlerFunc, exemplar interface{}) Queue {
 	opts["BoostTimeout"] = q.BoostTimeout
 	opts["BoostWorkers"] = q.BoostWorkers

 	cfg, err := json.Marshal(opts)
 	if err != nil {
 		log.Error("Unable to marshall generic options: %v Error: %v", opts, err)
 		log.Error("Unable to create queue for %s", name, err)
+		return q, []byte{}
+	}
+	return q, cfg
+}
+
+// CreateQueue for name with provided handler and exemplar
+func CreateQueue(name string, handle HandlerFunc, exemplar interface{}) Queue {
+	q, cfg := getQueueSettings(name)
+	if len(cfg) == 0 {
 		return nil
 	}
+
+	typ, err := validType(q.Type)
+	if err != nil {
+		log.Error("Invalid type %s provided for queue named %s defaulting to %s", q.Type, name, string(typ))
+	}
+
 	returnable, err := NewQueue(typ, handle, cfg, exemplar)
 	if q.WrapIfNecessary && err != nil {
 		log.Warn("Unable to create queue for %s: %v", name, err)
 		log.Warn("Attempting to create wrapped queue")
 		returnable, err = NewQueue(WrappedQueueType, handle, WrappedQueueConfiguration{
-			Underlying:  Type(q.Type),
+			Underlying:  typ,
 			Timeout:     q.Timeout,
 			MaxAttempts: q.MaxAttempts,
 			Config:      cfg,

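For orientation, this is roughly how the refactored constructor is consumed; a sketch only, assuming gitea's settings have already been initialised and that HandlerFunc keeps its variadic Data signature (the queue name "mail" is just an example):

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/queue"
)

func main() {
	handler := func(data ...queue.Data) {
		for _, datum := range data {
			fmt.Printf("handled %v\n", datum)
		}
	}
	// An exemplar of "" means items are expected to be strings.
	q := queue.CreateQueue("mail", handler, "")
	if q == nil {
		fmt.Println("settings could not be marshalled")
	}
}
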
@@ -7,12 +7,16 @@ package queue
 import (
 	"context"
 	"sync"
+	"sync/atomic"
 	"time"

 	"code.gitea.io/gitea/modules/log"
 )

-// WorkerPool takes
+// WorkerPool represents a dynamically growable worker pool for a
+// provided handler function. It has an internal channel which it uses
+// to detect blocking, and will grow and shrink in response to demand
+// as per configuration.
 type WorkerPool struct {
 	lock               sync.Mutex
 	baseCtx            context.Context
@@ -27,10 +31,42 @@ type WorkerPool struct {
 	blockTimeout       time.Duration
 	boostTimeout       time.Duration
 	boostWorkers       int
+	numInQueue         int64
 }

+// WorkerPoolConfiguration is the basic configuration for a WorkerPool
+type WorkerPoolConfiguration struct {
+	QueueLength  int
+	BatchLength  int
+	BlockTimeout time.Duration
+	BoostTimeout time.Duration
+	BoostWorkers int
+	MaxWorkers   int
+}
+
+// NewWorkerPool creates a new worker pool
+func NewWorkerPool(handle HandlerFunc, config WorkerPoolConfiguration) *WorkerPool {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	dataChan := make(chan Data, config.QueueLength)
+	pool := &WorkerPool{
+		baseCtx:            ctx,
+		cancel:             cancel,
+		batchLength:        config.BatchLength,
+		dataChan:           dataChan,
+		handle:             handle,
+		blockTimeout:       config.BlockTimeout,
+		boostTimeout:       config.BoostTimeout,
+		boostWorkers:       config.BoostWorkers,
+		maxNumberOfWorkers: config.MaxWorkers,
+	}
+
+	return pool
 }

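Since the queues now compose WorkerPool directly, a queue implementation constructs one like this. A hedged sketch: the numbers are made up for illustration (gitea's real defaults come from the settings code further down), and the field comments are interpretive:

package main

import (
	"fmt"
	"time"

	"code.gitea.io/gitea/modules/queue"
)

func main() {
	handle := func(data ...queue.Data) {
		fmt.Println("processing a batch of", len(data))
	}
	pool := queue.NewWorkerPool(handle, queue.WorkerPoolConfiguration{
		QueueLength:  20,              // buffered channel capacity
		BatchLength:  20,              // max items handed to handle at once
		BlockTimeout: 1 * time.Second, // block length that triggers boosting
		BoostTimeout: 5 * time.Minute, // lifetime of boosted workers
		BoostWorkers: 5,
		MaxWorkers:   10,
	})
	fmt.Println(pool.IsEmpty()) // true: nothing pushed yet
}
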
 // Push pushes the data to the internal channel
 func (p *WorkerPool) Push(data Data) {
+	atomic.AddInt64(&p.numInQueue, 1)
 	p.lock.Lock()
 	if p.blockTimeout > 0 && p.boostTimeout > 0 && (p.numberOfWorkers <= p.maxNumberOfWorkers || p.maxNumberOfWorkers < 0) {
 		p.lock.Unlock()
@@ -80,7 +116,7 @@ func (p *WorkerPool) pushBoost(data Data) {
 				log.Warn("WorkerPool: %d (for %s) Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, mq.Name, ourTimeout, boost, p.boostTimeout, p.blockTimeout)

 				start := time.Now()
-				pid := mq.RegisterWorkers(boost, start, false, start, cancel)
+				pid := mq.RegisterWorkers(boost, start, false, start, cancel, false)
 				go func() {
 					<-ctx.Done()
 					mq.RemoveWorkers(pid)
@@ -138,8 +174,8 @@ func (p *WorkerPool) BlockTimeout() time.Duration {
 	return p.blockTimeout
 }

-// SetSettings sets the settable boost values
-func (p *WorkerPool) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
+// SetPoolSettings sets the settable boost values
+func (p *WorkerPool) SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 	p.maxNumberOfWorkers = maxNumberOfWorkers
@@ -156,8 +192,7 @@ func (p *WorkerPool) SetMaxNumberOfWorkers(newMax int) {
 	p.maxNumberOfWorkers = newMax
 }

-// AddWorkers adds workers to the pool - this allows the number of workers to go above the limit
-func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
+func (p *WorkerPool) commonRegisterWorkers(number int, timeout time.Duration, isFlusher bool) (context.Context, context.CancelFunc) {
 	var ctx context.Context
 	var cancel context.CancelFunc
 	start := time.Now()
@@ -173,7 +208,7 @@ func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.Cance

 	mq := GetManager().GetManagedQueue(p.qid)
 	if mq != nil {
-		pid := mq.RegisterWorkers(number, start, hasTimeout, end, cancel)
+		pid := mq.RegisterWorkers(number, start, hasTimeout, end, cancel, isFlusher)
 		go func() {
 			<-ctx.Done()
 			mq.RemoveWorkers(pid)
@@ -184,6 +219,12 @@ func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.Cance
 		log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number)

 	}
+	return ctx, cancel
+}

+// AddWorkers adds workers to the pool - this allows the number of workers to go above the limit
+func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
+	ctx, cancel := p.commonRegisterWorkers(number, timeout, false)
 	p.addWorkers(ctx, number)
 	return cancel
 }
@@ -235,6 +276,7 @@ func (p *WorkerPool) CleanUp(ctx context.Context) {
 	close(p.dataChan)
 	for data := range p.dataChan {
 		p.handle(data)
+		atomic.AddInt64(&p.numInQueue, -1)
 		select {
 		case <-ctx.Done():
 			log.Warn("WorkerPool: %d Cleanup context closed before finishing clean-up", p.qid)
@@ -245,6 +287,37 @@ func (p *WorkerPool) CleanUp(ctx context.Context) {
 	log.Trace("WorkerPool: %d CleanUp Done", p.qid)
 }

+// Flush flushes the channel with a timeout - the Flush worker will be registered as a flush worker with the manager
+func (p *WorkerPool) Flush(timeout time.Duration) error {
+	ctx, cancel := p.commonRegisterWorkers(1, timeout, true)
+	defer cancel()
+	return p.FlushWithContext(ctx)
+}
+
+// IsEmpty returns true if the worker queue is empty
+func (p *WorkerPool) IsEmpty() bool {
+	return atomic.LoadInt64(&p.numInQueue) == 0
+}
+
+// FlushWithContext is very similar to CleanUp but it will return as soon as the dataChan is empty
+// NB: The worker will not be registered with the manager.
+func (p *WorkerPool) FlushWithContext(ctx context.Context) error {
+	log.Trace("WorkerPool: %d Flush", p.qid)
+	for {
+		select {
+		case data := <-p.dataChan:
+			p.handle(data)
+			atomic.AddInt64(&p.numInQueue, -1)
+		case <-p.baseCtx.Done():
+			return p.baseCtx.Err()
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			return nil
+		}
+	}
+}
+
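Note the default branch: FlushWithContext drains only what is buffered right now and returns nil the moment dataChan is momentarily empty, unlike CleanUp which closes the channel. The same loop in isolation, with a plain int channel standing in for Data:

package main

import (
	"context"
	"fmt"
)

// drain handles everything currently buffered and stops at the first
// empty read instead of waiting for future items.
func drain(ctx context.Context, ch chan int, handle func(int)) error {
	for {
		select {
		case v := <-ch:
			handle(v)
		case <-ctx.Done():
			return ctx.Err()
		default:
			return nil
		}
	}
}

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	_ = drain(context.Background(), ch, func(v int) { fmt.Println("handled", v) })
}
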
 func (p *WorkerPool) doWork(ctx context.Context) {
 	delay := time.Millisecond * 300
 	var data = make([]Data, 0, p.batchLength)
@@ -254,6 +327,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) > 0 {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 			}
 			log.Trace("Worker shutting down")
 			return
@@ -263,6 +337,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 				if len(data) > 0 {
 					log.Trace("Handling: %d data, %v", len(data), data)
 					p.handle(data...)
+					atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 				}
 				log.Trace("Worker shutting down")
 				return
@@ -271,6 +346,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) >= p.batchLength {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 				data = make([]Data, 0, p.batchLength)
 			}
 		default:
@@ -286,6 +362,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 				if len(data) > 0 {
 					log.Trace("Handling: %d data, %v", len(data), data)
 					p.handle(data...)
+					atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 				}
 				log.Trace("Worker shutting down")
 				return
@@ -301,6 +378,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 					if len(data) > 0 {
 						log.Trace("Handling: %d data, %v", len(data), data)
 						p.handle(data...)
+						atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 					}
 					log.Trace("Worker shutting down")
 					return
@@ -309,6 +387,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 				if len(data) >= p.batchLength {
 					log.Trace("Handling: %d data, %v", len(data), data)
 					p.handle(data...)
+					atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 					data = make([]Data, 0, p.batchLength)
 				}
 			case <-timer.C:
@@ -316,6 +395,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 				if len(data) > 0 {
 					log.Trace("Handling: %d data, %v", len(data), data)
 					p.handle(data...)
+					atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 					data = make([]Data, 0, p.batchLength)
 				}

@@ -59,7 +59,7 @@ func GetQueueSettings(name string) QueueSettings {
 	if !filepath.IsAbs(q.DataDir) {
 		q.DataDir = filepath.Join(AppDataPath, q.DataDir)
 	}
-	sec.Key("DATADIR").SetValue(q.DataDir)
+	_, _ = sec.NewKey("DATADIR", q.DataDir)
 	// The rest are...
 	q.Length = sec.Key("LENGTH").MustInt(Queue.Length)
 	q.BatchLength = sec.Key("BATCH_LENGTH").MustInt(Queue.BatchLength)
@@ -89,7 +89,7 @@ func NewQueueService() {
 	Queue.Length = sec.Key("LENGTH").MustInt(20)
 	Queue.BatchLength = sec.Key("BATCH_LENGTH").MustInt(20)
 	Queue.ConnectionString = sec.Key("CONN_STR").MustString(path.Join(AppDataPath, ""))
-	Queue.Type = sec.Key("TYPE").MustString("")
+	Queue.Type = sec.Key("TYPE").MustString("persistable-channel")
 	Queue.Network, Queue.Addresses, Queue.Password, Queue.DBIndex, _ = ParseQueueConnStr(Queue.ConnectionString)
 	Queue.WrapIfNecessary = sec.Key("WRAP_IF_NECESSARY").MustBool(true)
 	Queue.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(10)
@@ -110,27 +110,27 @@ func NewQueueService() {
 	if _, ok := sectionMap["TYPE"]; !ok {
 		switch Indexer.IssueQueueType {
 		case LevelQueueType:
-			section.Key("TYPE").SetValue("level")
+			_, _ = section.NewKey("TYPE", "level")
 		case ChannelQueueType:
-			section.Key("TYPE").SetValue("persistable-channel")
+			_, _ = section.NewKey("TYPE", "persistable-channel")
 		case RedisQueueType:
-			section.Key("TYPE").SetValue("redis")
+			_, _ = section.NewKey("TYPE", "redis")
 		default:
 			log.Fatal("Unsupported indexer queue type: %v",
 				Indexer.IssueQueueType)
 		}
 	}
 	if _, ok := sectionMap["LENGTH"]; !ok {
-		section.Key("LENGTH").SetValue(fmt.Sprintf("%d", Indexer.UpdateQueueLength))
+		_, _ = section.NewKey("LENGTH", fmt.Sprintf("%d", Indexer.UpdateQueueLength))
 	}
 	if _, ok := sectionMap["BATCH_LENGTH"]; !ok {
-		section.Key("BATCH_LENGTH").SetValue(fmt.Sprintf("%d", Indexer.IssueQueueBatchNumber))
+		_, _ = section.NewKey("BATCH_LENGTH", fmt.Sprintf("%d", Indexer.IssueQueueBatchNumber))
 	}
 	if _, ok := sectionMap["DATADIR"]; !ok {
-		section.Key("DATADIR").SetValue(Indexer.IssueQueueDir)
+		_, _ = section.NewKey("DATADIR", Indexer.IssueQueueDir)
 	}
 	if _, ok := sectionMap["CONN_STR"]; !ok {
-		section.Key("CONN_STR").SetValue(Indexer.IssueQueueConnStr)
+		_, _ = section.NewKey("CONN_STR", Indexer.IssueQueueConnStr)
 	}

 	// Handle the old mailer configuration
@@ -140,7 +140,7 @@ func NewQueueService() {
 		sectionMap[key.Name()] = true
 	}
 	if _, ok := sectionMap["LENGTH"]; !ok {
-		section.Key("LENGTH").SetValue(fmt.Sprintf("%d", Cfg.Section("mailer").Key("SEND_BUFFER_LEN").MustInt(100)))
+		_, _ = section.NewKey("LENGTH", fmt.Sprintf("%d", Cfg.Section("mailer").Key("SEND_BUFFER_LEN").MustInt(100)))
 	}
 }

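The SetValue-to-NewKey switch is mechanical: both mutate the in-memory section, but NewKey creates the key and its value in one call and returns an error as well, hence the discarded `_, _ =`. A standalone sketch using gopkg.in/ini.v1, the ini library gitea uses; the section name is illustrative:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg := ini.Empty()
	sec := cfg.Section("queue.issue_indexer")

	// NewKey creates key and value together and reports an error...
	_, _ = sec.NewKey("TYPE", "level")
	// ...while Key() auto-creates an empty key that SetValue then fills.
	sec.Key("BATCH_LENGTH").SetValue("20")

	fmt.Println(sec.Key("TYPE").String(), sec.Key("BATCH_LENGTH").String())
}
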
@@ -2083,6 +2083,10 @@ monitor.queue.pool.addworkers.numberworkers.placeholder = Number of Workers
 monitor.queue.pool.addworkers.timeout.placeholder = Set to 0 for no timeout
 monitor.queue.pool.addworkers.mustnumbergreaterzero = Number of Workers to add must be greater than zero
 monitor.queue.pool.addworkers.musttimeoutduration = Timeout must be a golang duration eg. 5m or be 0
+monitor.queue.pool.flush.title = Flush Queue
+monitor.queue.pool.flush.desc = Flush will add a worker that will terminate once the queue is empty, or it times out.
+monitor.queue.pool.flush.submit = Add Flush Worker
+monitor.queue.pool.flush.added = Flush Worker added for %[1]s

 monitor.queue.settings.title = Pool Settings
 monitor.queue.settings.desc = Pools dynamically grow with a boost in response to their worker queue blocking. These changes will not affect current worker groups.

@@ -404,6 +404,28 @@ func WorkerCancel(ctx *context.Context) {
 	})
 }

+// Flush flushes a queue
+func Flush(ctx *context.Context) {
+	qid := ctx.ParamsInt64("qid")
+	mq := queue.GetManager().GetManagedQueue(qid)
+	if mq == nil {
+		ctx.Status(404)
+		return
+	}
+	timeout, err := time.ParseDuration(ctx.Query("timeout"))
+	if err != nil {
+		timeout = -1
+	}
+	ctx.Flash.Info(ctx.Tr("admin.monitor.queue.pool.flush.added", mq.Name))
+	go func() {
+		err := mq.Flush(timeout)
+		if err != nil {
+			log.Error("Flushing failure for %s: Error %v", mq.Name, err)
+		}
+	}()
+	ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid))
+}
+
 // AddWorkers adds workers to a worker group
 func AddWorkers(ctx *context.Context) {
 	qid := ctx.ParamsInt64("qid")
@@ -424,7 +446,7 @@ func AddWorkers(ctx *context.Context) {
 		ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid))
 		return
 	}
-	if mq.Pool == nil {
+	if _, ok := mq.Managed.(queue.ManagedPool); !ok {
 		ctx.Flash.Error(ctx.Tr("admin.monitor.queue.pool.none"))
 		ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid))
 		return
@@ -442,7 +464,7 @@ func SetQueueSettings(ctx *context.Context) {
 		ctx.Status(404)
 		return
 	}
-	if mq.Pool == nil {
+	if _, ok := mq.Managed.(queue.ManagedPool); !ok {
 		ctx.Flash.Error(ctx.Tr("admin.monitor.queue.pool.none"))
 		ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid))
 		return
@@ -488,10 +510,10 @@ func SetQueueSettings(ctx *context.Context) {
 			return
 		}
 	} else {
-		timeout = mq.Pool.BoostTimeout()
+		timeout = mq.BoostTimeout()
 	}

-	mq.SetSettings(maxNumber, number, timeout)
+	mq.SetPoolSettings(maxNumber, number, timeout)
 	ctx.Flash.Success(ctx.Tr("admin.monitor.queue.settings.changed"))
 	ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid))
 }

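The handler's timeout handling above leans on time.ParseDuration: the admin form posts a string like "5m", and anything unparseable (including an empty field) collapses to -1, which the flush machinery treats as no deadline. The parsing behaviour in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	for _, v := range []string{"5m", "90s", ""} {
		d, err := time.ParseDuration(v)
		if err != nil {
			d = -1 // same fallback as the Flush handler
		}
		fmt.Println(v, "->", d)
	}
}
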
@@ -89,5 +89,9 @@ func RegisterRoutes(m *macaron.Macaron) {
 		m.Post("/hook/set-default-branch/:owner/:repo/:branch", SetDefaultBranch)
 		m.Get("/serv/none/:keyid", ServNoCommand)
 		m.Get("/serv/command/:keyid/:owner/:repo", ServCommand)
+		m.Post("/manager/shutdown", Shutdown)
+		m.Post("/manager/restart", Restart)
+		m.Post("/manager/flush-queues", bind(private.FlushOptions{}), FlushQueues)

 	}, CheckInternalToken)
 }

							
								
								
									
41  routers/private/manager.go  Normal file
@@ -0,0 +1,41 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package private

import (
	"net/http"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/private"
	"code.gitea.io/gitea/modules/queue"

	"gitea.com/macaron/macaron"
)

// FlushQueues flushes all the Queues
func FlushQueues(ctx *macaron.Context, opts private.FlushOptions) {
	if opts.NonBlocking {
		// Save the hammer ctx here - as a new one is created each time you call this.
		baseCtx := graceful.GetManager().HammerContext()
		go func() {
			err := queue.GetManager().FlushAll(baseCtx, opts.Timeout)
			if err != nil {
				log.Error("Flushing request timed-out with error: %v", err)
			}
		}()
		ctx.JSON(http.StatusAccepted, map[string]interface{}{
			"err": "Flushing",
		})
		return
	}
	err := queue.GetManager().FlushAll(ctx.Req.Request.Context(), opts.Timeout)
	if err != nil {
		ctx.JSON(http.StatusRequestTimeout, map[string]interface{}{
			"err": err,
		})
	}
	ctx.PlainText(http.StatusOK, []byte("success"))
}
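FlushAll, called twice above, is the queue.Manager entry point behind the new flush-queues endpoint; invoked directly from Go it looks like this. A sketch: the five-minute bound is arbitrary, and presumably a zero timeout means no deadline, matching the Flush semantics elsewhere in this commit:

package main

import (
	"context"
	"time"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

func main() {
	// Flush every managed queue, giving up after five minutes.
	if err := queue.GetManager().FlushAll(context.Background(), 5*time.Minute); err != nil {
		log.Error("Flushing failed: %v", err)
	}
}
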
							
								
								
									
28  routers/private/manager_unix.go  Normal file
@@ -0,0 +1,28 @@
// +build !windows

// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package private

import (
	"net/http"

	"code.gitea.io/gitea/modules/graceful"

	"gitea.com/macaron/macaron"
)

// Restart causes the server to perform a graceful restart
func Restart(ctx *macaron.Context) {
	graceful.GetManager().DoGracefulRestart()
	ctx.PlainText(http.StatusOK, []byte("success"))
}

// Shutdown causes the server to perform a graceful shutdown
func Shutdown(ctx *macaron.Context) {
	graceful.GetManager().DoGracefulShutdown()
	ctx.PlainText(http.StatusOK, []byte("success"))
}
							
								
								
									
28  routers/private/manager_windows.go  Normal file
@@ -0,0 +1,28 @@
// +build windows

// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package private

import (
	"net/http"

	"code.gitea.io/gitea/modules/graceful"

	"gitea.com/macaron/macaron"
)

// Restart is not implemented for Windows based servers as they can't fork
func Restart(ctx *macaron.Context) {
	ctx.JSON(http.StatusNotImplemented, map[string]interface{}{
		"err": "windows servers cannot be gracefully restarted - shutdown and restart manually",
	})
}

// Shutdown causes the server to perform a graceful shutdown
func Shutdown(ctx *macaron.Context) {
	graceful.GetManager().DoGracefulShutdown()
	ctx.PlainText(http.StatusOK, []byte("success"))
}
@@ -423,6 +423,7 @@ func RegisterRoutes(m *macaron.Macaron) {
 				m.Post("/set", admin.SetQueueSettings)
 				m.Post("/add", admin.AddWorkers)
 				m.Post("/cancel/:pid", admin.WorkerCancel)
+				m.Post("/flush", admin.Flush)
 			})
 		})

@@ -92,6 +92,24 @@
 				</div>
 			</form>
 		</div>
+		<h4 class="ui top attached header">
+			{{.i18n.Tr "admin.monitor.queue.pool.flush.title"}}
+		</h4>
+		<div class="ui attached segment">
+			<p>{{.i18n.Tr "admin.monitor.queue.pool.flush.desc"}}</p>
+			<form method="POST" action="{{.Link}}/flush">
+				{{$.CsrfTokenHtml}}
+				<div class="ui form">
+					<div class="fields">
+						<div class="field">
+							<label>{{.i18n.Tr "admin.monitor.queue.pool.timeout"}}</label>
+							<input name="timeout" type="text" placeholder="{{.i18n.Tr "admin.monitor.queue.pool.addworkers.timeout.placeholder"}}">
+						</div>
+					</div>
+					<button class="ui submit button">{{.i18n.Tr "admin.monitor.queue.pool.flush.submit"}}</button>
+				</div>
+			</form>
+		</div>
 		<h4 class="ui top attached header">
 			{{.i18n.Tr "admin.monitor.queue.pool.workers.title"}}
 		</h4>
@@ -108,7 +126,7 @@
 				<tbody>
 					{{range .Queue.Workers}}
 					<tr>
-						<td>{{.Workers}}</td>
+						<td>{{.Workers}}{{if .IsFlusher}}<i class="icon sync-alternate" title="{{.i18n.Tr "admin.monitor.queue.flush"}}"></i>{{end}}</td>
 						<td>{{DateFmtLong .Start}}</td>
 						<td>{{if .HasTimeout}}{{DateFmtLong .Timeout}}{{else}}-{{end}}</td>
 						<td>