summaryrefslogtreecommitdiff
path: root/internal/middleware/throttling.go
diff options
context:
space:
mode:
authorLibravatar kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>2023-12-16 11:53:42 +0000
committerLibravatar GitHub <noreply@github.com>2023-12-16 12:53:42 +0100
commitd56a8d095e8fe84422ef4098d1e1a25198da17a1 (patch)
treecb34cb50335098492c863e4630dfd0b8da10d6c5 /internal/middleware/throttling.go
parent[docs]: Update FAQ and ROADMAP (#2458) (diff)
downloadgotosocial-d56a8d095e8fe84422ef4098d1e1a25198da17a1.tar.xz
[performance] simpler throttling logic (#2407)
* reduce complexity of throttling logic to use 1 queue and an atomic int * use atomic add instead of CAS, add throttling test
Diffstat (limited to 'internal/middleware/throttling.go')
-rw-r--r-- internal/middleware/throttling.go | 81
1 file changed, 45 insertions(+), 36 deletions(-)
diff --git a/internal/middleware/throttling.go b/internal/middleware/throttling.go
index 589671547..33f46f175 100644
--- a/internal/middleware/throttling.go
+++ b/internal/middleware/throttling.go
@@ -29,9 +29,12 @@ import (
"net/http"
"runtime"
"strconv"
+ "sync/atomic"
"time"
"github.com/gin-gonic/gin"
+
+ apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
)
// token represents a request that is being processed.
@@ -80,55 +83,61 @@ func Throttle(cpuMultiplier int, retryAfter time.Duration) gin.HandlerFunc {
}
var (
- limit = runtime.GOMAXPROCS(0) * cpuMultiplier
- backlogLimit = limit * cpuMultiplier
- backlogChannelSize = limit + backlogLimit
- tokens = make(chan token, limit)
- backlogTokens = make(chan token, backlogChannelSize)
- retryAfterStr = strconv.FormatUint(uint64(retryAfter/time.Second), 10)
+ limit = runtime.GOMAXPROCS(0) * cpuMultiplier
+ queueLimit = limit * cpuMultiplier
+ tokens = make(chan token, limit)
+ requestCount = atomic.Int64{}
+ retryAfterStr = strconv.FormatUint(uint64(retryAfter/time.Second), 10)
)
- // prefill token channels
+ // prefill token channel
for i := 0; i < limit; i++ {
tokens <- token{}
}
- for i := 0; i < backlogChannelSize; i++ {
- backlogTokens <- token{}
- }
return func(c *gin.Context) {
- // inside this select, the caller tries to get a backlog token
+ // Always decrement request counter.
+ defer func() { requestCount.Add(-1) }()
+
+ // Increment request count.
+ n := requestCount.Add(1)
+
+ // Check whether the request
+ // count is over queue limit.
+ if n > int64(queueLimit) {
+ c.Header("Retry-After", retryAfterStr)
+ apiutil.Data(c,
+ http.StatusTooManyRequests,
+ apiutil.AppJSON,
+ apiutil.ErrorCapacityExceeded,
+ )
+ c.Abort()
+ return
+ }
+
+ // Sit and wait in the
+ // queue for free token.
select {
+
case <-c.Request.Context().Done():
- // request context has been canceled already
+ // request context has
+ // been canceled already.
return
- case btok := <-backlogTokens:
+
+ case tok := <-tokens:
+ // caller has successfully
+ // received a token, allowing
+ // request to be processed.
+
defer func() {
- // when we're finished, return the backlog token to the bucket
- backlogTokens <- btok
+ // when we're finished, return
+ // this token to the bucket.
+ tokens <- tok
}()
- // inside *this* select, the caller has a backlog token,
- // and they're waiting for their turn to be processed
- select {
- case <-c.Request.Context().Done():
- // the request context has been canceled already
- return
- case tok := <-tokens:
- // the caller gets a token, so their request can now be processed
- defer func() {
- // whatever happens to the request, put the
- // token back in the bucket when we're finished
- tokens <- tok
- }()
- c.Next() // <- finally process the caller's request
- }
-
- default:
- // we don't have space in the backlog queue
- c.Header("Retry-After", retryAfterStr)
- c.JSON(http.StatusTooManyRequests, gin.H{"error": "server capacity exceeded"})
- c.Abort()
+ // Process
+ // request!
+ c.Next()
}
}
}