author | 2023-02-16 14:18:53 +0100
---|---
committer | 2023-02-16 13:18:53 +0000
commit | 6ee0dc8c7df5486fd7c130a1f70712cfdd813bc4 (patch)
tree | fc99a51689dda8d1abd2f60d3c0641abe4c7f8a0 /internal/api/fileserver.go
parent | Fix 410 Gone race on account deletes (#1507) (diff)
download | gotosocial-6ee0dc8c7df5486fd7c130a1f70712cfdd813bc4.tar.xz
[bugfix] Set cache-control max-age dynamically for s3 (#1510)
* [bugfix] set cache-control max-age dynamically for s3
* woops
* double whoops
* time until, thank you linter, bless you, you're the best, no matter what kim says
* aa
Diffstat (limited to 'internal/api/fileserver.go')
-rw-r--r-- | internal/api/fileserver.go | 36
1 file changed, 15 insertions, 21 deletions
```diff
diff --git a/internal/api/fileserver.go b/internal/api/fileserver.go
index 042936551..b1ebae045 100644
--- a/internal/api/fileserver.go
+++ b/internal/api/fileserver.go
@@ -31,31 +31,25 @@ type Fileserver struct {
 	fileserver *fileserver.Module
 }
 
-// maxAge returns an appropriate max-age value for the
-// storage method that's being used.
-//
-// The default max-age is very long to reflect that we
-// never host different files at the same URL (since
-// ULIDs are generated per piece of media), so we can
-// easily prevent clients having to fetch files repeatedly.
-//
-// If we're using non-proxying s3, however, the max age is
-// significantly shorter, to ensure that clients don't
-// cache redirect responses to expired pre-signed URLs.
-func maxAge() string {
-	if config.GetStorageBackend() == "s3" && !config.GetStorageS3Proxy() {
-		return "max-age=86400" // 24h
-	}
-
-	return "max-age=604800" // 7d
-}
-
 func (f *Fileserver) Route(r router.Router, m ...gin.HandlerFunc) {
 	fileserverGroup := r.AttachGroup("fileserver")
 
-	// attach middlewares appropriate for this group
+	// Attach middlewares appropriate for this group.
 	fileserverGroup.Use(m...)
-	fileserverGroup.Use(middleware.CacheControl("private", maxAge()))
+	// If we're using local storage or proxying s3, we can set a
+	// long max-age on all file requests to reflect that we
+	// never host different files at the same URL (since
+	// ULIDs are generated per piece of media), so we can
+	// easily prevent clients having to fetch files repeatedly.
+	//
+	// If we *are* using non-proxying s3, however, the max age
+	// must be set dynamically within the request handler,
+	// based on how long the signed URL has left to live before
+	// it expires. This ensures that clients won't cache expired
+	// links. This is done within fileserver/servefile.go.
+	if config.GetStorageBackend() == "local" || config.GetStorageS3Proxy() {
+		fileserverGroup.Use(middleware.CacheControl("private", "max-age=604800")) // 7d
+	}
 
 	f.fileserver.Route(fileserverGroup.Handle)
 }
```
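For the non-proxying s3 case, the diff comment says the Cache-Control header is set dynamically inside the request handler (fileserver/servefile.go), based on how long the pre-signed URL remains valid. A minimal sketch of that idea follows; the helper name `redirectWithDynamicMaxAge` and the `presignedURLExpiry` parameter are hypothetical stand-ins, not the actual GoToSocial code.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// redirectWithDynamicMaxAge redirects a client to a pre-signed S3 URL,
// capping the cacheable lifetime of the redirect at however long the
// signed URL itself remains valid, so clients never cache an expired link.
//
// Illustrative only: in GoToSocial the equivalent logic lives in
// internal/api/fileserver/servefile.go.
func redirectWithDynamicMaxAge(w http.ResponseWriter, r *http.Request, presignedURL string, presignedURLExpiry time.Time) {
	// time.Until gives the remaining validity of the signed URL.
	maxAge := int(time.Until(presignedURLExpiry).Seconds())
	if maxAge < 0 {
		// URL already expired; tell clients not to cache the redirect at all.
		maxAge = 0
	}

	w.Header().Set("Cache-Control", fmt.Sprintf("private, max-age=%d", maxAge))
	http.Redirect(w, r, presignedURL, http.StatusFound)
}
```

By contrast, the local-storage and proxying-s3 paths in the diff above can keep the static `middleware.CacheControl("private", "max-age=604800")` middleware, since a given media URL never serves different content.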