s3: collect metrics

commit 23e9ede068 (parent 852e5f7cbc)
@@ -49,46 +49,46 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	for _, bucket := range routers {
 
 		// HeadObject
-		bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ))
+		bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
 		// HeadBucket
-		bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN))
+		bucket.Methods("HEAD").HandlerFunc(stats(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET"))
 
 		// CopyObjectPart
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(stats(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 		// PutObjectPart
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 		// CompleteMultipartUpload
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}")
 		// NewMultipartUpload
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "")
+		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "")
 		// AbortMultipartUpload
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}")
 		// ListObjectParts
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE), "GET")).Queries("uploadId", "{uploadId:.*}")
 		// ListMultipartUploads
-		bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "")
+		bucket.Methods("GET").HandlerFunc(stats(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE), "GET")).Queries("uploads", "")
 
 		// CopyObject
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE))
+		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(stats(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
 		// PutObject
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE))
+		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT"))
 		// PutBucket
-		bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN))
+		bucket.Methods("PUT").HandlerFunc(stats(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT"))
 
 		// DeleteObject
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE))
+		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE"))
 		// DeleteBucket
-		bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE))
+		bucket.Methods("DELETE").HandlerFunc(stats(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))
 
 		// ListObjectsV2
-		bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2")
+		bucket.Methods("GET").HandlerFunc(stats(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ), "LIST")).Queries("list-type", "2")
 		// GetObject, but directory listing is not supported
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ))
+		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(stats(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
 		// ListObjectsV1 (Legacy)
-		bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ))
+		bucket.Methods("GET").HandlerFunc(stats(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ), "LIST"))
 
 		// DeleteMultipleObjects
-		bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "")
+		bucket.Methods("POST").HandlerFunc(stats(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")
 		/*
 
 		// not implemented
@@ -111,7 +111,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	}
 
 	// ListBuckets
-	apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_READ))
+	apiRouter.Methods("GET").Path("/").HandlerFunc(stats(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_READ), "LIST"))
 
 	// NotFound
 	apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
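The routing change is uniform: every route keeps its existing s3a.iam.Auth(...) handler and is additionally threaded through the new stats(handler, action) decorator before being registered on the gorilla/mux router. This is plain http.HandlerFunc composition; below is a minimal, self-contained sketch of the same shape (the timed wrapper, getObject handler, and port are illustrative stand-ins, not SeaweedFS code):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/mux"
)

// timed plays the role of the new stats() wrapper: it decorates an
// http.HandlerFunc and records how long each call took.
func timed(f http.HandlerFunc, action string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		f(w, r)
		log.Printf("%s handled in %v", action, time.Since(start))
	}
}

func getObject(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "object %s\n", mux.Vars(r)["object"])
}

func main() {
	router := mux.NewRouter()
	bucket := router.PathPrefix("/{bucket}").Subrouter()
	// Same registration shape as the diff: method + path + wrapped handler.
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(timed(getObject, "GET"))
	log.Fatal(http.ListenAndServe(":8080", router))
}
```

Because each decorator returns another http.HandlerFunc, auth, stats, and any further wrappers compose without changing the mux registration calls.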
weed/s3api/stats.go (new file, 17 lines)
@@ -0,0 +1,17 @@
+package s3api
+
+import (
+	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
+	"net/http"
+	"time"
+)
+
+func stats(f http.HandlerFunc, action string) http.HandlerFunc {
+
+	return func(w http.ResponseWriter, r *http.Request) {
+		start := time.Now()
+		stats_collect.S3RequestCounter.WithLabelValues(action).Inc()
+		f(w, r)
+		stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
+	}
+}
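The wrapper increments a per-action counter before invoking the inner handler and observes the elapsed wall-clock time afterwards. A hedged, test-style sketch of how a wrapper of this shape can be exercised, using a local CounterVec and the Prometheus testutil helper as stand-ins for S3RequestCounter (none of this is from the SeaweedFS test suite):

```go
package s3api_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// requestCounter is an illustrative stand-in for stats_collect.S3RequestCounter.
var requestCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "request_total", Help: "Counter of s3 requests."},
	[]string{"type"})

// counted mirrors the shape of the stats() wrapper above: count, then delegate.
func counted(f http.HandlerFunc, action string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		requestCounter.WithLabelValues(action).Inc()
		f(w, r)
	}
}

func TestCountedWrapper(t *testing.T) {
	h := counted(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}, "GET")

	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/bucket/object.txt", nil))

	if rec.Code != http.StatusOK {
		t.Fatalf("unexpected status: %d", rec.Code)
	}
	if got := testutil.ToFloat64(requestCounter.WithLabelValues("GET")); got != 1 {
		t.Fatalf("expected 1 counted GET request, got %v", got)
	}
}
```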
@@ -91,6 +91,23 @@ var (
 			Name:      "total_disk_size",
 			Help:      "Actual disk size used by volumes.",
 		}, []string{"collection", "type"})
+
+	S3RequestCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "SeaweedFS",
+			Subsystem: "s3",
+			Name:      "request_total",
+			Help:      "Counter of s3 requests.",
+		}, []string{"type"})
+	S3RequestHistogram = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: "SeaweedFS",
+			Subsystem: "s3",
+			Name:      "request_seconds",
+			Help:      "Bucketed histogram of s3 request processing time.",
+			Buckets:   prometheus.ExponentialBuckets(0.0001, 2, 24),
+		}, []string{"type"})
+
 )
 
 func init() {
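With Namespace "SeaweedFS", Subsystem "s3", and these names, the exported series become SeaweedFS_s3_request_total{type="..."} and the SeaweedFS_s3_request_seconds histogram family (_bucket/_sum/_count), and ExponentialBuckets(0.0001, 2, 24) covers latencies from 0.1 ms up to roughly 0.0001 * 2^23 ≈ 839 s. A small sketch of exposing collectors like these over HTTP, using a fresh registry as a stand-in for the S3Gather registry used in the init() hunk below (the port and handler wiring are assumptions):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// reg stands in for the S3Gather registry; a fresh registry keeps the
	// example free of the default Go/process collectors.
	reg := prometheus.NewRegistry()

	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "SeaweedFS",
		Subsystem: "s3",
		Name:      "request_total",
		Help:      "Counter of s3 requests.",
	}, []string{"type"})
	reg.MustRegister(requests)

	// After this increment, /metrics serves:
	//   SeaweedFS_s3_request_total{type="GET"} 1
	requests.WithLabelValues("GET").Inc()

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```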
@@ -107,6 +124,8 @@ func init() {
 	VolumeServerGather.MustRegister(VolumeServerMaxVolumeCounter)
 	VolumeServerGather.MustRegister(VolumeServerDiskSizeGauge)
 
+	S3Gather.MustRegister(S3RequestCounter)
+	S3Gather.MustRegister(S3RequestHistogram)
 }
 
 func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, addr string, intervalSeconds int) {