diff --git a/code/go/0chain.net/blobbercore/blobberhttp/response.go b/code/go/0chain.net/blobbercore/blobberhttp/response.go index e09b4430a..0a8c0bd06 100644 --- a/code/go/0chain.net/blobbercore/blobberhttp/response.go +++ b/code/go/0chain.net/blobbercore/blobberhttp/response.go @@ -34,6 +34,14 @@ type ReferencePathResult struct { LatestWM *writemarker.WriteMarker `json:"latest_write_marker"` } +type RefResult struct { + TotalPages int `json:"total_pages"` + NewOffsetPath string `json:"offsetPath,omitempty"` + NewOffsetDate string `json:"offsetDate,omitempty"` + Refs *[]reference.Ref `json:"refs"` + LatestWM *writemarker.WriteMarker `json:"latest_write_marker"` +} + type ObjectPathResult struct { *reference.ObjectPath LatestWM *writemarker.WriteMarker `json:"latest_write_marker"` diff --git a/code/go/0chain.net/blobbercore/handler/handler.go b/code/go/0chain.net/blobbercore/handler/handler.go index c2404b3d6..3b4072c13 100644 --- a/code/go/0chain.net/blobbercore/handler/handler.go +++ b/code/go/0chain.net/blobbercore/handler/handler.go @@ -58,7 +58,7 @@ func SetupHandlers(r *mux.Router) { r.HandleFunc("/v1/file/objectpath/{allocation}", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ObjectPathHandler)))) r.HandleFunc("/v1/file/referencepath/{allocation}", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ReferencePathHandler)))) r.HandleFunc("/v1/file/objecttree/{allocation}", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ObjectTreeHandler)))) - + r.HandleFunc("/v1/file/refs/{allocation}", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(RefsHandler)))).Methods("GET") //admin related r.HandleFunc("/_debug", common.UserRateLimit(common.ToJSONResponse(DumpGoRoutines))) r.HandleFunc("/_config", common.UserRateLimit(common.ToJSONResponse(GetConfig))) @@ -252,6 +252,17 @@ func ObjectTreeHandler(ctx context.Context, r *http.Request) (interface{}, error return response, nil } +func RefsHandler(ctx 
context.Context, r *http.Request) (interface{}, error) { + ctx = setupHandlerContext(ctx, r) + + response, err := storageHandler.GetRefs(ctx, r) + if err != nil { + return nil, err + } + + return response, nil +} + func RenameHandler(ctx context.Context, r *http.Request) (interface{}, error) { ctx = setupHandlerContext(ctx, r) response, err := storageHandler.RenameObject(ctx, r) diff --git a/code/go/0chain.net/blobbercore/handler/helper.go b/code/go/0chain.net/blobbercore/handler/helper.go new file mode 100644 index 000000000..0a5414a49 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/helper.go @@ -0,0 +1,17 @@ +package handler + +import ( + "time" + + "github.com/0chain/blobber/code/go/0chain.net/core/common" +) + +func checkValidDate(s string) error { + if s != "" { + _, err := time.Parse("2006-01-02 15:04:05.999999999", s) + if err != nil { + return common.NewError("invalid_parameters", err.Error()) + } + } + return nil +} diff --git a/code/go/0chain.net/blobbercore/handler/object_operation_handler.go b/code/go/0chain.net/blobbercore/handler/object_operation_handler.go index 29f0e499f..0bd1d6d6e 100644 --- a/code/go/0chain.net/blobbercore/handler/object_operation_handler.go +++ b/code/go/0chain.net/blobbercore/handler/object_operation_handler.go @@ -208,7 +208,7 @@ func (fsh *StorageHandler) DownloadFile( } // get and parse file params - if err = r.ParseMultipartForm(FORM_FILE_PARSE_MAX_MEMORY); nil != err { + if err = r.ParseMultipartForm(FormFileParseMaxMemory); nil != err { Logger.Info("download_file - request_parse_error", zap.Error(err)) return nil, common.NewErrorf("download_file", "request_parse_error: %v", err) @@ -394,7 +394,7 @@ func (fsh *StorageHandler) DownloadFile( downloadMode = r.FormValue("content") respData []byte ) - if len(downloadMode) > 0 && downloadMode == DOWNLOAD_CONTENT_THUMB { + if len(downloadMode) > 0 && downloadMode == DownloadContentThumb { var fileData = &filestore.FileInputData{} fileData.Name = fileref.Name 
fileData.Path = fileref.Path @@ -561,7 +561,7 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*b return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } - if err = r.ParseMultipartForm(FORM_FILE_PARSE_MAX_MEMORY); nil != err { + if err = r.ParseMultipartForm(FormFileParseMaxMemory); nil != err { Logger.Info("Error Parsing the request", zap.Any("error", err)) return nil, common.NewError("request_parse_error", err.Error()) } @@ -1122,7 +1122,7 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*blo return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") } - if err := r.ParseMultipartForm(FORM_FILE_PARSE_MAX_MEMORY); err != nil { + if err := r.ParseMultipartForm(FormFileParseMaxMemory); err != nil { Logger.Info("Error Parsing the request", zap.Any("error", err)) return nil, common.NewError("request_parse_error", err.Error()) } diff --git a/code/go/0chain.net/blobbercore/handler/storage_handler.go b/code/go/0chain.net/blobbercore/handler/storage_handler.go index 99f9d7200..c42fca496 100644 --- a/code/go/0chain.net/blobbercore/handler/storage_handler.go +++ b/code/go/0chain.net/blobbercore/handler/storage_handler.go @@ -22,10 +22,11 @@ import ( ) const ( - FORM_FILE_PARSE_MAX_MEMORY = 10 * 1024 * 1024 + FormFileParseMaxMemory = 10 * 1024 * 1024 - DOWNLOAD_CONTENT_FULL = "full" - DOWNLOAD_CONTENT_THUMB = "thumbnail" + DownloadContentFull = "full" + DownloadContentThumb = "thumbnail" + PageLimit = 100 //100 rows will make up to 100 KB ) type StorageHandler struct{} @@ -636,6 +637,7 @@ func (fsh *StorageHandler) GetObjectTree(ctx context.Context, r *http.Request) ( refPath := &reference.ReferencePath{Ref: rootRef} refsToProcess := make([]*reference.ReferencePath, 0) refsToProcess = append(refsToProcess, refPath) + for len(refsToProcess) > 0 { refToProcess := refsToProcess[0]
refToProcess.Meta = refToProcess.Ref.GetListingData(ctx) @@ -667,6 +669,118 @@ func (fsh *StorageHandler) GetObjectTree(ctx context.Context, r *http.Request) ( return &refPathResult, nil } +//Retrieves file refs. One can use three types to refer to regular, updated and deleted. Regular type gives all undeleted rows. +//Updated gives rows that is updated compared to the date given. And deleted gives deleted refs compared to the date given. +func (fsh *StorageHandler) GetRefs(ctx context.Context, r *http.Request) (*blobberhttp.RefResult, error) { + allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) + + if err != nil { + return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) + } + + clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) + if !valid || err != nil { + return nil, common.NewError("invalid_signature", "Invalid signature") + } + + allocationID := allocationObj.ID + clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + if len(clientID) == 0 || allocationObj.OwnerID != clientID { + return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") + } + path := r.FormValue("path") + if len(path) == 0 { + return nil, common.NewError("invalid_parameters", "Invalid path") + } + + pageLimitStr := r.FormValue("pageLimit") + var pageLimit int + if len(pageLimitStr) == 0 { + pageLimit = PageLimit + } else { + o, err := strconv.Atoi(pageLimitStr) + if err != nil { + return nil, common.NewError("invalid_parameters", "Invalid page limit value type") + } + if o <= 0 { + return nil, common.NewError("invalid_parameters", "Zero/Negative page limit value is not allowed") + } else if o > PageLimit { + pageLimit = PageLimit + } else { + pageLimit = o + } + } + offsetPath := 
r.FormValue("offsetPath") + offsetDate := r.FormValue("offsetDate") + updatedDate := r.FormValue("updatedDate") + err = checkValidDate(offsetDate) + if err != nil { + return nil, err + } + err = checkValidDate(updatedDate) + if err != nil { + return nil, err + } + fileType := r.FormValue("fileType") + levelStr := r.FormValue("level") + var level int + if len(levelStr) != 0 { + level, err = strconv.Atoi(levelStr) + if err != nil { + return nil, common.NewError("invalid_parameters", err.Error()) + } + if level < 0 { + return nil, common.NewError("invalid_parameters", "Negative level value is not allowed") + } + } + + refType := r.FormValue("refType") + var refs *[]reference.Ref + var totalPages int + var newOffsetPath string + var newOffsetDate string + + switch { + case refType == "regular": + refs, totalPages, newOffsetPath, err = reference.GetRefs(ctx, allocationID, path, offsetPath, fileType, level, pageLimit) + + case refType == "updated": + refs, totalPages, newOffsetPath, newOffsetDate, err = reference.GetUpdatedRefs(ctx, allocationID, path, offsetPath, fileType, updatedDate, offsetDate, level, pageLimit) + + case refType == "deleted": + refs, totalPages, newOffsetPath, newOffsetDate, err = reference.GetDeletedRefs(ctx, allocationID, updatedDate, offsetPath, offsetDate, pageLimit) + + default: + return nil, common.NewError("invalid_parameters", "refType param should have value regular/updated/deleted") + } + + if err != nil { + return nil, err + } + var latestWM *writemarker.WriteMarkerEntity + if len(allocationObj.AllocationRoot) == 0 { + latestWM = nil + } else { + latestWM, err = writemarker.GetWriteMarkerEntity(ctx, allocationObj.AllocationRoot) + if err != nil { + return nil, common.NewError("latest_write_marker_read_error", "Error reading the latest write marker for allocation."+err.Error()) + } + } + + var refResult blobberhttp.RefResult + refResult.Refs = refs + refResult.TotalPages = totalPages + refResult.NewOffsetPath = newOffsetPath + 
refResult.NewOffsetDate = newOffsetDate + if latestWM != nil { + refResult.LatestWM = &latestWM.WM + } + // Refs will be returned as it is and object tree will be build in client side + return &refResult, nil +} + func (fsh *StorageHandler) CalculateHash(ctx context.Context, r *http.Request) (interface{}, error) { if r.Method != "POST" { return nil, common.NewError("invalid_method", "Invalid method used. Use POST instead") diff --git a/code/go/0chain.net/blobbercore/reference/ref.go b/code/go/0chain.net/blobbercore/reference/ref.go index 6d7b3813e..ecacdb741 100644 --- a/code/go/0chain.net/blobbercore/reference/ref.go +++ b/code/go/0chain.net/blobbercore/reference/ref.go @@ -56,40 +56,40 @@ func (a *Attributes) Validate() (err error) { } type Ref struct { - ID int64 `gorm:"column:id;primary_key"` - Type string `gorm:"column:type" dirlist:"type" filelist:"type"` - AllocationID string `gorm:"column:allocation_id"` - LookupHash string `gorm:"column:lookup_hash" dirlist:"lookup_hash" filelist:"lookup_hash"` - Name string `gorm:"column:name" dirlist:"name" filelist:"name"` - Path string `gorm:"column:path" dirlist:"path" filelist:"path"` - Hash string `gorm:"column:hash" dirlist:"hash" filelist:"hash"` - NumBlocks int64 `gorm:"column:num_of_blocks" dirlist:"num_of_blocks" filelist:"num_of_blocks"` - PathHash string `gorm:"column:path_hash" dirlist:"path_hash" filelist:"path_hash"` - ParentPath string `gorm:"column:parent_path"` - PathLevel int `gorm:"column:level"` - CustomMeta string `gorm:"column:custom_meta" filelist:"custom_meta"` - ContentHash string `gorm:"column:content_hash" filelist:"content_hash"` - Size int64 `gorm:"column:size" dirlist:"size" filelist:"size"` - MerkleRoot string `gorm:"column:merkle_root" filelist:"merkle_root"` - ActualFileSize int64 `gorm:"column:actual_file_size" filelist:"actual_file_size"` - ActualFileHash string `gorm:"column:actual_file_hash" filelist:"actual_file_hash"` - MimeType string `gorm:"column:mimetype" filelist:"mimetype"` - 
WriteMarker string `gorm:"column:write_marker"` - ThumbnailSize int64 `gorm:"column:thumbnail_size" filelist:"thumbnail_size"` - ThumbnailHash string `gorm:"column:thumbnail_hash" filelist:"thumbnail_hash"` - ActualThumbnailSize int64 `gorm:"column:actual_thumbnail_size" filelist:"actual_thumbnail_size"` - ActualThumbnailHash string `gorm:"column:actual_thumbnail_hash" filelist:"actual_thumbnail_hash"` - EncryptedKey string `gorm:"column:encrypted_key" filelist:"encrypted_key"` - Attributes datatypes.JSON `gorm:"column:attributes" filelist:"attributes"` - Children []*Ref `gorm:"-"` - childrenLoaded bool - - OnCloud bool `gorm:"column:on_cloud" filelist:"on_cloud"` - CommitMetaTxns []CommitMetaTxn `gorm:"foreignkey:ref_id" filelist:"commit_meta_txns"` - CreatedAt time.Time `gorm:"column:created_at" dirlist:"created_at" filelist:"created_at"` - UpdatedAt time.Time `gorm:"column:updated_at" dirlist:"updated_at" filelist:"updated_at"` - - DeletedAt gorm.DeletedAt `gorm:"column:deleted_at"` // soft deletion + ID int64 `gorm:"column:id;primary_key" json:"id,omitempty"` + Type string `gorm:"column:type" dirlist:"type" filelist:"type" json:"type,omitempty"` + AllocationID string `gorm:"column:allocation_id" json:"allocation_id,omitempty"` + LookupHash string `gorm:"column:lookup_hash" dirlist:"lookup_hash" filelist:"lookup_hash" json:"lookup_hash,omitempty"` + Name string `gorm:"column:name" dirlist:"name" filelist:"name" json:"name,omitempty"` + Path string `gorm:"column:path" dirlist:"path" filelist:"path" json:"path,omitempty"` + Hash string `gorm:"column:hash" dirlist:"hash" filelist:"hash" json:"hash,omitempty"` + NumBlocks int64 `gorm:"column:num_of_blocks" dirlist:"num_of_blocks" filelist:"num_of_blocks" json:"num_blocks,omitempty"` + PathHash string `gorm:"column:path_hash" dirlist:"path_hash" filelist:"path_hash" json:"path_hash,omitempty"` + ParentPath string `gorm:"column:parent_path" json:"parent_path,omitempty"` + PathLevel int `gorm:"column:level" 
json:"level,omitempty"` + CustomMeta string `gorm:"column:custom_meta" filelist:"custom_meta" json:"custom_meta,omitempty"` + ContentHash string `gorm:"column:content_hash" filelist:"content_hash" json:"content_hash,omitempty"` + Size int64 `gorm:"column:size" dirlist:"size" filelist:"size" json:"size,omitempty"` + MerkleRoot string `gorm:"column:merkle_root" filelist:"merkle_root" json:"merkle_root,omitempty"` + ActualFileSize int64 `gorm:"column:actual_file_size" filelist:"actual_file_size" json:"actual_file_size,omitempty"` + ActualFileHash string `gorm:"column:actual_file_hash" filelist:"actual_file_hash" json:"actual_file_hash,omitempty"` + MimeType string `gorm:"column:mimetype" filelist:"mimetype" json:"mimetype,omitempty"` + WriteMarker string `gorm:"column:write_marker" json:"write_marker,omitempty"` + ThumbnailSize int64 `gorm:"column:thumbnail_size" filelist:"thumbnail_size" json:"thumbnail_size,omitempty"` + ThumbnailHash string `gorm:"column:thumbnail_hash" filelist:"thumbnail_hash" json:"thumbnail_hash,omitempty"` + ActualThumbnailSize int64 `gorm:"column:actual_thumbnail_size" filelist:"actual_thumbnail_size" json:"actual_thumbnail_size,omitempty"` + ActualThumbnailHash string `gorm:"column:actual_thumbnail_hash" filelist:"actual_thumbnail_hash" json:"actual_thumbnail_hash,omitempty"` + EncryptedKey string `gorm:"column:encrypted_key" filelist:"encrypted_key" json:"encrypted_key,omitempty"` + Attributes datatypes.JSON `gorm:"column:attributes" filelist:"attributes" json:"attributes,omitempty"` + Children []*Ref `gorm:"-" json:"-"` + childrenLoaded bool `json:"-"` + + OnCloud bool `gorm:"column:on_cloud" filelist:"on_cloud" json:"on_cloud,omitempty"` + CommitMetaTxns []CommitMetaTxn `gorm:"foreignkey:ref_id" filelist:"commit_meta_txns" json:"-"` + CreatedAt time.Time `gorm:"column:created_at" dirlist:"created_at" filelist:"created_at" json:"created_at,omitempty"` + UpdatedAt time.Time `gorm:"column:updated_at" dirlist:"updated_at" 
filelist:"updated_at" json:"updated_at,omitempty"` + + DeletedAt gorm.DeletedAt `gorm:"column:deleted_at" json:"-"` // soft deletion } func (Ref) TableName() string { diff --git a/code/go/0chain.net/blobbercore/reference/referencepath.go b/code/go/0chain.net/blobbercore/reference/referencepath.go index 4103d8dc0..7283ced2e 100644 --- a/code/go/0chain.net/blobbercore/reference/referencepath.go +++ b/code/go/0chain.net/blobbercore/reference/referencepath.go @@ -2,10 +2,13 @@ package reference import ( "context" + "math" "path/filepath" + "sync" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/core/common" + "gorm.io/gorm" ) type ReferencePath struct { @@ -99,3 +102,178 @@ func GetObjectTree(ctx context.Context, allocationID string, path string) (*Ref, } return &refs[0], nil } + +//This function retrieves reference_objects table rows with pagination. Check for issue https://github.com/0chain/gosdk/issues/117 +//Might need to consider covering index for efficient search https://blog.crunchydata.com/blog/why-covering-indexes-are-incredibly-helpful +func GetRefs(ctx context.Context, allocationID, path, offsetPath, _type string, level, pageLimit int) (refs *[]Ref, totalPages int, newOffsetPath string, err error) { + var totalRows int64 + var pRefs []Ref + path = filepath.Clean(path) + + db := datastore.GetStore().GetDB() + db1 := db.Session(&gorm.Session{}) + db2 := db.Session(&gorm.Session{}) + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + db1 = db1.Model(&Ref{}).Where("allocation_id = ?", allocationID).
+ Where(db1.Where("path = ?", path).Or("path LIKE ?", (path + "%"))) + if _type != "" { + db1 = db1.Where("type = ?", _type) + } + if level != 0 { + db1 = db1.Where("level >= ?", level) + } + // totalRows is counted by the db2 goroutine below; counting here as well would be a data race on totalRows + + db1 = db1.Where("path > ?", offsetPath) + + db1 = db1.Order("path") + err = db1.Limit(pageLimit).Find(&pRefs).Error + wg.Done() + }() + + go func() { + db2 = db2.Model(&Ref{}).Where("allocation_id = ?", allocationID). + Where(db2.Where("path = ?", path).Or("path LIKE ?", (path + "%"))) + if _type != "" { + db2 = db2.Where("type = ?", _type) + } + if level != 0 { + db2 = db2.Where("level >= ?", level) + } + db2.Count(&totalRows) + wg.Done() + }() + wg.Wait() + if err != nil { + return + } + + refs = &pRefs + if len(pRefs) > 0 { + newOffsetPath = pRefs[len(pRefs)-1].Path + + } + totalPages = int(math.Ceil(float64(totalRows) / float64(pageLimit))) + return +} + +//Retrieves updated refs compared to some update_at value. Useful to localCache +func GetUpdatedRefs(ctx context.Context, allocationID, path, offsetPath, _type, updatedDate, offsetDate string, level, pageLimit int) (refs *[]Ref, totalPages int, newOffsetPath, newOffsetDate string, err error) { + var totalRows int64 + var pRefs []Ref + db := datastore.GetStore().GetDB() + db1 := db.Session(&gorm.Session{}) //TODO Might need to use transaction from db1/db2 to avoid injection attack + db2 := db.Session(&gorm.Session{}) + + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + db1 = db1.Model(&Ref{}).Where("allocation_id = ?", allocationID).
+ Where(db1.Where("path = ?", path).Or("path LIKE ?", (path + "%"))) + if _type != "" { + db1 = db1.Where("type = ?", _type) + } + if level != 0 { + db1 = db1.Where("level >= ?", level) + } + if updatedDate != "" { + db1 = db1.Where("updated_at > ?", updatedDate) + } + + if offsetDate != "" { + db1 = db1.Where("(updated_at, path) > (?, ?)", offsetDate, offsetPath) + } + db1 = db1.Order("updated_at, path") + db1 = db1.Limit(pageLimit) + err = db1.Find(&pRefs).Error + wg.Done() + }() + + go func() { + db2 = db2.Model(&Ref{}).Where("allocation_id = ?", allocationID). + Where(db2.Where("path = ?", path).Or("path LIKE ?", (path + "%"))) + if _type != "" { + db2 = db2.Where("type = ?", _type) + } + if level != 0 { + db2 = db2.Where("level >= ?", level) + } + if updatedDate != "" { + db2 = db2.Where("updated_at > ?", updatedDate) + } + db2 = db2.Count(&totalRows) + wg.Done() + }() + wg.Wait() + if err != nil { + return + } + + if len(pRefs) != 0 { + lastIdx := len(pRefs) - 1 + newOffsetDate = pRefs[lastIdx].UpdatedAt.Format("2006-01-02 15:04:05.999999999") + newOffsetPath = pRefs[lastIdx].Path + } + refs = &pRefs + totalPages = int(math.Ceil(float64(totalRows) / float64(pageLimit))) + return +} + +//Retrieves deleted refs compared to some update_at value. Useful for localCache. +func GetDeletedRefs(ctx context.Context, allocationID, updatedDate, offsetPath, offsetDate string, pageLimit int) (refs *[]Ref, totalPages int, newOffsetPath, newOffsetDate string, err error) { + var totalRows int64 + var pRefs []Ref + db := datastore.GetStore().GetDB() + + db1 := db.Session(&gorm.Session{}) + db2 := db.Session(&gorm.Session{}) + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + db1 = db1.Model(&Ref{}).Unscoped(). + Select("path", "path_hash", "deleted_at", "updated_at").
+ Where("allocation_id = ?", allocationID) + + if updatedDate == "" { + db1 = db1.Where("deleted_at IS NOT null") + } else { + db1 = db1.Where("deleted_at > ?", updatedDate) + } + + if offsetDate != "" { + db1 = db1.Where("(updated_at, path) > (?, ?)", offsetDate, offsetPath) + } + + err = db1.Order("updated_at, path").Limit(pageLimit).Find(&pRefs).Error + wg.Done() + }() + + go func() { + db2 = db2.Model(&Ref{}).Unscoped().Where("allocation_id = ?", allocationID) + if updatedDate == "" { + db2 = db2.Where("deleted_at IS NOT null") + } else { + db2 = db2.Where("deleted_at > ?", updatedDate) + } + + db2 = db2.Count(&totalRows) + wg.Done() + }() + wg.Wait() + if err != nil { + return + } + if len(pRefs) != 0 { + lastIdx := len(pRefs) - 1 + newOffsetDate = pRefs[lastIdx].DeletedAt.Time.Format("2006-01-02 15:04:05.999999999") + newOffsetPath = pRefs[lastIdx].Path + } + refs = &pRefs + totalPages = int(math.Ceil(float64(totalRows) / float64(pageLimit))) + return +} diff --git a/sql/create-indexes.sql b/sql/create-indexes.sql new file mode 100644 index 000000000..f0d57dbe6 --- /dev/null +++ b/sql/create-indexes.sql @@ -0,0 +1,8 @@ +\connect blobber_meta; +BEGIN; +DROP INDEX IF EXISTS path_idx; +DROP INDEX IF EXISTS update_idx; +-- Create index on path column; It cannot be Unique index because of soft delete by gorm +CREATE INDEX path_idx ON reference_objects (path); +CREATE INDEX update_idx ON reference_objects (updated_at); +COMMIT; \ No newline at end of file