Add option to limit the number of uploaded files #621

Open · wants to merge 2 commits into base: 10.0
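This PR adds a per-library limit on the number of uploaded files, enforced both in the C server (common/rpc-service.c, server/http-server.c, server/web-accesstoken-mgr.c) and in the Go fileserver (fileserver/sync_api.go). The limit is read from the [quota] section of seafile.conf at startup; if the key is missing or unparsable, it falls back to -1 and the checks are skipped. A minimal sketch of the corresponding configuration (the value 100000 is purely illustrative):

[quota]
# Maximum number of files allowed in a library; omit or set to -1 to disable the check.
library_file_number = 100000

When the limit is reached, uploads fail with the new SEAF_ERR_TOO_MANY_FILES error on the RPC path and with HTTP 403 ("Too many files in library.") on the sync/update-branch path.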
20 changes: 20 additions & 0 deletions common/rpc-service.c
@@ -2400,6 +2400,16 @@ seafile_post_file (const char *repo_id, const char *temp_file_path,
goto out;
}

if (seaf->repo_file_number_limit >= 0) {
gint64 file_number = seaf_get_origin_repo_file_number(repo_id);
if (file_number >= seaf->repo_file_number_limit) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_TOO_MANY_FILES,
"Too many files in library.");
ret = -1;
goto out;
}
}

rpath = format_dir_path (norm_parent_dir);

if (seaf_repo_manager_post_file (seaf->repo_mgr, repo_id,
@@ -2705,6 +2715,16 @@ seafile_post_empty_file (const char *repo_id, const char *parent_dir,
goto out;
}

if (seaf->repo_file_number_limit >= 0) {
gint64 file_number = seaf_get_origin_repo_file_number(repo_id);
if (file_number >= seaf->repo_file_number_limit) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_TOO_MANY_FILES,
"Too many files in library.");
ret = -1;
goto out;
}
}

norm_file_name = normalize_utf8_path (new_file_name);
if (!norm_file_name) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
29 changes: 28 additions & 1 deletion fileserver/fileserver.go
@@ -258,6 +258,32 @@ func loadSeafileDB() {
dbType = dbEngine
}

func loadRepoFileNumberLimit() int64 {
seafileConfPath := filepath.Join(centralDir, "seafile.conf")

opts := ini.LoadOptions{}
opts.SpaceBeforeInlineComment = true
config, err := ini.LoadSources(opts, seafileConfPath)
if err != nil {
return -1
}

section, err := config.GetSection("quota")
if err != nil {
return -1
}

key, err := section.GetKey("library_file_number")
if err != nil {
return -1
}
fileLimit, err := key.Int64()
if err != nil {
return -1
}
return fileLimit
}

func writePidFile(pid_file_path string) error {
file, err := os.OpenFile(pid_file_path, os.O_CREATE|os.O_WRONLY, 0664)
if err != nil {
@@ -314,6 +340,7 @@ func main() {
}
loadSeafileDB()
option.LoadFileServerOptions(centralDir)
fileLimit := loadRepoFileNumberLimit()

if logFile == "" {
absLogFile = filepath.Join(absDataDir, "fileserver.log")
@@ -368,7 +395,7 @@ func main() {

fileopInit()

syncAPIInit()
syncAPIInit(fileLimit)

sizeSchedulerInit()

14 changes: 14 additions & 0 deletions fileserver/repomgr/repomgr.go
@@ -660,3 +660,17 @@ func UpdateRepoInfo(repoID, commitID string) error {

return nil
}

func GetFileNumber(repoID string) (int64, error) {
sqlStr := "SELECT file_count FROM RepoFileCount WHERE repo_id = ?"

row := seafileDB.QueryRow(sqlStr, repoID)
var fileNumber sql.NullInt64
if err := row.Scan(&fileNumber); err != nil {
if err != sql.ErrNoRows {
return 0, err
}
return 0, nil
}
return fileNumber.Int64, nil
}
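Both this Go helper and the new seaf_get_repo_file_number() in server/repo-mgr.c read the per-library count from the RepoFileCount table, which the PR only queries and therefore assumes is already maintained by the server. A rough sketch of the schema implied by those queries (column types are illustrative, not taken from this diff):

CREATE TABLE IF NOT EXISTS RepoFileCount (
    repo_id    CHAR(36) PRIMARY KEY,  -- library ID
    file_count BIGINT                 -- current number of files in the library
);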
50 changes: 49 additions & 1 deletion fileserver/sync_api.go
@@ -56,6 +56,7 @@ var (
permCache sync.Map
virtualRepoInfoCache sync.Map
calFsIdPool *workerpool.WorkPool
repoFileNumberLimit int64
)

type tokenInfo struct {
@@ -90,7 +91,8 @@ type statusEventData struct {
bytes uint64
}

func syncAPIInit() {
func syncAPIInit(fileLimit int64) {
repoFileNumberLimit = fileLimit
ticker := time.NewTicker(time.Second * syncAPICleaningIntervalSec)
go RecoverWrapper(func() {
for range ticker.C {
@@ -1014,6 +1016,22 @@ func putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError {

vars := mux.Vars(r)
repoID := vars["repoid"]

var fileNumber int64

if repoFileNumberLimit >= 0 {
vInfo, _ := repomgr.GetVirtualRepoInfo(repoID)
rRepoID := repoID
if vInfo != nil {
rRepoID = vInfo.OriginRepoID
}
fileNumber, _ = repomgr.GetFileNumber(rRepoID)
if fileNumber >= repoFileNumberLimit {
msg := "Too many files in library.\n"
return &appError{nil, msg, http.StatusForbidden}
}
}

user, appErr := validateToken(r, repoID, false)
if appErr != nil {
return appErr
@@ -1042,6 +1060,14 @@ func putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError {
return &appError{err, "", http.StatusInternalServerError}
}

if repoFileNumberLimit >= 0 {
addedNumber := calculateNumberOfAddedFile(base, newCommit)
if fileNumber+addedNumber >= repoFileNumberLimit {
msg := "Too many files in library.\n"
return &appError{nil, msg, http.StatusForbidden}
}
}

ret, err := checkQuota(repoID, 0)
if err != nil {
err := fmt.Errorf("Failed to check quota: %v", err)
@@ -1065,6 +1091,28 @@ func putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError {
return nil
}

func calculateNumberOfAddedFile(baseCommit, newCommit *commitmgr.Commit) int64 {
if repoFileNumberLimit < 0 {
return 0
}
var results []*diff.DiffEntry
if err := diff.DiffCommits(baseCommit, newCommit, &results, true); err != nil {
log.Printf("Failed to diff commits: %v", err)
return 0
}

var fileNumber int64
for _, entry := range results {
if entry.Status == diff.DiffStatusAdded {
fileNumber++
} else if entry.Status == diff.DiffStatusDeleted {
fileNumber--
}
}

return fileNumber
}

func getHeadCommit(rsp http.ResponseWriter, r *http.Request) *appError {
vars := mux.Vars(r)
repoID := vars["repoid"]
2 changes: 2 additions & 0 deletions include/seafile-error.h
@@ -22,6 +22,8 @@
#define POST_FILE_ERR_FILENAME 517
#define POST_FILE_ERR_BLOCK_MISSING 518
#define POST_FILE_ERR_QUOTA_FULL 519

#define SEAF_ERR_CONCURRENT_UPLOAD 520
#define SEAF_ERR_TOO_MANY_FILES 521

#endif
55 changes: 55 additions & 0 deletions server/http-server.c
@@ -969,6 +969,40 @@ gen_merge_description (SeafRepo *repo,
return desc;
}

static gint64
calculate_number_of_added_file (SeafCommit *base_commit, SeafCommit *new_commit) {
gint64 file_number = 0;
GList *diff_entries = NULL;

if (seaf->repo_file_number_limit < 0) {
return 0;
}

int rc = diff_commits (base_commit, new_commit, &diff_entries, TRUE);
if (rc < 0) {
seaf_warning ("Failed to calculate number of new file.\n");
return 0;
}

GList *ptr;
DiffEntry *diff_entry;
for (ptr = diff_entries; ptr; ptr = ptr->next) {
diff_entry = ptr->data;
if (diff_entry->status == DIFF_STATUS_ADDED) {
file_number++;
} else if (diff_entry->status == DIFF_STATUS_DELETED) {
file_number--;
}
diff_entry_free ((DiffEntry *)ptr->data);
}

if (diff_entries) {
g_list_free (diff_entries);
}

return file_number;
}

static int
fast_forward_or_merge (const char *repo_id,
SeafCommit *base,
@@ -1101,6 +1135,7 @@ put_update_branch_cb (evhtp_request_t *req, void *arg)
char *username = NULL;
SeafRepo *repo = NULL;
SeafCommit *new_commit = NULL, *base = NULL;
gint64 file_number = 0;

const char *new_commit_id = evhtp_kv_find (req->uri->query, "head");
if (new_commit_id == NULL || !is_object_id_valid (new_commit_id)) {
@@ -1111,6 +1146,16 @@ put_update_branch_cb (evhtp_request_t *req, void *arg)
parts = g_strsplit (req->uri->path->full + 1, "/", 0);
repo_id = parts[1];

if (seaf->repo_file_number_limit >= 0) {
file_number = seaf_get_origin_repo_file_number (repo_id);
if (file_number >= seaf->repo_file_number_limit) {
char *error = "Too many files in library.\n";
evbuffer_add (req->buffer_out, error, strlen (error));
evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);
goto out;
}
}

int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);
if (token_status != EVHTP_RES_OK) {
evhtp_send_reply (req, token_status);
@@ -1148,6 +1193,16 @@ put_update_branch_cb (evhtp_request_t *req, void *arg)
goto out;
}

if (seaf->repo_file_number_limit >= 0) {
gint64 new_file_number = calculate_number_of_added_file (base, new_commit);
if (file_number + new_file_number >= seaf->repo_file_number_limit) {
char *error = "Too many files in library.\n";
evbuffer_add (req->buffer_out, error, strlen (error));
evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);
goto out;
}
}

if (seaf_quota_manager_check_quota (seaf->quota_mgr, repo_id) < 0) {
evhtp_send_reply (req, SEAF_HTTP_RES_NOQUOTA);
goto out;
22 changes: 22 additions & 0 deletions server/repo-mgr.c
@@ -4189,6 +4189,28 @@ seaf_repo_manager_edit_repo (const char *repo_id,
return ret;
}

gint64
seaf_get_repo_file_number(const char *repo_id)
{
char *sql = "SELECT file_count FROM RepoFileCount WHERE repo_id = ?";

return seaf_db_statement_get_int64 (seaf->db, sql, 1, "string", repo_id);
}

gint64
seaf_get_origin_repo_file_number(const char *repo_id)
{
const char *r_repo_id = repo_id;
SeafVirtRepo *vinfo = seaf_repo_manager_get_virtual_repo_info(seaf->repo_mgr, repo_id);
if (vinfo) {
r_repo_id = vinfo->origin_repo_id;
seaf_virtual_repo_info_free(vinfo);
}
gint64 file_number = seaf_get_repo_file_number (r_repo_id);

return file_number;
}

gboolean
get_total_file_number_cb (SeafDBRow *row, void *vdata)
{
6 changes: 6 additions & 0 deletions server/repo-mgr.h
@@ -834,6 +834,12 @@ seaf_repo_manager_edit_repo (const char *repo_id,
const char *user,
GError **error);

gint64
seaf_get_repo_file_number(const char *repo_id);

gint64
seaf_get_origin_repo_file_number(const char *repo_id);

gint64
seaf_get_total_file_number (GError **error);

10 changes: 10 additions & 0 deletions server/seafile-session.c
@@ -48,6 +48,7 @@ seafile_session_new(const char *central_config_dir,
char *notif_server = NULL;
int notif_port = 8083;
char *private_key = NULL;
gint64 repo_file_number_limit = -1;

abs_ccnet_dir = ccnet_expand_path (ccnet_dir);
abs_seafile_dir = ccnet_expand_path (seafile_dir);
@@ -127,6 +128,15 @@
g_free (type);
}

repo_file_number_limit = g_key_file_get_int64 (config,
"quota", "library_file_number",
&error);
if (error) {
g_clear_error(&error);
repo_file_number_limit = -1;
}
session->repo_file_number_limit = repo_file_number_limit;

notif_enabled = g_key_file_get_boolean (config,
"notification", "enabled",
NULL);
2 changes: 2 additions & 0 deletions server/seafile-session.h
@@ -79,6 +79,8 @@ struct _SeafileSession {
gboolean ccnet_create_tables;

gboolean go_fileserver;
// Used to limit the number of files in the repo.
gint64 repo_file_number_limit;

// For notification server
NotifManager *notif_mgr;
10 changes: 10 additions & 0 deletions server/web-accesstoken-mgr.c
@@ -157,6 +157,16 @@ seaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr,
return NULL;
}

if ((strcmp(op, "upload") == 0 || strcmp(op, "upload-link") == 0) &&
seaf->repo_file_number_limit >= 0) {
gint64 file_number = seaf_get_origin_repo_file_number (repo_id);
if (file_number >= seaf->repo_file_number_limit) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_TOO_MANY_FILES,
"Too many files in library.");
return NULL;
}
}

pthread_mutex_lock (&mgr->priv->lock);

t = gen_new_token (mgr->priv->access_token_hash);