Skip to content

Commit 9eb5419

Browse files
Eric W. Biederman, gitster
Eric W. Biederman
authored and committed
bulk-checkin: only support blobs in index_bulk_checkin
As the code is written today index_bulk_checkin only accepts blobs. Remove the enum object_type parameter and rename index_bulk_checkin to index_blob_bulk_checkin, index_stream to index_blob_stream, deflate_to_pack to deflate_blob_to_pack, stream_to_pack to stream_blob_to_pack, to make this explicit. Not supporting commits, tags, or trees has no downside as it is not currently supported now, and commits, tags, and trees being smaller by design do not have the problem that the problem that index_bulk_checkin was built to solve. Before we start adding code to support the hash function transition supporting additional objects types in index_bulk_checkin has no real additional cost, just an extra function parameter to know what the object type is. Once we begin the hash function transition this is not the case. The hash function transition document specifies that a repository with compatObjectFormat enabled will compute and store both the SHA-1 and SHA-256 hash of every object in the repository. What makes this a challenge is that it is not just an additional hash over the same object. Instead the hash function transition document specifies that the compatibility hash (specified with compatObjectFormat) be computed over the equivalent object that another git repository whose storage hash (specified with objectFormat) would store. When comparing equivalent repositories built with different storage hash functions, the oids embedded in objects used to refer to other objects differ and the location of signatures within objects differ. As blob objects have neither oids referring to other objects nor stored signatures their storage hash and their compatibility hash are computed over the same object. The other kinds of objects: trees, commits, and tags, all store oids referring to other objects. Signatures are stored in commit and tag objects. 
As oids and the tags to store signatures are not the same size in repositories built with different storage hashes, the sizes of the equivalent objects are also different. A version of index_bulk_checkin that supports more than just blobs when computing both the SHA-1 and the SHA-256 of every object added would need a different, and more expensive structure. The structure is more expensive because it would be required to temporarily buffer the equivalent object the compatibility hash needs to be computed over. A temporary object is needed, because before a hash over an object can be computed its object header needs to be computed. One of the members of the object header is the entire size of the object. To know the size of an equivalent object an entire pass over the original object needs to be made, as trees, commits, and tags are composed of a variable number of variable sized pieces. Unfortunately there is no formula to compute the size of an equivalent object from just the size of the original object. Avoid all of those future complications by limiting index_bulk_checkin to only work on blobs. Inspired-by: brian m. carlson <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
1 parent 43c8a30 commit 9eb5419

File tree

3 files changed

+26
-27
lines changed

3 files changed

+26
-27
lines changed

Diff for: bulk-checkin.c

+17-18
Original file line numberDiff line numberDiff line change
@@ -155,10 +155,10 @@ static int already_written(struct bulk_checkin_packfile *state, struct object_id
155155
* status before calling us just in case we ask it to call us again
156156
* with a new pack.
157157
*/
158-
static int stream_to_pack(struct bulk_checkin_packfile *state,
159-
git_hash_ctx *ctx, off_t *already_hashed_to,
160-
int fd, size_t size, enum object_type type,
161-
const char *path, unsigned flags)
158+
static int stream_blob_to_pack(struct bulk_checkin_packfile *state,
159+
git_hash_ctx *ctx, off_t *already_hashed_to,
160+
int fd, size_t size, const char *path,
161+
unsigned flags)
162162
{
163163
git_zstream s;
164164
unsigned char ibuf[16384];
@@ -170,7 +170,7 @@ static int stream_to_pack(struct bulk_checkin_packfile *state,
170170

171171
git_deflate_init(&s, pack_compression_level);
172172

173-
hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
173+
hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
174174
s.next_out = obuf + hdrlen;
175175
s.avail_out = sizeof(obuf) - hdrlen;
176176

@@ -247,11 +247,10 @@ static void prepare_to_stream(struct bulk_checkin_packfile *state,
247247
die_errno("unable to write pack header");
248248
}
249249

250-
static int deflate_to_pack(struct bulk_checkin_packfile *state,
251-
struct object_id *result_oid,
252-
int fd, size_t size,
253-
enum object_type type, const char *path,
254-
unsigned flags)
250+
static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
251+
struct object_id *result_oid,
252+
int fd, size_t size,
253+
const char *path, unsigned flags)
255254
{
256255
off_t seekback, already_hashed_to;
257256
git_hash_ctx ctx;
@@ -265,7 +264,7 @@ static int deflate_to_pack(struct bulk_checkin_packfile *state,
265264
return error("cannot find the current offset");
266265

267266
header_len = format_object_header((char *)obuf, sizeof(obuf),
268-
type, size);
267+
OBJ_BLOB, size);
269268
the_hash_algo->init_fn(&ctx);
270269
the_hash_algo->update_fn(&ctx, obuf, header_len);
271270

@@ -282,8 +281,8 @@ static int deflate_to_pack(struct bulk_checkin_packfile *state,
282281
idx->offset = state->offset;
283282
crc32_begin(state->f);
284283
}
285-
if (!stream_to_pack(state, &ctx, &already_hashed_to,
286-
fd, size, type, path, flags))
284+
if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
285+
fd, size, path, flags))
287286
break;
288287
/*
289288
* Writing this object to the current pack will make
@@ -350,12 +349,12 @@ void fsync_loose_object_bulk_checkin(int fd, const char *filename)
350349
}
351350
}
352351

353-
int index_bulk_checkin(struct object_id *oid,
354-
int fd, size_t size, enum object_type type,
355-
const char *path, unsigned flags)
352+
int index_blob_bulk_checkin(struct object_id *oid,
353+
int fd, size_t size,
354+
const char *path, unsigned flags)
356355
{
357-
int status = deflate_to_pack(&bulk_checkin_packfile, oid, fd, size, type,
358-
path, flags);
356+
int status = deflate_blob_to_pack(&bulk_checkin_packfile, oid, fd, size,
357+
path, flags);
359358
if (!odb_transaction_nesting)
360359
flush_bulk_checkin_packfile(&bulk_checkin_packfile);
361360
return status;

Diff for: bulk-checkin.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@
99
void prepare_loose_object_bulk_checkin(void);
1010
void fsync_loose_object_bulk_checkin(int fd, const char *filename);
1111

12-
int index_bulk_checkin(struct object_id *oid,
13-
int fd, size_t size, enum object_type type,
14-
const char *path, unsigned flags);
12+
int index_blob_bulk_checkin(struct object_id *oid,
13+
int fd, size_t size,
14+
const char *path, unsigned flags);
1515

1616
/*
1717
* Tell the object database to optimize for adding

Diff for: object-file.c

+6-6
Original file line numberDiff line numberDiff line change
@@ -2446,11 +2446,11 @@ static int index_core(struct index_state *istate,
24462446
* binary blobs, they generally do not want to get any conversion, and
24472447
* callers should avoid this code path when filters are requested.
24482448
*/
2449-
static int index_stream(struct object_id *oid, int fd, size_t size,
2450-
enum object_type type, const char *path,
2451-
unsigned flags)
2449+
static int index_blob_stream(struct object_id *oid, int fd, size_t size,
2450+
const char *path,
2451+
unsigned flags)
24522452
{
2453-
return index_bulk_checkin(oid, fd, size, type, path, flags);
2453+
return index_blob_bulk_checkin(oid, fd, size, path, flags);
24542454
}
24552455

24562456
int index_fd(struct index_state *istate, struct object_id *oid,
@@ -2472,8 +2472,8 @@ int index_fd(struct index_state *istate, struct object_id *oid,
24722472
ret = index_core(istate, oid, fd, xsize_t(st->st_size),
24732473
type, path, flags);
24742474
else
2475-
ret = index_stream(oid, fd, xsize_t(st->st_size), type, path,
2476-
flags);
2475+
ret = index_blob_stream(oid, fd, xsize_t(st->st_size), path,
2476+
flags);
24772477
close(fd);
24782478
return ret;
24792479
}

0 commit comments

Comments
 (0)