xbps_file_hash: reduce memory footprint and process file in small chunks.

With xbps-rindex(8) -c/-r being multithreaded, memory usage on the build
server was insanely high, with peaks of almost 4GB (this depends on the
size of the files being processed and the number of threads).

Process the file in small chunks instead to compute the SHA256 hash.
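
Below is a minimal sketch of that chunked approach, for illustration only: it
uses the same legacy OpenSSL SHA256_Init/Update/Final API and 256-byte stack
buffer the diff below switches to, but the helper name hash_file_chunked()
and its simplified error handling are assumptions, not part of xbps.

/*
 * Hash a file incrementally in small chunks instead of slurping it
 * into a heap buffer as large as the file itself.
 */
#include <fcntl.h>
#include <unistd.h>
#include <openssl/sha.h>

static int
hash_file_chunked(const char *file, unsigned char digest[SHA256_DIGEST_LENGTH])
{
	unsigned char buf[256];
	SHA256_CTX ctx;
	ssize_t ret;
	int fd;

	if ((fd = open(file, O_RDONLY)) == -1)
		return -1;

	SHA256_Init(&ctx);
	/* Feed the file to the digest context 256 bytes at a time. */
	while ((ret = read(fd, buf, sizeof(buf))) > 0)
		SHA256_Update(&ctx, buf, ret);
	if (ret == -1) {
		/* read(2) failed; no heap memory to release. */
		(void)close(fd);
		return -1;
	}
	SHA256_Final(digest, &ctx);
	(void)close(fd);
	return 0;
}
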
Juan RP 2014-09-17 06:50:46 +02:00
parent 94b9455175
commit 72c64c7626
2 changed files with 19 additions and 18 deletions

NEWS (+4)

@@ -1,5 +1,9 @@
 xbps-0.40 (???):
 
+ * Reduced considerably memory requirement when checking SHA256 hashes on
+   files. Previous way of allocating heap memory as big as the file being
+   processed wasn't a great idea.
+
  * xbps-{install,remove}: the post-install/pre-remove messages are now
    printed with some guards, i.e:

@@ -61,17 +61,16 @@ char *
 xbps_file_hash(const char *file)
 {
 	struct stat st;
+	SHA256_CTX ctx;
 	char hash[SHA256_DIGEST_LENGTH * 2 + 1];
-	unsigned char *buf = NULL, digest[SHA256_DIGEST_LENGTH];
+	unsigned char digest[SHA256_DIGEST_LENGTH];
+	ssize_t ret;
+	unsigned char buf[256];
 	int fd;
 	assert(file != NULL);
-	if ((fd = open(file, O_RDONLY|O_CLOEXEC)) == -1) {
-		free(buf);
+	if ((fd = open(file, O_RDONLY)) == -1)
 		return NULL;
-	}
 	memset(&st, 0, sizeof(st));
 	if (fstat(fd, &st) == -1) {
 		(void)close(fd);
 		return NULL;
@@ -81,20 +80,18 @@ xbps_file_hash(const char *file)
 		return NULL;
 	}
-	buf = malloc(st.st_size);
-	assert(buf);
+	SHA256_Init(&ctx);
+	while ((ret = read(fd, buf, sizeof(buf))) > 0)
+		SHA256_Update(&ctx, buf, ret);
 
-	if (read(fd, buf, st.st_size) != st.st_size) {
-		free(buf);
-		(void)close(fd);
-		return NULL;
-	}
-	(void)close(fd);
-	if (SHA256(buf, st.st_size, digest) == NULL) {
-		free(buf);
+	if (ret == -1) {
+		/* read error */
+		(void)close(fd);
 		return NULL;
 	}
-	free(buf);
+	SHA256_Final(digest, &ctx);
+	(void)close(fd);
 	digest2string(digest, hash, SHA256_DIGEST_LENGTH);
 	return strdup(hash);
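
For context, a hedged usage sketch of the function this diff touches:
xbps_file_hash() returns a heap-allocated hex string on success (the
strdup(hash) above) and NULL on error, so the caller must free it. The
verify_file() helper and the <xbps.h> include are illustrative assumptions,
not code from this commit.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xbps.h>

static int
verify_file(const char *path, const char *expected_sha256)
{
	char *hash;
	int match;

	/* Compute the SHA256 hex digest of path (64 hex chars + NUL). */
	if ((hash = xbps_file_hash(path)) == NULL) {
		fprintf(stderr, "failed to hash %s\n", path);
		return -1;
	}
	match = strcmp(hash, expected_sha256) == 0;
	free(hash);	/* returned string is strdup(3)'d by xbps_file_hash() */
	return match ? 0 : 1;
}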