library: improve and/or standardize 'errno' management
With older library logic having been modified to avoid using those potentially deadly alloc.h routines, while improving 'errno' handling, we're ready to standardize and enhance newlib's approach to any potential errors. In so doing, we'll establish the following objectives:

  functions returning an 'int':
    an error will be indicated by a negative number that
    is always the inverse of some well known errno.h value

  functions returning an 'address':
    any error will be indicated by a NULL return pointer
    with the actual reason found in the formal errno value

And, when errno is manipulated directly we will strive to do so whenever possible within those routines which have been declared with PROCPS_EXPORT. In other words, in the user callable functions defined in source last.

[ But, that won't always be possible. In particular,  ]
[ all the 'read_failed' functions will sometimes set  ]
[ 'errno' so that they can serve callers returning a  ]
[ NULL or an int without duplicating a lot of logic.  ]
[ Also, that includes one subordinate function which  ]
[ was called by 'read_failed' in the <slabinfo> API.  ]

Along the way, several additional miscellaneous issues were addressed. They're listed here now for posterity.

. the '-1' return value passed outside the library was
  eliminated since it would erroneously equate to -EPERM
. the stacks_fetch functions in <diskstats> and <stat>
  weren't checked for their possible minus return values
. hash create was not checked in <meminfo> or <vmstat>
. fixed 'new' function faulty parm check in <slabinfo>

Signed-off-by: Jim Warner <james.warner@comcast.net>
This commit is contained in:
parent
7453f8719b
commit
06be33b43e
@ -390,13 +390,13 @@ static int node_update (
|
||||
|
||||
if (!target) {
|
||||
if (!(target = malloc(sizeof(struct dev_node))))
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
memcpy(target, source, sizeof(struct dev_node));
|
||||
// let's not distort the deltas when a new node is created ...
|
||||
memcpy(&target->old, &target->new, sizeof(struct dev_data));
|
||||
node_classify(target);
|
||||
node_add(info, target);
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
// remember history from last time around ...
|
||||
memcpy(&source->old, &target->new, sizeof(struct dev_data));
|
||||
@ -405,7 +405,7 @@ static int node_update (
|
||||
source->next = target->next;
|
||||
// finally 'update' the existing node struct ...
|
||||
memcpy(target, source, sizeof(struct dev_node));
|
||||
return 0;
|
||||
return 1;
|
||||
} // end: node_update
|
||||
|
||||
|
||||
@ -517,14 +517,14 @@ static inline int diskstats_items_check_failed (
|
||||
*/
|
||||
if (numitems < 1
|
||||
|| (void *)items < (void *)(unsigned long)(2 * DISKSTATS_logical_end))
|
||||
return -1;
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < numitems; i++) {
|
||||
// a diskstats_item is currently unsigned, but we'll protect our future
|
||||
if (items[i] < 0)
|
||||
return -1;
|
||||
return 1;
|
||||
if (items[i] >= DISKSTATS_logical_end)
|
||||
return -1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -539,7 +539,7 @@ static inline int diskstats_items_check_failed (
|
||||
* Read the data out of /proc/diskstats putting the information
|
||||
* into the supplied info structure
|
||||
*
|
||||
* Returns: 0 on success, negative on error
|
||||
* Returns: 0 on success, 1 on error
|
||||
*/
|
||||
static int diskstats_read_failed (
|
||||
struct diskstats_info *info)
|
||||
@ -552,10 +552,10 @@ static int diskstats_read_failed (
|
||||
|
||||
if (!info->diskstats_fp
|
||||
&& (!(info->diskstats_fp = fopen(DISKSTATS_FILE, "r"))))
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
if (fseek(info->diskstats_fp, 0L, SEEK_SET) == -1)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
info->old_stamp = info->new_stamp;
|
||||
info->new_stamp = time(NULL);
|
||||
@ -581,13 +581,12 @@ static int diskstats_read_failed (
|
||||
, &node.new.io_wtime);
|
||||
|
||||
if (rc != 14) {
|
||||
if (errno != 0)
|
||||
return -errno;
|
||||
return -EIO;
|
||||
errno = ERANGE;
|
||||
return 1;
|
||||
}
|
||||
node.stamped = info->new_stamp;
|
||||
if ((rc = node_update(info, &node)))
|
||||
return rc;
|
||||
if (!node_update(info, &node))
|
||||
return 1; // here, errno was set to ENOMEM
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -672,7 +671,7 @@ static int diskstats_stacks_fetch (
|
||||
}
|
||||
if (!info->fetch_ext.extents) {
|
||||
if (!(ext = diskstats_stacks_alloc(&info->fetch_ext, n_alloc)))
|
||||
return -ENOMEM;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memset(info->fetch.anchor, 0, sizeof(void *) * n_alloc);
|
||||
memcpy(info->fetch.anchor, ext->stacks, sizeof(void *) * n_alloc);
|
||||
diskstats_itemize_stacks_all(&info->fetch_ext);
|
||||
@ -687,7 +686,7 @@ static int diskstats_stacks_fetch (
|
||||
n_alloc += STACKS_INCR;
|
||||
if ((!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc)))
|
||||
|| (!(ext = diskstats_stacks_alloc(&info->fetch_ext, STACKS_INCR))))
|
||||
return -1;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memcpy(info->fetch.anchor + n_inuse, ext->stacks, sizeof(void *) * STACKS_INCR);
|
||||
}
|
||||
diskstats_assign_results(info->fetch.anchor[n_inuse], node);
|
||||
@ -703,7 +702,7 @@ static int diskstats_stacks_fetch (
|
||||
if (n_saved < n_inuse + 1) {
|
||||
n_saved = n_inuse + 1;
|
||||
if (!(info->fetch.results.stacks = realloc(info->fetch.results.stacks, sizeof(void *) * n_saved)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
memcpy(info->fetch.results.stacks, info->fetch.anchor, sizeof(void *) * n_inuse);
|
||||
info->fetch.results.stacks[n_inuse] = NULL;
|
||||
@ -722,14 +721,14 @@ static int diskstats_stacks_reconfig_maybe (
|
||||
int numitems)
|
||||
{
|
||||
if (diskstats_items_check_failed(this, items, numitems))
|
||||
return -EINVAL;
|
||||
return -1;
|
||||
/* is this the first time or have things changed since we were last called?
|
||||
if so, gotta' redo all of our stacks stuff ... */
|
||||
if (this->numitems != numitems + 1
|
||||
|| memcmp(this->items, items, sizeof(enum diskstats_item) * numitems)) {
|
||||
// allow for our DISKSTATS_logical_end
|
||||
if (!(this->items = realloc(this->items, sizeof(enum diskstats_item) * (numitems + 1))))
|
||||
return -ENOMEM;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memcpy(this->items, items, sizeof(enum diskstats_item) * numitems);
|
||||
this->items[numitems] = DISKSTATS_logical_end;
|
||||
this->numitems = numitems + 1;
|
||||
@ -757,11 +756,9 @@ PROCPS_EXPORT int procps_diskstats_new (
|
||||
struct diskstats_info **info)
|
||||
{
|
||||
struct diskstats_info *p;
|
||||
int rc;
|
||||
|
||||
if (info == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(p = calloc(1, sizeof(struct diskstats_info))))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -771,9 +768,9 @@ PROCPS_EXPORT int procps_diskstats_new (
|
||||
1) ensure there will be no problems with subsequent access |
|
||||
2) make delta results potentially useful, even if 1st time |
|
||||
3) elimnate need for history distortions 1st time 'switch' | */
|
||||
if ((rc = diskstats_read_failed(p))) {
|
||||
if (diskstats_read_failed(p)) {
|
||||
procps_diskstats_unref(&p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
*info = p;
|
||||
@ -803,6 +800,8 @@ PROCPS_EXPORT int procps_diskstats_unref (
|
||||
(*info)->refcount--;
|
||||
|
||||
if ((*info)->refcount < 1) {
|
||||
int errno_sav = errno;
|
||||
|
||||
if ((*info)->diskstats_fp) {
|
||||
fclose((*info)->diskstats_fp);
|
||||
(*info)->diskstats_fp = NULL;
|
||||
@ -830,6 +829,8 @@ PROCPS_EXPORT int procps_diskstats_unref (
|
||||
|
||||
free(*info);
|
||||
*info = NULL;
|
||||
|
||||
errno = errno_sav;
|
||||
return 0;
|
||||
}
|
||||
return (*info)->refcount;
|
||||
@ -846,10 +847,12 @@ PROCPS_EXPORT struct diskstats_result *procps_diskstats_get (
|
||||
struct dev_node *node;
|
||||
time_t cur_secs;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (item < 0 || item >= DISKSTATS_logical_end)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* we will NOT read the diskstat file with every call - rather, we'll offer
|
||||
a granularity of 1 second between reads ... */
|
||||
@ -860,12 +863,13 @@ PROCPS_EXPORT struct diskstats_result *procps_diskstats_get (
|
||||
}
|
||||
|
||||
info->get_this.item = item;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
// if (item > DISKSTATS_noop)
|
||||
info->get_this.result.ul_int = 0;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
info->get_this.result.ul_int = 0;
|
||||
|
||||
if (!(node = node_get(info, name)))
|
||||
if (!(node = node_get(info, name))) {
|
||||
errno = ENXIO;
|
||||
return NULL;
|
||||
}
|
||||
Item_table[item].setsfunc(&info->get_this, node);
|
||||
|
||||
return &info->get_this;
|
||||
@ -884,18 +888,20 @@ PROCPS_EXPORT struct diskstats_reap *procps_diskstats_reap (
|
||||
enum diskstats_item *items,
|
||||
int numitems)
|
||||
{
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
|
||||
if (0 > diskstats_stacks_reconfig_maybe(&info->fetch_ext, items, numitems))
|
||||
return NULL;
|
||||
return NULL; // here, errno may be overridden with ENOMEM
|
||||
errno = 0;
|
||||
|
||||
if (info->fetch_ext.dirty_stacks)
|
||||
diskstats_cleanup_stacks_all(&info->fetch_ext);
|
||||
|
||||
if (diskstats_read_failed(info))
|
||||
return NULL;
|
||||
diskstats_stacks_fetch(info);
|
||||
if (0 > diskstats_stacks_fetch(info))
|
||||
return NULL;
|
||||
info->fetch_ext.dirty_stacks = 1;
|
||||
|
||||
return &info->fetch.results;
|
||||
@ -917,11 +923,12 @@ PROCPS_EXPORT struct diskstats_stack *procps_diskstats_select (
|
||||
{
|
||||
struct dev_node *node;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
|
||||
if (0 > diskstats_stacks_reconfig_maybe(&info->select_ext, items, numitems))
|
||||
return NULL;
|
||||
return NULL; // here, errno may be overridden with ENOMEM
|
||||
errno = 0;
|
||||
|
||||
if (!info->select_ext.extents
|
||||
&& (!diskstats_stacks_alloc(&info->select_ext, 1)))
|
||||
@ -932,8 +939,10 @@ PROCPS_EXPORT struct diskstats_stack *procps_diskstats_select (
|
||||
|
||||
if (diskstats_read_failed(info))
|
||||
return NULL;
|
||||
if (!(node = node_get(info, name)))
|
||||
if (!(node = node_get(info, name))) {
|
||||
errno = ENXIO;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
diskstats_assign_results(info->select_ext.extents->stacks[0], node);
|
||||
info->select_ext.dirty_stacks = 1;
|
||||
@ -963,9 +972,9 @@ PROCPS_EXPORT struct diskstats_stack **procps_diskstats_sort (
|
||||
struct sort_parms parms;
|
||||
int offset;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || stacks == NULL)
|
||||
return NULL;
|
||||
|
||||
// a diskstats_item is currently unsigned, but we'll protect our future
|
||||
if (sortitem < 0 || sortitem >= DISKSTATS_logical_end)
|
||||
return NULL;
|
||||
@ -984,9 +993,9 @@ PROCPS_EXPORT struct diskstats_stack **procps_diskstats_sort (
|
||||
return NULL;
|
||||
++p;
|
||||
}
|
||||
parms.offset = offset;
|
||||
parms.order = order;
|
||||
errno = 0;
|
||||
|
||||
parms.order = order;
|
||||
qsort_r(stacks, numstacked, sizeof(void *), (QSR_t)Item_table[p->item].sortfunc, &parms);
|
||||
return stacks;
|
||||
} // end: procps_diskstats_sort
|
||||
|
@ -471,14 +471,14 @@ static inline int meminfo_items_check_failed (
|
||||
*/
|
||||
if (numitems < 1
|
||||
|| (void *)items < (void *)(unsigned long)(2 * MEMINFO_logical_end))
|
||||
return -1;
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < numitems; i++) {
|
||||
// a meminfo_item is currently unsigned, but we'll protect our future
|
||||
if (items[i] < 0)
|
||||
return -1;
|
||||
return 1;
|
||||
if (items[i] >= MEMINFO_logical_end)
|
||||
return -1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -489,16 +489,17 @@ static int meminfo_make_hash_failed (
|
||||
struct meminfo_info *info)
|
||||
{
|
||||
#define htVAL(f) e.key = STRINGIFY(f) ":"; e.data = &info->hist.new. f; \
|
||||
if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return -errno;
|
||||
if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return 1;
|
||||
#define htXTRA(k,f) e.key = STRINGIFY(k) ":"; e.data = &info->hist.new. f; \
|
||||
if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return -errno;
|
||||
if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return 1;
|
||||
ENTRY e, *ep;
|
||||
size_t n;
|
||||
|
||||
// will also include those derived fields (more is better)
|
||||
n = sizeof(struct meminfo_data) / sizeof(unsigned long);
|
||||
// we'll follow the hsearch recommendation of an extra 25%
|
||||
hcreate_r(n + (n / 4), &info->hashtab);
|
||||
if (!hcreate_r(n + (n / 4), &info->hashtab))
|
||||
return 1;
|
||||
|
||||
htVAL(Active)
|
||||
htXTRA(Active(anon), Active_anon)
|
||||
@ -587,21 +588,23 @@ static int meminfo_read_failed (
|
||||
|
||||
if (-1 == info->meminfo_fd
|
||||
&& (info->meminfo_fd = open(MEMINFO_FILE, O_RDONLY)) == -1)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
if (lseek(info->meminfo_fd, 0L, SEEK_SET) == -1)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
for (;;) {
|
||||
if ((size = read(info->meminfo_fd, buf, sizeof(buf)-1)) < 0) {
|
||||
if (errno == EINTR || errno == EAGAIN)
|
||||
continue;
|
||||
return -errno;
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (size == 0)
|
||||
return -1;
|
||||
if (size == 0) {
|
||||
errno = EIO;
|
||||
return 1;
|
||||
}
|
||||
buf[size] = '\0';
|
||||
|
||||
head = buf;
|
||||
@ -744,7 +747,6 @@ PROCPS_EXPORT int procps_meminfo_new (
|
||||
struct meminfo_info **info)
|
||||
{
|
||||
struct meminfo_info *p;
|
||||
int rc;
|
||||
|
||||
if (info == NULL || *info != NULL)
|
||||
return -EINVAL;
|
||||
@ -754,18 +756,18 @@ PROCPS_EXPORT int procps_meminfo_new (
|
||||
p->refcount = 1;
|
||||
p->meminfo_fd = -1;
|
||||
|
||||
if ((rc = meminfo_make_hash_failed(p))) {
|
||||
if (meminfo_make_hash_failed(p)) {
|
||||
free(p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
/* do a priming read here for the following potential benefits: |
|
||||
1) ensure there will be no problems with subsequent access |
|
||||
2) make delta results potentially useful, even if 1st time |
|
||||
3) elimnate need for history distortions 1st time 'switch' | */
|
||||
if ((rc = meminfo_read_failed(p))) {
|
||||
if (meminfo_read_failed(p)) {
|
||||
procps_meminfo_unref(&p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
*info = p;
|
||||
@ -793,6 +795,8 @@ PROCPS_EXPORT int procps_meminfo_unref (
|
||||
(*info)->refcount--;
|
||||
|
||||
if ((*info)->refcount < 1) {
|
||||
int errno_sav = errno;
|
||||
|
||||
if ((*info)->extents)
|
||||
meminfo_extents_free_all((*info));
|
||||
if ((*info)->items)
|
||||
@ -801,6 +805,8 @@ PROCPS_EXPORT int procps_meminfo_unref (
|
||||
|
||||
free(*info);
|
||||
*info = NULL;
|
||||
|
||||
errno = errno_sav;
|
||||
return 0;
|
||||
}
|
||||
return (*info)->refcount;
|
||||
@ -816,10 +822,12 @@ PROCPS_EXPORT struct meminfo_result *procps_meminfo_get (
|
||||
static time_t sav_secs;
|
||||
time_t cur_secs;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (item < 0 || item >= MEMINFO_logical_end)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* we will NOT read the meminfo file with every call - rather, we'll offer
|
||||
a granularity of 1 second between reads ... */
|
||||
@ -831,9 +839,8 @@ PROCPS_EXPORT struct meminfo_result *procps_meminfo_get (
|
||||
}
|
||||
|
||||
info->get_this.item = item;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
// if (item > MEMINFO_noop)
|
||||
info->get_this.result.ul_int = 0;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
info->get_this.result.ul_int = 0;
|
||||
Item_table[item].setsfunc(&info->get_this, &info->hist);
|
||||
|
||||
return &info->get_this;
|
||||
@ -852,10 +859,12 @@ PROCPS_EXPORT struct meminfo_stack *procps_meminfo_select (
|
||||
enum meminfo_item *items,
|
||||
int numitems)
|
||||
{
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
if (meminfo_items_check_failed(numitems, items))
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* is this the first time or have things changed since we were last called?
|
||||
if so, gotta' redo all of our stacks stuff ... */
|
||||
|
96
proc/pids.c
96
proc/pids.c
@ -88,6 +88,7 @@ struct pids_info {
|
||||
PROCTAB *get_PT; // oldlib interface for active 'get'
|
||||
struct stacks_extent *get_ext; // an extent used for active 'get'
|
||||
enum pids_fetch_type get_type; // last known type of 'get' request
|
||||
int seterr; // an ENOMEM encountered during assign
|
||||
};
|
||||
|
||||
|
||||
@ -101,7 +102,7 @@ static char** pids_vectorize_this (const char* src) {
|
||||
tot = strlen(src) + 1; // prep for our vectors
|
||||
adj = (pSZ-1) - ((tot + pSZ-1) & (pSZ-1)); // calc alignment bytes
|
||||
cpy = calloc(1, tot + adj + (2 * pSZ)); // get new larger buffer
|
||||
if (!cpy) return NULL; // we no longer use xcalloc
|
||||
if (!cpy) return NULL; // oops, looks like ENOMEM
|
||||
snprintf(cpy, tot, "%s", src); // duplicate their string
|
||||
vec = (char**)(cpy + tot + adj); // prep pointer to pointers
|
||||
*vec = cpy; // point 1st vector to string
|
||||
@ -120,20 +121,22 @@ static char** pids_vectorize_this (const char* src) {
|
||||
R->result. t = (long)(P-> x) << I -> pgs2k_shift; }
|
||||
/* strdup of a static char array */
|
||||
#define DUP_set(e,x) setDECL(e) { \
|
||||
(void)I; R->result.str = strdup(P-> x); }
|
||||
if (!(R->result.str = strdup(P-> x))) I->seterr = 1; }
|
||||
/* regular assignment copy */
|
||||
#define REG_set(e,t,x) setDECL(e) { \
|
||||
(void)I; R->result. t = P-> x; }
|
||||
/* take ownership of a normal single string if possible, else return
|
||||
some sort of hint that they duplicated this char * item ... */
|
||||
#define STR_set(e,x) setDECL(e) { \
|
||||
(void)I; if (NULL != P-> x) { R->result.str = P-> x; P-> x = NULL; } \
|
||||
else R->result.str = strdup("[ duplicate " STRINGIFY(e) " ]"); }
|
||||
if (NULL != P-> x) { R->result.str = P-> x; P-> x = NULL; } \
|
||||
else { R->result.str = strdup("[ duplicate " STRINGIFY(e) " ]"); \
|
||||
if (!R->result.str) I->seterr = 1; } }
|
||||
/* take ownership of true vectorized strings if possible, else return
|
||||
some sort of hint that they duplicated this char ** item ... */
|
||||
#define VEC_set(e,x) setDECL(e) { \
|
||||
(void)I; if (NULL != P-> x) { R->result.strv = P-> x; P-> x = NULL; } \
|
||||
else R->result.strv = pids_vectorize_this("[ duplicate " STRINGIFY(e) " ]"); }
|
||||
if (NULL != P-> x) { R->result.strv = P-> x; P-> x = NULL; } \
|
||||
else { R->result.strv = pids_vectorize_this("[ duplicate " STRINGIFY(e) " ]"); \
|
||||
if (!R->result.str) I->seterr = 1; } }
|
||||
|
||||
|
||||
setDECL(noop) { (void)I; (void)R; (void)P; return; }
|
||||
@ -238,8 +241,8 @@ setDECL(TIME_ALL) { R->result.ull_int = (P->utime + P->stime) / I->hertz;
|
||||
setDECL(TIME_ELAPSED) { unsigned long long t = P->start_time / I->hertz; R->result.ull_int = I->boot_seconds >= t ? (I->boot_seconds - t) : 0; }
|
||||
REG_set(TIME_START, ull_int, start_time)
|
||||
REG_set(TTY, s_int, tty)
|
||||
setDECL(TTY_NAME) { char buf[64]; (void)I; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV); R->result.str = strdup(buf); }
|
||||
setDECL(TTY_NUMBER) { char buf[64]; (void)I; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV|ABBREV_TTY|ABBREV_PTS); R->result.str = strdup(buf); }
|
||||
setDECL(TTY_NAME) { char buf[64]; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV); if (!(R->result.str = strdup(buf))) I->seterr = 1; }
|
||||
setDECL(TTY_NUMBER) { char buf[64]; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV|ABBREV_TTY|ABBREV_PTS); if (!(R->result.str = strdup(buf))) I->seterr = 1; }
|
||||
REG_set(VM_DATA, ul_int, vm_data)
|
||||
REG_set(VM_EXE, ul_int, vm_exe)
|
||||
REG_set(VM_LIB, ul_int, vm_lib)
|
||||
@ -253,7 +256,7 @@ REG_set(VM_STACK, ul_int, vm_stack)
|
||||
REG_set(VM_SWAP, ul_int, vm_swap)
|
||||
setDECL(VM_USED) { (void)I; R->result.ul_int = P->vm_swap + P->vm_rss; }
|
||||
REG_set(VSIZE_PGS, ul_int, vsize)
|
||||
setDECL(WCHAN_NAME) { (void)I; R->result.str = strdup(lookup_wchan(P->tid)); }
|
||||
setDECL(WCHAN_NAME) { if (!(R->result.str = strdup(lookup_wchan(P->tid)))) I->seterr = 1;; }
|
||||
|
||||
#undef setDECL
|
||||
#undef CVT_set
|
||||
@ -626,7 +629,7 @@ static inline int pids_make_hist (
|
||||
Hr(PHist_sav) = realloc(Hr(PHist_sav), sizeof(HST_t) * Hr(HHist_siz));
|
||||
Hr(PHist_new) = realloc(Hr(PHist_new), sizeof(HST_t) * Hr(HHist_siz));
|
||||
if (!Hr(PHist_sav) || !Hr(PHist_new))
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
Hr(PHist_new[nSLOT].pid) = p->tid;
|
||||
Hr(PHist_new[nSLOT].maj) = p->maj_flt;
|
||||
@ -642,7 +645,7 @@ static inline int pids_make_hist (
|
||||
}
|
||||
|
||||
nSLOT++;
|
||||
return 0;
|
||||
return 1;
|
||||
#undef nSLOT
|
||||
} // end: pids_make_hist
|
||||
|
||||
@ -756,13 +759,14 @@ static void pids_unref_rpthash (
|
||||
|
||||
// ___ Standard Private Functions |||||||||||||||||||||||||||||||||||||||||||||
|
||||
|
||||
static inline void pids_assign_results (
|
||||
static inline int pids_assign_results (
|
||||
struct pids_info *info,
|
||||
struct pids_stack *stack,
|
||||
proc_t *p)
|
||||
{
|
||||
struct pids_result *this = stack->head;
|
||||
|
||||
info->seterr = 0;
|
||||
for (;;) {
|
||||
enum pids_item item = this->item;
|
||||
if (item >= PIDS_logical_end)
|
||||
@ -770,7 +774,7 @@ static inline void pids_assign_results (
|
||||
Item_table[item].setsfunc(info, this, p);
|
||||
++this;
|
||||
}
|
||||
return;
|
||||
return !info->seterr;
|
||||
} // end: pids_assign_results
|
||||
|
||||
|
||||
@ -880,13 +884,14 @@ static inline int pids_items_check_failed (
|
||||
*/
|
||||
if (numitems < 1
|
||||
|| (void *)items < (void *)0x8000) // twice as big as our largest enum
|
||||
return -1;
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < numitems; i++) {
|
||||
// a pids_item is currently unsigned, but we'll protect our future
|
||||
if (items[i] < 0)
|
||||
return -1;
|
||||
return 1;
|
||||
if (items[i] >= PIDS_logical_end) {
|
||||
return -1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@ -918,8 +923,10 @@ static inline void pids_oldproc_close (
|
||||
PROCTAB **this)
|
||||
{
|
||||
if (*this != NULL) {
|
||||
int errsav = errno;
|
||||
closeproc(*this);
|
||||
*this = NULL;
|
||||
errno = errsav;
|
||||
}
|
||||
} // end: pids_oldproc_close
|
||||
|
||||
@ -970,7 +977,7 @@ static inline int pids_proc_tally (
|
||||
++counts->total;
|
||||
|
||||
if (info->history_yes)
|
||||
return !pids_make_hist(info, p);
|
||||
return pids_make_hist(info, p);
|
||||
return 1;
|
||||
} // end: pids_proc_tally
|
||||
|
||||
@ -1048,12 +1055,12 @@ static int pids_stacks_fetch (
|
||||
// initialize stuff -----------------------------------
|
||||
if (!info->fetch.anchor) {
|
||||
if (!(info->fetch.anchor = calloc(sizeof(void *), STACKS_INCR)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
n_alloc = STACKS_INCR;
|
||||
}
|
||||
if (!info->extents) {
|
||||
if (!(ext = pids_stacks_alloc(info, n_alloc)))
|
||||
return -ENOMEM;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memset(info->fetch.anchor, 0, sizeof(void *) * n_alloc);
|
||||
memcpy(info->fetch.anchor, ext->stacks, sizeof(void *) * n_alloc);
|
||||
}
|
||||
@ -1066,15 +1073,21 @@ static int pids_stacks_fetch (
|
||||
while (info->read_something(info->fetch_PT, &task)) {
|
||||
if (!(n_inuse < n_alloc)) {
|
||||
n_alloc += STACKS_INCR;
|
||||
if ((!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc)))
|
||||
if (!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc))
|
||||
|| (!(ext = pids_stacks_alloc(info, STACKS_INCR))))
|
||||
return -1;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memcpy(info->fetch.anchor + n_inuse, ext->stacks, sizeof(void *) * STACKS_INCR);
|
||||
}
|
||||
if (!pids_proc_tally(info, &info->fetch.counts, &task))
|
||||
return -1;
|
||||
pids_assign_results(info, info->fetch.anchor[n_inuse++], &task);
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
if (!pids_assign_results(info, info->fetch.anchor[n_inuse++], &task))
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
}
|
||||
/* while the possibility is extremely remote, the readproc.c (read_something)
|
||||
simple_readproc and simple_readtask guys could have encountered this error
|
||||
in which case they would have returned a NULL, thus ending our while loop. */
|
||||
if (errno == ENOMEM)
|
||||
return -1;
|
||||
|
||||
// finalize stuff -------------------------------------
|
||||
/* note: we go to this trouble of maintaining a duplicate of the consolidated |
|
||||
@ -1084,7 +1097,7 @@ static int pids_stacks_fetch (
|
||||
if (n_saved < n_inuse + 1) {
|
||||
n_saved = n_inuse + 1;
|
||||
if (!(info->fetch.results.stacks = realloc(info->fetch.results.stacks, sizeof(void *) * n_saved)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
memcpy(info->fetch.results.stacks, info->fetch.anchor, sizeof(void *) * n_inuse);
|
||||
info->fetch.results.stacks[n_inuse] = NULL;
|
||||
@ -1120,7 +1133,6 @@ PROCPS_EXPORT int procps_pids_new (
|
||||
|
||||
if (info == NULL || *info != NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(p = calloc(1, sizeof(struct pids_info))))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1241,26 +1253,29 @@ PROCPS_EXPORT struct pids_stack *fatal_proc_unmounted (
|
||||
|
||||
/* this is very likely the *only* newlib function where the
|
||||
context (pids_info) of NULL will ever be permitted */
|
||||
look_up_our_self(&self);
|
||||
if (!return_self)
|
||||
if (!look_up_our_self(&self)
|
||||
|| (!return_self))
|
||||
return NULL;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
|
||||
/* with items & numitems technically optional at 'new' time, it's
|
||||
expected 'reset' will have been called -- but just in case ... */
|
||||
if (!info->curitems)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
if (!(ext = pids_stacks_alloc(info, 1)))
|
||||
return NULL;
|
||||
if (!pids_extent_cut(info, ext))
|
||||
if (!pids_extent_cut(info, ext)) {
|
||||
errno = EADDRNOTAVAIL;
|
||||
return NULL;
|
||||
|
||||
}
|
||||
ext->next = info->otherexts;
|
||||
info->otherexts = ext;
|
||||
pids_assign_results(info, ext->stacks[0], &self);
|
||||
if (!pids_assign_results(info, ext->stacks[0], &self))
|
||||
return NULL;
|
||||
|
||||
return ext->stacks[0];
|
||||
} // end: fatal_proc_unmounted
|
||||
@ -1272,6 +1287,7 @@ PROCPS_EXPORT struct pids_stack *procps_pids_get (
|
||||
{
|
||||
static proc_t task; // static for initial zeroes + later dynamic free(s)
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (which != PIDS_FETCH_TASKS_ONLY && which != PIDS_FETCH_THREADS_TOO)
|
||||
@ -1284,9 +1300,9 @@ PROCPS_EXPORT struct pids_stack *procps_pids_get (
|
||||
fresh_start:
|
||||
if (!info->get_ext) {
|
||||
if (!(info->get_ext = pids_stacks_alloc(info, 1)))
|
||||
return NULL;
|
||||
return NULL; // here, errno was overridden with ENOMEM
|
||||
if (!pids_oldproc_open(&info->get_PT, info->oldflags))
|
||||
return NULL;
|
||||
return NULL; // here, errno was overridden with ENOMEM/others
|
||||
info->get_type = which;
|
||||
info->read_something = which ? readeither : readproc;
|
||||
}
|
||||
@ -1299,13 +1315,14 @@ fresh_start:
|
||||
info->get_ext = NULL;
|
||||
goto fresh_start;
|
||||
}
|
||||
errno = 0;
|
||||
|
||||
pids_cleanup_stack(info->get_ext->stacks[0]->head);
|
||||
|
||||
if (NULL == info->read_something(info->get_PT, &task))
|
||||
return NULL;
|
||||
pids_assign_results(info, info->get_ext->stacks[0], &task);
|
||||
|
||||
if (!pids_assign_results(info, info->get_ext->stacks[0], &task))
|
||||
return NULL;
|
||||
return info->get_ext->stacks[0];
|
||||
} // end: procps_pids_get
|
||||
|
||||
@ -1323,6 +1340,7 @@ PROCPS_EXPORT struct pids_fetch *procps_pids_reap (
|
||||
{
|
||||
int rc;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (which != PIDS_FETCH_TASKS_ONLY && which != PIDS_FETCH_THREADS_TOO)
|
||||
@ -1331,6 +1349,7 @@ PROCPS_EXPORT struct pids_fetch *procps_pids_reap (
|
||||
expected 'reset' will have been called -- but just in case ... */
|
||||
if (!info->curitems)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
if (!pids_oldproc_open(&info->fetch_PT, info->oldflags))
|
||||
return NULL;
|
||||
@ -1405,6 +1424,7 @@ PROCPS_EXPORT struct pids_fetch *procps_pids_select (
|
||||
unsigned ids[FILL_ID_MAX + 1];
|
||||
int rc;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || these == NULL)
|
||||
return NULL;
|
||||
if (numthese < 1 || numthese > FILL_ID_MAX)
|
||||
@ -1415,6 +1435,7 @@ PROCPS_EXPORT struct pids_fetch *procps_pids_select (
|
||||
expected 'reset' will have been called -- but just in case ... */
|
||||
if (!info->curitems)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
// this zero delimiter is really only needed with PIDS_SELECT_PID
|
||||
memcpy(ids, these, sizeof(unsigned) * numthese);
|
||||
@ -1428,7 +1449,7 @@ PROCPS_EXPORT struct pids_fetch *procps_pids_select (
|
||||
|
||||
pids_oldproc_close(&info->fetch_PT);
|
||||
// no guarantee any pids/uids were found
|
||||
return (rc > -1) ? &info->fetch.results : NULL;
|
||||
return (rc >= 0) ? &info->fetch.results : NULL;
|
||||
} // end: procps_pids_select
|
||||
|
||||
|
||||
@ -1453,6 +1474,7 @@ PROCPS_EXPORT struct pids_stack **procps_pids_sort (
|
||||
struct pids_result *p;
|
||||
int offset;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || stacks == NULL)
|
||||
return NULL;
|
||||
// a pids_item is currently unsigned, but we'll protect our future
|
||||
@ -1475,6 +1497,8 @@ PROCPS_EXPORT struct pids_stack **procps_pids_sort (
|
||||
return NULL;
|
||||
++p;
|
||||
}
|
||||
errno = 0;
|
||||
|
||||
parms.offset = offset;
|
||||
parms.order = order;
|
||||
|
||||
|
111
proc/slabinfo.c
111
proc/slabinfo.c
@ -320,16 +320,16 @@ static int alloc_slabnodes (
|
||||
int new_count;
|
||||
|
||||
if (info->nodes_used < info->nodes_alloc)
|
||||
return 0;
|
||||
return 1;
|
||||
/* Increment the allocated number of slabs */
|
||||
new_count = info->nodes_alloc * 5/4+30;
|
||||
|
||||
new_nodes = realloc(info->nodes, sizeof(struct slabs_node) * new_count);
|
||||
if (!new_nodes)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
info->nodes = new_nodes;
|
||||
info->nodes_alloc = new_count;
|
||||
return 0;
|
||||
return 1;
|
||||
} // end: alloc_slabnodes
|
||||
|
||||
|
||||
@ -345,14 +345,12 @@ static int get_slabnode (
|
||||
struct slabinfo_info *info,
|
||||
struct slabs_node **node)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (info->nodes_used == info->nodes_alloc) {
|
||||
if ((retval = alloc_slabnodes(info)) < 0)
|
||||
return retval;
|
||||
if (!alloc_slabnodes(info))
|
||||
return 0; // here, errno was set to ENOMEM
|
||||
}
|
||||
*node = &(info->nodes[info->nodes_used++]);
|
||||
return 0;
|
||||
return 1;
|
||||
} // end: get_slabnode
|
||||
|
||||
|
||||
@ -393,7 +391,6 @@ static int parse_slabinfo20 (
|
||||
{
|
||||
struct slabs_node *node;
|
||||
char buffer[SLABINFO_LINE_LEN];
|
||||
int retval;
|
||||
int page_size = getpagesize();
|
||||
struct slabs_summ *slabs = &(info->slabs.new);
|
||||
|
||||
@ -404,8 +401,8 @@ static int parse_slabinfo20 (
|
||||
if (buffer[0] == '#')
|
||||
continue;
|
||||
|
||||
if ((retval = get_slabnode(info, &node)) < 0)
|
||||
return retval;
|
||||
if (!get_slabnode(info, &node))
|
||||
return 1; // here, errno was set to ENOMEM
|
||||
|
||||
if (sscanf(buffer,
|
||||
"%" STRINGIFY(SLABINFO_NAME_LEN) "s" \
|
||||
@ -415,9 +412,8 @@ static int parse_slabinfo20 (
|
||||
&node->obj_size, &node->objs_per_slab,
|
||||
&node->pages_per_slab, &node->nr_active_slabs,
|
||||
&node->nr_slabs) < 8) {
|
||||
if (errno != 0)
|
||||
return -errno;
|
||||
return -EINVAL;
|
||||
errno = ERANGE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!node->name[0])
|
||||
@ -428,8 +424,7 @@ static int parse_slabinfo20 (
|
||||
if (node->obj_size > slabs->max_obj_size)
|
||||
slabs->max_obj_size = node->obj_size;
|
||||
|
||||
node->cache_size = (unsigned long)node->nr_slabs * node->pages_per_slab
|
||||
* page_size;
|
||||
node->cache_size = (unsigned long)node->nr_slabs * node->pages_per_slab * page_size;
|
||||
|
||||
if (node->nr_objs) {
|
||||
node->use = (unsigned int)100 * (node->nr_active_objs / node->nr_objs);
|
||||
@ -459,42 +454,40 @@ static int parse_slabinfo20 (
|
||||
* Read the data out of /proc/slabinfo putting the information
|
||||
* into the supplied info container
|
||||
*
|
||||
* Returns: 0 on success, negative on error
|
||||
* Returns: 0 on success, 1 on error
|
||||
*/
|
||||
static int slabinfo_read_failed (
|
||||
struct slabinfo_info *info)
|
||||
{
|
||||
char line[SLABINFO_LINE_LEN];
|
||||
int retval, major, minor;
|
||||
int major, minor;
|
||||
|
||||
memcpy(&info->slabs.old, &info->slabs.new, sizeof(struct slabs_summ));
|
||||
memset(&(info->slabs.new), 0, sizeof(struct slabs_summ));
|
||||
if ((retval = alloc_slabnodes(info)) < 0)
|
||||
return retval;
|
||||
if (!alloc_slabnodes(info))
|
||||
return 1; // here, errno was set to ENOMEM
|
||||
|
||||
memset(info->nodes, 0, sizeof(struct slabs_node)*info->nodes_alloc);
|
||||
info->nodes_used = 0;
|
||||
|
||||
if (NULL == info->slabinfo_fp
|
||||
&& (info->slabinfo_fp = fopen(SLABINFO_FILE, "r")) == NULL)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
if (fseek(info->slabinfo_fp, 0L, SEEK_SET) < 0)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
/* Parse the version string */
|
||||
if (!fgets(line, SLABINFO_LINE_LEN, info->slabinfo_fp))
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
if (sscanf(line, "slabinfo - version: %d.%d", &major, &minor) != 2)
|
||||
return -EINVAL;
|
||||
if (2 != sscanf(line, "slabinfo - version: %d.%d", &major, &minor)
|
||||
|| (major != 2)) {
|
||||
errno = ERANGE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (major == 2)
|
||||
retval = parse_slabinfo20(info);
|
||||
else
|
||||
return -ERANGE;
|
||||
|
||||
return retval;
|
||||
return parse_slabinfo20(info);
|
||||
} // end: slabinfo_read_failed
|
||||
|
||||
|
||||
@ -607,7 +600,7 @@ static inline int slabinfo_items_check_failed (
|
||||
*/
|
||||
if (numitems < 1
|
||||
|| (void *)items < (void *)(unsigned long)(2 * SLABINFO_logical_end))
|
||||
return -1;
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < numitems; i++) {
|
||||
#ifdef ENFORCE_LOGICAL
|
||||
@ -616,13 +609,13 @@ static inline int slabinfo_items_check_failed (
|
||||
continue;
|
||||
if (items[i] < this->lowest
|
||||
|| (items[i] > this->highest))
|
||||
return -1;
|
||||
return 1;
|
||||
#else
|
||||
// a slabinfo_item is currently unsigned, but we'll protect our future
|
||||
if (items[i] < 0)
|
||||
return -1;
|
||||
return 1;
|
||||
if (items[i] >= SLABINFO_logical_end)
|
||||
return -1;
|
||||
return 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -702,12 +695,12 @@ static int slabinfo_stacks_fetch (
|
||||
// initialize stuff -----------------------------------
|
||||
if (!info->fetch.anchor) {
|
||||
if (!(info->fetch.anchor = calloc(sizeof(void *), STACKS_INCR)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
n_alloc = STACKS_INCR;
|
||||
}
|
||||
if (!info->fetch_ext.extents) {
|
||||
if (!(ext = slabinfo_stacks_alloc(&info->fetch_ext, n_alloc)))
|
||||
return -ENOMEM;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memset(info->fetch.anchor, 0, sizeof(void *) * n_alloc);
|
||||
memcpy(info->fetch.anchor, ext->stacks, sizeof(void *) * n_alloc);
|
||||
slabinfo_itemize_stacks_all(&info->fetch_ext);
|
||||
@ -721,7 +714,7 @@ static int slabinfo_stacks_fetch (
|
||||
n_alloc += STACKS_INCR;
|
||||
if ((!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc)))
|
||||
|| (!(ext = slabinfo_stacks_alloc(&info->fetch_ext, STACKS_INCR))))
|
||||
return -1;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memcpy(info->fetch.anchor + n_inuse, ext->stacks, sizeof(void *) * STACKS_INCR);
|
||||
}
|
||||
slabinfo_assign_results(info->fetch.anchor[n_inuse], &info->slabs, &info->nodes[n_inuse]);
|
||||
@ -736,7 +729,7 @@ static int slabinfo_stacks_fetch (
|
||||
if (n_saved < n_inuse + 1) {
|
||||
n_saved = n_inuse + 1;
|
||||
if (!(info->fetch.results.stacks = realloc(info->fetch.results.stacks, sizeof(void *) * n_saved)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
memcpy(info->fetch.results.stacks, info->fetch.anchor, sizeof(void *) * n_inuse);
|
||||
info->fetch.results.stacks[n_inuse] = NULL;
|
||||
@ -755,14 +748,14 @@ static int slabinfo_stacks_reconfig_maybe (
|
||||
int numitems)
|
||||
{
|
||||
if (slabinfo_items_check_failed(this, items, numitems))
|
||||
return -EINVAL;
|
||||
return -1;
|
||||
/* is this the first time or have things changed since we were last called?
|
||||
if so, gotta' redo all of our stacks stuff ... */
|
||||
if (this->numitems != numitems + 1
|
||||
|| memcmp(this->items, items, sizeof(enum slabinfo_item) * numitems)) {
|
||||
// allow for our SLABINFO_logical_end
|
||||
if (!(this->items = realloc(this->items, sizeof(enum slabinfo_item) * (numitems + 1))))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
memcpy(this->items, items, sizeof(enum slabinfo_item) * numitems);
|
||||
this->items[numitems] = SLABINFO_logical_end;
|
||||
this->numitems = numitems + 1;
|
||||
@ -790,11 +783,9 @@ PROCPS_EXPORT int procps_slabinfo_new (
|
||||
struct slabinfo_info **info)
|
||||
{
|
||||
struct slabinfo_info *p;
|
||||
int rc;
|
||||
|
||||
if (info == NULL)
|
||||
if (info == NULL || *info != NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(p = calloc(1, sizeof(struct slabinfo_info))))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -811,9 +802,9 @@ PROCPS_EXPORT int procps_slabinfo_new (
|
||||
1) see if that caller's permissions were sufficient (root) |
|
||||
2) make delta results potentially useful, even if 1st time |
|
||||
3) elimnate need for history distortions 1st time 'switch' | */
|
||||
if ((rc = slabinfo_read_failed(p))) {
|
||||
if (slabinfo_read_failed(p)) {
|
||||
procps_slabinfo_unref(&p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
*info = p;
|
||||
@ -841,6 +832,8 @@ PROCPS_EXPORT int procps_slabinfo_unref (
|
||||
(*info)->refcount--;
|
||||
|
||||
if ((*info)->refcount < 1) {
|
||||
int errno_sav = errno;
|
||||
|
||||
if ((*info)->slabinfo_fp) {
|
||||
fclose((*info)->slabinfo_fp);
|
||||
(*info)->slabinfo_fp = NULL;
|
||||
@ -864,6 +857,8 @@ PROCPS_EXPORT int procps_slabinfo_unref (
|
||||
|
||||
free(*info);
|
||||
*info = NULL;
|
||||
|
||||
errno = errno_sav;
|
||||
return 0;
|
||||
}
|
||||
return (*info)->refcount;
|
||||
@ -879,10 +874,12 @@ PROCPS_EXPORT struct slabinfo_result *procps_slabinfo_get (
|
||||
static time_t sav_secs;
|
||||
time_t cur_secs;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (item < 0 || item >= SLABINFO_logical_end)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* we will NOT read the slabinfo file with every call - rather, we'll offer
|
||||
a granularity of 1 second between reads ... */
|
||||
@ -894,9 +891,8 @@ PROCPS_EXPORT struct slabinfo_result *procps_slabinfo_get (
|
||||
}
|
||||
|
||||
info->get_this.item = item;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
// if (item > SLABINFO_noop)
|
||||
info->get_this.result.ul_int = 0;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
info->get_this.result.ul_int = 0;
|
||||
Item_table[item].setsfunc(&info->get_this, &info->slabs, &info->nul_node);
|
||||
|
||||
return &info->get_this;
|
||||
@ -915,18 +911,20 @@ PROCPS_EXPORT struct slabinfo_reap *procps_slabinfo_reap (
|
||||
enum slabinfo_item *items,
|
||||
int numitems)
|
||||
{
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
|
||||
if (0 > slabinfo_stacks_reconfig_maybe(&info->fetch_ext, items, numitems))
|
||||
return NULL;
|
||||
return NULL; // here, errno may be overridden with ENOMEM
|
||||
errno = 0;
|
||||
|
||||
if (info->fetch_ext.dirty_stacks)
|
||||
slabinfo_cleanup_stacks_all(&info->fetch_ext);
|
||||
|
||||
if (slabinfo_read_failed(info))
|
||||
return NULL;
|
||||
slabinfo_stacks_fetch(info);
|
||||
if (0 > slabinfo_stacks_fetch(info))
|
||||
return NULL;
|
||||
info->fetch_ext.dirty_stacks = 1;
|
||||
|
||||
return &info->fetch.results;
|
||||
@ -945,11 +943,12 @@ PROCPS_EXPORT struct slabinfo_stack *procps_slabinfo_select (
|
||||
enum slabinfo_item *items,
|
||||
int numitems)
|
||||
{
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
|
||||
if (0 > slabinfo_stacks_reconfig_maybe(&info->select_ext, items, numitems))
|
||||
return NULL;
|
||||
return NULL; // here, errno may be overridden with ENOMEM
|
||||
errno = 0;
|
||||
|
||||
if (!info->select_ext.extents
|
||||
&& (!slabinfo_stacks_alloc(&info->select_ext, 1)))
|
||||
@ -988,9 +987,9 @@ PROCPS_EXPORT struct slabinfo_stack **procps_slabinfo_sort (
|
||||
struct sort_parms parms;
|
||||
int offset;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || stacks == NULL)
|
||||
return NULL;
|
||||
|
||||
// a slabinfo_item is currently unsigned, but we'll protect our future
|
||||
if (sortitem < 0 || sortitem >= SLABINFO_logical_end)
|
||||
return NULL;
|
||||
@ -1009,6 +1008,8 @@ PROCPS_EXPORT struct slabinfo_stack **procps_slabinfo_sort (
|
||||
return NULL;
|
||||
++p;
|
||||
}
|
||||
errno = 0;
|
||||
|
||||
parms.offset = offset;
|
||||
parms.order = order;
|
||||
|
||||
|
85
proc/stat.c
85
proc/stat.c
@ -473,14 +473,14 @@ static inline int stat_items_check_failed (
|
||||
*/
|
||||
if (numitems < 1
|
||||
|| (void *)items < (void *)(unsigned long)(2 * STAT_logical_end))
|
||||
return -1;
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < numitems; i++) {
|
||||
// a stat_item is currently unsigned, but we'll protect our future
|
||||
if (items[i] < 0)
|
||||
return -1;
|
||||
return 1;
|
||||
if (items[i] >= STAT_logical_end) {
|
||||
return -1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@ -561,14 +561,14 @@ static int stat_read_failed (
|
||||
if (!info->cpus.hist.n_alloc) {
|
||||
info->cpus.hist.tics = calloc(NEWOLD_INCR, sizeof(struct hist_tic));
|
||||
if (!(info->cpus.hist.tics))
|
||||
return -ENOMEM;
|
||||
return 1;
|
||||
info->cpus.hist.n_alloc = NEWOLD_INCR;
|
||||
info->cpus.hist.n_inuse = 0;
|
||||
}
|
||||
|
||||
if (!info->stat_fp
|
||||
&& (!(info->stat_fp = fopen(STAT_FILE, "r"))))
|
||||
return -errno;
|
||||
return 1;
|
||||
fflush(info->stat_fp);
|
||||
rewind(info->stat_fp);
|
||||
|
||||
@ -579,21 +579,23 @@ static int stat_read_failed (
|
||||
especially in a massively parallel environment. additionally, each cpu |
|
||||
line is then frozen in time rather than changing until we get around to |
|
||||
accessing it. this helps to minimize (not eliminate) some distortions. | */
|
||||
tot_read = errno = 0;
|
||||
tot_read = 0;
|
||||
while ((0 < (num = fread(curPOS, 1, curSIZ, info->stat_fp)))) {
|
||||
tot_read += num;
|
||||
if (tot_read < maxSIZ)
|
||||
break;
|
||||
maxSIZ += BUFFER_INCR;
|
||||
if (!(info->stat_buf = realloc(info->stat_buf, maxSIZ)))
|
||||
return -ENOMEM;
|
||||
return 1;
|
||||
};
|
||||
#undef maxSIZ
|
||||
#undef curSIZ
|
||||
#undef curPOS
|
||||
|
||||
if (!feof(info->stat_fp))
|
||||
return -errno;
|
||||
if (!feof(info->stat_fp)) {
|
||||
errno = EIO;
|
||||
return 1;
|
||||
}
|
||||
info->stat_buf[tot_read] = '\0';
|
||||
bp = info->stat_buf;
|
||||
|
||||
@ -609,8 +611,10 @@ static int stat_read_failed (
|
||||
, &sum_ptr->new.user, &sum_ptr->new.nice, &sum_ptr->new.system
|
||||
, &sum_ptr->new.idle, &sum_ptr->new.iowait, &sum_ptr->new.irq
|
||||
, &sum_ptr->new.sirq, &sum_ptr->new.stolen
|
||||
, &sum_ptr->new.guest, &sum_ptr->new.gnice))
|
||||
return -1;
|
||||
, &sum_ptr->new.guest, &sum_ptr->new.gnice)) {
|
||||
errno = ERANGE;
|
||||
return 1;
|
||||
}
|
||||
stat_derive_unique(sum_ptr);
|
||||
|
||||
i = 0;
|
||||
@ -642,7 +646,7 @@ reap_em_again:
|
||||
info->cpus.hist.n_alloc += NEWOLD_INCR;
|
||||
info->cpus.hist.tics = realloc(info->cpus.hist.tics, info->cpus.hist.n_alloc * sizeof(struct hist_tic));
|
||||
if (!(info->cpus.hist.tics))
|
||||
return -ENOMEM;
|
||||
return 1;
|
||||
goto reap_em_again;
|
||||
}
|
||||
|
||||
@ -756,18 +760,15 @@ static int stat_stacks_fetch (
|
||||
struct stacks_extent *ext;
|
||||
int i;
|
||||
|
||||
if (this == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
// initialize stuff -----------------------------------
|
||||
if (!this->anchor) {
|
||||
if (!(this->anchor = calloc(sizeof(void *), STACKS_INCR)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
n_alloc = STACKS_INCR;
|
||||
}
|
||||
if (!this->fetch.extents) {
|
||||
if (!(ext = stat_stacks_alloc(&this->fetch, n_alloc)))
|
||||
return -ENOMEM;
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memcpy(this->anchor, ext->stacks, sizeof(void *) * n_alloc);
|
||||
}
|
||||
if (this->fetch.dirty_stacks)
|
||||
@ -778,9 +779,8 @@ static int stat_stacks_fetch (
|
||||
if (!(i < n_alloc)) {
|
||||
n_alloc += STACKS_INCR;
|
||||
if ((!(this->anchor = realloc(this->anchor, sizeof(void *) * n_alloc)))
|
||||
|| (!(ext = stat_stacks_alloc(&this->fetch, STACKS_INCR)))) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|| (!(ext = stat_stacks_alloc(&this->fetch, STACKS_INCR))))
|
||||
return -1; // here, errno was set to ENOMEM
|
||||
memcpy(this->anchor + i, ext->stacks, sizeof(void *) * STACKS_INCR);
|
||||
}
|
||||
stat_assign_results(this->anchor[i], &info->sys_hist, &this->hist.tics[i]);
|
||||
@ -794,7 +794,7 @@ static int stat_stacks_fetch (
|
||||
if (n_saved < i + 1) {
|
||||
n_saved = i + 1;
|
||||
if (!(this->result.stacks = realloc(this->result.stacks, sizeof(void *) * n_saved)))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
memcpy(this->result.stacks, this->anchor, sizeof(void *) * i);
|
||||
this->result.stacks[i] = NULL;
|
||||
@ -815,15 +815,14 @@ static int stat_stacks_reconfig_maybe (
|
||||
int numitems)
|
||||
{
|
||||
if (stat_items_check_failed(numitems, items))
|
||||
return -EINVAL;
|
||||
|
||||
return -1;
|
||||
/* is this the first time or have things changed since we were last called?
|
||||
if so, gotta' redo all of our stacks stuff ... */
|
||||
if (this->items->num != numitems + 1
|
||||
|| memcmp(this->items->enums, items, sizeof(enum stat_item) * numitems)) {
|
||||
// allow for our STAT_logical_end
|
||||
if (!(this->items->enums = realloc(this->items->enums, sizeof(enum stat_item) * (numitems + 1))))
|
||||
return -ENOMEM;
|
||||
return -1;
|
||||
memcpy(this->items->enums, items, sizeof(enum stat_item) * numitems);
|
||||
this->items->enums[numitems] = STAT_logical_end;
|
||||
this->items->num = numitems + 1;
|
||||
@ -872,7 +871,6 @@ PROCPS_EXPORT int procps_stat_new (
|
||||
struct stat_info **info)
|
||||
{
|
||||
struct stat_info *p;
|
||||
int rc;
|
||||
|
||||
if (info == NULL || *info != NULL)
|
||||
return -EINVAL;
|
||||
@ -901,9 +899,9 @@ PROCPS_EXPORT int procps_stat_new (
|
||||
1) ensure there will be no problems with subsequent access |
|
||||
2) make delta results potentially useful, even if 1st time |
|
||||
3) elimnate need for history distortions 1st time 'switch' | */
|
||||
if ((rc = stat_read_failed(p))) {
|
||||
if (stat_read_failed(p)) {
|
||||
procps_stat_unref(&p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
*info = p;
|
||||
@ -931,6 +929,8 @@ PROCPS_EXPORT int procps_stat_unref (
|
||||
(*info)->refcount--;
|
||||
|
||||
if ((*info)->refcount < 1) {
|
||||
int errno_sav = errno;
|
||||
|
||||
if ((*info)->stat_fp)
|
||||
fclose((*info)->stat_fp);
|
||||
if ((*info)->stat_buf)
|
||||
@ -969,6 +969,8 @@ PROCPS_EXPORT int procps_stat_unref (
|
||||
|
||||
free(*info);
|
||||
*info = NULL;
|
||||
|
||||
errno = errno_sav;
|
||||
return 0;
|
||||
}
|
||||
return (*info)->refcount;
|
||||
@ -984,10 +986,12 @@ PROCPS_EXPORT struct stat_result *procps_stat_get (
|
||||
static time_t sav_secs;
|
||||
time_t cur_secs;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (item < 0 || item >= STAT_logical_end)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* we will NOT read the source file with every call - rather, we'll offer
|
||||
a granularity of 1 second between reads ... */
|
||||
@ -999,9 +1003,8 @@ PROCPS_EXPORT struct stat_result *procps_stat_get (
|
||||
}
|
||||
|
||||
info->get_this.item = item;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
// if (item > STAT_noop)
|
||||
info->get_this.result.ull_int = 0;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
info->get_this.result.ull_int = 0;
|
||||
Item_table[item].setsfunc(&info->get_this, &info->sys_hist, &info->cpu_hist);
|
||||
|
||||
return &info->get_this;
|
||||
@ -1023,6 +1026,7 @@ PROCPS_EXPORT struct stat_reaped *procps_stat_reap (
|
||||
{
|
||||
int rc;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
if (what != STAT_REAP_CPUS_ONLY && what != STAT_REAP_CPUS_AND_NODES)
|
||||
@ -1037,13 +1041,13 @@ PROCPS_EXPORT struct stat_reaped *procps_stat_reap (
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if (0 > (rc = stat_stacks_reconfig_maybe(&info->cpu_summary, items, numitems)))
|
||||
return NULL;
|
||||
return NULL; // here, errno may be overridden with ENOMEM
|
||||
if (rc) {
|
||||
stat_extents_free_all(&info->cpus.fetch);
|
||||
stat_extents_free_all(&info->nodes.fetch);
|
||||
}
|
||||
errno = 0;
|
||||
|
||||
if (stat_read_failed(info))
|
||||
return NULL;
|
||||
@ -1060,7 +1064,7 @@ PROCPS_EXPORT struct stat_reaped *procps_stat_reap (
|
||||
|
||||
switch (what) {
|
||||
case STAT_REAP_CPUS_ONLY:
|
||||
if (!stat_stacks_fetch(info, &info->cpus))
|
||||
if (0 > stat_stacks_fetch(info, &info->cpus))
|
||||
return NULL;
|
||||
break;
|
||||
case STAT_REAP_CPUS_AND_NODES:
|
||||
@ -1069,9 +1073,9 @@ PROCPS_EXPORT struct stat_reaped *procps_stat_reap (
|
||||
will have marked (temporarily) all the cpu node ids as invalid | */
|
||||
if (0 > stat_make_numa_hist(info))
|
||||
return NULL;
|
||||
// tolerate an unexpected absence of libnuma.so ...
|
||||
stat_stacks_fetch(info, &info->nodes);
|
||||
if (!stat_stacks_fetch(info, &info->cpus))
|
||||
if (0 > stat_stacks_fetch(info, &info->nodes))
|
||||
return NULL;
|
||||
if (0 > stat_stacks_fetch(info, &info->cpus))
|
||||
return NULL;
|
||||
break;
|
||||
default:
|
||||
@ -1094,11 +1098,12 @@ PROCPS_EXPORT struct stat_stack *procps_stat_select (
|
||||
enum stat_item *items,
|
||||
int numitems)
|
||||
{
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
|
||||
if (0 > stat_stacks_reconfig_maybe(&info->select, items, numitems))
|
||||
return NULL;
|
||||
return NULL; // here, errno may be overridden with ENOMEM
|
||||
errno = 0;
|
||||
|
||||
if (stat_read_failed(info))
|
||||
return NULL;
|
||||
@ -1128,9 +1133,9 @@ PROCPS_EXPORT struct stat_stack **procps_stat_sort (
|
||||
struct sort_parms parms;
|
||||
int offset;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL || stacks == NULL)
|
||||
return NULL;
|
||||
|
||||
// a stat_item is currently unsigned, but we'll protect our future
|
||||
if (sortitem < 0 || sortitem >= STAT_logical_end)
|
||||
return NULL;
|
||||
@ -1149,6 +1154,8 @@ PROCPS_EXPORT struct stat_stack **procps_stat_sort (
|
||||
return NULL;
|
||||
++p;
|
||||
}
|
||||
errno = 0;
|
||||
|
||||
parms.offset = offset;
|
||||
parms.order = order;
|
||||
|
||||
|
@ -811,14 +811,14 @@ static inline int vmstat_items_check_failed (
|
||||
*/
|
||||
if (numitems < 1
|
||||
|| (void *)items < (void *)(unsigned long)(2 * VMSTAT_logical_end))
|
||||
return -1;
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < numitems; i++) {
|
||||
// a vmstat_item is currently unsigned, but we'll protect our future
|
||||
if (items[i] < 0)
|
||||
return -1;
|
||||
return 1;
|
||||
if (items[i] >= VMSTAT_logical_end)
|
||||
return -1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -829,13 +829,14 @@ static int vmstat_make_hash_failed (
|
||||
struct vmstat_info *info)
|
||||
{
|
||||
#define htVAL(f) e.key = STRINGIFY(f); e.data = &info->hist.new. f; \
|
||||
if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return -errno;
|
||||
if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return 1;
|
||||
ENTRY e, *ep;
|
||||
size_t n;
|
||||
|
||||
n = sizeof(struct vmstat_data) / sizeof(unsigned long);
|
||||
// we'll follow the hsearch recommendation of an extra 25%
|
||||
hcreate_r(n + (n / 4), &info->hashtab);
|
||||
if (!hcreate_r(n + (n / 4), &info->hashtab))
|
||||
return 1;
|
||||
|
||||
htVAL(allocstall)
|
||||
htVAL(balloon_deflate)
|
||||
@ -985,21 +986,23 @@ static int vmstat_read_failed (
|
||||
|
||||
if (-1 == info->vmstat_fd
|
||||
&& (info->vmstat_fd = open(VMSTAT_FILE, O_RDONLY)) == -1)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
if (lseek(info->vmstat_fd, 0L, SEEK_SET) == -1)
|
||||
return -errno;
|
||||
return 1;
|
||||
|
||||
for (;;) {
|
||||
if ((size = read(info->vmstat_fd, buf, sizeof(buf)-1)) < 0) {
|
||||
if (errno == EINTR || errno == EAGAIN)
|
||||
continue;
|
||||
return -errno;
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (size == 0)
|
||||
return -1;
|
||||
if (size == 0) {
|
||||
errno = EIO;
|
||||
return 1;
|
||||
}
|
||||
buf[size] = '\0';
|
||||
|
||||
head = buf;
|
||||
@ -1112,7 +1115,6 @@ PROCPS_EXPORT int procps_vmstat_new (
|
||||
struct vmstat_info **info)
|
||||
{
|
||||
struct vmstat_info *p;
|
||||
int rc;
|
||||
|
||||
if (info == NULL || *info != NULL)
|
||||
return -EINVAL;
|
||||
@ -1122,18 +1124,18 @@ PROCPS_EXPORT int procps_vmstat_new (
|
||||
p->refcount = 1;
|
||||
p->vmstat_fd = -1;
|
||||
|
||||
if ((rc = vmstat_make_hash_failed(p))) {
|
||||
if (vmstat_make_hash_failed(p)) {
|
||||
free(p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
/* do a priming read here for the following potential benefits: |
|
||||
1) ensure there will be no problems with subsequent access |
|
||||
2) make delta results potentially useful, even if 1st time |
|
||||
3) elimnate need for history distortions 1st time 'switch' | */
|
||||
if ((rc = vmstat_read_failed(p))) {
|
||||
if (vmstat_read_failed(p)) {
|
||||
procps_vmstat_unref(&p);
|
||||
return rc;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
*info = p;
|
||||
@ -1161,6 +1163,8 @@ PROCPS_EXPORT int procps_vmstat_unref (
|
||||
(*info)->refcount--;
|
||||
|
||||
if ((*info)->refcount < 1) {
|
||||
int errno_sav = errno;
|
||||
|
||||
if ((*info)->extents)
|
||||
vmstat_extents_free_all((*info));
|
||||
if ((*info)->items)
|
||||
@ -1169,6 +1173,8 @@ PROCPS_EXPORT int procps_vmstat_unref (
|
||||
|
||||
free(*info);
|
||||
*info = NULL;
|
||||
|
||||
errno = errno_sav;
|
||||
return 0;
|
||||
}
|
||||
return (*info)->refcount;
|
||||
@ -1184,10 +1190,12 @@ PROCPS_EXPORT struct vmstat_result *procps_vmstat_get (
|
||||
static time_t sav_secs;
|
||||
time_t cur_secs;
|
||||
|
||||
errno = EINVAL;
|
||||
if (info == NULL)
|
||||
return NULL;
|
||||
if (item < 0 || item >= VMSTAT_logical_end)
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* we will NOT read the vmstat file with every call - rather, we'll offer
|
||||
a granularity of 1 second between reads ... */
|
||||
@ -1199,9 +1207,8 @@ PROCPS_EXPORT struct vmstat_result *procps_vmstat_get (
|
||||
}
|
||||
|
||||
info->get_this.item = item;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
// if (item > VMSTAT_noop)
|
||||
info->get_this.result.ul_int = 0;
|
||||
// with 'get', we must NOT honor the usual 'noop' guarantee
|
||||
info->get_this.result.ul_int = 0;
|
||||
Item_table[item].setsfunc(&info->get_this, &info->hist);
|
||||
|
||||
return &info->get_this;
|
||||
@ -1220,10 +1227,12 @@ PROCPS_EXPORT struct vmstat_stack *procps_vmstat_select (
|
||||
enum vmstat_item *items,
|
||||
int numitems)
|
||||
{
|
||||
errno = EINVAL;
|
||||
if (info == NULL || items == NULL)
|
||||
return NULL;
|
||||
if (vmstat_items_check_failed(numitems, items))
|
||||
return NULL;
|
||||
errno = 0;
|
||||
|
||||
/* is this the first time or have things changed since we were last called?
|
||||
if so, gotta' redo all of our stacks stuff ... */
|
||||
|
Loading…
Reference in New Issue
Block a user