From 1d8d4be046521e16be6a5caa4d1c1854e5a8b1bd Mon Sep 17 00:00:00 2001
From: Jim Warner
Date: Mon, 3 Jun 2019 00:00:00 -0500
Subject: [PATCH] library: add a 'CPU_ZEROTICS' equivalent to api

This patch just implements an equivalent to the master
branch 'CPU_ZEROTICS' provision. However, the original
impetus for that earlier implementation was ultimately
attributed to a likely kernel anomaly since corrected.

As a result, in this newlib implementation we take the
opposite approach to the default behavior. There is no
adjustment to TIC_SUM_DELTA values if fewer ticks than
expected are recorded, unless the define is activated.

The commit shown below explains why the 'CPU_ZEROTICS'
define was retained in spite of the fix to the kernel.

Reference(s):
. issue referencing CPU_ZEROTICS
  https://gitlab.com/procps-ng/procps/issues/132
. master branch CPU_ZEROTICS summary
  commit ee3ed4b45edd66c6e0455d3fab08a48e7ea83030
. lengthy thread leading to CPU_ZEROTICS
  https://www.freelists.org/post/procps/CStates-handling-new-switch

Signed-off-by: Jim Warner
---
 proc/stat.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/proc/stat.c b/proc/stat.c
index a587ca87..a7ec0f3f 100644
--- a/proc/stat.c
+++ b/proc/stat.c
@@ -46,6 +46,17 @@
 //#define ENFORCE_LOGICAL  // ensure only logical items are accepted by reap |
 // -------------------------------------------------------------------------
 
+/* --------------------------------------------------------------------------+
+   this next define is equivalent to the master top's CPU_ZEROTICS provision |
+   except that here in newlib we'll take an opposite approach to our default | */
+//#define CPU_IDLE_FORCED  // show as 100% idle if fewer ticks than expected |
+// --------------------------------------------------------------------------+
+
+#ifdef CPU_IDLE_FORCED
+   /* this is the % used in establishing a ticks threshold below which some |
+      cpu will be treated 'idle' rather than reflect misleading tick values | */
+#define TICS_THRESHOLD ( 100 / 20 )
+#endif
 
 struct stat_jifs {
     unsigned long long user, nice, system, idle, iowait, irq, sirq, stolen, guest, gnice;
@@ -72,6 +83,9 @@ struct hist_tic {
     int count;
     struct stat_jifs new;
     struct stat_jifs old;
+#ifdef CPU_IDLE_FORCED
+    unsigned long edge;              // only valued/valid with cpu summary |
+#endif
 };
 
 struct stacks_extent {
@@ -582,6 +596,13 @@ static int stat_read_failed (
         return 1;
     }
     stat_derive_unique(sum_ptr);
+#ifdef CPU_IDLE_FORCED
+   /* if any cpu accumulated substantially fewer tics than what is expected |
+      we'll force it to be treated as 'idle' so as not to return misleading |
+      statistics (and that sum_ptr->count also serves as first time switch) | */
+    if (sum_ptr->count) sum_ptr->edge =
+        ((sum_ptr->new.xtot - sum_ptr->old.xtot) / sum_ptr->count) / TICS_THRESHOLD;
+#endif
 
     i = 0;
 reap_em_again:
@@ -604,6 +625,14 @@ reap_em_again:
             break;       // we must tolerate cpus taken offline
         }
         stat_derive_unique(cpu_ptr);
+#ifdef CPU_IDLE_FORCED
+       // first time through (that priming read) sum_ptr->edge will be zero |
+        if (cpu_ptr->new.xtot < sum_ptr->edge) {
+            cpu_ptr->old.xtot = cpu_ptr->old.xbsy = cpu_ptr->old.xidl = cpu_ptr->old.xusr = cpu_ptr->old.xsys
+                = cpu_ptr->new.xbsy = cpu_ptr->new.xusr = cpu_ptr->new.xsys = 0;
+            cpu_ptr->new.xtot = cpu_ptr->new.xidl = 1;
+        }
+#endif
         ++cpu_ptr;
         ++i;
     } while (i < info->cpus.hist.n_alloc);
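
Not part of the patch, but for anyone reviewing the arithmetic: below is a
minimal standalone sketch of what the new 'edge' computation amounts to when
CPU_IDLE_FORCED is defined. The deltas, the cpu count and the file name are
made-up values for illustration only; the real logic lives in
stat_read_failed() as shown in the hunks above.

/* threshold_demo.c - illustrative only, not library code */
#include <stdio.h>

#define TICS_THRESHOLD ( 100 / 20 )   /* divide by 5, i.e. 20% of the average */

int main (void)
{
   /* hypothetical summary (all-cpu) tick deltas for one refresh cycle */
   unsigned long long sum_new_xtot = 8000, sum_old_xtot = 0;
   int count = 4;                     /* pretend 4 cpus were online */

   /* the 'edge': 20% of the average tics accumulated per cpu */
   unsigned long edge =
      ((sum_new_xtot - sum_old_xtot) / count) / TICS_THRESHOLD;

   /* any cpu whose own total delta falls below 'edge' would be reported as
      100% idle (busy components zeroed, xtot = xidl = 1) rather than show a
      misleading distribution built from too few tics */
   unsigned long long cpu_xtot = 150; /* an under-counting cpu, made up */

   printf("average per-cpu delta = %llu, edge = %lu\n",
      (sum_new_xtot - sum_old_xtot) / count, edge);
   if (cpu_xtot < edge)
      printf("a cpu with only %llu tics would be forced to 100%% idle\n",
         cpu_xtot);
   return 0;
}

Note that on the very first (priming) read sum_ptr->edge is still zero, so no
cpu can be forced idle until a real interval has elapsed; that is the purpose
of the sum_ptr->count test mentioned in the patch comment.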