top: refined memory graphs two abreast summary display <=== port of newlib 5c5bff39

______________________________ original newlib message

When more than two cpus are displayed per summary area
line in graph form, those memory graphs were scaled to
that same width for consistency & aesthetics. However,
they probably shouldn't have been reduced to less than
terminal width due to a resulting loss of information.

[ after all, detailed memory stats are never reduced ]

So now, supporting logic was refactored to behave just
as it did before the 4 toggle was expanded beyond '1'.

[ the changes impact the 2 memory graphs exclusively ]

Reference(s):
https://www.freelists.org/post/procps/top-enhancements-2-bugs-swatted,1

Signed-off-by: Jim Warner <james.warner@comcast.net>
commit bd463189d7 (parent 6f99010358)
Jim Warner, 2022-09-19 00:00:00 -05:00; committed by Craig Small
2 changed files with 57 additions and 43 deletions
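
To make the commit message's width arithmetic concrete, here is a hedged,
standalone sketch built from the formulas in the adj_geometry() hunk below.
GRAPH_length_min, GRAPH_length_max and ADJOIN_space do not appear in this
excerpt, so the values used here are assumptions (picked to match the
100-character bar/block strings); the printed column counts are only
illustrative.

    #include <stdio.h>

    #define GRAPH_length_max 100   // assumed: bar/block strings are 100 chars wide
    #define GRAPH_length_min  10   // assumed minimum graph body
    #define GRAPH_prefix_std  25   // from the hunk below
    #define GRAPH_prefix_abv  12   // from the hunk below
    #define GRAPH_suffix       2   // from the hunk below
    #define ADJOIN_space       1   // assumed gap between adjoining graphs

    static int clamp (int len) {
       if (len < GRAPH_length_min) len = GRAPH_length_min;
       if (len > GRAPH_length_max) len = GRAPH_length_max;
       return len;
    }

    int main (void) {
       int cols = 160;            // sample terminal width
       int double_up = 3;         // '4' toggle showing four cpu graphs per line
       int num = double_up + 1;
       int pfx = (double_up < 2) ? GRAPH_prefix_std : GRAPH_prefix_abv;
       // cpu graphs (and, before this commit, the memory graphs too) share the line
       int cpu_len = clamp((cols - (ADJOIN_space * double_up)
          - (num * (pfx + GRAPH_suffix))) / num);
       // memory graphs now always split the line just two abreast
       int mem_len = clamp((cols - ADJOIN_space
          - (2 * (GRAPH_prefix_std + GRAPH_suffix))) / 2);
       printf("cpu graph body: %d cols, memory graph body: %d cols (was %d)\n"
          , cpu_len, mem_len, cpu_len);
       return 0;
    }

With these assumed values, a 160-column terminal showing four cpu graphs per
line leaves each cpu graph body 25 columns wide; the two memory graphs recover
52 columns apiece instead of being squeezed to the same 25.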

top/top.c

@@ -258,8 +258,14 @@ static int Numa_node_sel = -1;
#define GRAPH_prefix_std 25 // '%Cpunnn: 100.0/100.0 100[' or 'nnn-nnn: 100.0/100.0 100['
#define GRAPH_prefix_abv 12 // '%Cpunnn:100[' or 'nnn-nnn:100[' or 'GiB Mem 100[' or 'GiB Swap 99['
#define GRAPH_suffix 2 // '] ' (bracket + trailing space)
static float Graph_adj; // bars/blocks scaling factor
static int Graph_len; // scaled length (<= GRAPH_length_max)
// first 3 more static (adj_geometry), last 3 volatile (sum_tics/do_memory)
struct graph_parms {
float adjust; // bars/blocks scaling factor
int length; // scaled length (<= GRAPH_length_max)
int style; // rc.graph_cpus or rc.graph_mems
long total, part1, part2; // elements to be graphed
};
static struct graph_parms *Graph_cpus, *Graph_mems;
static const char Graph_blks[] = " ";
static const char Graph_bars[] = "||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||";
@@ -2142,15 +2148,23 @@ static void adj_geometry (void) {
if (Curwin->rc.double_up) {
int num = (Curwin->rc.double_up + 1);
int pfx = (Curwin->rc.double_up < 2) ? GRAPH_prefix_std : GRAPH_prefix_abv;
Graph_len = (Screen_cols - (ADJOIN_space * Curwin->rc.double_up) - (num * (pfx + GRAPH_suffix))) / num;
Graph_cpus->length = (Screen_cols - (ADJOIN_space * Curwin->rc.double_up) - (num * (pfx + GRAPH_suffix))) / num;
Graph_mems->length = (Screen_cols - ADJOIN_space - (2 * (GRAPH_prefix_std + GRAPH_suffix))) / 2;
} else {
Graph_len = Screen_cols - (GRAPH_prefix_std + GRAPH_length_max + GRAPH_suffix);
if (Graph_len >= 0) Graph_len = GRAPH_length_max;
else Graph_len = Screen_cols - GRAPH_prefix_std - GRAPH_suffix;
Graph_cpus->length = Screen_cols - (GRAPH_prefix_std + GRAPH_length_max + GRAPH_suffix);
if (Graph_cpus->length >= 0) Graph_cpus->length = GRAPH_length_max;
else Graph_cpus->length = Screen_cols - GRAPH_prefix_std - GRAPH_suffix;
Graph_mems->length = Graph_cpus->length;
}
if (Graph_len < GRAPH_length_min) Graph_len = GRAPH_length_min;
if (Graph_len > GRAPH_length_max) Graph_len = GRAPH_length_max;
Graph_adj = (float)Graph_len / 100.0;
if (Graph_cpus->length < GRAPH_length_min) Graph_cpus->length = GRAPH_length_min;
if (Graph_cpus->length > GRAPH_length_max) Graph_cpus->length = GRAPH_length_max;
Graph_cpus->adjust = (float)Graph_cpus->length / 100.0;
Graph_cpus->style = Curwin->rc.graph_cpus;
if (Graph_mems->length < GRAPH_length_min) Graph_mems->length = GRAPH_length_min;
if (Graph_mems->length > GRAPH_length_max) Graph_mems->length = GRAPH_length_max;
Graph_mems->adjust = (float)Graph_mems->length / 100.0;
Graph_mems->style = Curwin->rc.graph_mems;
fflush(stdout);
} // end: adj_geometry
@@ -3879,6 +3893,9 @@ static void before (char *me) {
memcpy(HHash_one, HHash_nul, sizeof(HHash_nul));
memcpy(HHash_two, HHash_nul, sizeof(HHash_nul));
#endif
// lastly, establish support for graphing cpus & memory
Graph_cpus = alloc_c(sizeof(struct graph_parms));
Graph_mems = alloc_c(sizeof(struct graph_parms));
} // end: before
@@ -6265,7 +6282,7 @@ struct rx_st {
* A *Helper* function to produce the actual cpu & memory graphs for |
* these functions -- sum_tics (tertiary) and do_memory (secondary). |
* (sorry about the name, but it keeps the above comment commitment) | */
static struct rx_st *sum_rx (long total, long part1, long part2, int style) {
static struct rx_st *sum_rx (struct graph_parms *these) {
static struct {
const char *part1, *part2, *style;
} gtab[] = {
@@ -6274,30 +6291,30 @@ static struct rx_st *sum_rx (long total, long part1, long part2, int style) {
};
static __thread struct rx_st rx;
char buf1[SMLBUFSIZ], buf2[SMLBUFSIZ], buf3[MEDBUFSIZ];
int num1, num2, width;
int ix, num1, num2, width;
float scale;
scale = 100.0 / total;
rx.pcnt_one = scale * part1;
rx.pcnt_two = scale * part2;
scale = 100.0 / these->total;
rx.pcnt_one = scale * these->part1;
rx.pcnt_two = scale * these->part2;
if (rx.pcnt_one + rx.pcnt_two > 100.0 || rx.pcnt_two < 0)
rx.pcnt_two = 0;
rx.pcnt_tot = rx.pcnt_one + rx.pcnt_two;
num1 = (int)((rx.pcnt_one * Graph_adj) + .5),
num2 = (int)((rx.pcnt_two * Graph_adj) + .5);
if (num1 + num2 > Graph_len)
num2 = Graph_len - num1;
num1 = (int)((rx.pcnt_one * these->adjust) + .5),
num2 = (int)((rx.pcnt_two * these->adjust) + .5);
if (num1 + num2 > these->length)
num2 = these->length - num1;
width = Graph_len;
width = these->length;
buf1[0] = buf2[0] = buf3[0] = '\0';
--style; // now relative to zero
ix = these->style - 1; // now relative to zero
if (num1) {
snprintf(buf1, sizeof(buf1), gtab[style].part1, num1, gtab[style].style);
snprintf(buf1, sizeof(buf1), gtab[ix].part1, num1, gtab[ix].style);
width += 2;
}
if (num2) {
snprintf(buf2, sizeof(buf2), gtab[style].part2, num2, gtab[style].style);
snprintf(buf2, sizeof(buf2), gtab[ix].part2, num2, gtab[ix].style);
width += 2;
}
snprintf(buf3, sizeof(buf3), "%s%s", buf1, buf2);
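
A side note on the num1/num2 arithmetic above: because both parts are rounded
independently, their sum can overshoot the graph body by one cell, which is
what the trim of num2 guards against. A minimal sketch, assuming only the
adjust and length fields from the graph_parms hunk:

    #include <stdio.h>

    // sketch only -- mirrors the rounding/trim step in sum_rx() above
    struct graph_parms_sketch { float adjust; int length; };

    static void to_cells (const struct graph_parms_sketch *gp,
          float pcnt_one, float pcnt_two, int *num1, int *num2) {
       *num1 = (int)((pcnt_one * gp->adjust) + .5);   // e.g. 49.7% of 100 -> 50
       *num2 = (int)((pcnt_two * gp->adjust) + .5);   // e.g. 50.6% of 100 -> 51
       if (*num1 + *num2 > gp->length)                // 50 + 51 > 100 ...
          *num2 = gp->length - *num1;                 // ... so part2 is trimmed to 50
    }

    int main (void) {
       struct graph_parms_sketch gp = { .adjust = 100 / 100.0, .length = 100 };
       int n1, n2;
       to_cells(&gp, 49.7f, 50.6f, &n1, &n2);
       printf("%d + %d cells\n", n1, n2);             // 50 + 50, never 101 total
       return 0;
    }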
@@ -6370,7 +6387,10 @@ static int sum_tics (CPU_t *cpu, const char *pfx, int nobuf) {
/* display some kinda' cpu state percentages
(who or what is explained by the passed prefix) */
if (Curwin->rc.graph_cpus) {
rx = sum_rx(tot_frme, u_frme, s_frme, Curwin->rc.graph_cpus);
Graph_cpus->total = tot_frme;
Graph_cpus->part1 = u_frme;
Graph_cpus->part2 = s_frme;
rx = sum_rx(Graph_cpus);
if (Curwin->rc.double_up > 1)
return sum_see(fmtmk("%s~3%3.0f%s", pfx, rx->pcnt_tot, rx->graph), nobuf);
else {
@@ -6548,29 +6568,23 @@ static void do_memory (void) {
kb_main_my_misc = kb_main_total - kb_main_available - kb_main_used;
#endif
if (Curwin->rc.graph_mems) {
Graph_mems->total = kb_main_total;
Graph_mems->part1 = kb_main_used;
Graph_mems->part2 = kb_main_my_misc;
rx = sum_rx(Graph_mems);
prT(bfT(0), mkM(total));
rx = sum_rx(kb_main_total, kb_main_used, kb_main_my_misc, Curwin->rc.graph_mems);
if (Curwin->rc.double_up > 1)
snprintf(row, sizeof(row), "%s %s~3%3.0f%s"
, scT(label), N_txt(WORD_abv_mem_txt), rx->pcnt_tot, rx->graph);
else {
prT(bfT(0), kb_main_total);
snprintf(row, sizeof(row), "%s %s:~3%#5.1f~2/%-9.9s~3%s"
, scT(label), N_txt(WORD_abv_mem_txt), rx->pcnt_tot, bfT(0)
, rx->graph);
}
Msg_row += sum_see(row, mem2UP);
Graph_mems->total = kb_swap_total;
Graph_mems->part1 = 0;
Graph_mems->part2 = kb_swap_used;
rx = sum_rx(Graph_mems);
prT(bfT(1), mkS(total));
rx = sum_rx(kb_swap_total, 0, kb_swap_used, Curwin->rc.graph_mems);
if (Curwin->rc.double_up > 1)
snprintf(row, sizeof(row), "%s %s~3%3.0f%s"
, scT(label), N_txt(WORD_abv_swp_txt), rx->pcnt_tot, rx->graph);
else {
prT(bfT(1), kb_swap_total);
snprintf(row, sizeof(row), "%s %s:~3%#5.1f~2/%-9.9s~3%s"
, scT(label), N_txt(WORD_abv_swp_txt), rx->pcnt_two, bfT(1), rx->graph);
}
Msg_row += sum_see(row, 1);
} else {
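
Worth noting about the single Graph_mems descriptor used above: as the earlier
struct comment says, adjust, length and style are (re)established only in
adj_geometry(), while total, part1 and part2 are volatile and refilled for
every graph drawn -- so the Mem and Swap rows simply reload the same structure
back to back before each sum_rx() call. A hedged sketch of that calling
pattern (draw_graph() is a stand-in for the real sum_rx/snprintf sequence):

    // sketch of the refill-and-reuse pattern above, not the actual top code
    struct graph_parms { float adjust; int length; int style;
                         long total, part1, part2; };

    static void draw_graph (struct graph_parms *these) {
       // consumes the volatile total/part1/part2 against the adjust,
       // length and style values left behind by adj_geometry()
       (void)these;
    }

    static void memory_rows (struct graph_parms *mems,
          long kb_main_total, long kb_main_used, long kb_main_my_misc,
          long kb_swap_total, long kb_swap_used) {
       mems->total = kb_main_total;        // Mem row
       mems->part1 = kb_main_used;
       mems->part2 = kb_main_my_misc;
       draw_graph(mems);

       mems->total = kb_swap_total;        // Swap row reuses the descriptor
       mems->part1 = 0;
       mems->part2 = kb_swap_used;
       draw_graph(mems);
    }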

top/top.h

@@ -846,7 +846,7 @@ typedef struct WIN_t {
//atic void keys_window (int ch);
//atic void keys_xtra (int ch);
/*------ Tertiary summary display support (summary_show helpers) -------*/
//atic struct rx_st *sum_rx (long total, long part1, long part2, int style);
//atic struct rx_st *sum_rx (struct graph_parms *these);
//atic inline int sum_see (const char *str, int nobuf);
//atic int sum_tics (CPU_t *cpu, const char *pfx, int nobuf);
//atic int sum_unify (CPU_t *cpu, int nobuf);