Significantly optimize the bit fade and own addr tests for size by folding near-identical switch case bodies together and by merging pattern_fill() and pattern_check() to remove code duplication. Also add a rep stos[lq] path to the bit fade test.

Before / after:
   text    data     bss     dec     hex filename
   1830       4       0    1834     72a build32/tests/bit_fade.o
   1191       4       0    1195     4ab build32/tests/bit_fade.o

   1359       0       0    1359     54f build32/tests/own_addr.o
    959       0       0     959     3bf build32/tests/own_addr.o

   1581       4       0    1585     631 build64/tests/bit_fade.o
   1021       4       0    1025     401 build64/tests/bit_fade.o

   1236       0       0    1236     4d4 build64/tests/own_addr.o
    859       0       0     859     35b build64/tests/own_addr.o
Lionel Debroux 2023-10-11 23:37:53 +02:00
parent 2d3b14ed1a
commit 78c6d4b400
3 changed files with 68 additions and 116 deletions
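For orientation, here is a minimal standalone sketch of the shape this change gives both tests: a single body parameterized by a bool replaces the separate pattern_fill()/pattern_check() loops, with an optional rep stos fast path for the fill pass. Everything in the sketch (pattern_fill_check, the simplified write_word/read_word, the buffer in main) is invented for illustration and is not the memtest86+ code; the rep stosq constraints are written in conventional read/write form ("+c", "+D", "memory") rather than copied from the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t testword_t;   // stand-in for memtest86+'s native word type

// Simplified stand-ins for the real accessors, for illustration only.
static inline void write_word(testword_t *p, testword_t v) { *p = v; }
static inline testword_t read_word(const testword_t *p)    { return *p; }

// One body, selected by 'fill', instead of two near-identical functions.
// noclone mirrors the commit's hint that the compiler should not split the
// merged function back into per-constant clones.
static __attribute__((noclone)) int pattern_fill_check(testword_t *start, testword_t *end,
                                                       testword_t pattern, bool fill)
{
    int errors = 0;
    testword_t *p = start;

    if (fill) {
#if defined(__x86_64__)
        // Fast path in the spirit of the commit's rep stosq: one store per word.
        uint64_t length = (uint64_t)(end - p + 1);
        __asm__ __volatile__ (
            "rep stosq"
            : "+c" (length), "+D" (p)   // rep stosq consumes RCX and RDI
            : "a" (pattern)
            : "memory"
        );
#else
        do {
            write_word(p, pattern);
        } while (p++ < end);            // test before increment in case the pointer overflows
#endif
    } else {
        do {
            if (read_word(p) != pattern) {
                errors++;               // the real code calls data_error() here
            }
        } while (p++ < end);
    }
    return errors;
}

int main(void)
{
    static testword_t buf[1024];
    testword_t *last = &buf[1023];

    pattern_fill_check(buf, last, 0, true);              // fill pass
    int bad = pattern_fill_check(buf, last, 0, false);   // check pass
    printf("mismatches: %d\n", bad);
    return 0;
}

Folding the fill and check passes this way is what lets the switch statements in test_bit_fade() and test_own_addr2() collapse to one call site apiece, which appears to account for most of the size reduction shown above.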

View File

@@ -25,11 +25,13 @@
#include "test_funcs.h"
#include "test_helper.h"
#define HAND_OPTIMISED 1 // Use hand-optimised assembler code for performance.
//------------------------------------------------------------------------------
// Private Functions
//------------------------------------------------------------------------------
static int pattern_fill(int my_cpu, testword_t pattern)
static __attribute__((noclone)) int bit_fade_pattern_fill_check(int my_cpu, testword_t pattern, bool fill)
{
int ticks = 0;
@@ -58,53 +60,49 @@ static int pattern_fill(int my_cpu, testword_t pattern)
continue;
}
test_addr[my_cpu] = (uintptr_t)p;
do {
write_word(p, pattern);
} while (p++ < pe); // test before increment in case pointer overflows
if (fill) {
#if HAND_OPTIMISED
#ifdef __x86_64__
uint64_t length = pe - p + 1;
__asm__ __volatile__ ("\t"
"rep \n\t"
"stosq \n\t"
:
: "c" (length), "D" (p), "a" (pattern)
:
);
p += length;
#else
uint32_t length = pe - p + 1;
__asm__ __volatile__ ("\t"
"rep \n\t"
"stosl \n\t"
:
: "c" (length), "D" (p), "a" (pattern)
:
);
p += length;
#endif
#else
do {
write_word(p, pattern);
} while (p++ < pe); // test before increment in case pointer overflows
#endif
} else {
do {
testword_t actual = read_word(p);
if (unlikely(actual != pattern)) {
data_error(p, pattern, actual, true);
}
} while (p++ < pe); // test before increment in case pointer overflows
}
do_tick(my_cpu);
BAILOUT;
} while (!at_end && ++pe); // advance pe to next start point
}
flush_caches(my_cpu);
return ticks;
}
static int pattern_check(int my_cpu, testword_t pattern)
{
int ticks = 0;
for (int i = 0; i < vm_map_size; i++) {
testword_t *start = vm_map[i].start;
testword_t *end = vm_map[i].end;
testword_t *p = start;
testword_t *pe = start;
bool at_end = false;
do {
// take care to avoid pointer overflow
if ((end - pe) >= SPIN_SIZE) {
pe += SPIN_SIZE - 1;
} else {
at_end = true;
pe = end;
}
ticks++;
if (my_cpu < 0) {
continue;
}
test_addr[my_cpu] = (uintptr_t)p;
do {
testword_t actual = read_word(p);
if (unlikely(actual != pattern)) {
data_error(p, pattern, actual, true);
}
} while (p++ < pe); // test before increment in case pointer overflows
do_tick(my_cpu);
BAILOUT;
} while (!at_end && ++pe); // advance pe to next start point
if (fill) {
flush_caches(my_cpu);
}
return ticks;
@@ -144,9 +142,13 @@ int test_bit_fade(int my_cpu, int stage, int sleep_secs)
int ticks = 0;
testword_t pattern = stage < 3 ? all_zero : all_ones;
stage %= 3;
switch (stage) {
case 0:
ticks = pattern_fill(my_cpu, all_zero);
ticks = bit_fade_pattern_fill_check(my_cpu, pattern, true);
break;
case 1:
// Only sleep once.
@@ -155,19 +157,7 @@ int test_bit_fade(int my_cpu, int stage, int sleep_secs)
}
break;
case 2:
ticks = pattern_check(my_cpu, all_zero);
break;
case 3:
ticks = pattern_fill(my_cpu, all_ones);
break;
case 4:
// Only sleep once.
if (stage != last_stage) {
ticks = fade_delay(my_cpu, sleep_secs);
}
break;
case 5:
ticks = pattern_check(my_cpu, all_ones);
ticks = bit_fade_pattern_fill_check(my_cpu, pattern, false);
break;
default:
break;

View File

@@ -70,7 +70,7 @@ int test_mov_inv_fixed(int my_cpu, int iterations, testword_t pattern1, testword
: "c" (length), "D" (p), "a" (pattern1)
:
);
p = pe;
p += length;
#else
uint32_t length = pe - p + 1;
__asm__ __volatile__ ("\t"
@@ -80,7 +80,7 @@ int test_mov_inv_fixed(int my_cpu, int iterations, testword_t pattern1, testword
: "c" (length), "D" (p), "a" (pattern1)
:
);
p = pe;
p += length;
#endif
#else
do {

View File

@@ -29,7 +29,7 @@
// Private Functions
//------------------------------------------------------------------------------
static int pattern_fill(int my_cpu, testword_t offset)
static int __attribute__((noclone)) own_addr_pattern_fill_check(int my_cpu, testword_t offset, bool fill)
{
int ticks = 0;
@@ -59,55 +59,26 @@ static int pattern_fill(int my_cpu, testword_t offset)
continue;
}
test_addr[my_cpu] = (uintptr_t)p;
do {
write_word(p, (testword_t)p + offset);
} while (p++ < pe); // test before increment in case pointer overflows
if (fill) {
do {
write_word(p, (testword_t)p + offset);
} while (p++ < pe); // test before increment in case pointer overflows
} else {
do {
testword_t expect = (testword_t)p + offset;
testword_t actual = read_word(p);
if (unlikely(actual != expect)) {
data_error(p, expect, actual, true);
}
} while (p++ < pe); // test before increment in case pointer overflows
}
do_tick(my_cpu);
BAILOUT;
} while (!at_end && ++pe); // advance pe to next start point
}
flush_caches(my_cpu);
return ticks;
}
static int pattern_check(int my_cpu, testword_t offset)
{
int ticks = 0;
// Check each address has its own address.
for (int i = 0; i < vm_map_size; i++) {
testword_t *start = vm_map[i].start;
testword_t *end = vm_map[i].end;
testword_t *p = start;
testword_t *pe = start;
bool at_end = false;
do {
// take care to avoid pointer overflow
if ((end - pe) >= SPIN_SIZE) {
pe += SPIN_SIZE - 1;
} else {
at_end = true;
pe = end;
}
ticks++;
if (my_cpu < 0) {
continue;
}
test_addr[my_cpu] = (uintptr_t)p;
do {
testword_t expect = (testword_t)p + offset;
testword_t actual = read_word(p);
if (unlikely(actual != expect)) {
data_error(p, expect, actual, true);
}
} while (p++ < pe); // test before increment in case pointer overflows
do_tick(my_cpu);
BAILOUT;
} while (!at_end && ++pe); // advance pe to next start point
if (fill) {
flush_caches(my_cpu);
}
return ticks;
@@ -121,8 +92,8 @@ int test_own_addr1(int my_cpu)
{
int ticks = 0;
ticks += pattern_fill(my_cpu, 0);
ticks += pattern_check(my_cpu, 0);
ticks += own_addr_pattern_fill_check(my_cpu, 0, true);
ticks += own_addr_pattern_fill_check(my_cpu, 0, false);
return ticks;
}
@@ -144,16 +115,7 @@ int test_own_addr2(int my_cpu, int stage)
offset /= VM_WINDOW_SIZE;
#endif
switch (stage) {
case 0:
ticks = pattern_fill(my_cpu, offset);
break;
case 1:
ticks = pattern_check(my_cpu, offset);
break;
default:
break;
}
ticks = own_addr_pattern_fill_check(my_cpu, offset, stage == 0);
return ticks;
}