Skip to content

Commit 93c950e

Browse files
committed
add mappedfile linear resize tests
1 parent 84f27ff commit 93c950e

1 file changed

Lines changed: 389 additions & 1 deletion

File tree

benchmark.cpp

Lines changed: 389 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -448,4 +448,392 @@ TEST(WriteMemory, SequentialBlocks) {
448448
for (int32_t i = 0; i < numBlocksToWrite; ++i)
449449
std::ranges::fill(f.createArray<int32_t>(numIntsPerBlock), i);
450450
});
451-
}
451+
}
452+
453+
// Benchmark: grow a file-backed mapping linearly in small, odd-sized steps,
// comparing naive remap-the-whole-file strategies against decodeless::resizable_file.
TEST(MapFile, LinearResize) {
    const TmpFile mappedFile("tmp.dat");
    // Reserve far more address space than is ever committed so resizable_file
    // never has to relocate its mapping.
    constexpr size_t reservedAddressSpace = 1024 * 1024 * 1024;
    constexpr size_t mapAddressSpace = 1024 * 1024;
    // Prime step size so growth rarely lands exactly on a page boundary.
    constexpr size_t stepSize = 1009;
    printf("Mapping %zu bytes in increments of %zu bytes\n", mapAddressSpace, stepSize);
    nb::Bench()
        .minEpochTime(std::chrono::milliseconds(50))
        .maxEpochTime(std::chrono::seconds(3))
        .minEpochIterations(3)
        //.warmup(1)
        .relative(true)
#ifdef _WIN32
        .run("MapViewOfFile (slow; remap whole file)",
             [&] {
                 size_t pageSize = []() {
                     SYSTEM_INFO info;
                     GetSystemInfo(&info);
                     return info.dwPageSize;
                 }();

                 HANDLE hFile = CreateFileW(mappedFile.path.generic_wstring().c_str(),
                                            GENERIC_READ | GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                                            FILE_ATTRIBUTE_NORMAL, nullptr);
                 CHECK(hFile != INVALID_HANDLE_VALUE);
                 size_t mappedBytes = 0;
                 size_t mappedPages = 0;
                 HANDLE hMap = nullptr;
                 LPVOID mapped = nullptr;
                 for (; mappedBytes + stepSize <= mapAddressSpace;) {
                     // Only touch the mapping when the next step crosses a page boundary.
                     if (mappedBytes + stepSize > mappedPages * pageSize) {
                         mappedPages++;
                         // A file mapping object cannot grow; drop the view and the
                         // mapping before extending the file.
                         if (mapped) {
                             CHECK(UnmapViewOfFile(mapped));
                             CloseHandle(hMap);
                         }
                         {
                             LARGE_INTEGER liSize;
                             liSize.QuadPart = mappedPages * pageSize;
                             CHECK(SetFilePointerEx(hFile, liSize, nullptr, FILE_BEGIN));
                             CHECK(SetEndOfFile(hFile));
                         }
                         hMap = CreateFileMappingW(hFile, nullptr, PAGE_READWRITE, 0, 0, nullptr);
                         CHECK(hMap != nullptr);
                         mapped = MapViewOfFile(hMap, FILE_MAP_WRITE, 0, 0, mappedPages * pageSize);
                         CHECK(mapped != nullptr);
                         ankerl::nanobench::doNotOptimizeAway(mapped);
                     }
                     mappedBytes += stepSize;
                     ankerl::nanobench::doNotOptimizeAway(mappedBytes);
                 }
                 CHECK(UnmapViewOfFile(mapped));

                 CloseHandle(hMap);
                 CloseHandle(hFile);
             })
        .run("MapViewOfFile (cheat: whole file up-front)",
             [&] {
                 size_t pageSize = []() {
                     SYSTEM_INFO info;
                     GetSystemInfo(&info);
                     return info.dwPageSize;
                 }();

                 HANDLE hFile = CreateFileW(mappedFile.path.generic_wstring().c_str(),
                                            GENERIC_READ | GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                                            FILE_ATTRIBUTE_NORMAL, nullptr);
                 CHECK(hFile != INVALID_HANDLE_VALUE);

                 // Cheat: size the file and create the mapping object once; only the
                 // view is recreated as the "used" size grows.
                 LARGE_INTEGER liSize;
                 liSize.QuadPart = mapAddressSpace;
                 CHECK(SetFilePointerEx(hFile, liSize, nullptr, FILE_BEGIN));
                 CHECK(SetEndOfFile(hFile));
                 HANDLE hMap = CreateFileMappingW(hFile, nullptr, PAGE_READWRITE, 0, 0, nullptr);

                 CHECK(hMap != nullptr);
                 size_t mappedBytes = 0;
                 size_t mappedPages = 0;
                 LPVOID mapped = nullptr;
                 for (; mappedBytes + stepSize <= mapAddressSpace;) {
                     if (mappedBytes + stepSize > mappedPages * pageSize) {
                         mappedPages++;
                         if (mapped)
                             CHECK(UnmapViewOfFile(mapped));
                         mapped = MapViewOfFile(hMap, FILE_MAP_WRITE, 0, 0, mappedPages * pageSize);
                         CHECK(mapped != nullptr);
                         ankerl::nanobench::doNotOptimizeAway(mapped);
                     }
                     mappedBytes += stepSize;
                     ankerl::nanobench::doNotOptimizeAway(mappedBytes);
                 }
                 CHECK(UnmapViewOfFile(mapped));

                 CloseHandle(hMap);
                 CloseHandle(hFile);
             })
#else
        .run("mmap",
             [&] {
                 size_t pageSize = sysconf(_SC_PAGESIZE);
                 int f = open(mappedFile.path.c_str(), O_RDWR | O_CREAT | O_TRUNC, (mode_t)0600);
                 CHECK(f != -1);
                 size_t mappedBytes = 0;
                 size_t mappedPages = 0;
                 void* mappedBase = nullptr; // base of the first chunk
                 void* mappedEnd = nullptr;  // placement hint for the next chunk
                 for (; mappedBytes + stepSize <= mapAddressSpace;) {
                     if (mappedBytes + stepSize > mappedPages * pageSize) {
                         // Grow the file by however many whole pages the next step needs.
                         size_t allocPages =
                             (mappedBytes + stepSize + pageSize - 1) / pageSize - mappedPages;
                         CHECK(ftruncate(f, (mappedPages + allocPages) * pageSize) == 0);
                         // Map only the newly added pages, hinted directly after the
                         // previous chunk so the views normally form one contiguous range.
                         void* mapped = mmap(mappedEnd,
                                             allocPages * pageSize,
                                             PROT_WRITE,
                                             MAP_SHARED, f, (off_t)(mappedPages * pageSize));
                         CHECK(mapped != MAP_FAILED);
                         if (!mappedBase)
                             mappedBase = mapped;
                         mappedEnd = (std::byte*)mapped + allocPages * pageSize;
                         mappedPages += allocPages;
                         ankerl::nanobench::doNotOptimizeAway(mapped);
                     }
                     mappedBytes += stepSize;
                     ankerl::nanobench::doNotOptimizeAway(mappedBytes);
                 }
                 // munmap() ignores unmapped holes, so releasing [base, end) frees every
                 // chunk as long as the placement hints were honoured (a chunk the kernel
                 // placed elsewhere would leak until process exit; acceptable here).
                 if (mappedBase)
                     CHECK(munmap(mappedBase,
                                  (size_t)((std::byte*)mappedEnd - (std::byte*)mappedBase)) == 0);
                 CHECK(close(f) == 0);
             })
#endif
        .run("resizable_file", [&] {
            decodeless::resizable_file m(mappedFile.path, reservedAddressSpace);
            for (; m.size() + stepSize <= mapAddressSpace;) {
                m.resize(m.size() + stepSize);
                ankerl::nanobench::doNotOptimizeAway(m.size());
                ankerl::nanobench::doNotOptimizeAway(m.data());
            }
        });
}
587+
588+
// Benchmark: grow a file-backed mapping one whole page at a time, comparing
// per-page remap strategies against decodeless::resizable_file.
TEST(MapFile, LinearResizePages) {
    const TmpFile mappedFile("tmp.dat");
    constexpr size_t reservedAddressSpace = 1024 * 1024 * 1024;
    constexpr size_t mapAddressSpace = 1024 * 1024;
#ifdef _WIN32
    size_t pageSize = []() {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        return info.dwPageSize;
    }();
#else
    size_t pageSize = sysconf(_SC_PAGESIZE);
#endif
    printf("Mapping %zu bytes in increments of %zu bytes\n", mapAddressSpace, pageSize);
    nb::Bench()
        .minEpochTime(std::chrono::milliseconds(50))
        .maxEpochTime(std::chrono::seconds(3))
        .minEpochIterations(3)
        //.warmup(1)
        .relative(true)
#ifdef _WIN32
        .run("MapViewOfFile (slow; remap whole file)",
             [&] {
                 size_t pageSize = []() {
                     SYSTEM_INFO info;
                     GetSystemInfo(&info);
                     return info.dwPageSize;
                 }();

                 HANDLE hFile = CreateFileW(mappedFile.path.generic_wstring().c_str(),
                                            GENERIC_READ | GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                                            FILE_ATTRIBUTE_NORMAL, nullptr);
                 CHECK(hFile != INVALID_HANDLE_VALUE);
                 for (size_t pages = 1; pages * pageSize <= mapAddressSpace; ++pages) {
                     // Extend the file, then recreate both the mapping object and the
                     // view from scratch — the worst-case resize strategy.
                     LARGE_INTEGER liSize;
                     liSize.QuadPart = pages * pageSize;
                     CHECK(SetFilePointerEx(hFile, liSize, nullptr, FILE_BEGIN));
                     CHECK(SetEndOfFile(hFile));
                     HANDLE hMap =
                         CreateFileMappingW(hFile, nullptr, PAGE_READWRITE, 0, 0, nullptr);
                     CHECK(hMap != nullptr);
                     auto mapped =
                         (int32_t*)MapViewOfFile(hMap, FILE_MAP_WRITE, 0, 0, pages * pageSize);
                     CHECK(mapped != nullptr);
                     ankerl::nanobench::doNotOptimizeAway(mapped);
                     CHECK(UnmapViewOfFile(mapped));
                     CloseHandle(hMap);
                 }
                 CloseHandle(hFile);
             })
        .run("MapViewOfFile (cheat: whole file up-front)",
             [&] {
                 size_t pageSize = []() {
                     SYSTEM_INFO info;
                     GetSystemInfo(&info);
                     return info.dwPageSize;
                 }();

                 HANDLE hFile = CreateFileW(mappedFile.path.generic_wstring().c_str(),
                                            GENERIC_READ | GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                                            FILE_ATTRIBUTE_NORMAL, nullptr);
                 CHECK(hFile != INVALID_HANDLE_VALUE);

                 // Cheat: full-size file and a single mapping object up-front; only the
                 // view is recreated per page.
                 LARGE_INTEGER liSize;
                 liSize.QuadPart = mapAddressSpace;
                 CHECK(SetFilePointerEx(hFile, liSize, nullptr, FILE_BEGIN));
                 CHECK(SetEndOfFile(hFile));
                 HANDLE hMap = CreateFileMappingW(hFile, nullptr, PAGE_READWRITE, 0, 0, nullptr);
                 CHECK(hMap != nullptr);

                 for (size_t pages = 1; pages * pageSize <= mapAddressSpace; ++pages) {
                     auto mapped =
                         (int32_t*)MapViewOfFile(hMap, FILE_MAP_WRITE, 0, 0, pages * pageSize);
                     CHECK(mapped != nullptr);
                     ankerl::nanobench::doNotOptimizeAway(mapped);
                     CHECK(UnmapViewOfFile(mapped));
                 }

                 CloseHandle(hMap);
                 CloseHandle(hFile);
             })
#else
        .run("mmap",
             [&] {
                 size_t pageSize = sysconf(_SC_PAGESIZE);
                 int f = open(mappedFile.path.c_str(), O_RDWR | O_CREAT | O_TRUNC, (mode_t)0600);
                 CHECK(f != -1);
                 for (size_t pages = 1; pages * pageSize <= mapAddressSpace; ++pages) {
                     CHECK(ftruncate(f, pages * pageSize) == 0);
                     // Map just the page that was added, then release the view again,
                     // mirroring the per-iteration map/unmap of the Windows variants.
                     auto mapped = (int32_t*)mmap(nullptr, pageSize, PROT_WRITE,
                                                  MAP_SHARED, f, (off_t)((pages - 1) * pageSize));
                     CHECK(mapped != MAP_FAILED);
                     ankerl::nanobench::doNotOptimizeAway(mapped);
                     CHECK(munmap(mapped, pageSize) == 0);
                 }
                 CHECK(fsync(f) == 0);
                 CHECK(close(f) == 0);
             })
#endif
        .run("resizable_file", [&] {
            decodeless::resizable_file m(mappedFile.path, reservedAddressSpace);
            for (; m.size() + pageSize <= mapAddressSpace;) {
                m.resize(m.size() + pageSize);
                ankerl::nanobench::doNotOptimizeAway(m.size());
                ankerl::nanobench::doNotOptimizeAway(m.data());
            }
        });
}
694+
695+
// Benchmark: commit anonymous memory linearly in small, odd-sized steps out of
// a large reserved region, comparing raw OS calls against decodeless::resizable_memory.
TEST(MapMemory, LinearResize) {
    constexpr size_t reservedAddressSpace = 1024 * 1024 * 1024;
    constexpr size_t mapAddressSpace = 1024 * 1024;
    // Prime step size so growth rarely lands exactly on a page boundary.
    constexpr size_t stepSize = 1009;
    printf("Mapping %zu bytes in increments of %zu bytes\n", mapAddressSpace, stepSize);
    nb::Bench()
        .minEpochTime(std::chrono::milliseconds(1000))
        .maxEpochTime(std::chrono::seconds(3))
        .minEpochIterations(3)
        //.warmup(1)
        .relative(true)
#ifdef _WIN32
        .run("VirtualAlloc",
             [&] {
                 size_t pageSize = []() {
                     SYSTEM_INFO info;
                     GetSystemInfo(&info);
                     return info.dwPageSize;
                 }();
                 // Reserve address space only; pages are committed on demand below.
                 uint32_t* reserved =
                     (uint32_t*)VirtualAlloc(0, reservedAddressSpace, MEM_RESERVE, PAGE_NOACCESS);
                 CHECK(reserved != nullptr);
                 size_t mappedBytes = 0;
                 size_t mappedPages = 0;
                 for (; mappedBytes + stepSize <= mapAddressSpace;) {
                     if (mappedBytes + stepSize > mappedPages * pageSize) {
                         // Commit however many whole pages the next step needs.
                         size_t allocPages =
                             (mappedBytes + stepSize + pageSize - 1) / pageSize - mappedPages;
                         CHECK(VirtualAlloc(((std::byte*)reserved) + mappedPages * pageSize,
                                            allocPages * pageSize, MEM_COMMIT,
                                            PAGE_READWRITE) != nullptr);
                         mappedPages += allocPages;
                     }
                     mappedBytes += stepSize;
                     // do nothing with the memory
                     ankerl::nanobench::doNotOptimizeAway(mappedBytes);
                     ankerl::nanobench::doNotOptimizeAway(reserved);
                 }
                 CHECK(VirtualFree(reserved, 0, MEM_RELEASE));
             })
#else
        .run("mmap",
             [&] {
                 size_t pageSize = sysconf(_SC_PAGESIZE);
                 // Reserve with PROT_NONE; mprotect() grants access page by page below.
                 uint32_t* reserved =
                     (uint32_t*)mmap(nullptr, reservedAddressSpace, PROT_NONE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                 CHECK(reserved != MAP_FAILED);
                 size_t mappedBytes = 0;
                 size_t mappedPages = 0;
                 for (; mappedBytes + stepSize <= mapAddressSpace;) {
                     if (mappedBytes + stepSize > mappedPages * pageSize) {
                         // Enable however many whole pages the next step needs.
                         size_t allocPages =
                             (mappedBytes + stepSize + pageSize - 1) / pageSize - mappedPages;
                         CHECK(mprotect((std::byte*)reserved + mappedPages * pageSize,
                                        allocPages * pageSize,
                                        PROT_READ | PROT_WRITE) == 0);
                         mappedPages += allocPages;
                     }
                     mappedBytes += stepSize;
                     // do nothing with the memory
                     ankerl::nanobench::doNotOptimizeAway(mappedBytes);
                     ankerl::nanobench::doNotOptimizeAway(reserved);
                 }
                 CHECK(munmap(reserved, reservedAddressSpace) == 0);
             })
#endif
        .run("resizable_memory", [&] {
            decodeless::resizable_memory m(0, reservedAddressSpace);
            for (; m.size() + stepSize <= mapAddressSpace;) {
                m.resize(m.size() + stepSize);
                ankerl::nanobench::doNotOptimizeAway(m.size());
                ankerl::nanobench::doNotOptimizeAway(m.data());
            }
        });
}
770+
771+
// Benchmark: commit anonymous memory one whole page at a time out of a large
// reserved region, comparing raw OS calls against decodeless::resizable_memory.
TEST(MapMemory, LinearResizePages) {
    constexpr size_t reservedAddressSpace = 1024 * 1024 * 1024;
    constexpr size_t mapAddressSpace = 1024 * 1024;
#ifdef _WIN32
    size_t pageSize = []() {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        return info.dwPageSize;
    }();
#else
    size_t pageSize = sysconf(_SC_PAGESIZE);
#endif
    printf("Mapping %zu bytes in increments of %zu bytes\n", mapAddressSpace, pageSize);
    nb::Bench()
        .minEpochTime(std::chrono::milliseconds(1000))
        .maxEpochTime(std::chrono::seconds(3))
        .minEpochIterations(3)
        //.warmup(1)
        .relative(true)
#ifdef _WIN32
        .run("VirtualAlloc",
             [&] {
                 size_t pageSize = []() {
                     SYSTEM_INFO info;
                     GetSystemInfo(&info);
                     return info.dwPageSize;
                 }();
                 // Reserve address space only; pages are committed one at a time below.
                 uint32_t* reserved =
                     (uint32_t*)VirtualAlloc(0, reservedAddressSpace, MEM_RESERVE, PAGE_NOACCESS);
                 CHECK(reserved != nullptr);
                 size_t mappedPages = 0;
                 for (; mappedPages * pageSize <= mapAddressSpace;) {
                     CHECK(VirtualAlloc(((std::byte*)reserved) + mappedPages * pageSize,
                                        pageSize, MEM_COMMIT, PAGE_READWRITE) != nullptr);
                     mappedPages++;
                     // do nothing with the memory
                     ankerl::nanobench::doNotOptimizeAway(mappedPages);
                     ankerl::nanobench::doNotOptimizeAway(reserved);
                 }
                 CHECK(VirtualFree(reserved, 0, MEM_RELEASE));
             })
#else
        .run("mmap",
             [&] {
                 size_t pageSize = sysconf(_SC_PAGESIZE);
                 // Reserve with PROT_NONE; mprotect() grants access one page at a time below.
                 uint32_t* reserved =
                     (uint32_t*)mmap(nullptr, reservedAddressSpace, PROT_NONE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                 CHECK(reserved != MAP_FAILED);
                 size_t mappedPages = 0;
                 for (; mappedPages * pageSize <= mapAddressSpace;) {
                     CHECK(mprotect((std::byte*)reserved + mappedPages * pageSize, pageSize,
                                    PROT_READ | PROT_WRITE) == 0);
                     mappedPages++;
                     // do nothing with the memory
                     ankerl::nanobench::doNotOptimizeAway(mappedPages);
                     ankerl::nanobench::doNotOptimizeAway(reserved);
                 }
                 CHECK(munmap(reserved, reservedAddressSpace) == 0);
             })
#endif
        .run("resizable_memory", [&] {
            decodeless::resizable_memory m(0, reservedAddressSpace);
            for (; m.size() + pageSize <= mapAddressSpace;) {
                m.resize(m.size() + pageSize);
                ankerl::nanobench::doNotOptimizeAway(m.size());
                ankerl::nanobench::doNotOptimizeAway(m.data());
            }
        });
}

0 commit comments

Comments
 (0)