// Copyright 2019 The TCMalloc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tcmalloc/static_vars.h"
#include <stddef.h>
#include <atomic>
#include <cstring>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/optimization.h"
#include "absl/types/span.h"
#include "tcmalloc/allocation_sample.h"
#include "tcmalloc/arena.h"
#include "tcmalloc/common.h"
#include "tcmalloc/cpu_cache.h"
#include "tcmalloc/deallocation_profiler.h"
#include "tcmalloc/experiment.h"
#include "tcmalloc/experiment_config.h"
#include "tcmalloc/guarded_page_allocator.h"
#include "tcmalloc/internal/atomic_stats_counter.h"
#include "tcmalloc/internal/cache_topology.h"
#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/environment.h"
#include "tcmalloc/internal/gwp_asan_state.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/mincore.h"
#include "tcmalloc/internal/numa.h"
#include "tcmalloc/internal/parameter_accessors.h"
#include "tcmalloc/internal/percpu.h"
#include "tcmalloc/internal/sampled_allocation.h"
#include "tcmalloc/internal/size_class_info.h"
#include "tcmalloc/internal/sysinfo.h"
#include "tcmalloc/internal/system_allocator.h"
#include "tcmalloc/malloc_extension.h"
#include "tcmalloc/metadata_object_allocator.h"
#include "tcmalloc/page_allocator.h"
#include "tcmalloc/pagemap.h"
#include "tcmalloc/parameters.h"
#include "tcmalloc/peak_heap_tracker.h"
#include "tcmalloc/sizemap.h"
#include "tcmalloc/span.h"
#include "tcmalloc/stack_trace_table.h"
#include "tcmalloc/thread_cache.h"
#include "tcmalloc/transfer_cache.h"
GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {

// Cacheline-align our SizeMap and CpuCache. They both have very hot arrays as
// their first member variables, and aligning them reduces the number of cache
// lines these arrays use.
//
// IF YOU ADD TO THIS LIST, ADD TO STATIC_VAR_SIZE TOO!
// LINT.IfChange(static_vars)
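// Guards the page heap and much of TCMalloc's global state.
// SCHEDULE_KERNEL_ONLY keeps the spinlock from cooperating with user-level
// (fiber) scheduling while held, as allocator-internal locks must.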
ABSL_CONST_INIT absl::base_internal::SpinLock pageheap_lock(
    absl::base_internal::SCHEDULE_KERNEL_ONLY);

ABSL_CONST_INIT Arena Static::arena_;
ABSL_CONST_INIT SizeMap ABSL_CACHELINE_ALIGNED Static::sizemap_;
TCMALLOC_ATTRIBUTE_NO_DESTROY ABSL_CONST_INIT TransferCacheManager
    Static::transfer_cache_;
ABSL_CONST_INIT ShardedTransferCacheManager
    Static::sharded_transfer_cache_(nullptr, nullptr);
ABSL_CONST_INIT CpuCache<Static> ABSL_CACHELINE_ALIGNED Static::cpu_cache_{
    tc_globals};
ABSL_CONST_INIT MetadataObjectAllocator<SampledAllocation>
    Static::sampledallocation_allocator_{arena_};
ABSL_CONST_INIT MetadataObjectAllocator<Span> Static::span_allocator_{arena_};
ABSL_CONST_INIT MetadataObjectAllocator<ThreadCache>
    Static::threadcache_allocator_{arena_};
TCMALLOC_ATTRIBUTE_NO_DESTROY ABSL_CONST_INIT SampledAllocationRecorder
    Static::sampled_allocation_recorder_{sampledallocation_allocator_};
ABSL_CONST_INIT tcmalloc_internal::StatsCounter Static::sampled_objects_size_;
ABSL_CONST_INIT tcmalloc_internal::StatsCounter
    Static::sampled_internal_fragmentation_;
ABSL_CONST_INIT tcmalloc_internal::StatsCounter Static::total_sampled_count_;
ABSL_CONST_INIT AllocationSampleList Static::allocation_samples;
ABSL_CONST_INIT deallocationz::DeallocationProfilerList
    Static::deallocation_samples;
ABSL_CONST_INIT std::atomic<int64_t> Static::sampled_alloc_handle_generator{0};
TCMALLOC_ATTRIBUTE_NO_DESTROY ABSL_CONST_INIT PeakHeapTracker
    Static::peak_heap_tracker_{sampledallocation_allocator_};
ABSL_CONST_INIT MetadataObjectAllocator<StackTraceTable::LinkedSample>
    Static::linked_sample_allocator_{arena_};
ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
ABSL_CONST_INIT std::atomic<bool> Static::cpu_cache_active_{false};
ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
ABSL_CONST_INIT PageMap Static::pagemap_;
ABSL_CONST_INIT GuardedPageAllocator Static::guardedpage_allocator_;
ABSL_CONST_INIT NumaTopology<kNumaPartitions, kNumBaseClasses>
    Static::numa_topology_;
ABSL_CONST_INIT GwpAsanState Static::gwp_asan_state_;
ABSL_CONST_INIT Static::PerSizeClassCounts Static::per_size_class_counts_;
TCMALLOC_ATTRIBUTE_NO_DESTROY ABSL_CONST_INIT SystemAllocator<
    NumaTopology<kNumaPartitions, kNumBaseClasses>, kNormalPartitions>
    Static::system_allocator_{numa_topology_, kMinMmapAlloc};

// Force kInvalidSpan to be read-protected. Span contains a std::atomic, and
// libc++'s std::atomic implementation contains a mutable field in one of its
// implementation details. This prevents Span from being placed in a read-only
// section automatically, even though we will never mutate this particular
// instance.
ABSL_ATTRIBUTE_SECTION_VARIABLE(.data.rel.ro)
const Span Static::kInvalidSpan = {};
// LINT.ThenChange(:static_vars_size)

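// The single global Static instance. All state lives in the static members
// defined above, so the instance itself mostly serves as a handle for
// templates such as CpuCache<Static>.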
ABSL_CONST_INIT Static tc_globals;

size_t Static::metadata_bytes() {
  // This is ugly and doesn't nicely account for e.g. alignment losses
  // -- I'd like to put all the above in a struct and take that
  // struct's size. But we can't due to linking issues.
  //
  // TODO(b/242550501): Progress on constant initialization guarantees allow
  // state to be consolidated directly into an instance, rather than as a
  // collection of static variables. Simplify this.
  // LINT.IfChange(static_vars_size)
  const size_t static_var_size =
      sizeof(pageheap_lock) + sizeof(arena_) + sizeof(sizemap_) +
      sizeof(sharded_transfer_cache_) + sizeof(transfer_cache_) +
      sizeof(cpu_cache_) + sizeof(sampledallocation_allocator_) +
      sizeof(span_allocator_) + sizeof(threadcache_allocator_) +
      sizeof(sampled_allocation_recorder_) + sizeof(linked_sample_allocator_) +
      sizeof(inited_) + sizeof(cpu_cache_active_) + sizeof(page_allocator_) +
      sizeof(pagemap_) + sizeof(sampled_objects_size_) +
      sizeof(sampled_internal_fragmentation_) + sizeof(total_sampled_count_) +
      sizeof(allocation_samples) + sizeof(deallocation_samples) +
      sizeof(sampled_alloc_handle_generator) + sizeof(peak_heap_tracker_) +
      sizeof(guardedpage_allocator_) + sizeof(numa_topology_) +
      sizeof(CacheTopology::Instance()) + sizeof(gwp_asan_state_) +
      sizeof(per_size_class_counts_) + sizeof(system_allocator_) +
      sizeof(kInvalidSpan);
  // LINT.ThenChange(:static_vars)

  const size_t allocated = arena().stats().bytes_allocated +
                           AddressRegionFactory::InternalBytesAllocated();
  return allocated + static_var_size;
}
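
// Reports how many bytes of the pagemap's root node are resident in physical
// memory. The root is mapped with small pages (see MapRootWithSmallPages
// below), so residence can be partial.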
size_t Static::pagemap_residence() {
  // Determine residence of the root node of the pagemap.
  size_t total = MInCore::residence(&pagemap_, sizeof(pagemap_));
  return total;
}

int ABSL_ATTRIBUTE_WEAK default_want_legacy_size_classes();

SizeClassConfiguration Static::size_class_configuration() {
  if (IsExperimentActive(Experiment::TEST_ONLY_TCMALLOC_POW2_SIZECLASS)) {
    return SizeClassConfiguration::kPow2Only;
  }

  // TODO(b/242710633): remove this opt out.
  if (default_want_legacy_size_classes != nullptr &&
      default_want_legacy_size_classes() > 0) {
    return SizeClassConfiguration::kLegacy;
  }
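
  // Only an unset variable or the value "0" is accepted; any other value
  // crashes via TC_BUG below.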
  const char* e = thread_safe_getenv("TCMALLOC_LEGACY_SIZE_CLASSES");
  if (e == nullptr) {
    return SizeClassConfiguration::kReuse;
  } else if (!strcmp(e, "0")) {
    return SizeClassConfiguration::kReuse;
  } else {
    TC_BUG("bad TCMALLOC_LEGACY_SIZE_CLASSES env var '%s'", e);
  }
  return SizeClassConfiguration::kReuse;
}

ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
  PageHeapSpinLockHolder l;
  // double-checked locking
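  // (the lock-free fast path in static_vars.h re-checks inited_ with an
  // acquire load; the release store at the end of this block publishes the
  // fully initialized state to it.)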
  if (!inited_.load(std::memory_order_acquire)) {
    TC_CHECK(sizemap_.Init(SizeMap::CurrentClasses().classes));

    // Verify we can determine the number of CPUs now, since we will need it
    // later for per-CPU caches and initializing the cache topology.
    if (ABSL_PREDICT_FALSE(!NumCPUsMaybe().has_value())) {
      TCMalloc_Internal_SetPerCpuCachesEnabledNoBuildRequirement(false);
    }
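
    // Probe per-CPU (rseq) fast-path support once up front; only the side
    // effect of the check matters here.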
    (void)subtle::percpu::IsFast();

    numa_topology_.Init();
    CacheTopology::Instance().Init();

    if (IsExperimentActive(Experiment::TCMALLOC_PGHO_EXPERIMENT)) {
      TCMalloc_Internal_SetMinHotAccessHint(/*v=*/2);
    }

    // Do a bit of sanitizing: make sure the transfer cache is aligned
    // properly.
    TC_CHECK_EQ((sizeof(transfer_cache_) % ABSL_CACHELINE_SIZE), 0);
    transfer_cache_.Init();

    // The constructor of the sharded transfer cache leaves it in a disabled
    // state.
    sharded_transfer_cache_.Init();

    new (page_allocator_.memory) PageAllocator;
    pagemap_.MapRootWithSmallPages();
    guardedpage_allocator_.Init(/*max_allocated_pages=*/64,
                                /*total_pages=*/128);
    inited_.store(true, std::memory_order_release);
  }
}

}  // namespace tcmalloc_internal
}  // namespace tcmalloc
GOOGLE_MALLOC_SECTION_END