-
Notifications
You must be signed in to change notification settings - Fork 124
Expand file tree
/
Copy pathmitigations.h
More file actions
269 lines (255 loc) · 9.24 KB
/
mitigations.h
File metadata and controls
269 lines (255 loc) · 9.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
#pragma once
#include "defines.h"
#include <cstddef>
namespace snmalloc
{
/**
 * Report a failed client check.
 *
 * Forwards the message and arguments to snmalloc::report_fatal_error
 * (declared elsewhere; presumably does not return — confirm in defines.h).
 *
 * @param str  printf-style message describing the failed check.
 * @param args arguments interpolated into the message.
 */
template<typename... Args>
inline SNMALLOC_FAST_PATH void
check_client_error(const char* const str, Args... args)
{
  // Tail-call annotation intentionally left disabled; it is Clang-specific
  // and was presumably meant to keep this wrapper off the stack trace.
  //[[clang::musttail]]
  return snmalloc::report_fatal_error(str, args...);
}
/**
 * Verify a client-supplied invariant.
 *
 * On failure, debug builds report a formatted fatal error; release builds
 * terminate immediately via SNMALLOC_FAST_FAIL without formatting anything.
 *
 * @param test the invariant; must be true for well-behaved clients.
 * @param str  printf-style message used only in debug builds.
 * @param args arguments interpolated into the message (debug builds only).
 */
template<typename... Args>
inline SNMALLOC_FAST_PATH void
check_client_impl(bool test, const char* const str, Args... args)
{
  if (SNMALLOC_UNLIKELY(!test))
  {
    if constexpr (DEBUG)
    {
      // Debug build: produce a diagnostic before dying.
      check_client_error(str, args...);
    }
    else
    {
      // Release build: the message is never formatted; fail as fast as
      // possible.
      UNUSED(str, args...);
      SNMALLOC_FAST_FAIL();
    }
  }
}
/**
 * Compile-time mirror of the SNMALLOC_CHECK_CLIENT build option: true when
 * this build enables checking of client behaviour, false otherwise.  Used
 * below to select the platform's mitigation set.
 */
#ifdef SNMALLOC_CHECK_CLIENT
static constexpr bool CHECK_CLIENT = true;
#else
static constexpr bool CHECK_CLIENT = false;
#endif
namespace mitigation
{
  /**
   * A set of mitigations, represented as a bit mask.
   *
   * Each individual mitigation below is a distinct power of two.  Sets are
   * combined with `+` (union), reduced with `-` (difference), and tested
   * for overlap with `operator()`.
   */
  class type
  {
    /// Bit mask encoding the members of this set.
    size_t mask;

  public:
    /// Construct a set from a raw bit mask.
    constexpr type(size_t bits) : mask(bits) {}

    /// Sets are freely copyable.
    constexpr type(const type& other) = default;

    /// Union: a set containing the mitigations of both operands.
    constexpr type operator+(const type rhs) const
    {
      return type{mask | rhs.mask};
    }

    /// Difference: this set with the mitigations of `rhs` removed.
    constexpr type operator-(const type rhs) const
    {
      return type{mask & ~rhs.mask};
    }

    /// True when this set and `rhs` share at least one mitigation.
    constexpr bool operator()(const type rhs) const
    {
      return (mask & rhs.mask) != 0;
    }
  };
} // namespace mitigation
/**
 * Randomize the location of the pagemap within a larger address space
 * allocation. The other pages in that allocation may fault if accessed, on
 * platforms that can efficiently express such configurations.
 *
 * This guards against adversarial attempts to access the pagemap.
 *
 * This is unnecessary on StrictProvenance architectures.
 */
constexpr mitigation::type random_pagemap{1 << 0};
/**
 * Ensure that every slab (especially slabs used for larger "small" size
 * classes) has a larger minimum number of objects and that a larger
 * percentage of objects in a slab must be free to awaken the slab.
 *
 * This should frustrate use-after-reallocation attacks by delaying reuse.
 * When combined with random_preserve, below, it additionally ensures that at
 * least some shuffling of free objects is possible, and, hence, that there
 * is at least some unpredictability of reuse.
 *
 * TODO: should this be split? mjp: Would require changing some thresholds.
 * The min waking count needs to ensure we have enough objects on a slab,
 * hence it is related to the min count on a slab. Currently, without this,
 * we have a min count per slab of 16, and a min waking count with this
 * enabled of 32. So we would leak memory.
 */
constexpr mitigation::type random_larger_thresholds{1 << 1};
/**
 *
 * Obfuscate forward-edge pointers in intra-slab free lists.
 *
 * This helps prevent a UAF write from re-pointing the free list arbitrarily,
 * as the de-obfuscation of a corrupted pointer will generate a wild address.
 *
 * This is not available on StrictProvenance architectures.
 */
constexpr mitigation::type freelist_forward_edge{1 << 2};
/**
 * Store obfuscated backward-edge addresses in intra-slab free lists.
 *
 * Ordinarily, these lists are singly-linked. Storing backward-edges allows
 * the allocator to verify the well-formedness of the links and, importantly,
 * the acyclicity of the list itself. These backward-edges are also
 * obfuscated in an attempt to frustrate an attacker armed with UAF
 * attempting to construct a new well-formed list.
 *
 * Because the backward-edges are not traversed, this is available on
 * StrictProvenance architectures, unlike freelist_forward_edge.
 *
 * This is required to detect double frees as it will break the doubly linked
 * nature of the free list.
 */
constexpr mitigation::type freelist_backward_edge{1 << 3};
/**
 * When de-purposing a slab (releasing its address space for reuse at a
 * different size class or allocation), walk the free list and validate the
 * domestication of all nodes along it.
 *
 * If freelist_forward_edge is also enabled, this will probe the
 * domestication status of the de-obfuscated pointers before traversal.
 * Each of domestication and traversal may probabilistically catch UAF
 * corruption of the free list.
 *
 * If freelist_backward_edge is also enabled, this will verify the integrity
 * of the free list links.
 *
 * This gives the allocator "one last chance" to catch UAF corruption of a
 * slab's free list before the slab is de-purposed.
 *
 * This is required to comprehensively detect double free.
 */
constexpr mitigation::type freelist_teardown_validate{1 << 4};
/**
 * When initializing a slab, shuffle its free list.
 *
 * This guards against attackers relying on object-adjacency or address-reuse
 * properties of the allocation stream.
 */
constexpr mitigation::type random_initial{1 << 5};
/**
 * When a slab is operating, randomly assign freed objects to one of two
 * intra-slab free lists. When selecting a slab's free list for allocations,
 * select the longer of the two.
 *
 * This guards against attackers relying on object-adjacency or address-reuse
 * properties of the allocation stream.
 */
constexpr mitigation::type random_preserve{1 << 6};
/**
 * Randomly introduce another slab for a given size-class, rather than use
 * the last available to an allocator.
 *
 * This guards against attackers relying on address-reuse, especially in the
 * pathological case of a size-class having only one slab with free entries.
 */
constexpr mitigation::type random_extra_slab{1 << 7};
/**
 * Use a LIFO queue, rather than a stack, of slabs with free elements.
 *
 * This generally increases the time between address reuse.
 */
constexpr mitigation::type reuse_LIFO{1 << 8};
/**
 * This performs a variety of inexpensive "sanity" tests throughout the
 * allocator:
 *
 * - Requests to free objects must
 *   - not be interior pointers
 *   - be of allocated address space
 * - Requests to free objects which also specify the size must specify a size
 * that agrees with the current allocation.
 *
 * This guards against various forms of client misbehavior.
 *
 * TODO: Should this be split? mjp: It could, but let's not do this until
 * we have performance numbers to see what this costs.
 */
constexpr mitigation::type sanity_checks{1 << 9};
/**
 * On CHERI, perform a series of well-formedness tests on capabilities given
 * when requesting to free an object.
 */
constexpr mitigation::type cheri_checks{1 << 10};
/**
 * Erase intra-slab free list metadata before completing an allocation.
 *
 * This mitigates information disclosure.
 */
constexpr mitigation::type clear_meta{1 << 11};
/**
 * Protect meta data blocks by allocating separate from chunks for
 * user allocations. This involves leaving gaps in address space.
 * This is less efficient, so should only be applied for the checked
 * build.
 */
constexpr mitigation::type metadata_protection{1 << 12};
/**
 * If this mitigation is enabled, then Pal implementations should provide
 * exceptions/segfaults if accesses do not obey the
 *  - using
 *  - using_readonly
 *  - not_using
 * model.
 */
// NOTE(review): `static` is redundant on a namespace-scope constexpr
// variable (it already has internal linkage); the sibling constants above
// omit it.
static constexpr mitigation::type pal_enforce_access{1 << 13};
/**
 * If this mitigation is enabled, then deallocations are
 * scrubbed before reallocation. This prevents data leaks caused by
 * reading uninitialised memory.
 */
static constexpr mitigation::type scrub_free{1 << 14};
/**
 * The set of all generic mitigations, used by fully checked builds.
 *
 * Note: cheri_checks and scrub_free are deliberately not members;
 * cheri_checks is added separately on CHERI targets.
 */
constexpr mitigation::type full_checks = random_pagemap +
  random_larger_thresholds + freelist_forward_edge + freelist_backward_edge +
  freelist_teardown_validate + random_initial + random_preserve +
  metadata_protection + random_extra_slab + reuse_LIFO + sanity_checks +
  clear_meta + pal_enforce_access;
/**
 * The empty mitigation set.
 */
constexpr mitigation::type no_checks{0};
using namespace mitigation;
/**
 * The set of mitigations enabled for this build, selected per platform at
 * compile time from the constants above.
 */
constexpr mitigation::type mitigations =
#ifdef SNMALLOC_CHECK_CLIENT_MITIGATIONS
  // The build system supplied an explicit mitigation expression; start from
  // the empty set and add whatever it names (the using-directive above lets
  // the macro refer to mitigation-namespace names unqualified).
  no_checks + SNMALLOC_CHECK_CLIENT_MITIGATIONS;
#elif defined(OPEN_ENCLAVE)
  /**
   * On Open Enclave the address space is limited, so we disable
   * metadata-protection feature.
   */
  CHECK_CLIENT ? full_checks - metadata_protection - random_pagemap :
                 no_checks;
#elif defined(__NetBSD__)
  /**
   * pal_enforce_access was failing on NetBSD, so we disable it.
   */
  CHECK_CLIENT ? full_checks - pal_enforce_access : no_checks;
#elif defined(__CHERI_PURE_CAPABILITY__)
  CHECK_CLIENT ?
    /**
     * freelist_forward_edge should not be used on CHERI as we cannot encode
     * pointers as the tag will be destroyed.
     *
     * TODO: There is a known bug in CheriBSD that means round-tripping
     * through PROT_NONE sheds capability load and store permissions (while
     * restoring data read/write, for added excitement). For the moment, just
     * force this down on CHERI.
     */
    full_checks + cheri_checks + clear_meta - freelist_forward_edge -
      pal_enforce_access :
    /**
     * clear_meta is important on CHERI to avoid leaking capabilities.
     */
    sanity_checks + cheri_checks + clear_meta;
#else
  // Default: all generic checks in checked builds, none otherwise.
  CHECK_CLIENT ? full_checks : no_checks;
#endif
} // namespace snmalloc
/**
 * Check a client-supplied invariant, but only when the given mitigation is
 * enabled in this build; otherwise the test (and message) compile away.
 *
 * The body is wrapped in do/while(0) so the macro behaves as a single
 * statement: the previous bare `if constexpr` form produced an orphaned
 * `else` (compile error) when used unbraced inside an if/else, e.g.
 *   if (cond) snmalloc_check_client(m, t, "msg"); else other();
 */
#define snmalloc_check_client(mitigation, test, str, ...) \
  do \
  { \
    if constexpr (mitigation) \
    { \
      snmalloc::check_client_impl(test, str, ##__VA_ARGS__); \
    } \
  } while (0)