memory_manager_x64.cpp
//
// Bareflank Hypervisor
//
// Copyright (C) 2015 Assured Information Security, Inc.
// Author: Rian Quinn        <quinnr@ainfosec.com>
// Author: Brendan Kerrigan  <kerriganb@ainfosec.com>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#include <gsl/gsl>

#include <constants.h>
#include <guard_exceptions.h>
#include <memory_manager_x64.h>
#include <upper_lower.h>

#include <intrinsics/x64.h>
using namespace x64;

// -----------------------------------------------------------------------------
// Global Memory
// -----------------------------------------------------------------------------

uint8_t g_heap_pool_owner[MAX_HEAP_POOL] __attribute__((aligned(page_size))) = {};
uint8_t g_page_pool_owner[MAX_PAGE_POOL] __attribute__((aligned(page_size))) = {};

// -----------------------------------------------------------------------------
// Mutexes
// -----------------------------------------------------------------------------

#include <mutex>
std::mutex g_add_md_mutex;

// -----------------------------------------------------------------------------
// Implementation
// -----------------------------------------------------------------------------

memory_manager_x64 *
memory_manager_x64::instance() noexcept
{
    // [[ensures ret: ret != nullptr]]

    static memory_manager_x64 self;
    return &self;
}
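
// NOTE (illustrative): this is a Meyers singleton -- the first caller
// constructs the manager, and every later call returns the same instance.
// Callers typically go through the g_mm macro, which is assumed here to
// expand to memory_manager_x64::instance():
//
//   auto ptr = g_mm->alloc(0x100);
//   g_mm->free(ptr);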

memory_manager_x64::pointer
memory_manager_x64::alloc(size_type size) noexcept
{
    if (size == 0)
        return nullptr;

    try
    {
        if (lower(size) == 0)
            return reinterpret_cast<pointer>(g_page_pool.alloc(size));

        return reinterpret_cast<pointer>(g_heap_pool.alloc(size));
    }
    catch (...)
    { }

    return nullptr;
}
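
// Example (illustrative, page_size == 0x1000): lower(size) == 0 means the
// request is an exact multiple of the page size, so it is served from the
// page pool; any other size falls through to the heap pool:
//
//   g_mm->alloc(0x1000);    // page-aligned size -> g_page_pool
//   g_mm->alloc(0x0100);    // unaligned size    -> g_heap_pool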

memory_manager_x64::pointer
memory_manager_x64::alloc_map(size_type size) noexcept
{
    if (size == 0)
        return nullptr;

    try
    {
        return reinterpret_cast<pointer>(g_mem_map_pool.alloc(size));
    }
    catch (...)
    { }

    return nullptr;
}
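
// NOTE (assumption): alloc_map() carves virtual address space out of the
// dedicated memory-map pool that starts at MEM_MAP_POOL_START; unlike
// alloc(), the returned range is presumably backed by page-table mappings
// set up later by the caller rather than by the pool arrays above.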

void
memory_manager_x64::free(pointer ptr) noexcept
{
    auto uintptr = reinterpret_cast<integer_pointer>(ptr);

    if (g_heap_pool.contains(uintptr))
        return g_heap_pool.free(uintptr);

    if (g_page_pool.contains(uintptr))
        return g_page_pool.free(uintptr);
}
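
// NOTE (illustrative): ownership is decided purely by address range, so
// free() on a pointer that belongs to neither pool (including nullptr) is a
// silent no-op rather than an error.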

void
memory_manager_x64::free_map(pointer ptr) noexcept
{
    auto uintptr = reinterpret_cast<integer_pointer>(ptr);

    if (g_mem_map_pool.contains(uintptr))
        return g_mem_map_pool.free(uintptr);
}

memory_manager_x64::size_type
memory_manager_x64::size(pointer ptr) const noexcept
{
    auto uintptr = reinterpret_cast<integer_pointer>(ptr);

    if (g_heap_pool.contains(uintptr))
        return g_heap_pool.size(uintptr);

    if (g_page_pool.contains(uintptr))
        return g_page_pool.size(uintptr);

    return 0;
}
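
// NOTE (illustrative): a pointer that neither pool recognizes reports a
// size of 0; _realloc_r() below relies on this to reject unknown pointers.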

memory_manager_x64::size_type
memory_manager_x64::size_map(pointer ptr) const noexcept
{
    auto uintptr = reinterpret_cast<integer_pointer>(ptr);

    if (g_mem_map_pool.contains(uintptr))
        return g_mem_map_pool.size(uintptr);

    return 0;
}

memory_manager_x64::integer_pointer
memory_manager_x64::virtint_to_physint(integer_pointer virt) const
{
    // [[ensures ret: ret != 0]]
    expects(virt != 0);

    std::lock_guard<std::mutex> guard(g_add_md_mutex);
    return upper(m_virt_to_phys_map.at(upper(virt))) | lower(virt);
}
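
// Example (illustrative, page_size == 0x1000): if add_md() registered the
// mapping virt 0x200000 -> phys 0x1000, then virtint_to_physint(0x200123)
// looks up upper(0x200123) == 0x200000 and returns 0x1000 | 0x123 ==
// 0x1123: the page translation plus the untouched page offset.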

memory_manager_x64::integer_pointer
memory_manager_x64::virtptr_to_physint(pointer virt) const
{ return this->virtint_to_physint(reinterpret_cast<integer_pointer>(virt)); }

memory_manager_x64::pointer
memory_manager_x64::virtint_to_physptr(integer_pointer virt) const
{ return reinterpret_cast<pointer>(this->virtint_to_physint(virt)); }

memory_manager_x64::pointer
memory_manager_x64::virtptr_to_physptr(pointer virt) const
{ return reinterpret_cast<pointer>(this->virtptr_to_physint(virt)); }

memory_manager_x64::integer_pointer
memory_manager_x64::physint_to_virtint(integer_pointer phys) const
{
    // [[ensures ret: ret != 0]]
    expects(phys != 0);

    std::lock_guard<std::mutex> guard(g_add_md_mutex);
    return upper(m_phys_to_virt_map.at(upper(phys))) | lower(phys);
}

memory_manager_x64::integer_pointer
memory_manager_x64::physptr_to_virtint(pointer phys) const
{ return this->physint_to_virtint(reinterpret_cast<integer_pointer>(phys)); }

memory_manager_x64::pointer
memory_manager_x64::physint_to_virtptr(integer_pointer phys) const
{ return reinterpret_cast<pointer>(this->physint_to_virtint(phys)); }

memory_manager_x64::pointer
memory_manager_x64::physptr_to_virtptr(pointer phys) const
{ return reinterpret_cast<pointer>(this->physptr_to_virtint(phys)); }

memory_manager_x64::attr_type
memory_manager_x64::virtint_to_attrint(integer_pointer virt) const
{
    expects(virt != 0);

    std::lock_guard<std::mutex> guard(g_add_md_mutex);
    return m_virt_to_attr_map.at(upper(virt));
}

memory_manager_x64::attr_type
memory_manager_x64::virtptr_to_attrint(pointer virt) const
{ return this->virtint_to_attrint(reinterpret_cast<integer_pointer>(virt)); }

void
memory_manager_x64::add_md(integer_pointer virt, integer_pointer phys, attr_type attr)
{
    auto ___ = gsl::on_failure([&]
    {
        std::lock_guard<std::mutex> guard(g_add_md_mutex);

        m_virt_to_phys_map.erase(virt);
        m_phys_to_virt_map.erase(phys);
        m_virt_to_attr_map.erase(virt);
    });

    expects(attr != 0);
    expects(lower(virt) == 0);
    expects(lower(phys) == 0);

    {
        std::lock_guard<std::mutex> guard(g_add_md_mutex);

        m_virt_to_phys_map[virt] = phys;
        m_phys_to_virt_map[phys] = virt;
        m_virt_to_attr_map[virt] = attr;
    }
}
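
// NOTE (illustrative): gsl::on_failure only runs its lambda if this scope
// is left by an exception, so a failed expects() check or a map insertion
// that throws erases whichever of the three entries already made it in.
// Both addresses must be page aligned; a hypothetical call would look like
// g_mm->add_md(0x200000, 0x1000, attr) with attr a nonzero set of
// MEMORY_TYPE_* flags (flag names assumed, not defined in this file).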

void
memory_manager_x64::remove_md(integer_pointer virt) noexcept
{
    integer_pointer phys;

    if (virt == 0)
    {
        bferror << "remove_md: virt == 0" << bfendl;
        return;
    }

    if (lower(virt) != 0)
    {
        bferror << "remove_md: lower(virt) != 0" << bfendl;
        return;
    }

    guard_exceptions([&]
    {
        phys = virtint_to_physint(virt);
        std::lock_guard<std::mutex> guard(g_add_md_mutex);

        m_virt_to_phys_map.erase(virt);
        m_phys_to_virt_map.erase(phys);
        m_virt_to_attr_map.erase(virt);
    });
}
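
// NOTE (illustrative): remove_md() is noexcept, so guard_exceptions() is
// used to swallow the std::out_of_range that virtint_to_physint() throws
// when virt was never registered; in that case the maps are left untouched.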

memory_manager_x64::memory_descriptor_list
memory_manager_x64::descriptors() const
{
    memory_descriptor_list list;
    std::lock_guard<std::mutex> guard(g_add_md_mutex);

    for (const auto &p : m_virt_to_phys_map)
    {
        auto virt = p.first;
        auto phys = p.second;
        auto attr = m_virt_to_attr_map.at(virt);

        list.push_back({phys, virt, attr});
    }

    return list;
}

memory_manager_x64::memory_manager_x64() noexcept :
    g_heap_pool(reinterpret_cast<uintptr_t>(g_heap_pool_owner)),
    g_page_pool(reinterpret_cast<uintptr_t>(g_page_pool_owner)),
    g_mem_map_pool(MEM_MAP_POOL_START)
{ }

memory_manager_x64::integer_pointer
memory_manager_x64::lower(integer_pointer ptr) const noexcept
{ return ptr & (page_size - 1); }

memory_manager_x64::integer_pointer
memory_manager_x64::upper(integer_pointer ptr) const noexcept
{ return ptr & ~(page_size - 1); }
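
// Example (illustrative, page_size == 0x1000): for ptr == 0x200123,
// lower(ptr) == 0x123 (the offset within the page) and upper(ptr) ==
// 0x200000 (the page-aligned base), so upper(ptr) | lower(ptr) always
// reassembles the original address.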

extern "C" int64_t
add_md(struct memory_descriptor *md) noexcept
{
    return guard_exceptions(MEMORY_MANAGER_FAILURE, [&]
    {
        expects(md);

        auto &&virt = reinterpret_cast<memory_manager_x64::integer_pointer>(md->virt);
        auto &&phys = reinterpret_cast<memory_manager_x64::integer_pointer>(md->phys);
        auto &&type = reinterpret_cast<memory_manager_x64::attr_type>(md->type);

        g_mm->add_md(virt, phys, type);
    });
}
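
// NOTE (illustrative): this C-linkage wrapper is the entry point the driver
// side uses to hand memory descriptors to the VMM; guard_exceptions()
// converts any C++ exception into the MEMORY_MANAGER_FAILURE error code
// instead of letting it unwind across the C boundary.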

#ifdef CROSS_COMPILED

extern "C" void *
_malloc_r(struct _reent *, size_t size)
{ return g_mm->alloc(size); }

extern "C" void
_free_r(struct _reent *, void *ptr)
{ g_mm->free(ptr); }

extern "C" void *
_calloc_r(struct _reent *, size_t nmemb, size_t size)
{
    if (auto ptr = g_mm->alloc(nmemb * size))
        return __builtin_memset(ptr, 0, nmemb * size);

    return nullptr;
}

extern "C" void *
_realloc_r(struct _reent *, void *ptr, size_t size)
{
    auto old_sze = g_mm->size(ptr);
    auto new_ptr = g_mm->alloc(size);

    if (!new_ptr || old_sze == 0)
        return nullptr;

    if (ptr)
    {
        __builtin_memcpy(new_ptr, ptr, size > old_sze ? old_sze : size);
        g_mm->free(ptr);
    }

    return new_ptr;
}
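
// NOTE (illustrative): these _r functions back newlib's reentrant
// malloc/free/calloc/realloc when cross compiled. Unlike standard
// realloc(), _realloc_r() returns nullptr (without freeing the fresh
// allocation) when the old pointer is unknown to the pools, and
// _calloc_r() does not guard against nmemb * size overflow.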

#endif