VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c@ 18250

Last change on this file since 18250 was 18250, checked in by vboxsync, 16 years ago

enable vm_insert_page() for Linux 2.6.23+

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev
File size: 38.3 KB
1/* $Revision: 18250 $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Linux.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include "the-linux-kernel.h"
36
37#include <iprt/memobj.h>
38#include <iprt/alloc.h>
39#include <iprt/assert.h>
40#include <iprt/log.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include "internal/memobj.h"
44
45/* early 2.6 kernels */
46#ifndef PAGE_SHARED_EXEC
47# define PAGE_SHARED_EXEC PAGE_SHARED
48#endif
49#ifndef PAGE_READONLY_EXEC
50# define PAGE_READONLY_EXEC PAGE_READONLY
51#endif
52
53/*
54 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
55 * track_pfn_vma_new() is apparently not defined for non-RAM pages.
56 * It should be safe to use vm_insert_page() on older kernels as well.
57 */
58#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
59# define VBOX_USE_INSERT_PAGE
60#endif
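
/*
 * A minimal sketch (hypothetical helper, not part of this file) of the pattern
 * the VBOX_USE_INSERT_PAGE define selects between when a kernel-allocated page
 * has to be mapped into a user VMA; it assumes a 2.6.11+ kernel so that
 * remap_pfn_range() is available. The real logic lives in
 * rtR0MemObjNativeMapUser() further down.
 */
#include <linux/mm.h>
#include <linux/version.h>

static int sketchMapPageIntoVma(struct vm_area_struct *pVma, unsigned long ulAddr,
                                struct page *pPage, pgprot_t fPg)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    /* Preferred on newer kernels: lets the mm layer do the page accounting itself. */
    return vm_insert_page(pVma, ulAddr, pPage);
#else
    /* Older kernels: map the page frame number directly with the given protection. */
    return remap_pfn_range(pVma, ulAddr, page_to_pfn(pPage), PAGE_SIZE, fPg);
#endif
}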
61
62/*******************************************************************************
63* Structures and Typedefs *
64*******************************************************************************/
65/**
66 * The Linux version of the memory object structure.
67 */
68typedef struct RTR0MEMOBJLNX
69{
70 /** The core structure. */
71 RTR0MEMOBJINTERNAL Core;
72 /** Set if the allocation is contiguous.
73 * This means it has to be given back as one chunk. */
74 bool fContiguous;
75 /** Set if we've vmap'ed the memory into ring-0. */
76 bool fMappedToRing0;
77 /** The number of pages in the apPages array. */
78 size_t cPages;
79 /** Array of struct page pointers. (variable size) */
80 struct page *apPages[1];
81} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
82
83
84/**
85 * Helper that converts from an RTR0PROCESS handle to a Linux task.
86 *
87 * @returns The corresponding Linux task.
88 * @param R0Process IPRT ring-0 process handle.
89 */
90struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
91{
92 /** @todo fix rtR0ProcessToLinuxTask!! */
93 return R0Process == RTR0ProcHandleSelf() ? current : NULL;
94}
95
96
97/**
98 * Compute order. Some functions allocate 2^order pages.
99 *
100 * @returns order.
101 * @param cPages Number of pages.
102 */
103static int rtR0MemObjLinuxOrder(size_t cPages)
104{
105 int iOrder;
106 size_t cTmp;
107
108 for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
109 ;
110 if (cPages & ~((size_t)1 << iOrder))
111 ++iOrder;
112
113 return iOrder;
114}
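
/*
 * A stand-alone userland check (hypothetical, not part of the module) of the
 * order computation above: 2^order must cover cPages, and exact powers of two
 * must map to their own order.
 */
#include <assert.h>
#include <stddef.h>

static int sketchOrder(size_t cPages)
{
    int iOrder;
    size_t cTmp;
    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
        ;
    if (cPages & ~((size_t)1 << iOrder))
        ++iOrder;
    return iOrder;
}

int main(void)
{
    assert(sketchOrder(1) == 0);   /* 1 page  -> order 0 (2^0 = 1) */
    assert(sketchOrder(2) == 1);   /* 2 pages -> order 1 (2^1 = 2) */
    assert(sketchOrder(3) == 2);   /* rounded up to 2^2 = 4 pages  */
    assert(sketchOrder(5) == 3);   /* rounded up to 2^3 = 8 pages  */
    return 0;
}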
115
116
117/**
118 * Converts from RTMEM_PROT_* to Linux PAGE_*.
119 *
120 * @returns Linux page protection constant.
121 * @param fProt The IPRT protection mask.
122 * @param fKernel Whether it applies to kernel or user space.
123 */
124static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
125{
126 switch (fProt)
127 {
128 default:
129 AssertMsgFailed(("%#x %d\n", fProt, fKernel));
130 case RTMEM_PROT_NONE:
131 return PAGE_NONE;
132
133 case RTMEM_PROT_READ:
134 return fKernel ? PAGE_KERNEL_RO : PAGE_READONLY;
135
136 case RTMEM_PROT_WRITE:
137 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
138 return fKernel ? PAGE_KERNEL : PAGE_SHARED;
139
140 case RTMEM_PROT_EXEC:
141 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
142#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
143 if (fKernel)
144 {
145 pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
146 pgprot_val(fPg) &= ~_PAGE_RW;
147 return fPg;
148 }
149 return PAGE_READONLY_EXEC;
150#else
151 return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_READONLY_EXEC;
152#endif
153
154 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
155 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
156 return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_SHARED_EXEC;
157 }
158}
159
160
161/**
162 * Internal worker that allocates physical pages and creates the memory object for them.
163 *
164 * @returns IPRT status code.
165 * @param ppMemLnx Where to store the memory object pointer.
166 * @param enmType The object type.
167 * @param cb The number of bytes to allocate.
168 * @param fFlagsLnx The page allocation flags (GFPs).
169 * @param fContiguous Whether the allocation must be contiguous.
170 */
171static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb, unsigned fFlagsLnx, bool fContiguous)
172{
173 size_t iPage;
174 size_t cPages = cb >> PAGE_SHIFT;
175 struct page *paPages;
176
177 /*
178 * Allocate a memory object structure that's large enough to contain
179 * the page pointer array.
180 */
181 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
182 if (!pMemLnx)
183 return VERR_NO_MEMORY;
184 pMemLnx->cPages = cPages;
185
186 /*
187 * Allocate the pages.
188 * For small allocations we'll try a contiguous allocation first and then fall back to page-by-page.
189 */
190#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
191 if ( fContiguous
192 || cb <= PAGE_SIZE * 2)
193 {
194#ifdef VBOX_USE_INSERT_PAGE
195 paPages = alloc_pages(fFlagsLnx | __GFP_COMP, rtR0MemObjLinuxOrder(cb >> PAGE_SHIFT));
196#else
197 paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cb >> PAGE_SHIFT));
198#endif
199 if (paPages)
200 {
201 fContiguous = true;
202 for (iPage = 0; iPage < cPages; iPage++)
203 pMemLnx->apPages[iPage] = &paPages[iPage];
204 }
205 else if (fContiguous)
206 {
207 rtR0MemObjDelete(&pMemLnx->Core);
208 return VERR_NO_MEMORY;
209 }
210 }
211
212 if (!fContiguous)
213 {
214 for (iPage = 0; iPage < cPages; iPage++)
215 {
216 pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx);
217 if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
218 {
219 while (iPage-- > 0)
220 __free_page(pMemLnx->apPages[iPage]);
221 rtR0MemObjDelete(&pMemLnx->Core);
222 return VERR_NO_MEMORY;
223 }
224 }
225 }
226
227#else /* < 2.4.22 */
228 /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
229 paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cb >> PAGE_SHIFT));
230 if (!paPages)
231 {
232 rtR0MemObjDelete(&pMemLnx->Core);
233 return VERR_NO_MEMORY;
234 }
235 for (iPage = 0; iPage < cPages; iPage++)
236 {
237 pMemLnx->apPages[iPage] = &paPages[iPage];
238 MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
239 if (PageHighMem(pMemLnx->apPages[iPage]))
240 BUG();
241 }
242
243 fContiguous = true;
244#endif /* < 2.4.22 */
245 pMemLnx->fContiguous = fContiguous;
246
247 /*
248 * Reserve the pages.
249 */
250 for (iPage = 0; iPage < cPages; iPage++)
251 SetPageReserved(pMemLnx->apPages[iPage]);
252
253 *ppMemLnx = pMemLnx;
254 return VINF_SUCCESS;
255}
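
/*
 * A minimal sketch (hypothetical helper, without the IPRT object bookkeeping)
 * of the "contiguous first, then page by page" strategy used above.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int sketchAllocPages(struct page **papPages, size_t cPages, unsigned fGfp)
{
    size_t iPage;
    struct page *paPages = alloc_pages(fGfp, get_order(cPages << PAGE_SHIFT));
    if (paPages)
    {
        for (iPage = 0; iPage < cPages; iPage++)
            papPages[iPage] = &paPages[iPage];
        return 0;
    }
    /* Fall back to one page at a time, undoing everything on failure. */
    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = alloc_page(fGfp);
        if (!papPages[iPage])
        {
            while (iPage-- > 0)
                __free_page(papPages[iPage]);
            return -ENOMEM;
        }
    }
    return 0;
}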
256
257
258/**
259 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
260 *
261 * This method does NOT free the object.
262 *
263 * @param pMemLnx The object whose physical pages should be freed.
264 */
265static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
266{
267 size_t iPage = pMemLnx->cPages;
268 if (iPage > 0)
269 {
270 /*
271 * Restore the page flags.
272 */
273 while (iPage-- > 0)
274 {
275 ClearPageReserved(pMemLnx->apPages[iPage]);
276#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
277#else
278 MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
279#endif
280 }
281
282 /*
283 * Free the pages.
284 */
285#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
286 if (!pMemLnx->fContiguous)
287 {
288 iPage = pMemLnx->cPages;
289 while (iPage-- > 0)
290 __free_page(pMemLnx->apPages[iPage]);
291 }
292 else
293#endif
294 __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));
295
296 pMemLnx->cPages = 0;
297 }
298}
299
300
301/**
302 * Maps the allocation into ring-0.
303 *
304 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJLNX::fMappedToRing0 members.
305 *
306 * Contiguous mappings that aren't in 'high' memory are already mapped into kernel
307 * space, so we'll use that mapping if possible. If execute access is required, we'll
308 * play it safe and do our own mapping.
309 *
310 * @returns IPRT status code.
311 * @param pMemLnx The linux memory object to map.
312 * @param fExecutable Whether execute access is required.
313 */
314static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
315{
316 int rc = VINF_SUCCESS;
317
318 /*
319 * Choose mapping strategy.
320 */
321 bool fMustMap = fExecutable
322 || !pMemLnx->fContiguous;
323 if (!fMustMap)
324 {
325 size_t iPage = pMemLnx->cPages;
326 while (iPage-- > 0)
327 if (PageHighMem(pMemLnx->apPages[iPage]))
328 {
329 fMustMap = true;
330 break;
331 }
332 }
333
334 Assert(!pMemLnx->Core.pv);
335 Assert(!pMemLnx->fMappedToRing0);
336
337 if (fMustMap)
338 {
339 /*
340 * Use vmap - 2.4.22 and later.
341 */
342#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
343 pgprot_t fPg;
344 pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
345# ifdef _PAGE_NX
346 if (!fExecutable)
347 pgprot_val(fPg) |= _PAGE_NX;
348# endif
349
350# ifdef VM_MAP
351 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
352# else
353 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
354# endif
355 if (pMemLnx->Core.pv)
356 pMemLnx->fMappedToRing0 = true;
357 else
358 rc = VERR_MAP_FAILED;
359#else /* < 2.4.22 */
360 rc = VERR_NOT_SUPPORTED;
361#endif
362 }
363 else
364 {
365 /*
366 * Use the kernel RAM mapping.
367 */
368 pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
369 Assert(pMemLnx->Core.pv);
370 }
371
372 return rc;
373}
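
/*
 * A minimal sketch (hypothetical helper) of the two ring-0 mapping strategies
 * chosen above: contiguous low-memory pages already have a kernel address via
 * the RAM mapping, everything else goes through vmap() and must later be
 * vunmap()'ed.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/io.h>

static void *sketchMapToRing0(struct page **papPages, size_t cPages, int fContiguousLowMem)
{
    if (fContiguousLowMem)
        return phys_to_virt(page_to_phys(papPages[0])); /* reuse the kernel RAM mapping */
    return vmap(papPages, cPages, VM_MAP, PAGE_KERNEL); /* fresh mapping; pair with vunmap() */
}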
374
375
376/**
377 * Undoes what rtR0MemObjLinuxVMap() did.
378 *
379 * @param pMemLnx The linux memory object.
380 */
381static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
382{
383#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
384 if (pMemLnx->fMappedToRing0)
385 {
386 Assert(pMemLnx->Core.pv);
387 vunmap(pMemLnx->Core.pv);
388 pMemLnx->fMappedToRing0 = false;
389 }
390#else /* < 2.4.22 */
391 Assert(!pMemLnx->fMappedToRing0);
392#endif
393 pMemLnx->Core.pv = NULL;
394}
395
396
397int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
398{
399 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
400
401 /*
402 * Release any memory that we've allocated or locked.
403 */
404 switch (pMemLnx->Core.enmType)
405 {
406 case RTR0MEMOBJTYPE_LOW:
407 case RTR0MEMOBJTYPE_PAGE:
408 case RTR0MEMOBJTYPE_CONT:
409 case RTR0MEMOBJTYPE_PHYS:
410 case RTR0MEMOBJTYPE_PHYS_NC:
411 rtR0MemObjLinuxVUnmap(pMemLnx);
412 rtR0MemObjLinuxFreePages(pMemLnx);
413 break;
414
415 case RTR0MEMOBJTYPE_LOCK:
416 if (pMemLnx->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
417 {
418 size_t iPage;
419 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
420 Assert(pTask);
421 if (pTask && pTask->mm)
422 down_read(&pTask->mm->mmap_sem);
423
424 iPage = pMemLnx->cPages;
425 while (iPage-- > 0)
426 {
427 if (!PageReserved(pMemLnx->apPages[iPage]))
428 SetPageDirty(pMemLnx->apPages[iPage]);
429 page_cache_release(pMemLnx->apPages[iPage]);
430 }
431
432 if (pTask && pTask->mm)
433 up_read(&pTask->mm->mmap_sem);
434 }
435 else
436 AssertFailed(); /* not implemented for R0 */
437 break;
438
439 case RTR0MEMOBJTYPE_RES_VIRT:
440 Assert(pMemLnx->Core.pv);
441 if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
442 {
443 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
444 Assert(pTask);
445 if (pTask && pTask->mm)
446 {
447 down_write(&pTask->mm->mmap_sem);
448 MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
449 up_write(&pTask->mm->mmap_sem);
450 }
451 }
452 else
453 {
454 vunmap(pMemLnx->Core.pv);
455
456 Assert(pMemLnx->cPages == 1 && pMemLnx->apPages[0] != NULL);
457 __free_page(pMemLnx->apPages[0]);
458 pMemLnx->apPages[0] = NULL;
459 pMemLnx->cPages = 0;
460 }
461 pMemLnx->Core.pv = NULL;
462 break;
463
464 case RTR0MEMOBJTYPE_MAPPING:
465 Assert(pMemLnx->cPages == 0); Assert(pMemLnx->Core.pv);
466 if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
467 {
468 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
469 Assert(pTask);
470 if (pTask && pTask->mm)
471 {
472 down_write(&pTask->mm->mmap_sem);
473 MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
474 up_write(&pTask->mm->mmap_sem);
475 }
476 }
477 else
478 vunmap(pMemLnx->Core.pv);
479 pMemLnx->Core.pv = NULL;
480 break;
481
482 default:
483 AssertMsgFailed(("enmType=%d\n", pMemLnx->Core.enmType));
484 return VERR_INTERNAL_ERROR;
485 }
486 return VINF_SUCCESS;
487}
488
489
490int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
491{
492 PRTR0MEMOBJLNX pMemLnx;
493 int rc;
494
495#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
496 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, GFP_HIGHUSER, false /* non-contiguous */);
497#else
498 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, GFP_USER, false /* non-contiguous */);
499#endif
500 if (RT_SUCCESS(rc))
501 {
502 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
503 if (RT_SUCCESS(rc))
504 {
505 *ppMem = &pMemLnx->Core;
506 return rc;
507 }
508
509 rtR0MemObjLinuxFreePages(pMemLnx);
510 rtR0MemObjDelete(&pMemLnx->Core);
511 }
512
513 return rc;
514}
515
516
517int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
518{
519 PRTR0MEMOBJLNX pMemLnx;
520 int rc;
521
522#ifdef RT_ARCH_AMD64
523# ifdef GFP_DMA32
524 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, GFP_DMA32, false /* non-contiguous */);
525 if (RT_FAILURE(rc))
526# endif
527 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, GFP_DMA, false /* non-contiguous */);
528#else
529 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, GFP_USER, false /* non-contiguous */);
530#endif
531 if (RT_SUCCESS(rc))
532 {
533 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
534 if (RT_SUCCESS(rc))
535 {
536 *ppMem = &pMemLnx->Core;
537 return rc;
538 }
539
540 rtR0MemObjLinuxFreePages(pMemLnx);
541 rtR0MemObjDelete(&pMemLnx->Core);
542 }
543
544 return rc;
545}
546
547
548int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
549{
550 PRTR0MEMOBJLNX pMemLnx;
551 int rc;
552
553#ifdef RT_ARCH_AMD64
554# ifdef GFP_DMA32
555 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, GFP_DMA32, true /* contiguous */);
556 if (RT_FAILURE(rc))
557# endif
558 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, GFP_DMA, true /* contiguous */);
559#else
560 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, GFP_USER, true /* contiguous */);
561#endif
562 if (RT_SUCCESS(rc))
563 {
564 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
565 if (RT_SUCCESS(rc))
566 {
567#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(CONFIG_HIGHMEM64G))
568 size_t iPage = pMemLnx->cPages;
569 while (iPage-- > 0)
570 Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
571#endif
572 pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
573 *ppMem = &pMemLnx->Core;
574 return rc;
575 }
576
577 rtR0MemObjLinuxFreePages(pMemLnx);
578 rtR0MemObjDelete(&pMemLnx->Core);
579 }
580
581 return rc;
582}
583
584
585/**
586 * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
587 *
588 * @returns IPRT status.
589 * @param ppMem Where to store the memory object pointer on success.
590 * @param enmType The object type.
591 * @param cb The size of the allocation.
592 * @param PhysHighest See rtR0MemObjNativeAllocPhys.
593 * @param fGfp The Linux GFP flags to use for the allocation.
594 */
595static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType, size_t cb, RTHCPHYS PhysHighest, unsigned fGfp)
596{
597 PRTR0MEMOBJLNX pMemLnx;
598 int rc;
599
600 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, fGfp,
601 enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */);
602 if (RT_FAILURE(rc))
603 return rc;
604
605 /*
606 * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
607 */
608 if (PhysHighest != NIL_RTHCPHYS)
609 {
610 size_t iPage = pMemLnx->cPages;
611 while (iPage-- > 0)
612 if (page_to_phys(pMemLnx->apPages[iPage]) >= PhysHighest)
613 {
614 rtR0MemObjLinuxFreePages(pMemLnx);
615 rtR0MemObjDelete(&pMemLnx->Core);
616 return VERR_NO_MEMORY;
617 }
618 }
619
620 /*
621 * Complete the object.
622 */
623 if (enmType == RTR0MEMOBJTYPE_PHYS)
624 {
625 pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
626 pMemLnx->Core.u.Phys.fAllocated = true;
627 }
628 *ppMem = &pMemLnx->Core;
629 return rc;
630}
631
632
633/**
634 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC.
635 *
636 * @returns IPRT status.
637 * @param ppMem Where to store the memory object pointer on success.
638 * @param enmType The object type.
639 * @param cb The size of the allocation.
640 * @param PhysHighest See rtR0MemObjNativeAllocPhys.
641 */
642static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType, size_t cb, RTHCPHYS PhysHighest)
643{
644 int rc;
645
646 /*
647 * There are two clear cases: the <=16MB one and the anything-goes one.
648 * When the physical address limit is somewhere in between those two we'll
649 * just have to try, starting with HIGHUSER and working our way through the
650 * different types, hoping we'll get lucky.
651 *
652 * We should probably move this physical address restriction logic up to
653 * the page alloc function as it would be more efficient there. But since
654 * we don't expect this to be a performance issue just yet it can wait.
655 */
656 if (PhysHighest == NIL_RTHCPHYS)
657 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_HIGHUSER);
658 else if (PhysHighest <= _1M * 16)
659 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_DMA);
660 else
661 {
662 rc = VERR_NO_MEMORY;
663 if (RT_FAILURE(rc))
664 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_HIGHUSER);
665 if (RT_FAILURE(rc))
666 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_USER);
667#ifdef GFP_DMA32
668 if (RT_FAILURE(rc))
669 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_DMA32);
670#endif
671 if (RT_FAILURE(rc))
672 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_DMA);
673 }
674 return rc;
675}
676
677
678int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
679{
680 return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest);
681}
682
683
684int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
685{
686 return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest);
687}
688
689
690int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
691{
692 /*
693 * All we need to do here is to validate that we can use
694 * ioremap on the specified address (32/64-bit dma_addr_t).
695 */
696 PRTR0MEMOBJLNX pMemLnx;
697 dma_addr_t PhysAddr = Phys;
698 AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);
699
700 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
701 if (!pMemLnx)
702 return VERR_NO_MEMORY;
703
704 pMemLnx->Core.u.Phys.PhysBase = PhysAddr;
705 pMemLnx->Core.u.Phys.fAllocated = false;
706 Assert(!pMemLnx->cPages);
707 *ppMem = &pMemLnx->Core;
708 return VINF_SUCCESS;
709}
710
711
712int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
713{
714 const int cPages = cb >> PAGE_SHIFT;
715 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
716 struct vm_area_struct **papVMAs;
717 PRTR0MEMOBJLNX pMemLnx;
718 int rc = VERR_NO_MEMORY;
719
720 /*
721 * Check for valid task and size overflows.
722 */
723 if (!pTask)
724 return VERR_NOT_SUPPORTED;
725 if (((size_t)cPages << PAGE_SHIFT) != cb)
726 return VERR_OUT_OF_RANGE;
727
728 /*
729 * Allocate the memory object and a temporary buffer for the VMAs.
730 */
731 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
732 if (!pMemLnx)
733 return VERR_NO_MEMORY;
734
735 papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
736 if (papVMAs)
737 {
738 down_read(&pTask->mm->mmap_sem);
739
740 /*
741 * Get user pages.
742 */
743 rc = get_user_pages(pTask, /* Task for fault accounting. */
744 pTask->mm, /* Whose pages. */
745 R3Ptr, /* Where from. */
746 cPages, /* How many pages. */
747 1, /* Write to memory. */
748 0, /* force. */
749 &pMemLnx->apPages[0], /* Page array. */
750 papVMAs); /* vmas */
751 if (rc == cPages)
752 {
753 /*
754 * Flush dcache (required?), protect against fork and _really_ pin the page
755 * table entries. get_user_pages() will protect against swapping out the
756 * pages but it will NOT protect against removing page table entries. This
757 * can be achieved with
758 * - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
759 * an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
760 * Usual Linux distributions support only a limited size of locked pages
761 * (e.g. 32KB); see the userland sketch after this function.
762 * - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages()),
763 * or by
764 * - setting the VM_LOCKED flag. This is the same as doing mlock() without
765 * a range check.
766 */
767 /** @todo The Linux fork() protection will require more work if this API
768 * is to be used for anything but locking VM pages. */
769 while (rc-- > 0)
770 {
771 flush_dcache_page(pMemLnx->apPages[rc]);
772 papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
773 }
774
775 up_read(&pTask->mm->mmap_sem);
776
777 RTMemFree(papVMAs);
778
779 pMemLnx->Core.u.Lock.R0Process = R0Process;
780 pMemLnx->cPages = cPages;
781 Assert(!pMemLnx->fMappedToRing0);
782 *ppMem = &pMemLnx->Core;
783
784 return VINF_SUCCESS;
785 }
786
787 /*
788 * Failed - we need to unlock any pages that we succeeded in locking.
789 */
790 while (rc-- > 0)
791 {
792 if (!PageReserved(pMemLnx->apPages[rc]))
793 SetPageDirty(pMemLnx->apPages[rc]);
794 page_cache_release(pMemLnx->apPages[rc]);
795 }
796
797 up_read(&pTask->mm->mmap_sem);
798
799 RTMemFree(papVMAs);
800 rc = VERR_LOCK_FAILED;
801 }
802
803 rtR0MemObjDelete(&pMemLnx->Core);
804 return rc;
805}
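
/*
 * A runnable userland illustration (not part of IPRT) of the first pinning
 * alternative mentioned in the comment inside the function above: raise
 * RLIMIT_MEMLOCK, then mlock() the buffer so its pages stay resident.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
    const size_t cb = 16 * 4096;
    struct rlimit Lim = { cb, cb };
    void *pv;

    /* Distributions usually default RLIMIT_MEMLOCK to a few dozen KB; raising
       the hard limit may require privileges. */
    if (setrlimit(RLIMIT_MEMLOCK, &Lim) != 0)
        perror("setrlimit(RLIMIT_MEMLOCK)");

    pv = mmap(NULL, cb, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pv == MAP_FAILED)
        return 1;

    if (mlock(pv, cb) != 0)     /* pin: the pages can no longer be swapped out */
        perror("mlock");
    memset(pv, 0, cb);          /* touch the memory; it stays resident */

    munlock(pv, cb);
    munmap(pv, cb);
    return 0;
}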
806
807
808int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
809{
810 /* What is there to lock? Should/Can we fake this? */
811 return VERR_NOT_SUPPORTED;
812}
813
814
815int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
816{
817#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
818 const size_t cPages = cb >> PAGE_SHIFT;
819 struct page *pDummyPage;
820 struct page **papPages;
821
822 /* check for unsupported stuff. */
823 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
824 AssertMsgReturn(uAlignment <= PAGE_SIZE, ("%#x\n", uAlignment), VERR_NOT_SUPPORTED);
825
826 /*
827 * Allocate a dummy page and create a page pointer array for vmap such that
828 * the dummy page is mapped all over the reserved area.
829 */
830 pDummyPage = alloc_page(GFP_HIGHUSER);
831 if (!pDummyPage)
832 return VERR_NO_MEMORY;
833 papPages = RTMemAlloc(sizeof(*papPages) * cPages);
834 if (papPages)
835 {
836 void *pv;
837 size_t iPage = cPages;
838 while (iPage-- > 0)
839 papPages[iPage] = pDummyPage;
840# ifdef VM_MAP
841 pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
842# else
843 pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
844# endif
845 RTMemFree(papPages);
846 if (pv)
847 {
848 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
849 if (pMemLnx)
850 {
851 pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
852 pMemLnx->cPages = 1;
853 pMemLnx->apPages[0] = pDummyPage;
854 *ppMem = &pMemLnx->Core;
855 return VINF_SUCCESS;
856 }
857 vunmap(pv);
858 }
859 }
860 __free_page(pDummyPage);
861 return VERR_NO_MEMORY;
862
863#else /* < 2.4.22 */
864 /*
865 * Could probably use ioremap here, but the caller is in a better position than us
866 * to select some safe physical memory.
867 */
868 return VERR_NOT_SUPPORTED;
869#endif
870}
871
872
873/**
874 * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser that creates
875 * an empty user space mapping.
876 *
877 * The caller takes care of acquiring the mmap_sem of the task.
878 *
879 * @returns Pointer to the mapping.
880 * (void *)-1 on failure.
881 * @param R3PtrFixed (RTR3PTR)-1 if anywhere, otherwise a specific location.
882 * @param cb The size of the mapping.
883 * @param uAlignment The alignment of the mapping.
884 * @param pTask The Linux task to create this mapping in.
885 * @param fProt The RTMEM_PROT_* mask.
886 */
887static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
888{
889 unsigned fLnxProt;
890 unsigned long ulAddr;
891
892 /*
893 * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
894 */
895 fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
896 if (fProt == RTMEM_PROT_NONE)
897 fLnxProt = PROT_NONE;
898 else
899 {
900 fLnxProt = 0;
901 if (fProt & RTMEM_PROT_READ)
902 fLnxProt |= PROT_READ;
903 if (fProt & RTMEM_PROT_WRITE)
904 fLnxProt |= PROT_WRITE;
905 if (fProt & RTMEM_PROT_EXEC)
906 fLnxProt |= PROT_EXEC;
907 }
908
909 if (R3PtrFixed != (RTR3PTR)-1)
910 ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
911 else
912 {
913 ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
914 if ( !(ulAddr & ~PAGE_MASK)
915 && (ulAddr & (uAlignment - 1)))
916 {
917 /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
918 * up alignment gaps. This is of course complicated by fragmentation (which we might have caused
919 * ourselves) and further by there being two mmap strategies (top / bottom). */
920 /* For now, just ignore uAlignment requirements... */
921 }
922 }
923 if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
924 return (void *)-1;
925 return (void *)ulAddr;
926}
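
/*
 * A userland analogue (illustration only) of what rtR0MemObjLinuxDoMmap() asks
 * for on behalf of the process: an anonymous shared mapping, either anywhere
 * or at a fixed address when R3PtrFixed is given.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    /* 'Anywhere' case: the kernel picks the address, like do_mmap(NULL, 0, ...). */
    void *pv = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (pv == MAP_FAILED)
        return 1;
    printf("mapped at %p\n", pv);
    munmap(pv, 2 * 4096);
    return 0;
}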
927
928
929int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
930{
931 PRTR0MEMOBJLNX pMemLnx;
932 void *pv;
933 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
934 if (!pTask)
935 return VERR_NOT_SUPPORTED;
936
937 /*
938 * Let rtR0MemObjLinuxDoMmap do the difficult bits.
939 */
940 down_write(&pTask->mm->mmap_sem);
941 pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
942 up_write(&pTask->mm->mmap_sem);
943 if (pv == (void *)-1)
944 return VERR_NO_MEMORY;
945
946 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
947 if (!pMemLnx)
948 {
949 down_write(&pTask->mm->mmap_sem);
950 MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, cb);
951 up_write(&pTask->mm->mmap_sem);
952 return VERR_NO_MEMORY;
953 }
954
955 pMemLnx->Core.u.ResVirt.R0Process = R0Process;
956 *ppMem = &pMemLnx->Core;
957 return VINF_SUCCESS;
958}
959
960
961int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
962 unsigned fProt, size_t offSub, size_t cbSub)
963{
964 int rc = VERR_NO_MEMORY;
965 PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
966 PRTR0MEMOBJLNX pMemLnx;
967
968 /* Fail if requested to do something we can't. */
969 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
970 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
971 AssertMsgReturn(uAlignment <= PAGE_SIZE, ("%#x\n", uAlignment), VERR_NOT_SUPPORTED);
972
973 /*
974 * Create the IPRT memory object.
975 */
976 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
977 if (pMemLnx)
978 {
979 if (pMemLnxToMap->cPages)
980 {
981#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
982 /*
983 * Use vmap - 2.4.22 and later.
984 */
985 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
986# ifdef VM_MAP
987 pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
988# else
989 pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
990# endif
991 if (pMemLnx->Core.pv)
992 {
993 pMemLnx->fMappedToRing0 = true;
994 rc = VINF_SUCCESS;
995 }
996 else
997 rc = VERR_MAP_FAILED;
998
999#else /* < 2.4.22 */
1000 /*
1001 * Only option here is to share mappings if possible and forget about fProt.
1002 */
1003 if (rtR0MemObjIsRing3(pMemToMap))
1004 rc = VERR_NOT_SUPPORTED;
1005 else
1006 {
1007 rc = VINF_SUCCESS;
1008 if (!pMemLnxToMap->Core.pv)
1009 rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
1010 if (RT_SUCCESS(rc))
1011 {
1012 Assert(pMemLnxToMap->Core.pv);
1013 pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
1014 }
1015 }
1016#endif
1017 }
1018 else
1019 {
1020 /*
1021 * MMIO / physical memory.
1022 */
1023 Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
1024 pMemLnx->Core.pv = ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
1025 if (pMemLnx->Core.pv)
1026 {
1027 /** @todo fix protection. */
1028 rc = VINF_SUCCESS;
1029 }
1030 }
1031 if (RT_SUCCESS(rc))
1032 {
1033 pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1034 *ppMem = &pMemLnx->Core;
1035 return VINF_SUCCESS;
1036 }
1037 rtR0MemObjDelete(&pMemLnx->Core);
1038 }
1039
1040 return rc;
1041}
1042
1043
1044int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
1045{
1046 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
1047 PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
1048 int rc = VERR_NO_MEMORY;
1049 PRTR0MEMOBJLNX pMemLnx;
1050
1051 /*
1052 * Check for restrictions.
1053 */
1054 if (!pTask)
1055 return VERR_NOT_SUPPORTED;
1056
1057 /*
1058 * Create the IPRT memory object.
1059 */
1060 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
1061 if (pMemLnx)
1062 {
1063 /*
1064 * Allocate user space mapping.
1065 */
1066 void *pv;
1067 down_write(&pTask->mm->mmap_sem);
1068 pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
1069 if (pv != (void *)-1)
1070 {
1071 /*
1072 * Map page by page into the mmap area.
1073 * This is generic, paranoid and not very efficient.
1074 */
1075 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, false /* user */);
1076 unsigned long ulAddrCur = (unsigned long)pv;
1077 const size_t cPages = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
1078 size_t iPage;
1079 rc = 0;
1080 if (pMemLnxToMap->cPages)
1081 {
1082 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
1083 {
1084#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1085 struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
1086 AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
1087#endif
1088
1089#if defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1090 rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
1091 vma->vm_flags |= VM_RESERVED; /* This flag helps make 100% sure some bad stuff won't happen (swap, core, ++). */
1092#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1093 rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1094#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1095 rc = remap_page_range(vma, ulAddrCur, page_to_phys(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1096#else /* 2.4 */
1097 rc = remap_page_range(ulAddrCur, page_to_phys(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1098#endif
1099 if (rc)
1100 break;
1101 }
1102 }
1103 else
1104 {
1105 RTHCPHYS Phys;
1106 if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS)
1107 Phys = pMemLnxToMap->Core.u.Phys.PhysBase;
1108 else if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_CONT)
1109 Phys = pMemLnxToMap->Core.u.Cont.Phys;
1110 else
1111 {
1112 AssertMsgFailed(("%d\n", pMemLnxToMap->Core.enmType));
1113 Phys = NIL_RTHCPHYS;
1114 }
1115 if (Phys != NIL_RTHCPHYS)
1116 {
1117 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
1118 {
1119#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1120 struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
1121 AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
1122#endif
1123
1124#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1125 rc = remap_pfn_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1126#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1127 rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1128#else /* 2.4 */
1129 rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
1130#endif
1131 if (rc)
1132 break;
1133 }
1134 }
1135 }
1136 if (!rc)
1137 {
1138 up_write(&pTask->mm->mmap_sem);
1139
1140 pMemLnx->Core.pv = pv;
1141 pMemLnx->Core.u.Mapping.R0Process = R0Process;
1142 *ppMem = &pMemLnx->Core;
1143 return VINF_SUCCESS;
1144 }
1145
1146 /*
1147 * Bail out.
1148 */
1149 MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, pMemLnxToMap->Core.cb);
1150 if (rc != VERR_INTERNAL_ERROR)
1151 rc = VERR_NO_MEMORY;
1152 }
1153
1154 up_write(&pTask->mm->mmap_sem);
1155
1156 rtR0MemObjDelete(&pMemLnx->Core);
1157 }
1158
1159 return rc;
1160}
1161
1162
1163RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1164{
1165 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
1166
1167 if (pMemLnx->cPages)
1168 return page_to_phys(pMemLnx->apPages[iPage]);
1169
1170 switch (pMemLnx->Core.enmType)
1171 {
1172 case RTR0MEMOBJTYPE_CONT:
1173 return pMemLnx->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
1174
1175 case RTR0MEMOBJTYPE_PHYS:
1176 return pMemLnx->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1177
1178 /* the parent knows */
1179 case RTR0MEMOBJTYPE_MAPPING:
1180 return rtR0MemObjNativeGetPagePhysAddr(pMemLnx->Core.uRel.Child.pParent, iPage);
1181
1182 /* cPages > 0 */
1183 case RTR0MEMOBJTYPE_LOW:
1184 case RTR0MEMOBJTYPE_LOCK:
1185 case RTR0MEMOBJTYPE_PHYS_NC:
1186 case RTR0MEMOBJTYPE_PAGE:
1187 default:
1188 AssertMsgFailed(("%d\n", pMemLnx->Core.enmType));
1189 /* fall thru */
1190
1191 case RTR0MEMOBJTYPE_RES_VIRT:
1192 return NIL_RTHCPHYS;
1193 }
1194}
1195