VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/alloc/memcache.cpp@ 55252

Last change on this file since 55252 was 55252, checked in by vboxsync, 10 years ago

memcache.cpp: Fixed problem with the free list (unused).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.6 KB
/* $Id: memcache.cpp 55252 2015-04-14 14:56:12Z vboxsync $ */
/** @file
 * IPRT - Memory Object Allocation Cache.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/memcache.h>
#include "internal/iprt.h"

#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/critsect.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/param.h>

#include "internal/magics.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to a cache instance. */
typedef struct RTMEMCACHEINT *PRTMEMCACHEINT;
/** Pointer to a cache page. */
typedef struct RTMEMCACHEPAGE *PRTMEMCACHEPAGE;



/**
 * A free object.
 *
 * @remarks This only works if the objects don't have a constructor or
 *          destructor and are big enough.
 */
typedef struct RTMEMCACHEFREEOBJ
{
    /** Pointer to the next free object. */
    struct RTMEMCACHEFREEOBJ * volatile pNext;
} RTMEMCACHEFREEOBJ;
/** Pointer to a free object. */
typedef RTMEMCACHEFREEOBJ *PRTMEMCACHEFREEOBJ;


/**
 * A cache page.
 *
 * This is a page of memory that we split up into a bunch of object-sized
 * chunks and hand out to the cache users.  The bitmap is updated in an atomic
 * fashion so that we don't have to take any locks when freeing or allocating
 * memory.
 */
typedef struct RTMEMCACHEPAGE
{
    /** Pointer to the cache owning this page.
     * This is used for validation purposes only. */
    PRTMEMCACHEINT pCache;
    /** Pointer to the next page.
     * This is marked as volatile since we'll be adding new entries to the list
     * without taking any locks. */
    PRTMEMCACHEPAGE volatile pNext;
    /** Bitmap tracking allocated blocks. */
    void volatile *pbmAlloc;
    /** Bitmap tracking which blocks have been through the constructor. */
    void volatile *pbmCtor;
    /** Pointer to the object array. */
    uint8_t *pbObjects;
    /** The number of objects on this page. */
    uint32_t cObjects;

    /** Padding to force cFree into the next cache line. (ASSUMES CL = 64) */
    uint8_t abPadding[ARCH_BITS == 32 ? 64 - 6*4 : 64 - 5*8 - 4];
    /** The number of free objects. */
    int32_t volatile cFree;
} RTMEMCACHEPAGE;
AssertCompileMemberOffset(RTMEMCACHEPAGE, cFree, 64);


/**
 * Memory object cache instance.
 */
typedef struct RTMEMCACHEINT
{
    /** Magic value (RTMEMCACHE_MAGIC). */
    uint32_t u32Magic;
    /** The object size. */
    uint32_t cbObject;
    /** Object alignment. */
    uint32_t cbAlignment;
    /** The per page object count. */
    uint32_t cPerPage;
    /** Number of bits in the bitmap.
     * @remarks This is greater than or equal to cPerPage and it is aligned such
     *          that the search operation will be most efficient on x86/AMD64. */
    uint32_t cBits;
    /** The maximum number of objects. */
    uint32_t cMax;
    /** Whether to use the free list or not. */
    bool fUseFreeList;
    /** Head of the page list. */
    PRTMEMCACHEPAGE pPageHead;
    /** Pointer to the insertion point in the page list. */
    PRTMEMCACHEPAGE volatile *ppPageNext;
    /** Constructor callback. */
    PFNMEMCACHECTOR pfnCtor;
    /** Destructor callback. */
    PFNMEMCACHEDTOR pfnDtor;
    /** Callback argument. */
    void *pvUser;
    /** Critical section serializing page allocation and similar. */
    RTCRITSECT CritSect;

    /** The total object count. */
    uint32_t volatile cTotal;
    /** The number of free objects. */
    int32_t volatile cFree;
    /** This may point to a page with free entries. */
    PRTMEMCACHEPAGE volatile pPageHint;
    /** Stack of free items.
     * These are marked as used in the allocation bitmaps.
     *
     * @todo This doesn't scale well when several threads are beating on the
     *       cache.  Also, it totally doesn't work when the objects are too
     *       small. */
    PRTMEMCACHEFREEOBJ volatile pFreeTop;
} RTMEMCACHEINT;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void rtMemCacheFreeList(RTMEMCACHEINT *pThis, PRTMEMCACHEFREEOBJ pHead);


RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
                             PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser, uint32_t fFlags)
{
    AssertPtr(phMemCache);
    AssertPtrNull(pfnCtor);
    AssertPtrNull(pfnDtor);
    AssertReturn(!pfnDtor || pfnCtor, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject <= PAGE_SIZE / 8, VERR_INVALID_PARAMETER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);

    if (cbAlignment == 0)
    {
        if (cbObject <= 2)
            cbAlignment = cbObject;
        else if (cbObject <= 4)
            cbAlignment = 4;
        else if (cbObject <= 8)
            cbAlignment = 8;
        else if (cbObject <= 16)
            cbAlignment = 16;
        else if (cbObject <= 32)
            cbAlignment = 32;
        else
            cbAlignment = 64;
    }
    else
    {
        AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
        AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
    }

    /*
     * Allocate and initialize the instance memory.
     */
    RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return rc;
    }

    pThis->u32Magic = RTMEMCACHE_MAGIC;
    pThis->cbObject = (uint32_t)RT_ALIGN_Z(cbObject, cbAlignment);
    pThis->cbAlignment = (uint32_t)cbAlignment;
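    /* Start with an optimistic per-page object count, then shrink it until the
       page header, the object array and the two bitmaps (allocation and
       constructor, each rounded up to a multiple of 64 bits) fit in one page. */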
    pThis->cPerPage = (uint32_t)((PAGE_SIZE - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject);
    while (   RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), 8)
            + pThis->cPerPage * pThis->cbObject
            + RT_ALIGN(pThis->cPerPage, 64) / 8 * 2
           > PAGE_SIZE)
        pThis->cPerPage--;
    pThis->cBits = RT_ALIGN(pThis->cPerPage, 64);
    pThis->cMax = cMaxObjects;
    pThis->fUseFreeList = cbObject >= sizeof(RTMEMCACHEFREEOBJ)
                       && !pfnCtor
                       && !pfnDtor;
    pThis->pPageHead = NULL;
    pThis->ppPageNext = &pThis->pPageHead;
    pThis->pfnCtor = pfnCtor;
    pThis->pfnDtor = pfnDtor;
    pThis->pvUser = pvUser;
    pThis->cTotal = 0;
    pThis->cFree = 0;
    pThis->pPageHint = NULL;
    pThis->pFreeTop = NULL;

#if 1 /* should be fixed now, enable shortly... */
    /** @todo
     * Here is a puzzler (or maybe I'm just blind), the free list code breaks
     * badly on my macbook pro (i7) (32-bit).
     *
     * I tried changing the reads from unordered to ordered to no avail.  Then I
     * tried optimizing the code with the ASMAtomicCmpXchgExPtr function to
     * avoid some reads - no change.  Inserting pause instructions did nothing
     * (as expected).  The only thing which seems to make a difference is
     * reading the pFreeTop pointer twice in the free code...  This is weird or
     * I'm overlooking something..
     *
     * No time to figure it out, so I'm disabling the broken code paths for
     * now. */
    pThis->fUseFreeList = false;
#endif

    *phMemCache = pThis;
    return VINF_SUCCESS;
}


RTDECL(int) RTMemCacheDestroy(RTMEMCACHE hMemCache)
{
    RTMEMCACHEINT *pThis = hMemCache;
    if (!pThis)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_HANDLE);

#if 0 /*def RT_STRICT - don't require everything to be freed. Caches are very convenient for lazy cleanup. */
    uint32_t cFree = pThis->cFree;
    for (PRTMEMCACHEFREEOBJ pFree = pThis->pFreeTop; pFree && cFree < pThis->cTotal + 5; pFree = pFree->pNext)
        cFree++;
    AssertMsg(cFree == pThis->cTotal, ("cFree=%u cTotal=%u\n", cFree, pThis->cTotal));
#endif

    /*
     * Destroy it.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTMEMCACHE_MAGIC_DEAD, RTMEMCACHE_MAGIC), VERR_INVALID_HANDLE);
    RTCritSectDelete(&pThis->CritSect);

    while (pThis->pPageHead)
    {
        PRTMEMCACHEPAGE pPage = pThis->pPageHead;
        pThis->pPageHead = pPage->pNext;
        pPage->cFree = 0;

        if (pThis->pfnDtor)
        {
            uint32_t iObj = pPage->cObjects;
            while (iObj-- > 0)
                if (ASMBitTestAndClear(pPage->pbmCtor, iObj))
                    pThis->pfnDtor(hMemCache, pPage->pbObjects + iObj * pThis->cbObject, pThis->pvUser);
        }

        RTMemPageFree(pPage, PAGE_SIZE);
    }

    RTMemFree(pThis);
    return VINF_SUCCESS;
}


/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis       The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree since
         * this increases performance markedly when lots of threads are beating
         * on the cache.
         */
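        /* Illustrative page layout, low to high addresses:
         *   [RTMEMCACHEPAGE header][pbmCtor][slack][pbmAlloc][object array...]
         * with the object array pushed up against the end of the page. */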
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache = pThis;
            pPage->pNext = NULL;
            pPage->cFree = cObjects;
            pPage->cObjects = cObjects;
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor = pb;
            pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
            pPage->pbObjects = pb;   Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc = pb;
            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr(&pThis->pPageHint, pPage);

            /* Link the page in at the end of the list. */
            ASMAtomicWritePtr(pThis->ppPageNext, pPage);
            pThis->ppPageNext = &pPage->pNext;

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}


/**
 * Grabs an object in a page.
 * @returns New cFree value on success (0 or higher), -1 on failure.
 * @param   pPage       Pointer to the page.
 */
DECL_FORCE_INLINE(int32_t) rtMemCacheGrabObj(PRTMEMCACHEPAGE pPage)
{
    int32_t cFreeNew = ASMAtomicDecS32(&pPage->cFree);
    if (cFreeNew < 0)
    {
        ASMAtomicIncS32(&pPage->cFree);
        return -1;
    }
    return cFreeNew;
}


RTDECL(int) RTMemCacheAllocEx(RTMEMCACHE hMemCache, void **ppvObj)
{
    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_PARAMETER);

    /*
     * Try grab a free object from the stack.
     */
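    /* The whole stack is detached with an atomic XCHG; if it held more than one
     * object the remainder is pushed back, and any objects that raced onto the
     * (briefly empty) stack meanwhile are really freed via rtMemCacheFreeList. */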
    PRTMEMCACHEFREEOBJ pObj = ASMAtomicUoReadPtrT(&pThis->pFreeTop, PRTMEMCACHEFREEOBJ);
    if (pObj)
    {
        pObj = ASMAtomicXchgPtrT(&pThis->pFreeTop, NULL, PRTMEMCACHEFREEOBJ);
        if (pObj)
        {
            if (pObj->pNext)
            {
                Assert(pObj->pNext != pObj);
                PRTMEMCACHEFREEOBJ pAllocRace = ASMAtomicXchgPtrT(&pThis->pFreeTop, pObj->pNext, PRTMEMCACHEFREEOBJ);
                if (pAllocRace)
                    rtMemCacheFreeList(pThis, pAllocRace);
            }

            pObj->pNext = NULL;
            *ppvObj = pObj;
            return VINF_SUCCESS;
        }
    }

    /*
     * Try grab a free object at the cache level.
     */
    int32_t cNewFree = ASMAtomicDecS32(&pThis->cFree);
    if (RT_LIKELY(cNewFree < 0))
    {
        uint32_t cTotal = ASMAtomicUoReadU32(&pThis->cTotal);
        if (   (uint32_t)(cTotal + -cNewFree) > pThis->cMax
            || (uint32_t)(cTotal + -cNewFree) <= cTotal)
        {
            ASMAtomicIncS32(&pThis->cFree);
            return VERR_MEM_CACHE_MAX_SIZE;
        }

        int rc = rtMemCacheGrow(pThis);
        if (RT_FAILURE(rc))
        {
            ASMAtomicIncS32(&pThis->cFree);
            return rc;
        }
    }

    /*
     * Grab a free object at the page level.
     */
    PRTMEMCACHEPAGE pPage = ASMAtomicReadPtrT(&pThis->pPageHint, PRTMEMCACHEPAGE);
    int32_t iObj = pPage ? rtMemCacheGrabObj(pPage) : -1;
    if (iObj < 0)
    {
        for (unsigned cLoops = 0; ; cLoops++)
        {
            for (pPage = pThis->pPageHead; pPage; pPage = pPage->pNext)
            {
                iObj = rtMemCacheGrabObj(pPage);
                if (iObj >= 0)
                {
                    if (iObj > 0)
                        ASMAtomicWritePtr(&pThis->pPageHint, pPage);
                    break;
                }
            }
            if (iObj >= 0)
                break;
            Assert(cLoops != 2);
            Assert(cLoops < 10);
        }
    }
    Assert(iObj >= 0);
    Assert((uint32_t)iObj < pThis->cMax);

    /*
     * Find a free object in the allocation bitmap.  Use the new cFree count
     * as a hint.
     */
    if (ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
    {
        for (unsigned cLoops2 = 0;; cLoops2++)
        {
            iObj = ASMBitFirstClear(pPage->pbmAlloc, pThis->cBits);
            if (RT_LIKELY(iObj >= 0))
            {
                if (!ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
                    break;
            }
            else
                ASMMemoryFence();
            Assert(cLoops2 != 40);
        }
        Assert(iObj >= 0);
    }
    void *pvObj = &pPage->pbObjects[iObj * pThis->cbObject];
    Assert((uintptr_t)pvObj - (uintptr_t)pPage < PAGE_SIZE);

    /*
     * Call the constructor?
     */
    if (   pThis->pfnCtor
        && !ASMAtomicBitTestAndSet(pPage->pbmCtor, iObj))
    {
        int rc = pThis->pfnCtor(hMemCache, pvObj, pThis->pvUser);
        if (RT_FAILURE(rc))
        {
            ASMAtomicBitClear(pPage->pbmCtor, iObj);
            RTMemCacheFree(pThis, pvObj);
            return rc;
        }
    }

    *ppvObj = pvObj;
    return VINF_SUCCESS;
}


RTDECL(void *) RTMemCacheAlloc(RTMEMCACHE hMemCache)
{
    void *pvObj;
    int rc = RTMemCacheAllocEx(hMemCache, &pvObj);
    if (RT_SUCCESS(rc))
        return pvObj;
    return NULL;
}



/**
 * Really frees one object.
 *
 * @param   pThis       The memory cache.
 * @param   pvObj       The memory object to free.
 */
static void rtMemCacheFreeOne(RTMEMCACHEINT *pThis, void *pvObj)
{
    /* Note: Do *NOT* attempt to poison the object! */

    /*
     * Find the cache page.  The page structure is at the start of the page.
     */
    PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~(uintptr_t)PAGE_OFFSET_MASK);
    Assert(pPage->pCache == pThis);
    Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);

    /*
     * Clear the bitmap bit and update the two object counters.  Order matters!
     */
    uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
    uintptr_t iObj = offObj / pThis->cbObject;
    Assert(iObj * pThis->cbObject == offObj);
    Assert(iObj < pThis->cPerPage);
    AssertReturnVoid(ASMAtomicBitTestAndClear(pPage->pbmAlloc, iObj));

    ASMAtomicIncS32(&pPage->cFree);
    ASMAtomicIncS32(&pThis->cFree);
}


/**
 * Really frees a list of 'freed' objects.
 *
 * @param   pThis       The memory cache.
 * @param   pHead       The head of the list.
 */
static void rtMemCacheFreeList(RTMEMCACHEINT *pThis, PRTMEMCACHEFREEOBJ pHead)
{
    while (pHead)
    {
        PRTMEMCACHEFREEOBJ pFreeMe = pHead;
        pHead = pHead->pNext;
        pFreeMe->pNext = NULL;
        ASMCompilerBarrier();
        rtMemCacheFreeOne(pThis, pFreeMe);
    }
}


RTDECL(void) RTMemCacheFree(RTMEMCACHE hMemCache, void *pvObj)
{
    if (!pvObj)
        return;

    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->u32Magic == RTMEMCACHE_MAGIC);

    AssertPtr(pvObj);
    Assert(RT_ALIGN_P(pvObj, pThis->cbAlignment) == pvObj);

    if (!pThis->fUseFreeList)
        rtMemCacheFreeOne(pThis, pvObj);
    else
    {
# ifdef RT_STRICT
        /* This is the same as the other branch, except it's not actually freed. */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~(uintptr_t)PAGE_OFFSET_MASK);
        Assert(pPage->pCache == pThis);
        Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);
        uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
        uintptr_t iObj = offObj / pThis->cbObject;
        Assert(iObj * pThis->cbObject == offObj);
        Assert(iObj < pThis->cPerPage);
        AssertReturnVoid(ASMBitTest(pPage->pbmAlloc, (int32_t)iObj));
# endif

        /*
         * Push it onto the free stack.
         */
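        /* Same detach-and-restore pattern as the allocation path: grab the
         * current stack as our pNext, swap ourselves in as the new top, and
         * really free any list that raced in while the top was NULL. */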
        PRTMEMCACHEFREEOBJ pObj = (PRTMEMCACHEFREEOBJ)pvObj;
        pObj->pNext = ASMAtomicXchgPtrT(&pThis->pFreeTop, NULL, PRTMEMCACHEFREEOBJ);
        PRTMEMCACHEFREEOBJ pFreeRace = ASMAtomicXchgPtrT(&pThis->pFreeTop, pObj, PRTMEMCACHEFREEOBJ);
        if (pFreeRace)
            rtMemCacheFreeList(pThis, pFreeRace);
    }
}

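A minimal usage sketch of the public API implemented above (not part of the file; the function name and the object size/alignment values are illustrative): create a cache of fixed-size objects, allocate and free one, then destroy the cache.

#include <iprt/memcache.h>
#include <iprt/err.h>

static int tstMemCacheUsage(void)   /* illustrative name, not from this file */
{
    /* 128-byte objects, 32-byte aligned, no object cap, no ctor/dtor. */
    RTMEMCACHE hCache;
    int rc = RTMemCacheCreate(&hCache, 128 /*cbObject*/, 32 /*cbAlignment*/,
                              UINT32_MAX /*cMaxObjects*/, NULL /*pfnCtor*/,
                              NULL /*pfnDtor*/, NULL /*pvUser*/, 0 /*fFlags*/);
    if (RT_FAILURE(rc))
        return rc;

    /* RTMemCacheAlloc returns NULL on failure; RTMemCacheAllocEx returns a status code. */
    void *pvObj = RTMemCacheAlloc(hCache);
    if (pvObj)
        RTMemCacheFree(hCache, pvObj);

    /* Objects not freed yet are simply torn down together with the cache. */
    return RTMemCacheDestroy(hCache);
}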