/*************************************************************************
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * Copyright 2000, 2010 Oracle and/or its affiliates.
 *
 * OpenOffice.org - a multi-platform office productivity suite
 *
 * This file is part of OpenOffice.org.
 *
 * OpenOffice.org is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License version 3
 * only, as published by the Free Software Foundation.
 *
 * OpenOffice.org is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License version 3 for more details
 * (a copy is included in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU Lesser General Public License
 * version 3 along with OpenOffice.org.  If not, see
 * <http://www.openoffice.org/license.html>
 * for a copy of the LGPLv3 License.
 *
 ************************************************************************/

#define _BSD_SOURCE /* sys/mman.h: MAP_ANON */
#include "alloc_arena.h"

#include "alloc_impl.h"
#include "internal/once.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#include <string.h>
#include <stdio.h>

#ifdef OS2
#undef OSL_TRACE
#define OSL_TRACE 1 ? ((void)0) : _OSL_GLOBAL osl_trace
#define INCL_DOS
#include <os2.h>
#endif

/* ================================================================= *
 *
 * arena internals.
 *
 * ================================================================= */

/** g_arena_list
 *  @internal
 */
struct rtl_arena_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_arena_type       m_arena_head;
};

static struct rtl_arena_list_st g_arena_list;


/** gp_arena_arena
 *  provided for arena_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_arena_arena = 0;


/** gp_machdep_arena
 *
 *  Low level virtual memory (pseudo) arena
 *  (platform dependent implementation)
 *
 *  @internal
 */
static rtl_arena_type * gp_machdep_arena = 0;


static void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size *       pSize
);

static void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void *           pAddr,
    sal_Size         nSize
);

static sal_Size
rtl_machdep_pagesize (void);


/** gp_default_arena
 */
rtl_arena_type * gp_default_arena = 0;


/** rtl_arena_init()
 *  @internal
 */
static int
rtl_arena_init (void);


/* ================================================================= */

/** rtl_arena_segment_constructor()
 */
static int
rtl_arena_segment_constructor (void * obj)
{
    rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);

    QUEUE_START_NAMED(segment, s);
    QUEUE_START_NAMED(segment, f);

    return (1);
}


/** rtl_arena_segment_destructor()
 */
static void
rtl_arena_segment_destructor (void * obj)
{
#if OSL_DEBUG_LEVEL == 0
    (void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
    rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);

    OSL_ASSERT(QUEUE_STARTED_NAMED(segment, s));
    OSL_ASSERT(QUEUE_STARTED_NAMED(segment, f));
#endif /* OSL_DEBUG_LEVEL */
}

/* ================================================================= */

/** rtl_arena_segment_populate()
 *
 *  @precond arena->m_lock acquired.
 */
static int
rtl_arena_segment_populate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type *span;
    sal_Size                size = rtl_machdep_pagesize();

    span = rtl_machdep_alloc(gp_machdep_arena, &size);
    if (span != 0)
    {
        rtl_arena_segment_type *first, *last, *head;
        sal_Size                count = size / sizeof(rtl_arena_segment_type);

        /* insert onto reserve span list */
        QUEUE_INSERT_TAIL_NAMED(&(arena->m_segment_reserve_span_head), span, s);
        QUEUE_START_NAMED(span, f);
        span->m_addr = (sal_uIntPtr)(span);
        span->m_size = size;
        span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;

        /* insert onto reserve list */
        head = &(arena->m_segment_reserve_head);
        for (first = span + 1, last = span + count; first < last; ++first)
        {
            QUEUE_INSERT_TAIL_NAMED(head, first, s);
            QUEUE_START_NAMED(first, f);
            first->m_addr = 0;
            first->m_size = 0;
            first->m_type = 0;
        }
    }
    return (span != 0);
}


/** rtl_arena_segment_get()
 *
 *  @precond arena->m_lock acquired.
 *  @precond (*ppSegment == 0)
 */
static RTL_MEMORY_INLINE void
rtl_arena_segment_get (
    rtl_arena_type *          arena,
    rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    OSL_ASSERT(*ppSegment == 0);

    head = &(arena->m_segment_reserve_head);
    if ((head->m_snext != head) || rtl_arena_segment_populate (arena))
    {
        (*ppSegment) = head->m_snext;
        QUEUE_REMOVE_NAMED((*ppSegment), s);
    }
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_arena_segment_get)
#endif


/** rtl_arena_segment_put()
 *
 *  @precond  arena->m_lock acquired.
 *  @postcond (*ppSegment == 0)
 */
static RTL_MEMORY_INLINE void
rtl_arena_segment_put (
    rtl_arena_type *          arena,
    rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    OSL_ASSERT(QUEUE_STARTED_NAMED((*ppSegment), s));
    OSL_ASSERT(QUEUE_STARTED_NAMED((*ppSegment), f));

    (*ppSegment)->m_addr = 0;
    (*ppSegment)->m_size = 0;

    OSL_ASSERT((*ppSegment)->m_type != RTL_ARENA_SEGMENT_TYPE_HEAD);
    (*ppSegment)->m_type = 0;

    /* keep as reserve */
    head = &(arena->m_segment_reserve_head);
    QUEUE_INSERT_HEAD_NAMED(head, (*ppSegment), s);

    /* clear */
    (*ppSegment) = 0;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_arena_segment_put)
#endif

/* ================================================================= */
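
/* Free segments are kept on RTL_ARENA_FREELIST_SIZE power-of-two size
 * classes: a segment of size s is linked onto
 * m_freelist_head[highbit(s) - 1], and m_freelist_bitmap has the bit
 * head->m_size (itself a power of two) set while that list is non-empty.
 * rtl_arena_segment_alloc() below can therefore pick a list that is
 * guaranteed to satisfy a power-of-two rounded request with a single
 * lowbit() scan of the bitmap ("instant fit"); only requests that cannot
 * be rounded up within the table fall back to a first-fit search of the
 * largest class.  Worked example (size chosen for illustration only):
 * s = 3072 gives highbit(s) = 12, so the segment sits on
 * m_freelist_head[11], whose m_size is 2048, and bitmap bit 0x800 is set
 * while that list holds at least one segment.
 */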

/** rtl_arena_freelist_insert()
 *
 *  @precond arena->m_lock acquired.
 */
static RTL_MEMORY_INLINE void
rtl_arena_freelist_insert (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type * head;

    head = &(arena->m_freelist_head[highbit(segment->m_size) - 1]);
    QUEUE_INSERT_TAIL_NAMED(head, segment, f);

    arena->m_freelist_bitmap |= head->m_size;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_arena_freelist_insert)
#endif /* __SUNPRO_C */


/** rtl_arena_freelist_remove()
 *
 *  @precond arena->m_lock acquired.
 */
static RTL_MEMORY_INLINE void
rtl_arena_freelist_remove (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    if ((segment->m_fnext->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD) &&
        (segment->m_fprev->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD)    )
    {
        rtl_arena_segment_type * head;

        head = segment->m_fprev;
        OSL_ASSERT(arena->m_freelist_bitmap & head->m_size);
        arena->m_freelist_bitmap ^= head->m_size;
    }
    QUEUE_REMOVE_NAMED(segment, f);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_arena_freelist_remove)
#endif /* __SUNPRO_C */


/* ================================================================= */

/** RTL_ARENA_HASH_INDEX()
 */
#define RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_ARENA_HASH_INDEX(arena, addr) \
    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))
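
/* The hash index folds the segment address onto itself twice (shifted right
 * by m_hash_shift and by 2 * m_hash_shift) to mix higher-order address bits
 * into the low ones, drops the quantum-aligned low bits (>> m_quantum_shift),
 * and masks the result to the table size (m_hash_size is a power of two).
 * For a hypothetical arena with m_quantum_shift == 12 and m_hash_shift == 6
 * (i.e. m_hash_size == 64) the macro expands to
 *
 *   ((addr + (addr >> 6) + (addr >> 12)) >> 12) & 63
 */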

/** rtl_arena_hash_rescale()
 *
 *  @precond arena->m_lock released.
 */
static void
rtl_arena_hash_rescale (
    rtl_arena_type * arena,
    sal_Size         new_size
)
{
    rtl_arena_segment_type ** new_table;
    sal_Size                  new_bytes;

    new_bytes = new_size * sizeof(rtl_arena_segment_type*);
    new_table = (rtl_arena_segment_type **)rtl_arena_alloc (gp_arena_arena, &new_bytes);

    if (new_table != 0)
    {
        rtl_arena_segment_type ** old_table;
        sal_Size                  old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

        old_table = arena->m_hash_table;
        old_size  = arena->m_hash_size;

        OSL_TRACE(
            "rtl_arena_hash_rescale(\"%s\"): "
            "nseg: %"PRIu64" (ave: %"PRIu64"), frees: %"PRIu64" "
            "[old_size: %lu, new_size: %lu]",
            arena->m_name,
            arena->m_stats.m_alloc - arena->m_stats.m_free,
            (arena->m_stats.m_alloc - arena->m_stats.m_free) >> arena->m_hash_shift,
            arena->m_stats.m_free,
            old_size, new_size
        );

#if 0  /* DBG */
        for (i = 0; i < arena->m_hash_size; i++)
        {
            sal_Size k = 0; rtl_arena_segment_type ** segpp = &(arena->m_hash_table[i]);
            while (*segpp)
            {
                k += 1;
                segpp = &((*segpp)->m_fnext);
            }
            fprintf(stdout, "%d, ", k);
        }
        fprintf(stdout, "\n");
#endif /* DBG */

        arena->m_hash_table = new_table;
        arena->m_hash_size  = new_size;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_arena_segment_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_arena_segment_type  * next = curr->m_fnext;
                rtl_arena_segment_type ** head;

                head = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, curr->m_addr)]);
                curr->m_fnext = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

        if (old_table != arena->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_arena_segment_type*);
            rtl_arena_free (gp_arena_arena, old_table, old_bytes);
        }
    }
}


/** rtl_arena_hash_insert()
 *  ...and update stats.
 */
static RTL_MEMORY_INLINE void
rtl_arena_hash_insert (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type ** ppSegment;

    ppSegment = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, segment->m_addr)]);

    segment->m_fnext = (*ppSegment);
    (*ppSegment) = segment;

    arena->m_stats.m_alloc     += 1;
    arena->m_stats.m_mem_alloc += segment->m_size;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_arena_hash_insert)
#endif /* __SUNPRO_C */


/** rtl_arena_hash_remove()
 *  ...and update stats.
 */
static rtl_arena_segment_type *
rtl_arena_hash_remove (
    rtl_arena_type * arena,
    sal_uIntPtr      addr,
    sal_Size         size
)
{
    rtl_arena_segment_type *segment, **segpp;
    sal_Size lookups = 0;

#if OSL_DEBUG_LEVEL == 0
    (void) size; /* unused */
#endif /* OSL_DEBUG_LEVEL */

    segpp = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, addr)]);
    while ((segment = *segpp) != 0)
    {
        if (segment->m_addr == addr)
        {
            *segpp = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
            break;
        }

        /* update lookup miss stats */
        lookups += 1;
        segpp = &(segment->m_fnext);
    }

    OSL_POSTCOND(segment != 0, "rtl_arena_hash_remove(): bad free.");
    if (segment != 0)
    {
        OSL_POSTCOND(segment->m_size == size, "rtl_arena_hash_remove(): wrong size.");

        arena->m_stats.m_free      += 1;
        arena->m_stats.m_mem_alloc -= segment->m_size;

        if (lookups > 1)
        {
            sal_Size nseg = (sal_Size)(arena->m_stats.m_alloc - arena->m_stats.m_free);
            if (nseg > 4 * arena->m_hash_size)
            {
                if (!(arena->m_flags & RTL_ARENA_FLAG_RESCALE))
                {
                    sal_Size ave = nseg >> arena->m_hash_shift;
                    sal_Size new_size = arena->m_hash_size << (highbit(ave) - 1);

                    arena->m_flags |= RTL_ARENA_FLAG_RESCALE;
                    RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
                    rtl_arena_hash_rescale (arena, new_size);
                    RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                    arena->m_flags &= ~RTL_ARENA_FLAG_RESCALE;
                }
            }
        }
    }

    return (segment);
}

/* ================================================================= */

/** rtl_arena_segment_alloc()
 *  allocate (and remove) segment from freelist
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 */
static int
rtl_arena_segment_alloc (
    rtl_arena_type *          arena,
    sal_Size                  size,
    rtl_arena_segment_type ** ppSegment
)
{
    int index = 0;

    OSL_ASSERT(*ppSegment == 0);
    if (!RTL_MEMORY_ISP2(size))
    {
        int msb = highbit(size);
        if (RTL_ARENA_FREELIST_SIZE == SAL_INT_CAST(size_t, msb))
        {
            /* highest possible freelist: fall back to first fit */
            rtl_arena_segment_type *head, *segment;

            head = &(arena->m_freelist_head[msb - 1]);
            for (segment = head->m_fnext; segment != head; segment = segment->m_fnext)
            {
                if (segment->m_size >= size)
                {
                    /* allocate first fit segment */
                    (*ppSegment) = segment;
                    break;
                }
            }
            goto dequeue_and_leave;
        }

        /* roundup to next power of 2 */
        size = (1UL << msb);
    }

    index = lowbit(RTL_MEMORY_P2ALIGN(arena->m_freelist_bitmap, size));
    if (index > 0)
    {
        /* instant fit: allocate first free segment */
        rtl_arena_segment_type *head;

        head = &(arena->m_freelist_head[index - 1]);
        (*ppSegment) = head->m_fnext;
        OSL_ASSERT((*ppSegment) != head);
    }

dequeue_and_leave:
    if (*ppSegment != 0)
    {
        /* remove from freelist */
        rtl_arena_freelist_remove (arena, (*ppSegment));
    }
    return (*ppSegment != 0);
}


/** rtl_arena_segment_create()
 *  import new (span) segment from source arena
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 */
static int
rtl_arena_segment_create (
    rtl_arena_type *          arena,
    sal_Size                  size,
    rtl_arena_segment_type ** ppSegment
)
{
    OSL_ASSERT((*ppSegment) == 0);
    if (arena->m_source_alloc != 0)
    {
        rtl_arena_segment_get (arena, ppSegment);
        if (*ppSegment != 0)
        {
            rtl_arena_segment_type * span = 0;
            rtl_arena_segment_get (arena, &span);
            if (span != 0)
            {
                /* import new span from source arena */
                RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                span->m_size = size;
                span->m_addr = (sal_uIntPtr)(arena->m_source_alloc)(
                    arena->m_source_arena, &(span->m_size));

                RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                if (span->m_addr != 0)
                {
                    /* insert onto segment list, update stats */
                    span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
                    QUEUE_INSERT_HEAD_NAMED(&(arena->m_segment_head), span, s);
                    arena->m_stats.m_mem_total += span->m_size;

                    (*ppSegment)->m_addr = span->m_addr;
                    (*ppSegment)->m_size = span->m_size;
                    (*ppSegment)->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                    QUEUE_INSERT_HEAD_NAMED(span, (*ppSegment), s);

                    /* report success */
                    return (1);
                }
                rtl_arena_segment_put (arena, &span);
            }
            rtl_arena_segment_put (arena, ppSegment);
        }
    }
    return (0);
}


/** rtl_arena_segment_coalesce()
 *  mark as free and join with adjacent free segment(s)
 *
 *  @precond arena->m_lock acquired
 *  @precond segment marked 'used'
 */
static void
rtl_arena_segment_coalesce (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type *next, *prev;

    /* mark segment free */
    OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_USED);
    segment->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;

    /* try to merge w/ next segment */
    next = segment->m_snext;
    if (next->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        OSL_ASSERT(segment->m_addr + segment->m_size == next->m_addr);
        segment->m_size += next->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, next);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(next, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &next);
    }

    /* try to merge w/ prev segment */
    prev = segment->m_sprev;
    if (prev->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        OSL_ASSERT(prev->m_addr + prev->m_size == segment->m_addr);
        segment->m_addr  = prev->m_addr;
        segment->m_size += prev->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, prev);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(prev, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &prev);
    }
}

/* ================================================================= */

/** rtl_arena_constructor()
 */
static void
rtl_arena_constructor (void * obj)
{
    rtl_arena_type * arena = (rtl_arena_type*)(obj);
    rtl_arena_segment_type * head;
    size_t i;

    memset (arena, 0, sizeof(rtl_arena_type));

    QUEUE_START_NAMED(arena, arena_);

    (void) RTL_MEMORY_LOCK_INIT(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_reserve_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);
        rtl_arena_segment_constructor (head);

        head->m_size = (1UL << i);
        head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
    }

    arena->m_hash_table = arena->m_hash_table_0;
    arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
    arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
}


/** rtl_arena_destructor()
 */
static void
rtl_arena_destructor (void * obj)
{
    rtl_arena_type * arena = (rtl_arena_type*)(obj);
    rtl_arena_segment_type * head;
    size_t i;

    OSL_ASSERT(QUEUE_STARTED_NAMED(arena, arena_));

    RTL_MEMORY_LOCK_DESTROY(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_reserve_head);
    OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_head);
    OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);

        OSL_ASSERT(head->m_size == (1UL << i));
        OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);

        rtl_arena_segment_destructor (head);
    }

    OSL_ASSERT(arena->m_hash_table == arena->m_hash_table_0);
    OSL_ASSERT(arena->m_hash_size  == RTL_ARENA_HASH_SIZE);
    OSL_ASSERT(
        arena->m_hash_shift ==
        SAL_INT_CAST(unsigned, highbit(arena->m_hash_size) - 1));
}

/* ================================================================= */

/** rtl_arena_activate()
 */
static rtl_arena_type *
rtl_arena_activate (
    rtl_arena_type *   arena,
    const char *       name,
    sal_Size           quantum,
    sal_Size           quantum_cache_max,
    rtl_arena_type *   source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size)
)
{
    OSL_ASSERT(arena != 0);
    if (arena != 0)
    {
        (void) snprintf (arena->m_name, sizeof(arena->m_name), "%s", name);

        if (!RTL_MEMORY_ISP2(quantum))
        {
            /* roundup to next power of 2 */
            quantum = (1UL << highbit(quantum));
        }
        quantum_cache_max = RTL_MEMORY_P2ROUNDUP(quantum_cache_max, quantum);

        arena->m_quantum = quantum;
        arena->m_quantum_shift = highbit(arena->m_quantum) - 1;
        arena->m_qcache_max = quantum_cache_max;

        arena->m_source_arena = source_arena;
        arena->m_source_alloc = source_alloc;
        arena->m_source_free  = source_free;

        if (arena->m_qcache_max > 0)
        {
            char name[RTL_ARENA_NAME_LENGTH + 1];
            int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);

            sal_Size size = n * sizeof(rtl_cache_type*);
            arena->m_qcache_ptr = (rtl_cache_type**)rtl_arena_alloc (gp_arena_arena, &size);
            if (!(arena->m_qcache_ptr))
            {
                /* out of memory */
                return (0);
            }
            for (i = 1; i <= n; i++)
            {
                size = i * arena->m_quantum;
                (void) snprintf (name, sizeof(name), "%s_%lu", arena->m_name, size);
                arena->m_qcache_ptr[i - 1] = rtl_cache_create(name, size, 0, NULL, NULL, NULL, NULL, arena, RTL_CACHE_FLAG_QUANTUMCACHE);
            }
        }

        /* insert into arena list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_arena_list.m_arena_head), arena, arena_);
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    return (arena);
}
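
/* Quantum caching: when an arena is created with quantum_cache_max > 0,
 * rtl_arena_activate() above sets up one rtl_cache per quantum multiple up
 * to m_qcache_max.  rtl_arena_alloc() / rtl_arena_free() then serve requests
 * whose (quantum aligned) size is <= m_qcache_max from
 * m_qcache_ptr[size / quantum - 1] instead of the segment lists, without
 * taking the arena lock on that path.
 */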

/** rtl_arena_deactivate()
 */
static void
rtl_arena_deactivate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type * head, * segment;

    /* remove from arena list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
    QUEUE_REMOVE_NAMED(arena, arena_);
    RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));

    /* cleanup quantum cache(s) */
    if ((arena->m_qcache_max > 0) && (arena->m_qcache_ptr != 0))
    {
        int i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
        for (i = 1; i <= n; i++)
        {
            if (arena->m_qcache_ptr[i - 1] != 0)
            {
                rtl_cache_destroy (arena->m_qcache_ptr[i - 1]);
                arena->m_qcache_ptr[i - 1] = 0;
            }
        }
        rtl_arena_free (
            gp_arena_arena,
            arena->m_qcache_ptr,
            n * sizeof(rtl_cache_type*));

        arena->m_qcache_ptr = 0;
    }

    /* check for leaked segments */
    OSL_TRACE(
        "rtl_arena_deactivate(\"%s\"): "
        "allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu",
        arena->m_name,
        arena->m_stats.m_alloc, arena->m_stats.m_free,
        arena->m_stats.m_mem_total, arena->m_stats.m_mem_alloc
    );
    if (arena->m_stats.m_alloc > arena->m_stats.m_free)
    {
        sal_Size i, n;

        OSL_TRACE(
            "rtl_arena_deactivate(\"%s\"): "
            "cleaning up %"PRIu64" leaked segment(s) [%lu bytes]",
            arena->m_name,
            arena->m_stats.m_alloc - arena->m_stats.m_free,
            arena->m_stats.m_mem_alloc
        );

        /* cleanup still used segment(s) */
        for (i = 0, n = arena->m_hash_size; i < n; i++)
        {
            while ((segment = arena->m_hash_table[i]) != 0)
            {
                /* pop from hash table */
                arena->m_hash_table[i] = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }
        }
    }

    /* cleanup hash table */
    if (arena->m_hash_table != arena->m_hash_table_0)
    {
        rtl_arena_free (
            gp_arena_arena,
            arena->m_hash_table,
            arena->m_hash_size * sizeof(rtl_arena_segment_type*));

        arena->m_hash_table = arena->m_hash_table_0;
        arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
    }

    /* cleanup segment list */
    head = &(arena->m_segment_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        if (segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
        {
            /* remove from freelist */
            rtl_arena_freelist_remove (arena, segment);
        }
        else
        {
            /* can have only free and span segments here */
            OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
        }

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &segment);
    }

    /* cleanup segment reserve list */
    head = &(arena->m_segment_reserve_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);
    }

    /* cleanup segment reserve span(s) */
    head = &(arena->m_segment_reserve_span_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* can have only span segments here */
        OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* return span to g_machdep_arena */
        rtl_machdep_free (gp_machdep_arena, (void*)(segment->m_addr), segment->m_size);
    }
}

/* ================================================================= *
 *
 * arena implementation.
 *
 * ================================================================= */
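
/* Illustrative usage sketch for the public entry points below; the arena
 * name, quantum, and request size are arbitrary example values:
 *
 *   sal_Size size = 100;
 *   rtl_arena_type * arena = rtl_arena_create (
 *       "example_arena",    [name]
 *       8,                  [quantum]
 *       0,                  [no quantum caching]
 *       NULL,               [source arena; NULL selects gp_default_arena]
 *       rtl_arena_alloc,    [source alloc]
 *       rtl_arena_free,     [source free]
 *       0);                 [flags, unused]
 *   void * p = rtl_arena_alloc (arena, &size);
 *       [on return, size holds the actually allocated, quantum aligned amount]
 *   rtl_arena_free (arena, p, size);
 *       [the size passed back should be the one reported by rtl_arena_alloc()]
 *   rtl_arena_destroy (arena);
 */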

/** rtl_arena_create()
 */
rtl_arena_type *
SAL_CALL rtl_arena_create (
    const char *       name,
    sal_Size           quantum,
    sal_Size           quantum_cache_max,
    rtl_arena_type *   source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size),
    int                flags
) SAL_THROW_EXTERN_C()
{
    rtl_arena_type * result = 0;
    sal_Size         size   = sizeof(rtl_arena_type);

    (void) flags; /* unused */

try_alloc:
    result = (rtl_arena_type*)rtl_arena_alloc (gp_arena_arena, &size);
    if (result != 0)
    {
        rtl_arena_type * arena = result;
        VALGRIND_CREATE_MEMPOOL(arena, 0, 0);
        rtl_arena_constructor (arena);

        if (!source_arena)
        {
            OSL_ASSERT(gp_default_arena != 0);
            source_arena = gp_default_arena;
        }

        result = rtl_arena_activate (
            arena,
            name,
            quantum,
            quantum_cache_max,
            source_arena,
            source_alloc,
            source_free
        );

        if (result == 0)
        {
            rtl_arena_deactivate (arena);
            rtl_arena_destructor (arena);
            VALGRIND_DESTROY_MEMPOOL(arena);
            rtl_arena_free (gp_arena_arena, arena, size);
        }
    }
    else if (gp_arena_arena == 0)
    {
        if (rtl_arena_init())
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}

/** rtl_arena_destroy()
 */
void
SAL_CALL rtl_arena_destroy (
    rtl_arena_type * arena
)
{
    if (arena != 0)
    {
        rtl_arena_deactivate (arena);
        rtl_arena_destructor (arena);
        VALGRIND_DESTROY_MEMPOOL(arena);
        rtl_arena_free (gp_arena_arena, arena, sizeof(rtl_arena_type));
    }
}

/** rtl_arena_alloc()
 */
void *
SAL_CALL rtl_arena_alloc (
    rtl_arena_type * arena,
    sal_Size *       pSize
) SAL_THROW_EXTERN_C()
{
    void * addr = 0;

    if ((arena != 0) && (pSize != 0))
    {
        sal_Size size = RTL_MEMORY_ALIGN((*pSize), arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* allocate from segment list */
            rtl_arena_segment_type *segment = 0;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
            if (rtl_arena_segment_alloc (arena, size, &segment) ||
                rtl_arena_segment_create(arena, size, &segment)    )
            {
                /* shrink to fit */
                sal_Size oversize;

                /* mark segment used */
                OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE);
                segment->m_type = RTL_ARENA_SEGMENT_TYPE_USED;

                /* resize */
                OSL_ASSERT(segment->m_size >= size);
                oversize = segment->m_size - size;
                if (oversize >= SAL_MAX(arena->m_quantum, arena->m_qcache_max))
                {
                    rtl_arena_segment_type * remainder = 0;
                    rtl_arena_segment_get (arena, &remainder);
                    if (remainder != 0)
                    {
                        segment->m_size = size;

                        remainder->m_addr = segment->m_addr + segment->m_size;
                        remainder->m_size = oversize;
                        remainder->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                        QUEUE_INSERT_HEAD_NAMED(segment, remainder, s);

                        rtl_arena_freelist_insert (arena, remainder);
                    }
                }

                rtl_arena_hash_insert (arena, segment);

                /* DEBUG ONLY: mark allocated, undefined */
                OSL_DEBUG_ONLY(memset((void*)(segment->m_addr), 0x77777777, segment->m_size));
                VALGRIND_MEMPOOL_ALLOC(arena, segment->m_addr, segment->m_size);

                (*pSize) = segment->m_size;
                addr = (void*)(segment->m_addr);
            }
            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* allocate from quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            OSL_ASSERT (arena->m_qcache_ptr[index] != 0);

            addr = rtl_cache_alloc (arena->m_qcache_ptr[index]);
            if (addr != 0)
                (*pSize) = size;
        }
    }
    return (addr);
}
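
/* Note on the size contract: rtl_arena_alloc() rounds the request up to the
 * arena quantum, may hand out an even larger segment when the surplus is too
 * small to split off (see the "shrink to fit" branch above), and reports the
 * final size back through *pSize.  rtl_arena_free() expects that reported
 * size; the debug check in rtl_arena_hash_remove() flags a mismatch as
 * "wrong size".
 */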

/** rtl_arena_free()
 */
void
SAL_CALL rtl_arena_free (
    rtl_arena_type * arena,
    void *           addr,
    sal_Size         size
) SAL_THROW_EXTERN_C()
{
    if (arena != 0)
    {
        size = RTL_MEMORY_ALIGN(size, arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* free to segment list */
            rtl_arena_segment_type * segment;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

            segment = rtl_arena_hash_remove (arena, (sal_uIntPtr)(addr), size);
            if (segment != 0)
            {
                rtl_arena_segment_type *next, *prev;

                /* DEBUG ONLY: mark unallocated, undefined */
                VALGRIND_MEMPOOL_FREE(arena, segment->m_addr);
                /* OSL_DEBUG_ONLY() */ VALGRIND_MAKE_MEM_UNDEFINED(segment->m_addr, segment->m_size);
                OSL_DEBUG_ONLY(memset((void*)(segment->m_addr), 0x33333333, segment->m_size));

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* determine (new) next and prev segment */
                next = segment->m_snext, prev = segment->m_sprev;

                /* entire span free when prev is a span, and next is either a span or a list head */
                if (((prev->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)) &&
                    ((next->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)  ||
                     (next->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD))    )
                {
                    OSL_ASSERT((prev->m_addr == segment->m_addr) &&
                               (prev->m_size == segment->m_size)    );

                    if (arena->m_source_free)
                    {
                        addr = (void*)(prev->m_addr);
                        size = prev->m_size;

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(segment, s);

                        /* release segment descriptor */
                        rtl_arena_segment_put (arena, &segment);

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(prev, s);

                        /* release (span) segment descriptor */
                        rtl_arena_segment_put (arena, &prev);

                        /* update stats, return span to source arena */
                        arena->m_stats.m_mem_total -= size;
                        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                        (arena->m_source_free)(arena->m_source_arena, addr, size);
                        return;
                    }
                }

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }

            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* free to quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            OSL_ASSERT (arena->m_qcache_ptr[index] != 0);

            rtl_cache_free (arena->m_qcache_ptr[index], addr);
        }
    }
}

/* ================================================================= *
 *
 * machdep internals.
 *
 * ================================================================= */

#if defined(SAL_UNX)
#include <sys/mman.h>
#elif defined(SAL_W32) || defined(SAL_OS2)
#define MAP_FAILED 0
#endif /* SAL_UNX || SAL_W32 */

/** rtl_machdep_alloc()
 */
static void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size *       pSize
)
{
    void *   addr;
    sal_Size size = (*pSize);

    OSL_PRECOND(pArena == gp_machdep_arena, "rtl_machdep_alloc(): invalid argument");

#if defined(SOLARIS) && defined(SPARC)
    /* see @ mmap(2) man pages */
    size += (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
    if (size > (4 << 20))
        size = RTL_MEMORY_P2ROUNDUP(size, (4 << 20));
    else if (size > (512 << 10))
        size = RTL_MEMORY_P2ROUNDUP(size, (512 << 10));
    else
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    size -= (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
#else
    /* default allocation granularity */
    size = RTL_MEMORY_P2ROUNDUP(size, SAL_MAX(pArena->m_quantum, 64 << 10));
#endif

#if defined(SAL_UNX)
    addr = mmap (NULL, (size_t)(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#elif defined(SAL_W32)
    addr = VirtualAlloc (NULL, (SIZE_T)(size), MEM_COMMIT, PAGE_READWRITE);
#elif defined(SAL_OS2)
    {
        APIRET rc;
        addr = 0;
        // Use DosAlloc* to get a 4KB page aligned address.
        rc = DosAllocMem( &addr, size, PAG_COMMIT | PAG_READ | PAG_WRITE | OBJ_ANY);
        if (rc) {
            fprintf( stderr, "sal3::DosAllocMem failed rc=%d\n", rc);
            addr = 0;
        }
    }
#endif /* (SAL_UNX || SAL_W32 || SAL_OS2) */

    if (addr != MAP_FAILED)
    {
        pArena->m_stats.m_alloc += 1;
        pArena->m_stats.m_mem_total += size;
        pArena->m_stats.m_mem_alloc += size;

        (*pSize) = size;
        return (addr);
    }
    return (NULL);
}

/** rtl_machdep_free()
 */
static void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void *           pAddr,
    sal_Size         nSize
)
{
    OSL_PRECOND(pArena == gp_machdep_arena, "rtl_machdep_free(): invalid argument");

    pArena->m_stats.m_free += 1;
    pArena->m_stats.m_mem_total -= nSize;
    pArena->m_stats.m_mem_alloc -= nSize;

#if defined(SAL_UNX)
    (void) munmap(pAddr, nSize);
#elif defined(SAL_W32)
    (void) VirtualFree ((LPVOID)(pAddr), (SIZE_T)(0), MEM_RELEASE);
#elif defined(SAL_OS2)
    (void) DosFreeMem( pAddr);
#endif /* (SAL_UNX || SAL_W32) */
}

/** rtl_machdep_pagesize()
 */
static sal_Size
rtl_machdep_pagesize (void)
{
#if defined(SAL_UNX)
#if defined(FREEBSD) || defined(NETBSD)
    return ((sal_Size)getpagesize());
#else  /* POSIX */
    return ((sal_Size)sysconf(_SC_PAGESIZE));
#endif /* xBSD || POSIX */
#elif defined(SAL_W32)
    SYSTEM_INFO info;
    GetSystemInfo (&info);
    return ((sal_Size)(info.dwPageSize));
#elif defined(SAL_OS2)
    ULONG ulPageSize;
    DosQuerySysInfo(QSV_PAGE_SIZE, QSV_PAGE_SIZE, &ulPageSize, sizeof(ULONG));
    return ((sal_Size)ulPageSize);
#endif /* (SAL_UNX || SAL_W32) */
}

/* ================================================================= *
 *
 * arena initialization.
 *
 * ================================================================= */
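
/* Bootstrap order: rtl_arena_once_init() below first activates the machdep
 * (pseudo) arena, which obtains memory directly from the OS (mmap /
 * VirtualAlloc / DosAllocMem), then the default arena on top of it, and
 * finally the internal arena used for arena descriptors, hash tables, and
 * quantum cache pointer arrays.  rtl_arena_init() runs this exactly once,
 * lazily, when rtl_arena_create() finds gp_arena_arena still unset.
 */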

static void
rtl_arena_once_init (void)
{
    {
        /* list of arenas */
        RTL_MEMORY_LOCK_INIT(&(g_arena_list.m_lock));
        rtl_arena_constructor (&(g_arena_list.m_arena_head));
    }
    {
        /* machdep (pseudo) arena */
        static rtl_arena_type g_machdep_arena;

        OSL_ASSERT(gp_machdep_arena == 0);
        VALGRIND_CREATE_MEMPOOL(&g_machdep_arena, 0, 0);
        rtl_arena_constructor (&g_machdep_arena);

        gp_machdep_arena = rtl_arena_activate (
            &g_machdep_arena,
            "rtl_machdep_arena",
            rtl_machdep_pagesize(),
            0,       /* no quantum caching */
            0, 0, 0  /* no source */
        );
        OSL_ASSERT(gp_machdep_arena != 0);
    }
    {
        /* default arena */
        static rtl_arena_type g_default_arena;

        OSL_ASSERT(gp_default_arena == 0);
        VALGRIND_CREATE_MEMPOOL(&g_default_arena, 0, 0);
        rtl_arena_constructor (&g_default_arena);

        gp_default_arena = rtl_arena_activate (
            &g_default_arena,
            "rtl_default_arena",
            rtl_machdep_pagesize(),
            0,                 /* no quantum caching */
            gp_machdep_arena,  /* source */
            rtl_machdep_alloc,
            rtl_machdep_free
        );
        OSL_ASSERT(gp_default_arena != 0);
    }
    {
        /* arena internal arena */
        static rtl_arena_type g_arena_arena;

        OSL_ASSERT(gp_arena_arena == 0);
        VALGRIND_CREATE_MEMPOOL(&g_arena_arena, 0, 0);
        rtl_arena_constructor (&g_arena_arena);

        gp_arena_arena = rtl_arena_activate (
            &g_arena_arena,
            "rtl_arena_internal_arena",
            64,                /* quantum */
            0,                 /* no quantum caching */
            gp_default_arena,  /* source */
            rtl_arena_alloc,
            rtl_arena_free
        );
        OSL_ASSERT(gp_arena_arena != 0);
    }
}

static int
rtl_arena_init (void)
{
    static sal_once_type g_once = SAL_ONCE_INIT;
    SAL_ONCE(&g_once, rtl_arena_once_init);
    return (gp_arena_arena != 0);
}

/* ================================================================= */

/*
  Issue http://udk.openoffice.org/issues/show_bug.cgi?id=92388

  Mac OS X does not seem to support "__cxa_atexit", thus leading
  to the situation that "__attribute__((destructor))" functions
  (in particular "rtl_{memory|cache|arena}_fini") become called
  _before_ global C++ object d'tors.

  The call to "rtl_arena_fini()" is therefore delegated to a dummy
  C++ object, see alloc_fini.cxx .
*/
#if defined(__GNUC__) && !defined(MACOSX)
static void rtl_arena_fini (void) __attribute__((destructor));
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma fini(rtl_arena_fini)
static void rtl_arena_fini (void);
#endif /* __GNUC__ || __SUNPRO_C */

void
rtl_arena_fini (void)
{
    if (gp_arena_arena != 0)
    {
        rtl_arena_type * arena, * head;

        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        head = &(g_arena_list.m_arena_head);

        for (arena = head->m_arena_next; arena != head; arena = arena->m_arena_next)
        {
            OSL_TRACE(
                "rtl_arena_fini(\"%s\"): "
                "allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu",
                arena->m_name,
                arena->m_stats.m_alloc, arena->m_stats.m_free,
                arena->m_stats.m_mem_total, arena->m_stats.m_mem_alloc
            );
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
}

/* ================================================================= */