xref: /aoo41x/main/sal/rtl/source/alloc_cache.c (revision cdf0e10c)
/*************************************************************************
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * Copyright 2000, 2010 Oracle and/or its affiliates.
 *
 * OpenOffice.org - a multi-platform office productivity suite
 *
 * This file is part of OpenOffice.org.
 *
 * OpenOffice.org is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License version 3
 * only, as published by the Free Software Foundation.
 *
 * OpenOffice.org is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License version 3 for more details
 * (a copy is included in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU Lesser General Public License
 * version 3 along with OpenOffice.org.  If not, see
 * <http://www.openoffice.org/license.html>
 * for a copy of the LGPLv3 License.
 *
 ************************************************************************/

#include "alloc_cache.h"
#include "alloc_impl.h"
#include "alloc_arena.h"
#include "internal/once.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#ifndef INCLUDED_STRING_H
#include <string.h>
#endif

#ifndef INCLUDED_STDIO_H
#include <stdio.h>
#endif

#ifdef OS2
#undef OSL_TRACE
#define OSL_TRACE                  1 ? ((void)0) : _OSL_GLOBAL osl_trace
#endif
47 
48 /* ================================================================= *
49  *
50  * cache internals.
51  *
52  * ================================================================= */
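
/* Editor's note: an orientation sketch (not from the original sources).
 * This file implements an object cache in the style of Bonwick's slab
 * allocator with per-cache magazines; requests flow top to bottom:
 *
 *   rtl_cache_alloc() / rtl_cache_free()
 *     -> cpu layer    (m_cpu_curr / m_cpu_prev magazines, m_depot_lock)
 *     -> depot layer  (m_depot_full / m_depot_empty magazine stacks)
 *     -> slab layer   (slabs carved out of m_source arena, m_slab_lock)
 *
 * The member names are those used throughout this file.
 */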

/** g_cache_list
 *  @internal
 */
struct rtl_cache_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_cache_type       m_cache_head;

#if defined(SAL_UNX) || defined(SAL_OS2)
    pthread_t            m_update_thread;
    pthread_cond_t       m_update_cond;
#elif defined(SAL_W32)
    HANDLE               m_update_thread;
    HANDLE               m_update_cond;
#endif /* SAL_UNX || SAL_W32 */
    int                  m_update_done;
};

static struct rtl_cache_list_st g_cache_list;


/** gp_cache_arena
 *  provided for cache_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_cache_arena = 0;


/** gp_cache_magazine_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_magazine_cache = 0;


/** gp_cache_slab_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_slab_cache = 0;


/** gp_cache_bufctl_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_bufctl_cache = 0;


/** rtl_cache_init()
 *  @internal
 */
static int
rtl_cache_init (void);


/* ================================================================= */

/** RTL_CACHE_HASH_INDEX()
 */
#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))

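/* Editor's note: a worked example of the hash above, under assumed
 * parameters (not taken from the sources). With m_hash_shift = 6,
 * m_type_shift = 5 (32 byte objects) and m_hash_size = 64, a buffer
 * address a maps to bucket
 *
 *   ((a + (a >> 6) + (a >> 12)) >> 5) & 63
 *
 * i.e. the address is folded with two shifted copies of itself, scaled
 * down by the object size, and masked to the table size.
 */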

/** rtl_cache_hash_rescale()
 */
static void
rtl_cache_hash_rescale (
    rtl_cache_type * cache,
    sal_Size         new_size
)
{
    rtl_cache_bufctl_type ** new_table;
    sal_Size                 new_bytes;

    new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
    new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);

    if (new_table != 0)
    {
        rtl_cache_bufctl_type ** old_table;
        sal_Size                 old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

        old_table = cache->m_hash_table;
        old_size  = cache->m_hash_size;

        OSL_TRACE(
            "rtl_cache_hash_rescale(\"%s\"): "
            "nbuf: %" PRIu64 " (ave: %" PRIu64 "), frees: %" PRIu64 " "
            "[old_size: %lu, new_size: %lu]",
            cache->m_name,
            cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free,
            (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free) >> cache->m_hash_shift,
            cache->m_slab_stats.m_free,
            old_size, new_size);

        cache->m_hash_table = new_table;
        cache->m_hash_size  = new_size;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_cache_bufctl_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_cache_bufctl_type  * next = curr->m_next;
                rtl_cache_bufctl_type ** head;

                head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
                curr->m_next = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));

        if (old_table != cache->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
            rtl_arena_free (gp_cache_arena, old_table, old_bytes);
        }
    }
}

/** rtl_cache_hash_insert()
 */
static RTL_MEMORY_INLINE sal_uIntPtr
rtl_cache_hash_insert (
    rtl_cache_type *        cache,
    rtl_cache_bufctl_type * bufctl
)
{
    rtl_cache_bufctl_type ** ppHead;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);

    bufctl->m_next = (*ppHead);
    (*ppHead) = bufctl;

    return (bufctl->m_addr);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_hash_insert)
#endif /* __SUNPRO_C */


/** rtl_cache_hash_remove()
 */
static rtl_cache_bufctl_type *
rtl_cache_hash_remove (
    rtl_cache_type * cache,
    sal_uIntPtr      addr
)
{
    rtl_cache_bufctl_type ** ppHead;
    rtl_cache_bufctl_type  * bufctl;
    sal_Size                 lookups = 0;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
    while ((bufctl = *ppHead) != 0)
    {
        if (bufctl->m_addr == addr)
        {
            *ppHead = bufctl->m_next, bufctl->m_next = 0;
            break;
        }

        lookups += 1;
        ppHead = &(bufctl->m_next);
    }

    OSL_ASSERT (bufctl != 0); /* bad free */

    if (lookups > 1)
    {
        sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
        if (nbuf > 4 * cache->m_hash_size)
        {
            if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
            {
                sal_Size ave = nbuf >> cache->m_hash_shift;
                sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);

                cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                rtl_cache_hash_rescale (cache, new_size);
                RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
                cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
            }
        }
    }

    return (bufctl);
}

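/* Editor's note: the arithmetic behind the rescale trigger above, with
 * assumed values. For m_hash_size = 8 (m_hash_shift = 3) and nbuf = 64
 * live buffers, chains average ave = 64 >> 3 = 8 entries; nbuf exceeds
 * 4 * m_hash_size, so the table grows to
 *
 *   m_hash_size << (highbit(8) - 1) = 8 << 3 = 64
 *
 * buckets, i.e. roughly one bucket per live buffer afterwards.
 */
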
/* ================================================================= */

/** RTL_CACHE_SLAB()
 */
#define RTL_CACHE_SLAB(addr, size) \
    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)

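/* Editor's note: a sketch of the embedded-slab layout the macro above
 * relies on, with hypothetical numbers. For a buffer address within a
 * size-aligned slab at 0x10000 with size = 0x2000, RTL_MEMORY_P2END()
 * yields the slab end 0x12000, so the macro resolves to the
 * rtl_cache_slab_type header stored in the last bytes of the slab:
 *
 *   0x10000                                      0x12000
 *   | color | buffers ...    | rtl_cache_slab_type |
 *
 * Any buffer address inside the slab therefore maps back to its header
 * without an external lookup (the non-HASH case).
 */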

/** rtl_cache_slab_constructor()
 */
static int
rtl_cache_slab_constructor (void * obj, void * arg)
{
    rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

    (void) arg; /* unused */

    QUEUE_START_NAMED(slab, slab_);
    slab->m_ntypes = 0;

    return (1);
}


/** rtl_cache_slab_destructor()
 */
static void
rtl_cache_slab_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
    (void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
    rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

    /* assure removed from queue(s) */
    OSL_ASSERT(QUEUE_STARTED_NAMED(slab, slab_));

    /* assure no longer referenced */
    OSL_ASSERT(slab->m_ntypes == 0);
#endif /* OSL_DEBUG_LEVEL */

    (void) arg; /* unused */
}


/** rtl_cache_slab_create()
 *
 *  @precond cache->m_slab_lock released.
 */
static rtl_cache_slab_type *
rtl_cache_slab_create (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab = 0;
    void *                addr;
    sal_Size              size;

    size = cache->m_slab_size;
    addr = rtl_arena_alloc (cache->m_source, &size);
    if (addr != 0)
    {
        OSL_ASSERT(size >= cache->m_slab_size);

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* allocate slab struct from slab cache */
            OSL_ASSERT (cache != gp_cache_slab_cache);
            slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
        }
        else
        {
            /* construct embedded slab struct */
            slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
            (void) rtl_cache_slab_constructor (slab, 0);
        }
        if (slab != 0)
        {
            slab->m_data = (sal_uIntPtr)(addr);

            /* dynamic freelist initialization */
            slab->m_bp = slab->m_data;
            slab->m_sp = 0;
        }
        else
        {
            rtl_arena_free (cache->m_source, addr, size);
        }
    }
    return (slab);
}


/** rtl_cache_slab_destroy()
 *
 *  @precond cache->m_slab_lock released.
 */
static void
rtl_cache_slab_destroy (
    rtl_cache_type *      cache,
    rtl_cache_slab_type * slab
)
{
    void *   addr   = (void*)(slab->m_data);
    sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;

    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        /* cleanup bufctl(s) for free buffer(s) */
        sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
        for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
        {
            rtl_cache_bufctl_type * bufctl = slab->m_sp;

            /* pop from freelist */
            slab->m_sp = bufctl->m_next, bufctl->m_next = 0;

            /* return bufctl struct to bufctl cache */
            rtl_cache_free (gp_cache_bufctl_cache, bufctl);
        }
        OSL_ASSERT(ntypes == 0);

        /* return slab struct to slab cache */
        rtl_cache_free (gp_cache_slab_cache, slab);
    }
    else
    {
        /* destruct embedded slab struct */
        rtl_cache_slab_destructor (slab, 0);
    }

    if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
    {
        /* free memory */
        rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
    }
}


/** rtl_cache_slab_populate()
 *
 *  @precond cache->m_slab_lock acquired.
 */
static int
rtl_cache_slab_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab;

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    slab = rtl_cache_slab_create (cache);
    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
    if (slab != 0)
    {
        /* update buffer start addr w/ current color */
        slab->m_bp += cache->m_ncolor;

        /* update color for next slab */
        cache->m_ncolor += cache->m_type_align;
        if (cache->m_ncolor > cache->m_ncolor_max)
            cache->m_ncolor = 0;

        /* update stats */
        cache->m_slab_stats.m_mem_total += cache->m_slab_size;

        /* insert onto 'free' queue */
        QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
    }
    return (slab != 0);
}

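/* Editor's note: the coloring above, with assumed numbers. For
 * m_type_align = 8 and m_ncolor_max = 24, successive slabs place their
 * first buffer at offsets 0, 8, 16, 24, 0, ... (the m_ncolor cycle),
 * so buffers at equal indexes in different slabs do not all land on
 * the same hardware cache lines.
 */
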
/* ================================================================= */

/** rtl_cache_slab_alloc()
 *
 *  Allocate a buffer from slab layer; used by magazine layer.
 */
static void *
rtl_cache_slab_alloc (
    rtl_cache_type * cache
)
{
    void                * addr = 0;
    rtl_cache_slab_type * head;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    head = &(cache->m_free_head);
    if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
    {
        rtl_cache_slab_type   * slab;
        rtl_cache_bufctl_type * bufctl;

        slab = head->m_slab_next;
        OSL_ASSERT(slab->m_ntypes < cache->m_ntypes);

        if (slab->m_sp == 0)
        {
            /* initialize bufctl w/ current 'slab->m_bp' */
            OSL_ASSERT (slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
            if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            {
                /* allocate bufctl */
                OSL_ASSERT (cache != gp_cache_bufctl_cache);
                bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
                if (bufctl == 0)
                {
                    /* out of memory */
                    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                    return (0);
                }

                bufctl->m_addr = slab->m_bp;
                bufctl->m_slab = (sal_uIntPtr)(slab);
            }
            else
            {
                /* embedded bufctl */
                bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
            }
            bufctl->m_next = 0;

            /* update 'slab->m_bp' to next free buffer */
            slab->m_bp += cache->m_type_size;

            /* assign bufctl to freelist */
            slab->m_sp = bufctl;
        }

        /* pop front */
        bufctl = slab->m_sp;
        slab->m_sp = bufctl->m_next;

        /* increment usage, check for full slab */
        if ((slab->m_ntypes += 1) == cache->m_ntypes)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'used' queue (tail) */
            QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
        }

        /* update stats */
        cache->m_slab_stats.m_alloc     += 1;
        cache->m_slab_stats.m_mem_alloc += cache->m_type_size;

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            addr = (void*)rtl_cache_hash_insert (cache, bufctl);
        else
            addr = bufctl;

        /* DEBUG ONLY: mark allocated, undefined (memset uses only the low byte, 0x77) */
        OSL_DEBUG_ONLY(memset(addr, 0x77777777, cache->m_type_size));
        VALGRIND_MEMPOOL_ALLOC(cache, addr, cache->m_type_size);
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    return (addr);
}


/** rtl_cache_slab_free()
 *
 *  Return a buffer to slab layer; used by magazine layer.
 */
static void
rtl_cache_slab_free (
    rtl_cache_type * cache,
    void *           addr
)
{
    rtl_cache_bufctl_type * bufctl;
    rtl_cache_slab_type   * slab;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    /* DEBUG ONLY: mark unallocated, undefined */
    VALGRIND_MEMPOOL_FREE(cache, addr);
    /* OSL_DEBUG_ONLY() */ VALGRIND_MAKE_MEM_UNDEFINED(addr, cache->m_type_size);
    OSL_DEBUG_ONLY(memset(addr, 0x33333333, cache->m_type_size));

    /* determine slab from addr */
    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
        slab = (bufctl != 0) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
    }
    else
    {
        /* embedded slab struct */
        bufctl = (rtl_cache_bufctl_type*)(addr);
        slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
    }

    if (slab != 0)
    {
        /* check for full slab */
        if (slab->m_ntypes == cache->m_ntypes)
        {
            /* remove from 'used' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'free' queue (head) */
            QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
        }

        /* push front */
        bufctl->m_next = slab->m_sp;
        slab->m_sp = bufctl;

        /* update stats */
        cache->m_slab_stats.m_free      += 1;
        cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;

        /* decrement usage, check for empty slab */
        if ((slab->m_ntypes -= 1) == 0)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* update stats */
            cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

            /* free 'empty' slab */
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
            rtl_cache_slab_destroy (cache, slab);
            return;
        }
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
}

/* ================================================================= */

/** rtl_cache_magazine_constructor()
 */
static int
rtl_cache_magazine_constructor (void * obj, void * arg)
{
    rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
    /* @@@ sal_Size size = (sal_Size)(arg); @@@ */

    (void) arg; /* unused */

    mag->m_mag_next = 0;
    mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
    mag->m_mag_used = 0;

    return (1);
}


/** rtl_cache_magazine_destructor()
 */
static void
rtl_cache_magazine_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
    (void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
    rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);

    /* assure removed from queue(s) */
    OSL_ASSERT(mag->m_mag_next == 0);

    /* assure no longer referenced */
    OSL_ASSERT(mag->m_mag_used == 0);
#endif /* OSL_DEBUG_LEVEL */

    (void) arg; /* unused */
}


/** rtl_cache_magazine_clear()
 */
static void
rtl_cache_magazine_clear (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * mag
)
{
    for (; mag->m_mag_used > 0; --mag->m_mag_used)
    {
        void * obj = mag->m_objects[mag->m_mag_used - 1];
        mag->m_objects[mag->m_mag_used - 1] = 0;

        /* DEBUG ONLY: mark cached object allocated, undefined */
        VALGRIND_MEMPOOL_ALLOC(cache, obj, cache->m_type_size);
        if (cache->m_destructor != 0)
        {
            /* DEBUG ONLY: keep constructed object defined */
            VALGRIND_MAKE_MEM_DEFINED(obj, cache->m_type_size);

            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}

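/* Editor's note: a magazine is a bounded stack of constructed objects;
 * a simplified sketch of how the cpu layer below uses one (names as in
 * rtl_cache_alloc()/rtl_cache_free(); locking elided):
 *
 *   if (mag->m_mag_used > 0)                          alloc fast path
 *       obj = mag->m_objects[--mag->m_mag_used];
 *
 *   if (mag->m_mag_used < mag->m_mag_size)            free fast path
 *       mag->m_objects[mag->m_mag_used++] = obj;
 *
 * rtl_cache_magazine_clear() above is the slow path that drains such a
 * stack back into the slab layer, destructing objects on the way.
 */
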
/* ================================================================= */

/** rtl_cache_depot_enqueue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE void
rtl_cache_depot_enqueue (
    rtl_cache_depot_type *    depot,
    rtl_cache_magazine_type * mag
)
{
    /* enqueue empty magazine */
    mag->m_mag_next = depot->m_mag_next;
    depot->m_mag_next = mag;

    /* update depot stats */
    depot->m_mag_count++;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_enqueue)
#endif /* __SUNPRO_C */


/** rtl_cache_depot_dequeue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_dequeue (
    rtl_cache_depot_type * depot
)
{
    rtl_cache_magazine_type * mag = 0;
    if (depot->m_mag_count > 0)
    {
        /* dequeue magazine */
        OSL_ASSERT(depot->m_mag_next != 0);

        mag = depot->m_mag_next;
        depot->m_mag_next = mag->m_mag_next;
        mag->m_mag_next = 0;

        /* update depot stats */
        depot->m_mag_count--;
        depot->m_curr_min = SAL_MIN(depot->m_curr_min, depot->m_mag_count);
    }
    return (mag);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_dequeue)
#endif /* __SUNPRO_C */


/** rtl_cache_depot_exchange_alloc()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_exchange_alloc (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * empty
)
{
    rtl_cache_magazine_type * full;

    OSL_ASSERT((empty == 0) || (empty->m_mag_used == 0));

    /* dequeue full magazine */
    full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
    if ((full != 0) && (empty != 0))
    {
        /* enqueue empty magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
    }

    OSL_ASSERT((full == 0) || (full->m_mag_used > 0));

    return (full);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_alloc)
#endif /* __SUNPRO_C */


/** rtl_cache_depot_exchange_free()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_exchange_free (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * full
)
{
    rtl_cache_magazine_type * empty;

    OSL_ASSERT((full == 0) || (full->m_mag_used > 0));

    /* dequeue empty magazine */
    empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
    if ((empty != 0) && (full != 0))
    {
        /* enqueue full magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
    }

    OSL_ASSERT((empty == 0) || (empty->m_mag_used == 0));

    return (empty);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_free)
#endif /* __SUNPRO_C */

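/* Editor's note: the exchange operations above implement the depot
 * protocol: a cpu layer holding only exhausted magazines trades one for
 * a useful magazine within a single critical section. A simplified
 * sketch of the allocation side (cf. rtl_cache_alloc() below):
 *
 *   full = rtl_cache_depot_exchange_alloc (cache, prev);
 *   if (full != 0)
 *   {
 *       cache->m_cpu_prev = cache->m_cpu_curr;  (old current kept)
 *       cache->m_cpu_curr = full;               (retry fast path)
 *   }
 */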

/** rtl_cache_depot_populate()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static int
rtl_cache_depot_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_magazine_type * empty = 0;

    if (cache->m_magazine_cache != 0)
    {
        /* allocate new empty magazine */
        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
        empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        if (empty != 0)
        {
            /* enqueue (new) empty magazine */
            rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
        }
    }
    return (empty != 0);
}

/* ================================================================= */

/** rtl_cache_constructor()
 */
static int
rtl_cache_constructor (void * obj)
{
    rtl_cache_type * cache = (rtl_cache_type*)(obj);

    memset (cache, 0, sizeof(rtl_cache_type));

    /* linkage */
    QUEUE_START_NAMED(cache, cache_);

    /* slab layer */
    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));

    QUEUE_START_NAMED(&(cache->m_free_head), slab_);
    QUEUE_START_NAMED(&(cache->m_used_head), slab_);

    cache->m_hash_table = cache->m_hash_table_0;
    cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
    cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

    /* depot layer */
    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));

    return (1);
}

/** rtl_cache_destructor()
 */
static void
rtl_cache_destructor (void * obj)
{
    rtl_cache_type * cache = (rtl_cache_type*)(obj);

    /* linkage */
    OSL_ASSERT(QUEUE_STARTED_NAMED(cache, cache_));

    /* slab layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));

    OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
    OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));

    OSL_ASSERT(cache->m_hash_table == cache->m_hash_table_0);
    OSL_ASSERT(cache->m_hash_size  == RTL_CACHE_HASH_SIZE);
    OSL_ASSERT(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));

    /* depot layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
}

/* ================================================================= */

/** rtl_cache_activate()
 */
static rtl_cache_type *
rtl_cache_activate (
    rtl_cache_type * cache,
    const char *     name,
    size_t           objsize,
    size_t           objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
)
{
    OSL_ASSERT(cache != 0);
    if (cache != 0)
    {
        sal_Size slabsize;

        snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);

        /* ensure minimum size (embedded bufctl linkage) */
        objsize = SAL_MAX(objsize, sizeof(rtl_cache_bufctl_type*));

        if (objalign == 0)
        {
            /* determine default alignment */
            if (objsize >= RTL_MEMORY_ALIGNMENT_8)
                objalign = RTL_MEMORY_ALIGNMENT_8;
            else
                objalign = RTL_MEMORY_ALIGNMENT_4;
        }
        else
        {
            /* ensure minimum alignment */
            objalign = SAL_MAX(objalign, RTL_MEMORY_ALIGNMENT_4);
        }
        OSL_ASSERT(RTL_MEMORY_ISP2(objalign));

        cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
        cache->m_type_align = objalign;
        cache->m_type_shift = highbit(cache->m_type_size) - 1;

        cache->m_constructor = constructor;
        cache->m_destructor  = destructor;
        cache->m_reclaim     = reclaim;
        cache->m_userarg     = userarg;

        /* slab layer */
        cache->m_source = source;

        slabsize = source->m_quantum; /* minimum slab size */
        if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
        {
            /* next power of 2 above 3 * qcache_max */
            slabsize = SAL_MAX(slabsize, (1UL << highbit(3 * source->m_qcache_max)));
        }
        else
        {
            /* waste at most 1/8 of slab */
            slabsize = SAL_MAX(slabsize, cache->m_type_size * 8);
        }

        slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
        if (!RTL_MEMORY_ISP2(slabsize))
            slabsize = 1UL << highbit(slabsize);
        cache->m_slab_size = slabsize;

        if (cache->m_slab_size > source->m_quantum)
        {
            OSL_ASSERT(gp_cache_slab_cache != 0);
            OSL_ASSERT(gp_cache_bufctl_cache != 0);

            cache->m_features  |= RTL_CACHE_FEATURE_HASH;
            cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
            cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
        }
        else
        {
            /* embedded slab struct */
            cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
            cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
        }

        OSL_ASSERT(cache->m_ntypes > 0);
        cache->m_ncolor = 0;

        if (flags & RTL_CACHE_FLAG_BULKDESTROY)
        {
            /* allow bulk slab delete upon cache deactivation */
            cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
        }

        /* magazine layer */
        if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
        {
            OSL_ASSERT(gp_cache_magazine_cache != 0);
            cache->m_magazine_cache = gp_cache_magazine_cache;
        }

        /* insert into cache list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    return (cache);
}

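/* Editor's note: the slab sizing in rtl_cache_activate() above, with
 * assumed numbers. For m_type_size = 64 on a source arena with
 * m_quantum = 4096 (hypothetical page-sized quantum), the default
 * branch requests max(4096, 64 * 8) = 4096, already a quantum-aligned
 * power of 2, so m_slab_size = m_quantum: the slab struct is embedded
 * and at most 1/8 of the slab is sacrificed to the struct and color
 * area. Caches whose slabs exceed one quantum switch to the external
 * bufctl/hash scheme instead (RTL_CACHE_FEATURE_HASH).
 */
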
/** rtl_cache_deactivate()
 */
static void
rtl_cache_deactivate (
    rtl_cache_type * cache
)
{
    int active = 1;

    /* remove from cache list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    active = QUEUE_STARTED_NAMED(cache, cache_) == 0;
    QUEUE_REMOVE_NAMED(cache, cache_);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    OSL_PRECOND(active, "rtl_cache_deactivate(): orphaned cache.");

    /* cleanup magazine layer */
    if (cache->m_magazine_cache != 0)
    {
        rtl_cache_type *          mag_cache;
        rtl_cache_magazine_type * mag;

        /* prevent recursion */
        mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;

        /* cleanup cpu layer */
        if ((mag = cache->m_cpu_curr) != 0)
        {
            cache->m_cpu_curr = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        if ((mag = cache->m_cpu_prev) != 0)
        {
            cache->m_cpu_prev = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }

        /* cleanup depot layer */
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
    }

    OSL_TRACE(
        "rtl_cache_deactivate(\"%s\"): "
        "[slab]: allocs: %" PRIu64 ", frees: %" PRIu64 "; total: %lu, used: %lu; "
        "[cpu]: allocs: %" PRIu64 ", frees: %" PRIu64 "; "
        "[total]: allocs: %" PRIu64 ", frees: %" PRIu64,
        cache->m_name,
        cache->m_slab_stats.m_alloc, cache->m_slab_stats.m_free,
        cache->m_slab_stats.m_mem_total, cache->m_slab_stats.m_mem_alloc,
        cache->m_cpu_stats.m_alloc, cache->m_cpu_stats.m_free,
        cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc,
        cache->m_slab_stats.m_free  + cache->m_cpu_stats.m_free
    );

    /* cleanup slab layer */
    if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
    {
        OSL_TRACE(
            "rtl_cache_deactivate(\"%s\"): "
            "cleaning up %" PRIu64 " leaked buffer(s) [%lu bytes] [%lu total]",
            cache->m_name,
            cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free,
            cache->m_slab_stats.m_mem_alloc, cache->m_slab_stats.m_mem_total
        );

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* cleanup bufctl(s) for leaking buffer(s) */
            sal_Size i, n = cache->m_hash_size;
            for (i = 0; i < n; i++)
            {
                rtl_cache_bufctl_type * bufctl;
                while ((bufctl = cache->m_hash_table[i]) != 0)
                {
                    /* pop from hash table */
                    cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;

                    /* return to bufctl cache */
                    rtl_cache_free (gp_cache_bufctl_cache, bufctl);
                }
            }
        }
        {
            /* force cleanup of remaining slabs */
            rtl_cache_slab_type *head, *slab;

            head = &(cache->m_used_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'used' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }

            head = &(cache->m_free_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'free' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }
        }
    }

    if (cache->m_hash_table != cache->m_hash_table_0)
    {
        rtl_arena_free (
            gp_cache_arena,
            cache->m_hash_table,
            cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));

        cache->m_hash_table = cache->m_hash_table_0;
        cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
    }
}

/* ================================================================= *
 *
 * cache implementation.
 *
 * ================================================================= */

/** rtl_cache_create()
 */
rtl_cache_type *
SAL_CALL rtl_cache_create (
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
) SAL_THROW_EXTERN_C()
{
    rtl_cache_type * result = 0;
    sal_Size         size   = sizeof(rtl_cache_type);

try_alloc:
    result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
    if (result != 0)
    {
        rtl_cache_type * cache = result;
        VALGRIND_CREATE_MEMPOOL(cache, 0, 0);
        (void) rtl_cache_constructor (cache);

        if (!source)
        {
            /* use default arena */
            OSL_ASSERT(gp_default_arena != 0);
            source = gp_default_arena;
        }

        result = rtl_cache_activate (
            cache,
            name,
            objsize,
            objalign,
            constructor,
            destructor,
            reclaim,
            userarg,
            source,
            flags
        );

        if (result == 0)
        {
            /* activation failed */
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            VALGRIND_DESTROY_MEMPOOL(cache);
            rtl_arena_free (gp_cache_arena, cache, size);
        }
    }
    else if (gp_cache_arena == 0)
    {
        if (rtl_cache_init())
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}

/** rtl_cache_destroy()
 */
void SAL_CALL rtl_cache_destroy (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    if (cache != 0)
    {
        rtl_cache_deactivate (cache);
        rtl_cache_destructor (cache);
        VALGRIND_DESTROY_MEMPOOL(cache);
        rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
    }
}

/** rtl_cache_alloc()
 */
void *
SAL_CALL rtl_cache_alloc (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    void * obj = 0;

    if (cache == 0)
        return (0);

    if (cache->m_cpu_curr != 0)
    {
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        for (;;)
        {
            /* take object from magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used > 0))
            {
                obj = curr->m_objects[--curr->m_mag_used];
#if defined(HAVE_VALGRIND_MEMCHECK_H)
                VALGRIND_MEMPOOL_ALLOC(cache, obj, cache->m_type_size);
                if (cache->m_constructor != 0)
                {
                    /* keep constructed object defined */
                    VALGRIND_MAKE_MEM_DEFINED(obj, cache->m_type_size);
                }
#endif /* HAVE_VALGRIND_MEMCHECK_H */
                cache->m_cpu_stats.m_alloc += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return (obj);
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used > 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_alloc (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            /* no full magazine: fall through to slab layer */
            break;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
    }

    /* alloc buffer from slab layer */
    obj = rtl_cache_slab_alloc (cache);
    if ((obj != 0) && (cache->m_constructor != 0))
    {
        /* construct object */
        if (!((cache->m_constructor)(obj, cache->m_userarg)))
        {
            /* construction failure */
            rtl_cache_slab_free (cache, obj), obj = 0;
        }
    }
    return (obj);
}

/** rtl_cache_free()
 */
void
SAL_CALL rtl_cache_free (
    rtl_cache_type * cache,
    void *           obj
) SAL_THROW_EXTERN_C()
{
    if ((obj != 0) && (cache != 0))
    {
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        for (;;)
        {
            /* return object to magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
            {
                curr->m_objects[curr->m_mag_used++] = obj;
#if defined(HAVE_VALGRIND_MEMCHECK_H)
                VALGRIND_MEMPOOL_FREE(cache, obj);
#endif /* HAVE_VALGRIND_MEMCHECK_H */
                cache->m_cpu_stats.m_free += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return;
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used == 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_free (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            if (rtl_cache_depot_populate(cache) != 0)
            {
                continue;
            }

            /* no empty magazine: fall through to slab layer */
            break;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

        /* no space for constructed object in magazine layer */
        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}

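/* Editor's note: a minimal usage sketch of the public API implemented
 * above; the type and names are hypothetical and error handling is
 * elided:
 *
 *   typedef struct { int value; } my_item;
 *
 *   rtl_cache_type * c = rtl_cache_create (
 *       "my_item_cache",    (name)
 *       sizeof(my_item),    (objsize)
 *       0,                  (objalign: default)
 *       NULL, NULL, NULL,   (constructor, destructor, reclaim)
 *       NULL,               (userarg)
 *       NULL,               (source: default arena)
 *       0);                 (flags)
 *
 *   my_item * p = (my_item*)rtl_cache_alloc (c);
 *   if (p != 0)
 *   {
 *       p->value = 42;
 *       rtl_cache_free (c, p);
 *   }
 *   rtl_cache_destroy (c);
 */
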
/* ================================================================= *
 *
 * cache wsupdate (machdep) internals.
 *
 * ================================================================= */

/** rtl_cache_wsupdate_init()
 *
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init (void);


/** rtl_cache_wsupdate_wait()
 *
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
    unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 *
 */
static void
rtl_cache_wsupdate_fini (void);

/* ================================================================= */

#if defined(SAL_UNX) || defined(SAL_OS2)

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
    if (pthread_create (
            &(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
    {
        /* failure */
        g_cache_list.m_update_thread = (pthread_t)(0);
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        struct timeval  now;
        struct timespec wakeup;

        gettimeofday(&now, 0);
        wakeup.tv_sec  = now.tv_sec + (seconds);
        wakeup.tv_nsec = now.tv_usec * 1000;

        (void) pthread_cond_timedwait (
            &(g_cache_list.m_update_cond),
            &(g_cache_list.m_lock),
            &wakeup);
    }
}

static void
rtl_cache_wsupdate_fini (void)
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    pthread_cond_signal (&(g_cache_list.m_update_cond));
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    if (g_cache_list.m_update_thread != (pthread_t)(0))
        pthread_join (g_cache_list.m_update_thread, NULL);
}

/* ================================================================= */

#elif defined(SAL_W32)

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
    DWORD dwThreadId;

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

    g_cache_list.m_update_thread =
        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    }
}

static void
rtl_cache_wsupdate_fini (void)
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    SetEvent (g_cache_list.m_update_cond);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}

#endif /* SAL_UNX || SAL_W32 */

/* ================================================================= */

/** rtl_cache_depot_wsupdate()
 *  update depot stats and purge excess magazines.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
    rtl_cache_type *       cache,
    rtl_cache_depot_type * depot
)
{
    sal_Size npurge;

    depot->m_prev_min = depot->m_curr_min;
    depot->m_curr_min = depot->m_mag_count;

    npurge = SAL_MIN(depot->m_curr_min, depot->m_prev_min);
    for (; npurge > 0; npurge--)
    {
        rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
        if (mag != 0)
        {
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (cache->m_magazine_cache, mag);
            RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        }
    }
}

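/* Editor's note: the purge count above is the magazine working-set
 * estimate: m_curr_min records the depot's minimum occupancy over an
 * update interval, so SAL_MIN(m_curr_min, m_prev_min) magazines were
 * never needed during the last two intervals and can be released.
 * Example with assumed counts: if the full depot never dropped below 5
 * magazines across two consecutive 10 second intervals, npurge = 5 and
 * five full magazines are drained back to the slab layer.
 */
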
/** rtl_cache_wsupdate()
 *
 *  @precond cache->m_depot_lock released
 */
static void
rtl_cache_wsupdate (
    rtl_cache_type * cache
)
{
    if (cache->m_magazine_cache != 0)
    {
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        OSL_TRACE(
            "rtl_cache_wsupdate(\"%s\") "
            "[depot: count, curr_min, prev_min] "
            "full: %lu, %lu, %lu; empty: %lu, %lu, %lu",
            cache->m_name,
            cache->m_depot_full.m_mag_count,
            cache->m_depot_full.m_curr_min,
            cache->m_depot_full.m_prev_min,
            cache->m_depot_empty.m_mag_count,
            cache->m_depot_empty.m_curr_min,
            cache->m_depot_empty.m_prev_min
        );

        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
    }
}

/** rtl_cache_wsupdate_all()
 *
 */
#if defined(SAL_UNX) || defined(SAL_OS2)
static void *
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
    unsigned int seconds = (unsigned int)SAL_INT_CAST(sal_uIntPtr, arg);

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    while (!g_cache_list.m_update_done)
    {
        rtl_cache_wsupdate_wait (seconds);
        if (!g_cache_list.m_update_done)
        {
            rtl_cache_type * head, * cache;

            head = &(g_cache_list.m_cache_head);
            for (cache  = head->m_cache_next;
                 cache != head;
                 cache  = cache->m_cache_next)
            {
                rtl_cache_wsupdate (cache);
            }
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    return (0);
}

/* ================================================================= *
 *
 * cache initialization.
 *
 * ================================================================= */

static void
rtl_cache_once_init (void)
{
    {
        /* list of caches */
        RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
        (void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
    }
    {
        /* cache: internal arena */
        OSL_ASSERT(gp_cache_arena == 0);

        gp_cache_arena = rtl_arena_create (
            "rtl_cache_internal_arena",
            64,   /* quantum */
            0,    /* no quantum caching */
            NULL, /* default source */
            rtl_arena_alloc,
            rtl_arena_free,
            0     /* flags */
        );
        OSL_ASSERT(gp_cache_arena != 0);

        /* check 'gp_default_arena' initialization */
        OSL_ASSERT(gp_default_arena != 0);
    }
    {
        /* cache: magazine cache */
        static rtl_cache_type g_cache_magazine_cache;

        OSL_ASSERT(gp_cache_magazine_cache == 0);
        VALGRIND_CREATE_MEMPOOL(&g_cache_magazine_cache, 0, 0);
        (void) rtl_cache_constructor (&g_cache_magazine_cache);

        gp_cache_magazine_cache = rtl_cache_activate (
            &g_cache_magazine_cache,
            "rtl_cache_magazine_cache",
            sizeof(rtl_cache_magazine_type), /* objsize  */
            0,                               /* objalign */
            rtl_cache_magazine_constructor,
            rtl_cache_magazine_destructor,
            0, /* reclaim */
            0, /* userarg: NYI */
            gp_default_arena, /* source */
            RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
        );
        OSL_ASSERT(gp_cache_magazine_cache != 0);

        /* activate magazine layer */
        g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
    }
    {
        /* cache: slab (struct) cache */
        static rtl_cache_type g_cache_slab_cache;

        OSL_ASSERT(gp_cache_slab_cache == 0);
        VALGRIND_CREATE_MEMPOOL(&g_cache_slab_cache, 0, 0);
        (void) rtl_cache_constructor (&g_cache_slab_cache);

        gp_cache_slab_cache = rtl_cache_activate (
            &g_cache_slab_cache,
            "rtl_cache_slab_cache",
            sizeof(rtl_cache_slab_type), /* objsize  */
            0,                           /* objalign */
            rtl_cache_slab_constructor,
            rtl_cache_slab_destructor,
            0,                           /* reclaim */
            0,                           /* userarg: none */
            gp_default_arena,            /* source */
            0                            /* flags: none */
        );
        OSL_ASSERT(gp_cache_slab_cache != 0);
    }
    {
        /* cache: bufctl cache */
        static rtl_cache_type g_cache_bufctl_cache;

        OSL_ASSERT(gp_cache_bufctl_cache == 0);
        VALGRIND_CREATE_MEMPOOL(&g_cache_bufctl_cache, 0, 0);
        (void) rtl_cache_constructor (&g_cache_bufctl_cache);

        gp_cache_bufctl_cache = rtl_cache_activate (
            &g_cache_bufctl_cache,
            "rtl_cache_bufctl_cache",
            sizeof(rtl_cache_bufctl_type), /* objsize */
            0,                             /* objalign  */
            0,                /* constructor */
            0,                /* destructor */
            0,                /* reclaim */
            0,                /* userarg */
            gp_default_arena, /* source */
            0                 /* flags: none */
        );
        OSL_ASSERT(gp_cache_bufctl_cache != 0);
    }

    rtl_cache_wsupdate_init();
}

static int
rtl_cache_init (void)
{
    static sal_once_type g_once = SAL_ONCE_INIT;
    SAL_ONCE(&g_once, rtl_cache_once_init);
    return (gp_cache_arena != 0);
}

/* ================================================================= */

/*
  Issue http://udk.openoffice.org/issues/show_bug.cgi?id=92388

  Mac OS X does not seem to support "__cxa_atexit", which leads to the
  situation that "__attribute__((destructor))" functions (in particular
  "rtl_{memory|cache|arena}_fini") are called _before_ global C++ object
  d'tors.

  The call to "rtl_cache_fini()" is therefore delegated to a dummy C++
  object, see alloc_fini.cxx .
*/
#if defined(__GNUC__) && !defined(MACOSX)
static void rtl_cache_fini (void) __attribute__((destructor));
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma fini(rtl_cache_fini)
static void rtl_cache_fini (void);
#endif /* __GNUC__ || __SUNPRO_C */

void
rtl_cache_fini (void)
{
    if (gp_cache_arena != 0)
    {
        rtl_cache_type * cache, * head;

        rtl_cache_wsupdate_fini();

        if (gp_cache_bufctl_cache != 0)
        {
            cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            VALGRIND_DESTROY_MEMPOOL(cache);
        }
        if (gp_cache_slab_cache != 0)
        {
            cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            VALGRIND_DESTROY_MEMPOOL(cache);
        }
        if (gp_cache_magazine_cache != 0)
        {
            cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            VALGRIND_DESTROY_MEMPOOL(cache);
        }
        if (gp_cache_arena != 0)
        {
            rtl_arena_destroy (gp_cache_arena);
            gp_cache_arena = 0;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        head = &(g_cache_list.m_cache_head);
        for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
        {
            OSL_TRACE(
                "rtl_cache_fini(\"%s\") "
                "[slab]: allocs: %" PRIu64 ", frees: %" PRIu64 "; total: %lu, used: %lu; "
                "[cpu]: allocs: %" PRIu64 ", frees: %" PRIu64 "; "
                "[total]: allocs: %" PRIu64 ", frees: %" PRIu64,
                cache->m_name,
                cache->m_slab_stats.m_alloc, cache->m_slab_stats.m_free,
                cache->m_slab_stats.m_mem_total, cache->m_slab_stats.m_mem_alloc,
                cache->m_cpu_stats.m_alloc, cache->m_cpu_stats.m_free,
                cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc,
                cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free
            );
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
}

/* ================================================================= */