/home/users/khuck/src/jemalloc-3.5.1/include/jemalloc/internal/tcache.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
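
/*
 * Note: because all three encoded states are <= 3, a single unsigned
 * comparison distinguishes a real tcache pointer from state metadata; the
 * inline functions below use exactly this test:
 *
 *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
 *		... NULL or an encoded state, not a usable cache ...
 */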

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the cap of twice the
 * number of regions per run for each size class.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
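
/*
 * Worked example (NBINS and nhbins depend on configuration; 28 is only an
 * illustrative value): with NBINS == 28, TCACHE_GC_INCR == (8192 / 28) + 1
 * == 293, so tcache_event_hard() fires every 293 events and GCs one bin.
 * A full sweep of nhbins == 28 bins then takes 293 * 28 == 8204 events,
 * slightly more than TCACHE_GC_SWEEP -- the rounding effect noted above.
 */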

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
	tcache_enabled_true    = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};
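
/*
 * Note: avail[0 .. ncached-1] holds the cached objects as a LIFO stack.
 * tcache_alloc_easy() pops by reading avail[--ncached], and the
 * tcache_dalloc_*() functions push by writing avail[ncached++], flushing
 * half the bin first when it is full (see the inline functions below).
 */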

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
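
/*
 * Layout sketch (illustrative; stack_nelms stands for the sum of ncached_max
 * over all bins, as computed during tcache bootstrapping):
 *
 *	size = offsetof(tcache_t, tbins)
 *	    + nhbins * sizeof(tcache_bin_t)	(bin metadata)
 *	    + stack_nelms * sizeof(void *);	(trailing pointer stacks)
 *
 * tcache_create() makes one allocation of roughly this size and points each
 * tbins[i].avail at its slice of the trailing array.
 */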

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tcache_t *tcache);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void	tcache_destroy(tcache_t *tcache);
void	tcache_thread_cleanup(void *arg);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot0(void);
bool	tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void	tcache_event(tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
	tcache_t *tcache;

	cassert(config_tcache);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
		return;
	tcache_destroy(tcache);
	tcache = NULL;
	tcache_tsd_set(&tcache);
}
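
/*
 * Usage note (assumes jemalloc's public mallctl interface): a thread can
 * reach this path with
 *
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */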

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tcache_enabled = *tcache_enabled_tsd_get();
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tcache_enabled_tsd_set(&tcache_enabled);
	}

	return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tcache_enabled_t tcache_enabled;
	tcache_t *tcache;

	cassert(config_tcache);

	tcache_enabled = (tcache_enabled_t)enabled;
	tcache_enabled_tsd_set(&tcache_enabled);
	tcache = *tcache_tsd_get();
	if (enabled) {
		if (tcache == TCACHE_STATE_DISABLED) {
			tcache = NULL;
			tcache_tsd_set(&tcache);
		}
	} else /* disabled */ {
		if (tcache > TCACHE_STATE_MAX) {
			tcache_destroy(tcache);
			tcache = NULL;
		}
		if (tcache == NULL) {
			tcache = TCACHE_STATE_DISABLED;
			tcache_tsd_set(&tcache);
		}
	}
}
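
/*
 * Usage note (assumes jemalloc's public mallctl interface): the
 * "thread.tcache.enabled" control funnels into tcache_enabled_set() and
 * tcache_enabled_get(), e.g.
 *
 *	bool off = false;
 *	mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
 *
 * disables and tears down the calling thread's cache.
 */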

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && isthreaded == false)
		return (NULL);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
		if (tcache == TCACHE_STATE_DISABLED)
			return (NULL);
		if (tcache == NULL) {
			if (create == false) {
				/*
				 * Creating a tcache here would cause
				 * allocation as a side effect of free().
				 * Ordinarily that would be okay since
				 * tcache_create() failure is a soft failure
				 * that doesn't propagate.  However, if TLS
				 * data are freed via free() as in glibc,
				 * subtle corruption could result from setting
				 * a TLS variable after its backing memory is
				 * freed.
				 */
				return (NULL);
			}
			if (tcache_enabled_get() == false) {
				tcache_enabled_set(false); /* Memoize. */
				return (NULL);
			}
			return (tcache_create(choose_arena(NULL)));
		}
		if (tcache == TCACHE_STATE_PURGATORY) {
			/*
			 * Make a note that an allocator function was called
			 * after tcache_thread_cleanup() was called.
			 */
			tcache = TCACHE_STATE_REINCARNATED;
			tcache_tsd_set(&tcache);
			return (NULL);
		}
		if (tcache == TCACHE_STATE_REINCARNATED)
			return (NULL);
		not_reached();
	}

	return (tcache);
}
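
/*
 * Summary of the encodings handled above: NULL means no cache has been
 * created yet; TCACHE_STATE_DISABLED means caching is off for this thread;
 * TCACHE_STATE_PURGATORY is set by tcache_thread_cleanup() during thread
 * shutdown, and an allocator call observed in that window is recorded by
 * moving to TCACHE_STATE_REINCARNATED; anything else is a live tcache_t *.
 */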

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	size = arena_bin_info[binind].reg_size;
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
		}
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
	tcache_event(tcache);
	return (ret);
}
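
/*
 * Fast-path note: a hit in tcache_alloc_easy() is a stack pop that needs no
 * locking, since the cache is thread-local; a miss falls back to
 * tcache_alloc_small_hard(), which refills the bin from the arena
 * (ncached_max >> lg_fill_div objects) and retries.
 */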

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	binind = NBINS + (size >> LG_PAGE) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && prof_promote && size == PAGE) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		} else {
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
			memset(ret, 0, size);
		}

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}
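
/*
 * Index arithmetic example: with 4 KiB pages (LG_PAGE == 12), a one-page
 * request maps to binind == NBINS + (4096 >> 12) - 1 == NBINS, and a
 * two-page (8192 byte) request to NBINS + 1, the second large bin.
 */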

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
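
/*
 * Flush policy note: when the bin is full, tcache_bin_flush_small() keeps
 * ncached_max/2 objects and returns the rest to the arena, so a full bin is
 * only half emptied and subsequent frees stay on the fast path for a while.
 */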

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = NBINS + (size >> LG_PAGE) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
