This patch is generated from the chunked_mempools branch of HEAD in squid
Sun Jan 25 14:33:49 2004 GMT
See http://devel.squid-cache.org/

Index: squid/acconfig.h
diff -u squid/acconfig.h:1.15 squid/acconfig.h:1.6.6.6
--- squid/acconfig.h:1.15	Tue Jan  1 06:13:42 2002
+++ squid/acconfig.h	Fri Mar  8 01:44:15 2002
@@ -53,6 +53,9 @@
 #undef CACHE_ICP_PORT
 #endif
 
+/* Compile & use the malloc package by Doug Lea */
+#undef USE_DLMALLOC
+
 /* Define to do simple malloc debugging */
 #undef XMALLOC_DEBUG
 
@@ -67,6 +70,9 @@
 
 #undef FORW_VIA_DB
 
+/* Define if you have problems with memPools and want to disable Pools */
+#undef DISABLE_POOLS
+
 /* Defines how many threads aufs uses for I/O */
 #undef AUFS_IO_THREADS
 
Index: squid/configure.in
diff -u squid/configure.in:1.47 squid/configure.in:1.19.6.7
--- squid/configure.in:1.47	Tue Jan 29 06:49:11 2002
+++ squid/configure.in	Fri Mar  8 01:44:15 2002
@@ -68,6 +68,9 @@
 
 AC_DEFINE_UNQUOTED(SQUID_CONFIGURE_OPTIONS, "$ac_configure_args")
 
+dnl Check for GNU cc
+AC_PROG_CC
+
 dnl Gerben Wierda <Gerben_Wierda@RnA.nl>
 case "$host" in
     mab-next-nextstep3)
@@ -205,6 +208,7 @@
     ac_cv_header_gnumalloc_h="no"
     ac_cv_lib_malloc="no"
     ac_cv_enabled_dlmalloc="yes"
+    AC_DEFINE(USE_DLMALLOC, 1)
 fi
 
 AC_SUBST(LIBDLMALLOC)
@@ -981,6 +985,17 @@
   fi
 ])
 
+dnl Disable "memPools" code
+AC_ARG_ENABLE(mempools,
+[  --disable-mempools      Disable memPools],
+[ if test "$enableval" = "no" ; then
+    echo "memPools disabled"
+    AC_DEFINE(DISABLE_POOLS, 1)
+  else
+    AC_DEFINE(DISABLE_POOLS, 0)
+  fi
+])
+
 dnl Disable "unlinkd" code
 AC_ARG_ENABLE(unlinkd,
 [  --disable-unlinkd       Do not use unlinkd],
Index: squid/doc/Programming-Guide/prog-guide.sgml
diff -u squid/doc/Programming-Guide/prog-guide.sgml:1.17 squid/doc/Programming-Guide/prog-guide.sgml:1.10.8.12
--- squid/doc/Programming-Guide/prog-guide.sgml:1.17	Sat Jan 12 19:21:38 2002
+++ squid/doc/Programming-Guide/prog-guide.sgml	Fri Mar  8 01:44:16 2002
@@ -3085,4 +3085,262 @@
 that pointer was last accessed.  If there is a leak, then the bug
 occurs somewhere after that point of the code.
 
+<sect>MemPools
+
+<p>
+MemPools are a pooled memory allocator running on top of malloc(). Its
+purpose is to reduce memory fragmentation and provide detailed statistics
+on memory consumption.
+
+<p>
+Preferably all memory allocations in Squid should be done using MemPools
+or one of the types built on top of it (i.e. cbdata).
+
+<p>
+Note: Usually it is better to use cbdata types as these give you additional
+safeguards in references and typechecking. However, for high usage pools where
+the additional functionality of cbdata is not required, directly using a MemPool
+might be the way to go.
+
+<sect1>Public API
+
+<p>
+This defines the public API definitions
+
+<sect2>createMemPool
+
+<p>
+<verb>
+	MemPool * pool = memPoolCreate(char *name, size_t element_size);
+</verb>
+
+	<p>
+	Creates a MemPool of elements with the given size.
+
+<sect2>memPoolAlloc
+
+<p>
+<verb>
+	type * data = memPoolAlloc(pool);
+</verb>
+
+	<p>
+	Allocate one element from the pool
+
+<sect2>memPoolFree
+
+<p>
+<verb>
+	memPoolFree(pool, data);
+</verb>
+
+	<p>
+	Free an element allocated by memPoolAlloc();
+
+<sect2>memPoolDestroy
+
+<p>
+<verb>
+	memPoolDestroy(&amp;pool);
+</verb>
+
+	<p>
+	Destroys a memory pool created by memPoolCreate() and resets pool to NULL.
+
+	<p>
+	Typical usage could be:
+<verb>
+	...
+	myStructType *myStruct;
+	MemPool * myType_pool = memPoolCreate("This is cute pool", sizeof(myStructType));
+	myStruct = memPoolAlloc(myType_pool);
+	myStruct->item = xxx;
+	   ...
+	memPoolFree(myType_pool, myStruct);
+	memPoolDestroy(&amp;myType_pool)
+</verb>
+
+<sect2>memPoolIterate
+
+<p>
+<verb>
+	MemPoolIterator * iter = memPoolIterate(void);
+</verb>
+
+	<p>
+	Initialise iteration through all of the pools.
+
+<sect2>memPoolIterateNext
+
+<p>
+<verb>
+	MemPool * pool = memPoolIterateNext(MemPoolIterator * iter);
+</verb>
+
+	<p>
+	Get the next pool pointer; returns NULL when all pools have been visited.
+
+	<P>
+<verb>
+	MemPoolIterator *iter;
+	iter = memPoolIterate();
+	while ( (pool = memPoolIterateNext(iter)) ) {
+	    ... handle(pool);
+	}
+	memPoolIterateDone(&amp;iter);
+</verb>
+
+<sect2>memPoolIterateDone
+
+<p>
+<verb>
+	memPoolIterateDone(MemPoolIterator ** iter);
+</verb>
+
+	<p>
+	Should be called after finished with iterating through all pools.
+
+<sect2>memPoolSetChunkSize
+
+<p>
+<verb>
+	memPoolSetChunkSize(MemPool * pool, size_t chunksize);
+</verb>
+
+	<p>
+	Allows you to tune the chunk size of pooling. Objects are allocated in chunks
+	instead of individually. This conserves memory, reduces fragmentation.
+	Because of that memory can be freed also only in chunks. Therefore
+	there is tradeoff between memory conservation due to chunking and free
+	memory fragmentation.
+	As a general guideline, increase chunk size only for pools that keep very
+	many items for relatively long time. 
+
+<sect2>memPoolSetIdleLimit
+
+<p>
+<verb>
+	memPoolSetIdleLimit(size_t new_idle_limit);
+</verb>
+
+	<p>
+	Sets an upper limit in bytes on the amount of free ram kept in pools. This is
+	not a strict upper limit, but a hint. When MemPools are over this limit,
+	totally free chunks are immediately considered for release. Otherwise
+	only chunks that have not been referenced for a long time are checked.
+
+<sect2>memPoolGetStats
+
+<p>
+<verb>
+	int inuse = memPoolGetStats(MemPoolStats * stats, MemPool * pool);
+</verb>
+
+	<p>
+	Fills MemPoolStats struct with statistical data about pool. As a
+	return value returns number of objects in use, ie. allocated.
+	<p>
+<verb>
+	struct _MemPoolStats {
+	    MemPool *pool;
+	    const char *label;
+	    MemPoolMeter *meter;
+	    int obj_size;
+	    int chunk_capacity;
+	    int chunk_size;
+
+	    int chunks_alloc;
+	    int chunks_inuse;
+	    int chunks_partial;
+	    int chunks_free;
+
+	    int items_alloc;
+	    int items_inuse;
+	    int items_idle;
+
+	    int overhead;
+	};
+
+	/* object to track per-pool cumulative counters */
+	typedef struct {
+	    double count;
+	    double bytes;
+	} mgb_t;
+
+	/* object to track per-pool memory usage (alloc = inuse+idle) */
+	struct _MemPoolMeter {
+	    MemMeter alloc;
+	    MemMeter inuse;
+	    MemMeter idle;
+	    mgb_t gb_saved;             /* account Allocations */
+	    mgb_t gb_osaved;            /* history Allocations */
+	    mgb_t gb_freed;             /* account Free calls */
+	};
+</verb>
+
+<sect2>memPoolGetGlobalStats
+
+<p>
+<verb>
+	int pools_inuse = memPoolGetGlobalStats(MemPoolGlobalStats * stats);
+</verb>
+
+	<p>
+	Fills MemPoolGlobalStats struct with statistical data about overall
+	usage for all pools. As a return value returns number of pools that
+	have at least one object in use. Ie. number of dirty pools.
+	<p>
+<verb>
+	struct _MemPoolGlobalStats {
+	    MemPoolMeter *TheMeter;
+
+	    int tot_pools_alloc;
+	    int tot_pools_inuse;
+	    int tot_pools_mempid;
+
+	    int tot_chunks_alloc;
+	    int tot_chunks_inuse;
+	    int tot_chunks_partial;
+	    int tot_chunks_free;
+
+	    int tot_items_alloc;
+	    int tot_items_inuse;
+	    int tot_items_idle;
+
+	    int tot_overhead;
+	    int mem_idle_limit;
+	};
+</verb>
+
+<sect2>memPoolClean
+
+<p>
+<verb>
+	memPoolClean(time_t maxage);
+</verb>
+
+<p>
+	Main cleanup handler. For MemPools to stay within upper idle limits,
+	this function needs to be called periodically, preferably at some
+ 	constant rate, eg. from Squid event. It looks through all pools and
+	chunks, cleans up internal states and checks for releasable chunks.
+<p>
+	Between the calls to this function objects are placed onto internal
+	cache instead of returning to their home chunks, mainly for speedup	
+	purpose. During that time state of chunk is not known, it is not
+	known whether chunk is free or in use. This call returns all objects
+	to their chunks and restores consistency.
+<p>
+	Should be called relatively often, as it sorts chunks in suitable
+	order as to reduce free memory fragmentation and increase chunk
+	utilisation.
+<p>
+	Parameter maxage instructs to release all totally idle chunks that
+	have not been referenced for maxage seconds.
+<p>
+	Suitable frequency for cleanup is in range of few tens of seconds to
+	few minutes, depending of memory activity.
+	Several functions above call memPoolClean internally to operate on
+	consistent states.
+
 </article>
Index: squid/include/MemPool.h
diff -u /dev/null squid/include/MemPool.h:1.1.2.16
--- /dev/null	Sun Jan 25 06:33:36 2004
+++ squid/include/MemPool.h	Tue Oct  2 15:40:47 2001
@@ -0,0 +1,169 @@
+
+#ifndef _MEM_POOLS_H_
+#define _MEM_POOLS_H_
+
+#include "config.h"
+#include "Stack.h"
+#include "util.h"
+#include "splay.h"
+#include "memMeter.h"
+
+#if HAVE_GNUMALLOC_H
+#include <gnumalloc.h>
+#elif HAVE_MALLOC_H && !defined(_SQUID_FREEBSD_) && !defined(_SQUID_NEXT_)
+#include <malloc.h>
+#endif
+
+#if HAVE_MEMORY_H
+#include <memory.h>
+#endif
+
+#if !M_MMAP_MAX
+#if USE_DLMALLOC
+#define M_MMAP_MAX -4
+#endif
+#endif
+
+#if PURIFY
+#define DISABLE_POOLS 1		/* Disabling Memory pools under purify */
+#endif
+
+#define MB ((size_t)1024*1024)
+#define mem_unlimited_size 2 * 1024 * MB
+#define toMB(size) ( ((double) size) / MB )
+#define toKB(size) ( (size + 1024 - 1) / 1024 )
+
+#define MEM_PAGE_SIZE 4096
+#define MEM_CHUNK_SIZE 4096 * 4
+#define MEM_CHUNK_MAX_SIZE  256 * 1024	/* 256KB */
+#define MEM_MIN_FREE  32
+#define MEM_MAX_FREE  65535	/* ushort is max number of items per chunk */
+
+typedef struct _MemPoolMeter MemPoolMeter;
+typedef struct _MemPool MemPool;
+typedef struct _MemChunk MemChunk;
+typedef struct _MemPoolStats MemPoolStats;
+typedef struct _MemPoolGlobalStats MemPoolGlobalStats;
+typedef struct _MemPoolIterator MemPoolIterator;
+
+struct _MemPoolIterator {
+    MemPool *pool;
+    MemPoolIterator * next;
+};
+
+/* object to track per-pool cumulative counters */
+typedef struct {
+    double count;
+    double bytes;
+} mgb_t;
+
+/* object to track per-pool memory usage (alloc = inuse+idle) */
+struct _MemPoolMeter {
+    MemMeter alloc;
+    MemMeter inuse;
+    MemMeter idle;
+    mgb_t gb_saved;		/* account Allocations */
+    mgb_t gb_osaved;		/* history Allocations */
+    mgb_t gb_freed;		/* account Free calls */
+};
+
+/* a pool is a [growing] space for objects of the same size */
+struct _MemPool {
+    const char *label;
+    size_t obj_size;
+    size_t chunk_size;
+    int chunk_capacity;
+    int memPID;
+    int chunkCount;
+    size_t alloc_calls;
+    size_t free_calls;
+    size_t inuse;
+    size_t idle;
+    void *freeCache;
+    MemChunk *nextFreeChunk;
+    MemChunk *Chunks;
+    MemPoolMeter meter;
+    splayNode *allChunks;
+    MemPool *next;
+};
+
+struct _MemChunk {
+    void *freeList;
+    void *objCache;
+    int inuse_count;
+    MemChunk *nextFreeChunk;
+    MemChunk *next;
+    time_t lastref;
+};
+
+struct _MemPoolStats {
+    MemPool *pool;
+    const char *label;
+    MemPoolMeter *meter;
+    int obj_size;
+    int chunk_capacity;
+    int chunk_size;
+
+    int chunks_alloc;
+    int chunks_inuse;
+    int chunks_partial;
+    int chunks_free;
+
+    int items_alloc;
+    int items_inuse;
+    int items_idle;
+
+    int overhead;
+};
+
+struct _MemPoolGlobalStats {
+    MemPoolMeter *TheMeter;
+
+    int tot_pools_alloc;
+    int tot_pools_inuse;
+    int tot_pools_mempid;
+
+    int tot_chunks_alloc;
+    int tot_chunks_inuse;
+    int tot_chunks_partial;
+    int tot_chunks_free;
+
+    int tot_items_alloc;
+    int tot_items_inuse;
+    int tot_items_idle;
+
+    int tot_overhead;
+    int mem_idle_limit;
+};
+
+#define SIZEOF_CHUNK  ( ( sizeof(MemChunk) + sizeof(double) -1) / sizeof(double) ) * sizeof(double);
+
+/* memPools */
+
+/* Allocator API */
+extern MemPool *memPoolCreate(const char *label, size_t obj_size);
+extern void *memPoolAlloc(MemPool * pool);
+extern void memPoolFree(MemPool * pool, void *obj);
+extern void memPoolDestroy(MemPool ** pool);
+
+extern MemPoolIterator * memPoolIterate(void);
+extern MemPool * memPoolIterateNext(MemPoolIterator * iter);
+extern void memPoolIterateDone(MemPoolIterator ** iter);
+
+/* Tune API */
+extern void memPoolSetChunkSize(MemPool * pool, size_t chunksize);
+extern void memPoolSetIdleLimit(size_t new_idle_limit);
+
+/* Stats API */
+extern int memPoolGetStats(MemPoolStats * stats, MemPool * pool);
+extern int memPoolGetGlobalStats(MemPoolGlobalStats * stats);
+
+/* Module housekeeping API */
+extern void memPoolClean(time_t maxage);
+
+#if UNUSED
+/* Stats history API */
+extern void memPoolCheckRates(); /* stats history checkpoints */
+#endif
+
+#endif /* _MEM_POOLS_H_ */
Index: squid/include/memMeter.h
diff -u /dev/null squid/include/memMeter.h:1.1.2.1
--- /dev/null	Sun Jan 25 06:33:36 2004
+++ squid/include/memMeter.h	Fri May  4 07:43:42 2001
@@ -0,0 +1,21 @@
+
+#ifndef _MEM_METER_H_
+#define _MEM_METER_H_
+
+typedef struct _MemMeter MemMeter;
+
+/* object to track per-action memory usage (e.g. #idle objects) */
+struct _MemMeter {
+    ssize_t level;              /* current level (count or volume) */
+    ssize_t hwater_level;       /* high water mark */
+    time_t hwater_stamp;        /* timestamp of last high water mark change */
+};
+
+#define memMeterSyncHWater(m)  { (m).hwater_level = (m).level; (m).hwater_stamp = squid_curtime; }
+#define memMeterCheckHWater(m) { if ((m).hwater_level < (m).level) memMeterSyncHWater((m)); }
+#define memMeterInc(m) { (m).level++; memMeterCheckHWater(m); }
+#define memMeterDec(m) { (m).level--; }
+#define memMeterAdd(m, sz) { (m).level += (sz); memMeterCheckHWater(m); }
+#define memMeterDel(m, sz) { (m).level -= (sz); }
+
+#endif /* _MEM_METER_H_ */
Index: squid/include/splay.h
diff -u squid/include/splay.h:1.5 squid/include/splay.h:1.4.22.2
--- squid/include/splay.h:1.5	Tue Oct  9 14:18:00 2001
+++ squid/include/splay.h	Fri Mar  8 01:44:18 2002
@@ -19,6 +19,7 @@
 
 extern splayNode *splay_insert(void *, splayNode *, SPLAYCMP *);
 extern splayNode *splay_splay(const void *, splayNode *, SPLAYCMP *);
+extern splayNode *splay_delete(const void *, splayNode *, SPLAYCMP *);
 extern void splay_destroy(splayNode *, SPLAYFREE *);
 extern void splay_walk(splayNode *, SPLAYWALKEE *, void *);
 
Index: squid/include/util.h
diff -u squid/include/util.h:1.10 squid/include/util.h:1.7.16.6
--- squid/include/util.h:1.10	Wed Oct 17 05:30:51 2001
+++ squid/include/util.h	Fri Mar  8 01:44:18 2002
@@ -127,6 +127,22 @@
 double drand48(void);
 #endif
 
+typedef struct {
+    size_t count;
+    size_t bytes;
+    size_t gb;
+} gb_t;
+
+/* gb_type operations */
+#define gb_flush_limit (0x3FFFFFFF)
+#define gb_inc(gb, delta) { if ((gb)->bytes > gb_flush_limit || delta > gb_flush_limit) gb_flush(gb); (gb)->bytes += delta; (gb)->count++; }
+#define gb_incb(gb, delta) { if ((gb)->bytes > gb_flush_limit || delta > gb_flush_limit) gb_flush(gb); (gb)->bytes += delta; }
+#define gb_incc(gb, delta) { if ((gb)->bytes > gb_flush_limit || delta > gb_flush_limit) gb_flush(gb); (gb)->count+= delta; }
+extern double gb_to_double(const gb_t *);
+extern const char *double_to_str(char *buf, int buf_size, double value);
+extern const char *gb_to_str(const gb_t *);
+extern void gb_flush(gb_t *);  /* internal, do not use this */
+
 /*
  * Returns the amount of known allocated memory
  */
Index: squid/lib/Makefile.am
diff -u squid/lib/Makefile.am:1.4 squid/lib/Makefile.am:1.2.18.4
--- squid/lib/Makefile.am:1.4	Wed Nov 21 15:48:57 2001
+++ squid/lib/Makefile.am	Sun Mar 24 23:49:05 2002
@@ -26,6 +26,7 @@
 	md5.c \
 	snprintf.c
 libmiscutil_a_SOURCES = \
+	MemPool.c \
 	Array.c \
 	base64.c \
 	getfullhostname.c \
Index: squid/lib/MemPool.c
diff -u /dev/null squid/lib/MemPool.c:1.1.2.22
--- /dev/null	Sun Jan 25 06:33:37 2004
+++ squid/lib/MemPool.c	Fri Oct  5 00:54:06 2001
@@ -0,0 +1,740 @@
+
+/*
+ * $Id: squid-chunked_mempools-HEAD,v 1.1 2004/08/17 20:55:12 hno Exp $
+ *
+ * DEBUG: section 63    Low Level Memory Pool Management
+ * AUTHOR: Alex Rousskov, Andres Kroonmaa
+ *
+ * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
+ * ----------------------------------------------------------
+ *
+ *  Squid is the result of efforts by numerous individuals from the
+ *  Internet community.  Development is led by Duane Wessels of the
+ *  National Laboratory for Applied Network Research and funded by the
+ *  National Science Foundation.  Squid is Copyrighted (C) 1998 by
+ *  the Regents of the University of California.  Please see the
+ *  COPYRIGHT file for full details.  Squid incorporates software
+ *  developed and/or copyrighted by other sources.  Please see the
+ *  CREDITS file for full details.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *  
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *  
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
+ *
+ */
+
+/*
+ * Old way:
+ *   xmalloc each item separately, upon free stack into idle pool array.
+ *   each item is individually malloc()ed from system, imposing libmalloc
+ *   overhead, and additionally we add our overhead of pointer size per item
+ *   as we keep a list of pointer to free items.
+ * 
+ * Chunking:
+ *   xmalloc Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
+ *   limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
+ *   MEM_PAGE_SIZE (4K), trying to have chunks in multiples of VM_PAGE size.
+ *   Minimum Chunk size is MEM_CHUNK_SIZE (16K).
+ *   A number of items fits into a single chunk, depending on item size.
+ *   Maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
+ * 
+ *   We populate Chunk with a linkedlist, each node at first word of item,
+ *   and pointing at next free item. Chunk->FreeList is pointing at first
+ *   free node. Thus we stuff free housekeeping into the Chunk itself, and
+ *   omit pointer overhead per item.
+ * 
+ *   Chunks are created on demand, and new chunks are inserted into linklist
+ *   of chunks so that Chunks with smaller pointer value are placed closer
+ *   to the linklist head. Head is a hotspot, servicing most of requests, so
+ *   slow sorting occurs and Chunks in highest memory tend to become idle
+ *   and freeable.
+ * 
+ *   event is registered that runs every 15 secs and checks reference time
+ *   of each idle chunk. If a chunk is not referenced for 15 secs, it is
+ *   released.
+ * 
+ *   [If mem_idle_limit is exceeded with pools, every chunk that becomes
+ *   idle is immediately considered for release, unless this is the only
+ *   chunk with free items in it.] (not implemented)
+ * 
+ *   In cachemgr output, there are new columns for chunking. Special item,
+ *   Frag, is shown to estimate approximately fragmentation of chunked
+ *   pools. Fragmentation is calculated by taking amount of items in use,
+ *   calculating needed amount of chunks to fit all, and then comparing to
+ *   actual amount of chunks in use. Frag number, in percent, is showing
+ *   how many percent of chunks are in use excessively. 100% meaning that
+ *   twice the needed amount of chunks are in use.
+ *   "part" item shows number of chunks partially filled. This shows how
+ *   badly fragmentation is spread across all chunks.
+ * 
+ *   Andres Kroonmaa.
+ */
+
+#define FLUSH_LIMIT 1000	/* Flush memPool counters to memMeters after flush limit calls */
+#define MEM_MAX_MMAP_CHUNKS 2048
+
+#include <assert.h>
+
+#include "config.h"
+#if HAVE_STRING_H
+#include <string.h>
+#endif
+#include "MemPool.h"
+
+/*
+ * XXX This is a boundary violation between lib and src.. would be good
+ * if it could be solved otherwise, but left for now.
+ */
+extern time_t squid_curtime;
+
+/* Allocator API */
+extern MemPool *memPoolCreate(const char *label, size_t obj_size);
+extern void *memPoolAlloc(MemPool * pool);
+extern void memPoolFree(MemPool * pool, void *obj);
+extern void memPoolDestroy(MemPool ** pool);
+
+extern MemPoolIterator *memPoolIterate(void);
+extern MemPool *memPoolIterateNext(MemPoolIterator * iter);
+extern void memPoolIterateDone(MemPoolIterator ** iter);
+
+/* Tune API */
+extern void memPoolSetChunkSize(MemPool * pool, size_t chunksize);
+extern void memPoolSetIdleLimit(size_t new_idle_limit);
+
+/* Stats API */
+extern int memPoolGetStats(MemPoolStats * stats, MemPool * pool);
+extern int memPoolGetGlobalStats(MemPoolGlobalStats * stats);
+
+/* Module housekeeping API */
+extern void memPoolClean(time_t maxage);
+
+/* local data */
+static int mempool_initialised = 0;
+static int mem_idle_limit = 0;
+static MemPool *memPools = NULL;
+static int memPools_alloc = 0;
+
+static MemPoolMeter TheMeter;
+static MemPoolIterator Iterator;
+
+static int Pool_id_counter = 0;
+static MemPool *lastPool;
+
+/* local prototypes */
+static int memCompChunks(MemChunk * chunkA, MemChunk * chunkB);
+static int memCompObjChunks(void *obj, MemChunk * chunk);
+static MemChunk *memPoolChunkNew(MemPool * pool);
+static void memPoolChunkDestroy(MemPool * pool, MemChunk * chunk);
+static void memPoolPush(MemPool * pool, void *obj);
+static void *memPoolGet(MemPool * pool);
+static void memPoolCreateChunk(MemPool * pool);
+static void memPoolFlushMeters(MemPool * pool);
+static void memPoolFlushMetersFull(MemPool * pool);
+static void memPoolFlushMetersAll(void);
+static void memPoolCleanOne(MemPool * pool, time_t maxage);
+
+static void memPoolInit(void);
+
+MemPoolIterator *
+memPoolIterate(void)
+{
+    Iterator.pool = memPools;
+    return &Iterator;
+}
+
+void
+memPoolIterateDone(MemPoolIterator ** iter)
+{
+    assert(iter);
+    Iterator.pool = NULL;
+    *iter = NULL;
+}
+
+MemPool *
+memPoolIterateNext(MemPoolIterator * iter)
+{
+    MemPool *pool;
+    assert(iter);
+
+    pool = iter->pool;
+    if (!pool)
+	return NULL;
+
+    iter->pool = pool->next;
+    return pool;
+}
+
+void
+memPoolSetIdleLimit(size_t new_idle_limit)
+{
+    mem_idle_limit = new_idle_limit;
+}
+
+/* Compare chunks */
+static int
+memCompChunks(MemChunk * chunkA, MemChunk * chunkB)
+{
+    return chunkA->objCache - chunkB->objCache;
+}
+
+/* Compare object to chunk */
+static int
+memCompObjChunks(void *obj, MemChunk * chunk)
+{
+    int bounds;
+    bounds = obj - chunk->objCache;
+    if (bounds < 0)
+	return -1;
+    if (bounds < lastPool->chunk_size)
+	return 0;
+    return 1;
+}
+
+static MemChunk *
+memPoolChunkNew(MemPool * pool)
+{
+    int i;
+    void **Free;
+    MemChunk *chunk;
+
+    chunk = xcalloc(1, sizeof(MemChunk));	/* should have a pool for this too */
+    chunk->objCache = xcalloc(1, pool->chunk_size);
+    Free = chunk->freeList = chunk->objCache;
+
+    for (i = 1; i < pool->chunk_capacity; i++) {
+	*Free = (void *) Free + pool->obj_size;
+	Free = *Free;
+    }
+    chunk->nextFreeChunk = pool->nextFreeChunk;
+    pool->nextFreeChunk = chunk;
+
+    memMeterAdd(pool->meter.alloc, pool->chunk_capacity);
+    memMeterAdd(pool->meter.idle, pool->chunk_capacity);
+    pool->idle += pool->chunk_capacity;
+    pool->chunkCount++;
+    chunk->lastref = squid_curtime;
+    lastPool = pool;
+    pool->allChunks = splay_insert(chunk, pool->allChunks, (SPLAYCMP *) memCompChunks);
+    return chunk;
+}
+
+static void
+memPoolChunkDestroy(MemPool * pool, MemChunk * chunk)
+{
+    memMeterDel(pool->meter.alloc, pool->chunk_capacity);
+    memMeterDel(pool->meter.idle, pool->chunk_capacity);
+    pool->idle -= pool->chunk_capacity;
+    pool->chunkCount--;
+    lastPool = pool;
+    pool->allChunks = splay_delete(chunk, pool->allChunks, (SPLAYCMP *) memCompChunks);
+    xfree(chunk->objCache);
+    xfree(chunk);
+}
+
+static void
+memPoolPush(MemPool * pool, void *obj)
+{
+    void **Free;
+    if ((pool->obj_size % 2048) != 0)
+	memset(obj, 0, pool->obj_size);
+    Free = obj;
+    *Free = pool->freeCache;
+    pool->freeCache = obj;
+    return;
+}
+
+/*
+ * Find a chunk with a free item.
+ * Create new chunk on demand if no chunk with frees found.
+ * Insert new chunk in front of lowest ram chunk, making it preferred in future,
+ * and resulting slow compaction towards lowest ram area.
+ */
+static void *
+memPoolGet(MemPool * pool)
+{
+    MemChunk *chunk;
+    void **Free;
+
+    /* first, try cache */
+    if (pool->freeCache) {
+	Free = pool->freeCache;
+	pool->freeCache = *Free;
+	*Free = NULL;
+	return Free;
+    }
+    /* then try perchunk freelist chain */
+    if (pool->nextFreeChunk == NULL) {
+	/* no chunk with frees, so create new one */
+	memPoolCreateChunk(pool);
+    }
+    /* now we have some in perchunk freelist chain */
+    chunk = pool->nextFreeChunk;
+
+    Free = chunk->freeList;
+    chunk->freeList = *Free;
+    *Free = NULL;
+    chunk->inuse_count++;
+    chunk->lastref = squid_curtime;
+
+    if (chunk->freeList == NULL) {
+	/* last free in this chunk, so remove us from perchunk freelist chain */
+	pool->nextFreeChunk = chunk->nextFreeChunk;
+    }
+    return Free;
+}
+
+/* just create a new chunk and place it into a good spot in the chunk chain */
+static void
+memPoolCreateChunk(MemPool * pool)
+{
+    MemChunk *chunk, *new;
+
+    new = memPoolChunkNew(pool);
+
+    chunk = pool->Chunks;
+    if (chunk == NULL) {	/* first chunk in pool */
+	pool->Chunks = new;
+	return;
+    }
+    if (new->objCache < chunk->objCache) {
+	/* we are lowest ram chunk, insert as first chunk */
+	new->next = chunk;
+	pool->Chunks = new;
+	return;
+    }
+    while (chunk->next) {
+	if (new->objCache < chunk->next->objCache) {
+	    /* new chunk is in lower ram, insert here */
+	    new->next = chunk->next;
+	    chunk->next = new;
+	    return;
+	}
+	chunk = chunk->next;
+    }
+    /* we are the worst chunk in chain, add as last */
+    chunk->next = new;
+    return;
+}
+
+static void
+memPoolInit(void)
+{
+    memPools = NULL;
+    memPools_alloc = 0;
+    memset(&TheMeter, 0, sizeof(TheMeter));
+    mem_idle_limit = 2 * MB;
+    mempool_initialised = 1;
+#if HAVE_MALLOPT && M_MMAP_MAX
+    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
+#endif
+}
+
+void
+memPoolSetChunkSize(MemPool * pool, size_t chunksize)
+{
+    int cap;
+    size_t csize = chunksize;
+
+    if (pool->Chunks)		/* unsafe to tamper */
+	return;
+
+    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;	/* round up to page size */
+    cap = csize / pool->obj_size;
+
+    if (cap < MEM_MIN_FREE)
+	cap = MEM_MIN_FREE;
+    if (cap * pool->obj_size > MEM_CHUNK_MAX_SIZE)
+	cap = MEM_CHUNK_MAX_SIZE / pool->obj_size;
+    if (cap > MEM_MAX_FREE)
+	cap = MEM_MAX_FREE;
+    if (cap < 1)
+	cap = 1;
+
+    csize = cap * pool->obj_size;
+    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;	/* round up to page size */
+    cap = csize / pool->obj_size;
+
+    pool->chunk_capacity = cap;
+    pool->chunk_size = csize;
+}
+
+MemPool *
+memPoolCreate(const char *label, size_t obj_size)
+{
+    MemPool *pool, *last_pool;
+
+    if (!mempool_initialised)
+	memPoolInit();
+
+    pool = xcalloc(1, sizeof(MemPool));
+    assert(label && obj_size);
+    pool->label = label;
+    pool->obj_size = obj_size;
+    pool->obj_size =
+	((obj_size + sizeof(void *) - 1) / sizeof(void *)) * sizeof(void *);
+
+    memPoolSetChunkSize(pool, MEM_CHUNK_SIZE);
+
+    /* Append as Last */
+    for (last_pool = memPools; last_pool && last_pool->next;)
+	last_pool = last_pool->next;
+    if (last_pool)
+	last_pool->next = pool;
+    else
+	memPools = pool;
+
+    memPools_alloc++;
+    pool->memPID = ++Pool_id_counter;
+    return pool;
+}
+
+/*
+ * warning: we do not clean this entry from Pools assuming memPoolDestroy
+ * is used at the end of the program only
+ */
+void
+memPoolDestroy(MemPool ** pool)
+{
+    MemChunk *chunk, *fchunk;
+    MemPool *find_pool, *free_pool, *prev_pool;
+
+    assert(pool);
+    assert(*pool);
+    free_pool = *pool;
+    memPoolFlushMetersFull(free_pool);
+    memPoolCleanOne(free_pool, 0);
+    assert(free_pool->inuse == 0 && "While trying to destroy pool");
+
+    for (chunk = free_pool->Chunks; (fchunk = chunk) != NULL; chunk = chunk->next)
+	memPoolChunkDestroy(free_pool, fchunk);
+
+    assert(memPools && "Called memPoolDestroy, but no pool exists!");
+
+    /* Pool clean, remove it from List and free */
+    for (find_pool = memPools, prev_pool = NULL; (find_pool && free_pool != find_pool); find_pool = find_pool->next)
+	prev_pool = find_pool;
+    assert(find_pool && "pool to destroy not found");
+
+    if (prev_pool)
+	prev_pool->next = free_pool->next;
+    else
+	memPools = free_pool->next;
+    xfree(free_pool);
+    memPools_alloc--;
+    *pool = NULL;
+}
+
+static void
+memPoolFlushMeters(MemPool * pool)
+{
+    size_t calls;
+
+    calls = pool->free_calls;
+    if (calls) {
+	pool->meter.gb_freed.count += calls;
+	memMeterDel(pool->meter.inuse, calls);
+#if !DISABLE_POOLS
+	memMeterAdd(pool->meter.idle, calls);
+#endif
+	pool->free_calls = 0;
+    }
+    calls = pool->alloc_calls;
+    if (calls) {
+	pool->meter.gb_saved.count += calls;
+	memMeterAdd(pool->meter.inuse, calls);
+#if !DISABLE_POOLS
+	memMeterDel(pool->meter.idle, calls);
+#endif
+	pool->alloc_calls = 0;
+    }
+}
+
+static void
+memPoolFlushMetersFull(MemPool * pool)
+{
+    memPoolFlushMeters(pool);
+    pool->meter.gb_saved.bytes = pool->meter.gb_saved.count * pool->obj_size;
+    pool->meter.gb_freed.bytes = pool->meter.gb_freed.count * pool->obj_size;
+}
+
+/*
+ * Updates all pool counters, and recreates TheMeter totals from all pools
+ */
+static void
+memPoolFlushMetersAll(void)
+{
+    MemPool *pool;
+    MemPoolIterator *iter;
+
+    TheMeter.alloc.level = 0;
+    TheMeter.inuse.level = 0;
+    TheMeter.idle.level = 0;
+    TheMeter.gb_saved.count = 0;
+    TheMeter.gb_saved.bytes = 0;
+    TheMeter.gb_freed.count = 0;
+    TheMeter.gb_freed.bytes = 0;
+
+    iter = memPoolIterate();
+    while ((pool = memPoolIterateNext(iter))) {
+	memPoolFlushMetersFull(pool);
+	memMeterAdd(TheMeter.alloc, pool->meter.alloc.level * pool->obj_size);
+	memMeterAdd(TheMeter.inuse, pool->meter.inuse.level * pool->obj_size);
+	memMeterAdd(TheMeter.idle, pool->meter.idle.level * pool->obj_size);
+	TheMeter.gb_saved.count += pool->meter.gb_saved.count;
+	TheMeter.gb_freed.count += pool->meter.gb_freed.count;
+	TheMeter.gb_saved.bytes += pool->meter.gb_saved.bytes;
+	TheMeter.gb_freed.bytes += pool->meter.gb_freed.bytes;
+    }
+    memPoolIterateDone(&iter);
+}
+
+void *
+memPoolAlloc(MemPool * pool)
+{
+    void *p;
+    assert(pool);
+#if !DISABLE_POOLS
+    p = memPoolGet(pool);
+    assert(pool->idle);
+    pool->idle--;
+    pool->inuse++;
+#else
+    p = xcalloc(1, pool->obj_size);
+#endif
+    if (++pool->alloc_calls == FLUSH_LIMIT)
+	memPoolFlushMeters(pool);
+
+    return p;
+}
+
+void
+memPoolFree(MemPool * pool, void *obj)
+{
+    assert(pool && obj);
+#if !DISABLE_POOLS
+
+    memPoolPush(pool, obj);
+    assert(pool->inuse);
+    pool->inuse--;
+    pool->idle++;
+#else
+    xfree(obj);
+#endif
+    ++pool->free_calls;
+
+}
+
+/* removes empty Chunks from pool */
+static void
+memPoolCleanOne(MemPool * pool, time_t maxage)
+{
+    MemChunk *chunk, *freechunk, *listTail;
+    void **Free;
+    time_t age;
+
+    if (!pool)
+	return;
+    if (!pool->Chunks)
+	return;
+
+    memPoolFlushMetersFull(pool);
+    /*
+     * OK, so we have to go through all the global freeCache and find the Chunk
+     * any given Free belongs to, and stuff it into that Chunk's freelist 
+     */
+
+    while ((Free = pool->freeCache) != NULL) {
+	lastPool = pool;
+	pool->allChunks = splay_splay(Free, pool->allChunks, (SPLAYCMP *) memCompObjChunks);
+	assert(splayLastResult == 0);
+	chunk = pool->allChunks->data;
+	assert(chunk->inuse_count > 0);
+	chunk->inuse_count--;
+	pool->freeCache = *Free;	/* remove from global cache */
+	*Free = chunk->freeList;	/* stuff into chunks freelist */
+	chunk->freeList = Free;
+	chunk->lastref = squid_curtime;
+    }
+
+    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
+    /* We start now checking all chunks to see if we can release any */
+    /* We start from pool->Chunks->next, so first chunk is not released */
+    /* Recreate nextFreeChunk list from scratch */
+
+    chunk = pool->Chunks;
+    while ((freechunk = chunk->next) != NULL) {
+	age = squid_curtime - freechunk->lastref;
+	freechunk->nextFreeChunk = NULL;
+	if (freechunk->inuse_count == 0)
+	    if (age >= maxage) {
+		chunk->next = freechunk->next;
+		memPoolChunkDestroy(pool, freechunk);
+		freechunk = NULL;
+	    }
+	if (chunk->next == NULL)
+	    break;
+	chunk = chunk->next;
+    }
+
+    /* Recreate nextFreeChunk list from scratch */
+    /* Populate nextFreeChunk list in order of "most filled chunk first" */
+    /* in case of equal fill, put chunk in lower ram first */
+    /* First (create time) chunk is always on top, no matter how full */
+
+    chunk = pool->Chunks;
+    pool->nextFreeChunk = chunk;
+    chunk->nextFreeChunk = NULL;
+
+    while (chunk->next) {
+	chunk->next->nextFreeChunk = NULL;
+	if (chunk->next->inuse_count < pool->chunk_capacity) {
+	    listTail = pool->nextFreeChunk;
+	    while (listTail->nextFreeChunk) {
+		if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
+		    break;
+		if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
+		    (chunk->next->objCache < listTail->nextFreeChunk->objCache))
+		    break;
+		listTail = listTail->nextFreeChunk;
+	    }
+	    chunk->next->nextFreeChunk = listTail->nextFreeChunk;
+	    listTail->nextFreeChunk = chunk->next;
+	}
+	chunk = chunk->next;
+    }
+    /* We started from 2nd chunk. If first chunk is full, remove it */
+    if (pool->nextFreeChunk->inuse_count == pool->chunk_capacity)
+	pool->nextFreeChunk = pool->nextFreeChunk->nextFreeChunk;
+
+    return;
+}
+
+/* 
+ * Returns all cached frees to their home chunks.
+ * If a chunk's unreferenced age exceeds maxage, the idle chunk is destroyed.
+ * Flushes meters for each pool visited.
+ * memPoolCleanOne() handles a single pool; memPoolClean() iterates
+ * through all pools. Lowering the idle memory limit beforehand
+ * (see memPoolSetIdleLimit) allows shrinking memPool memory usage
+ * to a specified minimum.
+ */
+void
+memPoolClean(time_t maxage)
+{
+    MemPool *pool;
+    MemPoolIterator *iter;
+
+    int shift = 1;
+    memPoolFlushMetersAll();
+    if (TheMeter.idle.level > mem_idle_limit)
+	maxage = shift = 0;
+
+    iter = memPoolIterate();
+    while ((pool = memPoolIterateNext(iter))) {
+	if (pool->meter.idle.level > (pool->chunk_capacity << shift)) {
+	    memPoolCleanOne(pool, maxage);
+	}
+    }
+    memPoolIterateDone(&iter);
+}
+
+/* Persistent Pool stats. for GlobalStats accumulation */
+static MemPoolStats pp_stats;
+
+/*
+ * Update MemPoolStats struct for single pool
+ */
+int
+memPoolGetStats(MemPoolStats * stats, MemPool * pool)
+{
+    MemChunk *chunk;
+    int chunks_free = 0;
+    int chunks_partial = 0;
+
+    if (stats != &pp_stats)	/* need skip memset for GlobalStats accumulation */
+	memset(stats, 0, sizeof(MemPoolStats));
+
+    memPoolCleanOne(pool, (time_t) 555555);	/* don't want to get chunks released before reporting */
+
+    stats->pool = pool;
+    stats->label = pool->label;
+    stats->meter = &pool->meter;
+    stats->obj_size = pool->obj_size;
+    stats->chunk_capacity = pool->chunk_capacity;
+
+    /* gather stats for each Chunk */
+    chunk = pool->Chunks;
+    while (chunk) {
+	if (chunk->inuse_count == 0)
+	    chunks_free++;
+	else if (chunk->inuse_count < pool->chunk_capacity)
+	    chunks_partial++;
+	chunk = chunk->next;
+    }
+
+    stats->chunks_alloc += pool->chunkCount;
+    stats->chunks_inuse += pool->chunkCount - chunks_free;
+    stats->chunks_partial += chunks_partial;
+    stats->chunks_free += chunks_free;
+
+    stats->items_alloc += pool->meter.alloc.level;
+    stats->items_inuse += pool->meter.inuse.level;
+    stats->items_idle += pool->meter.idle.level;
+
+    stats->overhead += sizeof(MemPool) + pool->chunkCount * sizeof(MemChunk) + strlen(pool->label) + 1;
+
+    return pool->meter.inuse.level;
+}
+
+/*
+ * Totals statistics is returned
+ */
+int
+memPoolGetGlobalStats(MemPoolGlobalStats * stats)
+{
+    int pools_inuse = 0;
+    MemPool *pool;
+    MemPoolIterator *iter;
+
+    memset(stats, 0, sizeof(MemPoolGlobalStats));
+    memset(&pp_stats, 0, sizeof(MemPoolStats));
+
+    memPoolFlushMetersAll();	/* recreate TheMeter */
+
+    /* gather all stats for Totals */
+    iter = memPoolIterate();
+    while ((pool = memPoolIterateNext(iter))) {
+	if (memPoolGetStats(&pp_stats, pool) > 0)
+	    pools_inuse++;
+    }
+    memPoolIterateDone(&iter);
+
+    stats->TheMeter = &TheMeter;
+
+    stats->tot_pools_alloc = memPools_alloc;
+    stats->tot_pools_inuse = pools_inuse;
+    stats->tot_pools_mempid = Pool_id_counter;
+
+    stats->tot_chunks_alloc = pp_stats.chunks_alloc;
+    stats->tot_chunks_inuse = pp_stats.chunks_inuse;
+    stats->tot_chunks_partial = pp_stats.chunks_partial;
+    stats->tot_chunks_free = pp_stats.chunks_free;
+    stats->tot_items_alloc = pp_stats.items_alloc;
+    stats->tot_items_inuse = pp_stats.items_inuse;
+    stats->tot_items_idle = pp_stats.items_idle;
+
+    stats->tot_overhead += pp_stats.overhead + memPools_alloc * sizeof(MemPool *);
+    stats->mem_idle_limit = mem_idle_limit;
+
+    return pools_inuse;
+}
Index: squid/lib/splay.c
diff -u squid/lib/splay.c:1.3 squid/lib/splay.c:1.3.48.1
--- squid/lib/splay.c:1.3	Mon Oct 23 08:04:19 2000
+++ squid/lib/splay.c	Wed Apr 18 11:17:43 2001
@@ -1,5 +1,8 @@
 /*
  * $Id: squid-chunked_mempools-HEAD,v 1.1 2004/08/17 20:55:12 hno Exp $
+ *
+ * based on ftp://ftp.cs.cmu.edu/user/sleator/splaying/top-down-splay.c
+ * http://bobo.link.cs.cmu.edu/cgi-bin/splay/splay-cgi.pl
  */
 
 #include "config.h"
@@ -99,6 +102,26 @@
     return top;
 }
 
+splayNode *
+splay_delete(const void *data, splayNode * top, SPLAYCMP * compare)
+{
+    splayNode *x;
+    if (top == NULL)
+        return NULL;
+    top = splay_splay(data, top, compare);
+    if (splayLastResult == 0) {		/* found it */
+	if (top->left == NULL) {
+	    x = top->right;
+	} else {
+	    x = splay_splay(data, top->left, compare);
+	    x->right = top->right;
+	}
+	xfree(top);
+	return x;
+    }
+    return top;				/* It wasn't there */
+}
+
 void
 splay_destroy(splayNode * top, SPLAYFREE * free_func)
 {
Index: squid/lib/util.c
diff -u squid/lib/util.c:1.12 squid/lib/util.c:1.7.20.5
--- squid/lib/util.c:1.12	Thu Oct 18 13:52:10 2001
+++ squid/lib/util.c	Fri Mar  8 01:44:18 2002
@@ -757,3 +757,53 @@
     write(2, "\n", 1);
     abort();
 }
+
+void
+gb_flush(gb_t * g)
+{
+    g->gb += (g->bytes >> 30);
+    g->bytes &= (1 << 30) - 1;
+}
+
+double
+gb_to_double(const gb_t * g)
+{
+    return ((double) g->gb) * ((double) (1 << 30)) + ((double) g->bytes);
+}
+
+const char *
+double_to_str(char *buf, int buf_size, double value)
+{
+    /* select format */
+    if (value < 1e9)
+        snprintf(buf, buf_size, "%.2f MB", value / 1e6);
+    else if (value < 1e12)
+        snprintf(buf, buf_size, "%.3f GB", value / 1e9);
+    else
+        snprintf(buf, buf_size, "%.4f TB", value / 1e12);
+    return buf;
+}
+
+const char *
+gb_to_str(const gb_t * g)
+{
+    /*
+     * it is often convenient to call gb_to_str several times for _one_ printf
+     */
+#define max_cc_calls 5
+    typedef char GbBuf[32];
+    static GbBuf bufs[max_cc_calls];
+    static int call_id = 0;
+    double value = gb_to_double(g);
+    char *buf = bufs[call_id++];
+    if (call_id >= max_cc_calls)
+	call_id = 0;
+    /* select format */
+    if (value < 1e9)
+	snprintf(buf, sizeof(GbBuf), "%.2f MB", value / 1e6);
+    else if (value < 1e12)
+	snprintf(buf, sizeof(GbBuf), "%.2f GB", value / 1e9);
+    else
+	snprintf(buf, sizeof(GbBuf), "%.2f TB", value / 1e12);
+    return buf;
+}
Index: squid/src/Makefile.am
diff -u squid/src/Makefile.am:1.16 squid/src/Makefile.am:1.5.4.3
--- squid/src/Makefile.am:1.16	Tue Jan  1 06:13:33 2002
+++ squid/src/Makefile.am	Fri Mar  8 01:44:20 2002
@@ -155,7 +155,6 @@
 	logfile.c \
 	main.c \
 	mem.c \
-	MemPool.c \
 	MemBuf.c \
 	mime.c \
 	multicast.c \
Index: squid/src/MemPool.c
diff -u squid/src/MemPool.c:1.6 squid/src/MemPool.c:removed
--- squid/src/MemPool.c:1.6	Wed Oct 24 02:42:11 2001
+++ squid/src/MemPool.c	Sun Jan 25 06:33:39 2004
@@ -1,383 +0,0 @@
-
-/*
- * $Id: squid-chunked_mempools-HEAD,v 1.1 2004/08/17 20:55:12 hno Exp $
- *
- * DEBUG: section 63    Low Level Memory Pool Management
- * AUTHOR: Alex Rousskov
- *
- * SQUID Web Proxy Cache          http://www.squid-cache.org/
- * ----------------------------------------------------------
- *
- *  Squid is the result of efforts by numerous individuals from
- *  the Internet community; see the CONTRIBUTORS file for full
- *  details.   Many organizations have provided support for Squid's
- *  development; see the SPONSORS file for full details.  Squid is
- *  Copyrighted (C) 2001 by the Regents of the University of
- *  California; see the COPYRIGHT file for full details.  Squid
- *  incorporates software developed and/or copyrighted by other
- *  sources; see the CREDITS file for full details.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *  
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *  
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
- *
- */
-
-
-#include "squid.h"
-#include "Stack.h"
-
-#define MB ((size_t)1024*1024)
-
-/* exported */
-unsigned int mem_pool_alloc_calls = 0;
-unsigned int mem_pool_free_calls = 0;
-
-/* module globals */
-
-/* huge constant to set mem_idle_limit to "unlimited" */
-static const size_t mem_unlimited_size = 2 * 1024 * MB;
-
-/* we cannot keep idle more than this limit */
-static size_t mem_idle_limit = 0;
-
-/* memory pool accounting */
-static MemPoolMeter TheMeter;
-static gb_t mem_traffic_volume =
-{0, 0};
-static Stack Pools;
-
-/* local prototypes */
-static void memShrink(ssize_t new_limit);
-static void memPoolDescribe(const MemPool * pool);
-static void memPoolShrink(MemPool * pool, ssize_t new_limit);
-
-
-static double
-toMB(size_t size)
-{
-    return ((double) size) / MB;
-}
-
-static size_t
-toKB(size_t size)
-{
-    return (size + 1024 - 1) / 1024;
-}
-
-
-/* Initialization */
-
-void
-memConfigure(void)
-{
-    size_t new_pool_limit = mem_idle_limit;
-    /* set to configured value first */
-#if PURIFY
-    debug(63, 1) ("Disabling Memory pools under purify\n");
-    Config.onoff.mem_pools = 0;
-#endif
-    if (!Config.onoff.mem_pools)
-	new_pool_limit = 0;
-    else if (Config.MemPools.limit > 0)
-	new_pool_limit = Config.MemPools.limit;
-    else
-	new_pool_limit = mem_unlimited_size;
-    /* shrink memory pools if needed */
-    if (TheMeter.idle.level > new_pool_limit) {
-	debug(63, 1) ("Shrinking idle mem pools to %.2f MB\n", toMB(new_pool_limit));
-	memShrink(new_pool_limit);
-    }
-    assert(TheMeter.idle.level <= new_pool_limit);
-    mem_idle_limit = new_pool_limit;
-}
-
-void
-memInitModule(void)
-{
-    memset(&TheMeter, 0, sizeof(TheMeter));
-    stackInit(&Pools);
-    debug(63, 1) ("Memory pools are '%s'; limit: %.2f MB\n",
-	(Config.onoff.mem_pools ? "on" : "off"), toMB(mem_idle_limit));
-}
-
-void
-memCleanModule(void)
-{
-    int i;
-    int dirty_count = 0;
-    for (i = 0; i < Pools.count; i++) {
-	MemPool *pool = Pools.items[i];
-	if (memPoolInUseCount(pool)) {
-	    memPoolDescribe(pool);
-	    dirty_count++;
-	} else {
-	    memPoolDestroy(pool);
-	    Pools.items[i] = NULL;
-	}
-    }
-    if (dirty_count)
-	debug(63, 2) ("memCleanModule: %d pools are left dirty\n", dirty_count);
-    /* we clean the stack anyway */
-    stackClean(&Pools);
-}
-
-
-static void
-memShrink(ssize_t new_limit)
-{
-    ssize_t start_limit = TheMeter.idle.level;
-    int i;
-    assert(start_limit >= 0 && new_limit >= 0);
-    debug(63, 1) ("memShrink: started with %ld KB goal: %ld KB\n",
-	(long int) toKB(TheMeter.idle.level), (long int) toKB(new_limit));
-    /* first phase: cut proportionally to the pool idle size */
-    for (i = 0; i < Pools.count && TheMeter.idle.level > new_limit; ++i) {
-	MemPool *pool = Pools.items[i];
-	const ssize_t target_pool_size = (size_t) ((double) pool->meter.idle.level * new_limit) / start_limit;
-	memPoolShrink(pool, target_pool_size);
-    }
-    debug(63, 1) ("memShrink: 1st phase done with %ld KB left\n", (long int) toKB(TheMeter.idle.level));
-    /* second phase: cut to 0 */
-    for (i = 0; i < Pools.count && TheMeter.idle.level > new_limit; ++i)
-	memPoolShrink(Pools.items[i], 0);
-    debug(63, 1) ("memShrink: 2nd phase done with %ld KB left\n", (long int) toKB(TheMeter.idle.level));
-    assert(TheMeter.idle.level <= new_limit);	/* paranoid */
-}
-
-/* MemPoolMeter */
-
-static void
-memPoolMeterReport(const MemPoolMeter * pm, size_t obj_size,
-    int alloc_count, int inuse_count, int idle_count, StoreEntry * e)
-{
-    assert(pm);
-    storeAppendPrintf(e, "%d\t %ld\t %ld\t %.2f\t %d\t %d\t %ld\t %ld\t %d\t %d\t %ld\t %ld\t %ld\t %.2f\t %.2f\t %.2f\t %ld\n",
-    /* alloc */
-	alloc_count,
-	(long int) toKB(obj_size * pm->alloc.level),
-	(long int) toKB(obj_size * pm->alloc.hwater_level),
-	(double) ((squid_curtime - pm->alloc.hwater_stamp) / 3600.),
-	xpercentInt(obj_size * pm->alloc.level, TheMeter.alloc.level),
-    /* in use */
-	inuse_count,
-	(long int) toKB(obj_size * pm->inuse.level),
-	(long int) toKB(obj_size * pm->inuse.hwater_level),
-	xpercentInt(pm->inuse.level, pm->alloc.level),
-    /* idle */
-	idle_count,
-	(long int) toKB(obj_size * pm->idle.level),
-	(long int) toKB(obj_size * pm->idle.hwater_level),
-    /* (int)rint(xpercent(pm->idle.level, pm->alloc.level)), */
-    /* saved */
-	(long int) pm->saved.count,
-	xpercent(pm->saved.count, mem_traffic_volume.count),
-	xpercent(obj_size * gb_to_double(&pm->saved), gb_to_double(&mem_traffic_volume)),
-	xpercent(pm->saved.count, pm->total.count),
-	(long int) pm->total.count);
-}
-
-/* MemMeter */
-
-void
-memMeterSyncHWater(MemMeter * m)
-{
-    assert(m);
-    if (m->hwater_level < m->level) {
-	m->hwater_level = m->level;
-	m->hwater_stamp = squid_curtime;
-    }
-}
-
-/* MemPool */
-
-MemPool *
-memPoolCreate(const char *label, size_t obj_size)
-{
-    MemPool *pool = xcalloc(1, sizeof(MemPool));
-    assert(label && obj_size);
-    pool->label = label;
-    pool->obj_size = obj_size;
-    stackInit(&pool->pstack);
-    /* other members are set to 0 */
-    stackPush(&Pools, pool);
-    return pool;
-}
-
-/*
- * warning: we do not clean this entry from Pools stack assuming memPoolDestroy
- * is used at the end of the program only
- */
-void
-memPoolDestroy(MemPool * pool)
-{
-    assert(pool);
-    stackClean(&pool->pstack);
-    xfree(pool);
-}
-
-void *
-memPoolAlloc(MemPool * pool)
-{
-    assert(pool);
-    memMeterInc(pool->meter.inuse);
-    gb_inc(&pool->meter.total, 1);
-    gb_inc(&TheMeter.total, pool->obj_size);
-    memMeterAdd(TheMeter.inuse, pool->obj_size);
-    gb_inc(&mem_traffic_volume, pool->obj_size);
-    mem_pool_alloc_calls++;
-    if (pool->pstack.count) {
-	assert(pool->meter.idle.level);
-	memMeterDec(pool->meter.idle);
-	memMeterDel(TheMeter.idle, pool->obj_size);
-	gb_inc(&pool->meter.saved, 1);
-	gb_inc(&TheMeter.saved, pool->obj_size);
-	return stackPop(&pool->pstack);
-    } else {
-	assert(!pool->meter.idle.level);
-	memMeterInc(pool->meter.alloc);
-	memMeterAdd(TheMeter.alloc, pool->obj_size);
-	return xcalloc(1, pool->obj_size);
-    }
-}
-
-void
-memPoolFree(MemPool * pool, void *obj)
-{
-    assert(pool && obj);
-    memMeterDec(pool->meter.inuse);
-    memMeterDel(TheMeter.inuse, pool->obj_size);
-    mem_pool_free_calls++;
-    if (TheMeter.idle.level + pool->obj_size <= mem_idle_limit) {
-	memMeterInc(pool->meter.idle);
-	memMeterAdd(TheMeter.idle, pool->obj_size);
-	memset(obj, 0, pool->obj_size);
-	stackPush(&pool->pstack, obj);
-    } else {
-	memMeterDec(pool->meter.alloc);
-	memMeterDel(TheMeter.alloc, pool->obj_size);
-	xfree(obj);
-    }
-    assert(pool->meter.idle.level <= pool->meter.alloc.level);
-}
-
-static void
-memPoolShrink(MemPool * pool, ssize_t new_limit)
-{
-    assert(pool);
-    assert(new_limit >= 0);
-    while (pool->meter.idle.level > new_limit && pool->pstack.count > 0) {
-	memMeterDec(pool->meter.alloc);
-	memMeterDec(pool->meter.idle);
-	memMeterDel(TheMeter.idle, pool->obj_size);
-	memMeterDel(TheMeter.alloc, pool->obj_size);
-	xfree(stackPop(&pool->pstack));
-    }
-    assert(pool->meter.idle.level <= new_limit);	/* paranoid */
-}
-
-int
-memPoolWasUsed(const MemPool * pool)
-{
-    assert(pool);
-    return pool->meter.alloc.hwater_level > 0;
-}
-
-int
-memPoolInUseCount(const MemPool * pool)
-{
-    assert(pool);
-    return pool->meter.inuse.level;
-}
-
-size_t
-memPoolInUseSize(const MemPool * pool)
-{
-    assert(pool);
-    return pool->obj_size * pool->meter.inuse.level;
-}
-
-/* to-do: make debug level a parameter? */
-static void
-memPoolDescribe(const MemPool * pool)
-{
-    assert(pool);
-    debug(63, 2) ("%-20s: %6d x %4d bytes = %5ld KB\n",
-	pool->label, memPoolInUseCount(pool), (int) pool->obj_size,
-	(long int) toKB(memPoolInUseSize(pool)));
-}
-
-size_t
-memTotalAllocated(void)
-{
-    return TheMeter.alloc.level;
-}
-
-void
-memPoolReport(const MemPool * pool, StoreEntry * e)
-{
-    assert(pool);
-    storeAppendPrintf(e, "%-20s\t %4d\t ",
-	pool->label, (int) pool->obj_size);
-    memPoolMeterReport(&pool->meter, pool->obj_size,
-	pool->meter.alloc.level, pool->meter.inuse.level, pool->meter.idle.level,
-	e);
-}
-
-void
-memReport(StoreEntry * e)
-{
-    size_t overhd_size = 0;
-    int alloc_count = 0;
-    int inuse_count = 0;
-    int idle_count = 0;
-    int i;
-    /* caption */
-    storeAppendPrintf(e, "Current memory usage:\n");
-    /* heading */
-    storeAppendPrintf(e, "Pool\t Obj Size\t"
-	"Allocated\t\t\t\t\t In Use\t\t\t\t Idle\t\t\t Allocations Saved\t\t\t Hit Rate\t\n"
-	" \t (bytes)\t"
-	"(#)\t (KB)\t high (KB)\t high (hrs)\t impact (%%total)\t"
-	"(#)\t (KB)\t high (KB)\t portion (%%alloc)\t"
-	"(#)\t (KB)\t high (KB)\t"
-	"(number)\t (%%num)\t (%%vol)\t"
-	"(%%num)\t"
-	"(number)"
-	"\n");
-    /* main table */
-    for (i = 0; i < Pools.count; i++) {
-	const MemPool *pool = Pools.items[i];
-	if (memPoolWasUsed(pool)) {
-	    memPoolReport(pool, e);
-	    alloc_count += pool->meter.alloc.level;
-	    inuse_count += pool->meter.inuse.level;
-	    idle_count += pool->meter.idle.level;
-	}
-	overhd_size += sizeof(MemPool) + sizeof(MemPool *) +
-	    strlen(pool->label) + 1 +
-	    pool->pstack.capacity * sizeof(void *);
-    }
-    overhd_size += sizeof(Pools) + Pools.capacity * sizeof(MemPool *);
-    /* totals */
-    storeAppendPrintf(e, "%-20s\t %-4s\t ", "Total", "-");
-    memPoolMeterReport(&TheMeter, 1, alloc_count, inuse_count, idle_count, e);
-    storeAppendPrintf(e, "Cumulative allocated volume: %s\n", gb_to_str(&mem_traffic_volume));
-    /* overhead */
-    storeAppendPrintf(e, "Current overhead: %ld bytes (%.3f%%)\n",
-	(long int) overhd_size, xpercent(overhd_size, TheMeter.inuse.level));
-    /* limits */
-    storeAppendPrintf(e, "Idle pool limit: %.2f MB\n", toMB(mem_idle_limit));
-    storeAppendPrintf(e, "memPoolAlloc calls: %d\n", mem_pool_alloc_calls);
-    storeAppendPrintf(e, "memPoolFree calls: %d\n", mem_pool_free_calls);
-}
Index: squid/src/defines.h
diff -u squid/src/defines.h:1.17 squid/src/defines.h:1.10.4.5
--- squid/src/defines.h:1.17	Wed Feb 27 01:16:52 2002
+++ squid/src/defines.h	Fri Mar  8 01:44:23 2002
@@ -221,10 +221,6 @@
 /* were to look for errors if config path fails */
 #define DEFAULT_SQUID_ERROR_DIR "/usr/local/squid/etc/errors"
 
-/* gb_type operations */
-#define gb_flush_limit (0x3FFFFFFF)
-#define gb_inc(gb, delta) { if ((gb)->bytes > gb_flush_limit || delta > gb_flush_limit) gb_flush(gb); (gb)->bytes += delta; (gb)->count++; }
-
 /* iteration for HttpHdrRange */
 #define HttpHdrRangeInitPos (-1)
 
Index: squid/src/main.c
diff -u squid/src/main.c:1.30 squid/src/main.c:1.20.2.7
--- squid/src/main.c:1.30	Wed Jan 16 01:30:48 2002
+++ squid/src/main.c	Fri Mar  8 01:44:27 2002
@@ -559,6 +559,7 @@
 	    eventAdd("start_announce", start_announce, NULL, 3600.0, 1);
 	eventAdd("ipcache_purgelru", ipcache_purgelru, NULL, 10.0, 1);
 	eventAdd("fqdncache_purgelru", fqdncache_purgelru, NULL, 15.0, 1);
+	eventAdd("memPoolCleanIdlePools", memPoolCleanIdlePools, NULL, 15.0, 1);
     }
     configured_once = 1;
 }
Index: squid/src/mem.c
diff -u squid/src/mem.c:1.16 squid/src/mem.c:1.9.8.14
--- squid/src/mem.c:1.16	Wed Feb 27 01:16:52 2002
+++ squid/src/mem.c	Fri Mar  8 01:44:27 2002
@@ -34,10 +34,19 @@
  */
 
 #include "squid.h"
+#include "memMeter.h"
 
 /* module globals */
 
+/* local prototypes */
+static void memStringStats(StoreEntry * sentry);
+static void memStats(StoreEntry * sentry);
+static void memPoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, StoreEntry * e);
+
+/* module locals */
 static MemPool *MemPools[MEM_MAX];
+static double xm_time = 0;
+static double xm_deltat = 0;
 
 /* string pools */
 #define mem_str_pool_count 3
@@ -115,11 +124,27 @@
     storeBufferFlush(sentry);
 }
 
-
 /*
  * public routines
  */
 
+int
+memPoolInUseCount(MemPool * pool)
+{
+    MemPoolStats stats;
+    assert(pool);
+    memPoolGetStats(&stats, pool);
+    return stats.items_inuse;
+}
+
+int
+memPoolsTotalAllocated(void)
+{
+    MemPoolGlobalStats stats;
+    memPoolGetGlobalStats(&stats);
+    return stats.TheMeter->alloc.level;
+}
+
 /*
  * we have a limit on _total_ amount of idle memory so we ignore
  * max_pages for now
@@ -128,6 +153,7 @@
 memDataInit(mem_type type, const char *name, size_t size, int max_pages_notused)
 {
     assert(name && size);
+    assert(MemPools[type] == NULL);
     MemPools[type] = memPoolCreate(name, size);
 }
 
@@ -264,12 +290,43 @@
     }
 }
 
+static double clean_interval = 15.0;	/* time to live of idle chunk before release */
+
+void
+memPoolCleanIdlePools(void *unused)
+{
+    memPoolClean(clean_interval);
+    eventAdd("memPoolCleanIdlePools", memPoolCleanIdlePools, NULL, clean_interval, 1);
+}
+
+static int mem_idle_limit = 0;
+
+void
+memConfigure(void)
+{
+    int new_pool_limit;
+    /* set to configured value first */
+    if (!Config.onoff.mem_pools)
+	new_pool_limit = 0;
+    else if (Config.MemPools.limit > 0)
+	new_pool_limit = Config.MemPools.limit;
+    else
+	new_pool_limit = mem_unlimited_size;
+
+    if (mem_idle_limit > new_pool_limit)
+	debug(63, 1) ("Shrinking idle mem pools to %.2f MB\n", toMB(new_pool_limit));
+    memPoolSetIdleLimit(new_pool_limit);
+    mem_idle_limit = new_pool_limit;
+}
 
 void
 memInit(void)
 {
     int i;
-    memInitModule();
+
+    debug(63, 1) ("Memory pools are '%s'; limit: %.2f MB\n",
+	(Config.onoff.mem_pools ? "on" : "off"), toMB(mem_idle_limit));
+
     /* set all pointers to null */
     memset(MemPools, '\0', sizeof(MemPools));
     /*
@@ -323,9 +380,11 @@
     memDataInit(MEM_REQUEST_T, "request_t", sizeof(request_t),
 	Squid_MaxFD >> 3);
     memDataInit(MEM_STOREENTRY, "StoreEntry", sizeof(StoreEntry), 0);
+    memPoolSetChunkSize(MemPools[MEM_STOREENTRY], 2048 * 1024);
     memDataInit(MEM_WORDLIST, "wordlist", sizeof(wordlist), 0);
     memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
     memDataInit(MEM_MD5_DIGEST, "MD5 digest", MD5_DIGEST_CHARS, 0);
+    memPoolSetChunkSize(MemPools[MEM_MD5_DIGEST], 512 * 1024);
     memDataInit(MEM_HELPER_REQUEST, "helper_request",
 	sizeof(helper_request), 0);
     memDataInit(MEM_HELPER_STATEFUL_REQUEST, "helper_stateful_request",
@@ -360,10 +419,29 @@
     }
 }
 
+#if UNUSED_CODE
+/* to-do: make debug level a parameter? */
+static void memPoolDescribe(const MemPool * pool);
+static void
+memPoolDescribe(const MemPool * pool)
+{
+    assert(pool);
+    debug(63, 2) ("%-20s: %6d x %4d bytes = %5d KB\n",
+	pool->label, memPoolInUseCount(pool), pool->obj_size,
+	toKB(pool->obj_size * pool->meter.inuse.level));
+}
+#endif
+
 void
 memClean(void)
 {
-    memCleanModule();
+    MemPoolGlobalStats stats;
+    memPoolSetIdleLimit(0);
+    memPoolClean(0);
+    memPoolGetGlobalStats(&stats);
+    if (stats.tot_items_inuse)
+	debug(63, 2) ("memCleanModule: %d items in %d chunks and %d pools are left dirty\n", stats.tot_items_inuse,
+	    stats.tot_chunks_inuse, stats.tot_pools_inuse);
 }
 
 int
@@ -433,3 +511,145 @@
 	return xfree;
     }
 }
+
+/* MemPoolMeter */
+
+static void
+memPoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, StoreEntry * e)
+{
+    int excess;
+    int needed = 0;
+    MemPoolMeter *pm = mp_st->meter;
+
+    storeAppendPrintf(e, "%-20s\t %4d\t ",
+	mp_st->label, mp_st->obj_size);
+
+    /* Chunks */
+    storeAppendPrintf(e, "%4d\t %4d\t ",
+	toKB(mp_st->obj_size * mp_st->chunk_capacity), mp_st->chunk_capacity);
+
+    if (mp_st->chunk_capacity) {
+	needed = mp_st->items_inuse / mp_st->chunk_capacity;
+	if (mp_st->items_inuse % mp_st->chunk_capacity)
+	    needed++;
+	excess = mp_st->chunks_inuse - needed;
+    }
+    storeAppendPrintf(e, "%4d\t %4d\t %4d\t %4d\t %.1f\t ",
+	mp_st->chunks_alloc, mp_st->chunks_inuse, mp_st->chunks_free, mp_st->chunks_partial,
+	xpercent(excess, needed));
+/*
+ *  Fragmentation calculation:
+ *    needed = inuse.level / chunk_capacity
+ *    excess = used - needed
+ *    fragmentation = excess / needed * 100%
+ *
+ *    Fragm = (alloced - (inuse / obj_ch) ) / alloced
+ */
+
+    storeAppendPrintf(e,
+	"%d\t %d\t %d\t %.2f\t %.1f\t"	/* alloc */
+	"%d\t %d\t %d\t %.1f\t"	/* in use */
+	"%d\t %d\t %d\t"	/* idle */
+	"%.0f\t %.1f\t %.1f\t %.1f\n",	/* saved */
+    /* alloc */
+	mp_st->items_alloc,
+	toKB(mp_st->obj_size * pm->alloc.level),
+	toKB(mp_st->obj_size * pm->alloc.hwater_level),
+	(double) ((squid_curtime - pm->alloc.hwater_stamp) / 3600.),
+	xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level),
+    /* in use */
+	mp_st->items_inuse,
+	toKB(mp_st->obj_size * pm->inuse.level),
+	toKB(mp_st->obj_size * pm->inuse.hwater_level),
+	xpercent(pm->inuse.level, pm->alloc.level),
+    /* idle */
+	mp_st->items_idle,
+	toKB(mp_st->obj_size * pm->idle.level),
+	toKB(mp_st->obj_size * pm->idle.hwater_level),
+    /* saved */
+	pm->gb_saved.count,
+	xpercent(pm->gb_saved.count, AllMeter->gb_saved.count),
+	xpercent(pm->gb_saved.bytes, AllMeter->gb_saved.bytes),
+	xdiv(pm->gb_saved.count - pm->gb_osaved.count, xm_deltat));
+    pm->gb_osaved.count = pm->gb_saved.count;
+}
+
+void
+memReport(StoreEntry * e)
+{
+    static char buf[64];
+    static MemPoolStats mp_stats;
+    static MemPoolGlobalStats mp_total;
+    int not_used = 0;
+    MemPoolIterator *iter;
+    MemPool *pool;
+
+    /* caption */
+    storeAppendPrintf(e, "Current memory usage:\n");
+    /* heading */
+    storeAppendPrintf(e,
+	"Pool\t Obj Size\t"
+	"Chunks\t\t\t\t\t\t\t"
+	"Allocated\t\t\t\t\t"
+	"In Use\t\t\t\t"
+	"Idle\t\t\t"
+	"Allocations Saved\t\t\t"
+	"Hit Rate\t"
+	"\n"
+	" \t (bytes)\t"
+	"KB/ch\t obj/ch\t"
+	"(#)\t used\t free\t part\t %%Frag\t "
+	"(#)\t (KB)\t high (KB)\t high (hrs)\t %%Tot\t"
+	"(#)\t (KB)\t high (KB)\t %%alloc\t"
+	"(#)\t (KB)\t high (KB)\t"
+	"(#)\t %%cnt\t %%vol\t"
+	"(#) / sec\t"
+	"\n");
+    xm_deltat = current_dtime - xm_time;
+    xm_time = current_dtime;
+
+    /* Get stats for Totals report line */
+    memPoolGetGlobalStats(&mp_total);
+
+    /* main table */
+    iter = memPoolIterate();
+    while ((pool = memPoolIterateNext(iter))) {
+	memPoolGetStats(&mp_stats, pool);
+	if (!mp_stats.pool)	/* pool destroyed */
+	    continue;
+	if (mp_stats.pool->meter.gb_saved.count > 0)	/* this pool has been used */
+	    memPoolReport(&mp_stats, mp_total.TheMeter, e);
+	else
+	    not_used++;
+    }
+    memPoolIterateDone(&iter);
+
+    mp_stats.pool = NULL;
+    mp_stats.label = "Total";
+    mp_stats.meter = mp_total.TheMeter;
+    mp_stats.obj_size = 1;
+    mp_stats.chunk_capacity = 0;
+    mp_stats.chunk_size = 0;
+    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
+    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
+    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
+    mp_stats.chunks_free = mp_total.tot_chunks_free;
+    mp_stats.items_alloc = mp_total.tot_items_alloc;
+    mp_stats.items_inuse = mp_total.tot_items_inuse;
+    mp_stats.items_idle = mp_total.tot_items_idle;
+    mp_stats.overhead = mp_total.tot_overhead;
+
+    memPoolReport(&mp_stats, mp_total.TheMeter, e);
+
+    /* Cumulative */
+    storeAppendPrintf(e, "Cumulative allocated volume: %s\n", double_to_str(buf, 64, mp_total.TheMeter->gb_saved.bytes));
+    /* overhead */
+    storeAppendPrintf(e, "Current overhead: %d bytes (%.3f%%)\n",
+	mp_total.tot_overhead, xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level));
+    /* limits */
+    storeAppendPrintf(e, "Idle pool limit: %.2f MB\n", toMB(mp_total.mem_idle_limit));
+    /* limits */
+    storeAppendPrintf(e, "Total Pools created: %d\n", mp_total.tot_pools_alloc);
+    storeAppendPrintf(e, "Pools ever used:     %d (shown above)\n",mp_total.tot_pools_alloc - not_used);
+    storeAppendPrintf(e, "Currently in use:    %d\n", mp_total.tot_pools_inuse);
+}
Index: squid/src/protos.h
diff -u squid/src/protos.h:1.48 squid/src/protos.h:1.23.4.14
--- squid/src/protos.h:1.48	Wed Feb 27 01:16:52 2002
+++ squid/src/protos.h	Fri Mar  8 01:44:27 2002
@@ -824,14 +824,6 @@
 extern StatHistBinDumper statHistIntDumper;
 
 
-/* MemMeter */
-extern void memMeterSyncHWater(MemMeter * m);
-#define memMeterCheckHWater(m) { if ((m).hwater_level < (m).level) memMeterSyncHWater(&(m)); }
-#define memMeterInc(m) { (m).level++; memMeterCheckHWater(m); }
-#define memMeterDec(m) { (m).level--; }
-#define memMeterAdd(m, sz) { (m).level += (sz); memMeterCheckHWater(m); }
-#define memMeterDel(m, sz) { (m).level -= (sz); }
-
 /* mem */
 extern void memInit(void);
 extern void memClean(void);
@@ -849,23 +841,28 @@
 extern void memFreeBuf(size_t size, void *);
 extern FREE *memFreeBufFunc(size_t size);
 extern int memInUse(mem_type);
-extern size_t memTotalAllocated(void);
 extern void memDataInit(mem_type, const char *, size_t, int);
 extern void memCheckInit(void);
 
 /* MemPool */
 extern MemPool *memPoolCreate(const char *label, size_t obj_size);
-extern void memPoolDestroy(MemPool * pool);
 extern void *memPoolAlloc(MemPool * pool);
 extern void memPoolFree(MemPool * pool, void *obj);
-extern int memPoolWasUsed(const MemPool * pool);
-extern int memPoolInUseCount(const MemPool * pool);
-extern size_t memPoolInUseSize(const MemPool * pool);
-extern int memPoolUsedCount(const MemPool * pool);
-extern void memPoolReport(const MemPool * pool, StoreEntry * e);
+extern void memPoolDestroy(MemPool ** pool);
+extern MemPoolIterator * memPoolGetFirst(void);
+extern MemPool * memPoolGetNext(MemPoolIterator ** iter);
+extern void memPoolSetChunkSize(MemPool * pool, size_t chunksize);
+extern void memPoolSetIdleLimit(size_t new_idle_limit);
+extern int memPoolGetStats(MemPoolStats * stats, MemPool * pool);
+extern int memPoolGetGlobalStats(MemPoolGlobalStats * stats);
+extern void memPoolClean(time_t maxage);
 
 /* Mem */
 extern void memReport(StoreEntry * e);
+extern void memConfigure(void);
+extern void memPoolCleanIdlePools(void *unused);
+extern int memPoolInUseCount(MemPool * pool);
+extern int memPoolsTotalAllocated(void);
 
 extern int stmemFreeDataUpto(mem_hdr *, int);
 extern void stmemAppend(mem_hdr *, const char *, int);
@@ -1151,9 +1148,6 @@
 extern dlink_node *dlinkNodeNew(void);
 
 extern void kb_incr(kb_t *, size_t);
-extern double gb_to_double(const gb_t *);
-extern const char *gb_to_str(const gb_t *);
-extern void gb_flush(gb_t *);	/* internal, do not use this */
 extern int stringHasWhitespace(const char *);
 extern int stringHasCntl(const char *);
 extern void linklistPush(link_list **, void *);
Index: squid/src/squid.h
diff -u squid/src/squid.h:1.14 squid/src/squid.h:1.11.6.4
--- squid/src/squid.h:1.14	Mon Jan  7 20:55:09 2002
+++ squid/src/squid.h	Fri Mar  8 01:44:28 2002
@@ -406,14 +406,16 @@
 #include "hash.h"
 #include "rfc1035.h"
 
+
 #include "defines.h"
 #include "enums.h"
 #include "typedefs.h"
+#include "util.h"
+#include "MemPool.h"
 #include "structs.h"
 #include "protos.h"
 #include "globals.h"
 
-#include "util.h"
 
 #if !HAVE_TEMPNAM
 #include "tempnam.h"
Index: squid/src/stat.c
diff -u squid/src/stat.c:1.15 squid/src/stat.c:1.9.8.8
--- squid/src/stat.c:1.15	Wed Feb 27 01:16:52 2002
+++ squid/src/stat.c	Fri Mar  8 01:44:28 2002
@@ -571,10 +571,13 @@
 	mp.fordblks >> 10);
     t = mp.uordblks + mp.usmblks + mp.hblkhd;
     storeAppendPrintf(sentry, "\tTotal in use:          %6d KB %d%%\n",
-	t >> 10, percent(t, mp.arena));
+	t >> 10, percent(t, mp.arena + mp.hblkhd));
     t = mp.fsmblks + mp.fordblks;
     storeAppendPrintf(sentry, "\tTotal free:            %6d KB %d%%\n",
-	t >> 10, percent(t, mp.arena));
+	t >> 10, percent(t, mp.arena + mp.hblkhd));
+    t = mp.arena + mp.hblkhd;
+    storeAppendPrintf(sentry, "\tTotal size:            %6d KB\n",
+	t >> 10);
 #if HAVE_EXT_MALLINFO
     storeAppendPrintf(sentry, "\tmax size of small blocks:\t%d\n", mp.mxfast);
     storeAppendPrintf(sentry, "\tnumber of small blocks in a holding block:\t%d\n",
@@ -589,13 +592,25 @@
 #endif /* HAVE_EXT_MALLINFO */
 #endif /* HAVE_MALLINFO */
     storeAppendPrintf(sentry, "Memory accounted for:\n");
+#if !(HAVE_MSTATS && HAVE_GNUMALLOC_H) && HAVE_MALLINFO && HAVE_STRUCT_MALLINFO
+    storeAppendPrintf(sentry, "\tTotal accounted:       %6d KB %3d%%\n",
+	statMemoryAccounted() >> 10, percent(statMemoryAccounted(), t));
+#else
     storeAppendPrintf(sentry, "\tTotal accounted:       %6d KB\n",
 	statMemoryAccounted() >> 10);
-    storeAppendPrintf(sentry, "\tmemPoolAlloc calls: %d\n",
-	mem_pool_alloc_calls);
-    storeAppendPrintf(sentry, "\tmemPoolFree calls: %d\n",
-	mem_pool_free_calls);
-
+#endif
+    {
+	MemPoolGlobalStats mp_stats;
+	memPoolGetGlobalStats(&mp_stats);
+	storeAppendPrintf(sentry, "\tmemPool accounted:     %6d KB %3d%%\n",
+	    mp_stats.TheMeter->alloc.level >> 10, percent(mp_stats.TheMeter->alloc.level, t));
+	storeAppendPrintf(sentry, "\tmemPool unaccounted:   %6d KB %3d%%\n",
+	    (t - mp_stats.TheMeter->alloc.level) >> 10, percent((t - mp_stats.TheMeter->alloc.level), t));
+	storeAppendPrintf(sentry, "\tmemPoolAlloc calls: %9.0f\n",
+	    mp_stats.TheMeter->gb_saved.count);
+	storeAppendPrintf(sentry, "\tmemPoolFree calls:  %9.0f\n",
+	    mp_stats.TheMeter->gb_freed.count);
+    }
     storeAppendPrintf(sentry, "File descriptor usage for %s:\n", appname);
     storeAppendPrintf(sentry, "\tMaximum number of file descriptors:   %4d\n",
 	Squid_MaxFD);
@@ -1520,10 +1535,10 @@
     GENGRAPH(cputime, "cputime", "CPU utilisation");
 }
 
+#endif /* STAT_GRAPHS */
+
 int
 statMemoryAccounted(void)
 {
-    memTotalAllocated();
+    return memPoolsTotalAllocated();
 }
-
-#endif /* STAT_GRAPHS */
Index: squid/src/structs.h
diff -u squid/src/structs.h:1.50 squid/src/structs.h:1.28.4.9
--- squid/src/structs.h:1.50	Wed Feb 27 01:16:53 2002
+++ squid/src/structs.h	Fri Mar  8 01:44:29 2002
@@ -1895,30 +1895,6 @@
     unsigned char key[MD5_DIGEST_CHARS];
 };
 
-/* object to track per-action memory usage (e.g. #idle objects) */
-struct _MemMeter {
-    ssize_t level;		/* current level (count or volume) */
-    ssize_t hwater_level;	/* high water mark */
-    time_t hwater_stamp;	/* timestamp of last high water mark change */
-};
-
-/* object to track per-pool memory usage (alloc = inuse+idle) */
-struct _MemPoolMeter {
-    MemMeter alloc;
-    MemMeter inuse;
-    MemMeter idle;
-    gb_t saved;
-    gb_t total;
-};
-
-/* a pool is a [growing] space for objects of the same size */
-struct _MemPool {
-    const char *label;
-    size_t obj_size;
-    Stack pstack;		/* stack for free pointers */
-    MemPoolMeter meter;
-};
-
 struct _ClientInfo {
     hash_link hash;		/* must be first */
     struct in_addr addr;
Index: squid/src/tools.c
diff -u squid/src/tools.c:1.19 squid/src/tools.c:1.11.8.5
--- squid/src/tools.c:1.19	Sat Nov 17 17:15:42 2001
+++ squid/src/tools.c	Fri Mar  8 01:44:29 2002
@@ -834,43 +834,6 @@
 }
 
 void
-gb_flush(gb_t * g)
-{
-    g->gb += (g->bytes >> 30);
-    g->bytes &= (1 << 30) - 1;
-}
-
-double
-gb_to_double(const gb_t * g)
-{
-    return ((double) g->gb) * ((double) (1 << 30)) + ((double) g->bytes);
-}
-
-const char *
-gb_to_str(const gb_t * g)
-{
-    /*
-     * it is often convenient to call gb_to_str several times for _one_ printf
-     */
-#define max_cc_calls 5
-    typedef char GbBuf[32];
-    static GbBuf bufs[max_cc_calls];
-    static int call_id = 0;
-    double value = gb_to_double(g);
-    char *buf = bufs[call_id++];
-    if (call_id >= max_cc_calls)
-	call_id = 0;
-    /* select format */
-    if (value < 1e9)
-	snprintf(buf, sizeof(GbBuf), "%.2f MB", value / 1e6);
-    else if (value < 1e12)
-	snprintf(buf, sizeof(GbBuf), "%.2f GB", value / 1e9);
-    else
-	snprintf(buf, sizeof(GbBuf), "%.2f TB", value / 1e12);
-    return buf;
-}
-
-void
 debugObj(int section, int level, const char *label, void *obj, ObjPackMethod pm)
 {
     MemBuf mb;
Index: squid/src/typedefs.h
diff -u squid/src/typedefs.h:1.25 squid/src/typedefs.h:1.17.8.8
--- squid/src/typedefs.h:1.25	Wed Oct 10 11:07:43 2001
+++ squid/src/typedefs.h	Fri Mar  8 01:44:29 2002
@@ -46,12 +46,6 @@
     size_t kb;
 } kb_t;
 
-typedef struct {
-    size_t count;
-    size_t bytes;
-    size_t gb;
-} gb_t;
-
 /*
  * grep '^struct' structs.h \
  * | perl -ne '($a,$b)=split;$c=$b;$c=~s/^_//; print "typedef struct $b $c;\n";'
@@ -163,9 +157,6 @@
 typedef struct _cacheSwap cacheSwap;
 typedef struct _StatHist StatHist;
 typedef struct _String String;
-typedef struct _MemMeter MemMeter;
-typedef struct _MemPoolMeter MemPoolMeter;
-typedef struct _MemPool MemPool;
 typedef struct _ClientInfo ClientInfo;
 typedef struct _cd_guess_stats cd_guess_stats;
 typedef struct _CacheDigest CacheDigest;
Index: squid/src/auth/basic/auth_basic.c
diff -u squid/src/auth/basic/auth_basic.c:1.17 squid/src/auth/basic/auth_basic.c:1.11.4.4
--- squid/src/auth/basic/auth_basic.c:1.17	Wed Oct 24 02:42:14 2001
+++ squid/src/auth/basic/auth_basic.c	Fri Mar  8 01:44:29 2002
@@ -120,7 +120,7 @@
 	helperFree(basicauthenticators);
     basicauthenticators = NULL;
     if (basic_data_pool) {
-	memPoolDestroy(basic_data_pool);
+	memPoolDestroy(&basic_data_pool);
 	basic_data_pool = NULL;
     }
     debug(29, 2) ("authBasicDone: Basic authentication Shutdown.\n");
Index: squid/src/auth/digest/auth_digest.c
diff -u squid/src/auth/digest/auth_digest.c:1.13 squid/src/auth/digest/auth_digest.c:1.5.8.4
--- squid/src/auth/digest/auth_digest.c:1.13	Sat Jan 12 19:21:39 2002
+++ squid/src/auth/digest/auth_digest.c	Fri Mar  8 01:44:31 2002
@@ -238,8 +238,7 @@
     }
     if (digest_nonce_pool) {
 	assert(memPoolInUseCount(digest_nonce_pool) == 0);
-	memPoolDestroy(digest_nonce_pool);
-	digest_nonce_pool = NULL;
+	memPoolDestroy(&digest_nonce_pool);
     }
     debug(29, 2) ("authenticateDigestNonceShutdown: Nonce cache shutdown\n");
 }
@@ -480,8 +479,7 @@
     }
     if (digest_user_pool) {
 	assert(memPoolInUseCount(digest_user_pool) == 0);
-	memPoolDestroy(digest_user_pool);
-	digest_user_pool = NULL;
+	memPoolDestroy(&digest_user_pool);
     }
 }
 
@@ -544,8 +542,7 @@
     /* No requests should be in progress when we get here */
     if (digest_request_pool) {
 	assert(memPoolInUseCount(digest_request_pool) == 0);
-	memPoolDestroy(digest_request_pool);
-	digest_request_pool = NULL;
+	memPoolDestroy(&digest_request_pool);
     }
 }
 
Index: squid/src/auth/ntlm/auth_ntlm.c
diff -u squid/src/auth/ntlm/auth_ntlm.c:1.19 squid/src/auth/ntlm/auth_ntlm.c:1.9.6.4
--- squid/src/auth/ntlm/auth_ntlm.c:1.19	Mon Feb 18 15:43:12 2002
+++ squid/src/auth/ntlm/auth_ntlm.c	Fri Mar  8 01:44:31 2002
@@ -109,18 +109,15 @@
     ntlmauthenticators = NULL;
     if (ntlm_helper_state_pool) {
 	assert(memPoolInUseCount(ntlm_helper_state_pool) == 0);
-	memPoolDestroy(ntlm_helper_state_pool);
-	ntlm_helper_state_pool = NULL;
+	memPoolDestroy(&ntlm_helper_state_pool);
     }
     if (ntlm_request_pool) {
 	assert(memPoolInUseCount(ntlm_request_pool) == 0);
-	memPoolDestroy(ntlm_request_pool);
-	ntlm_request_pool = NULL;
+	memPoolDestroy(&ntlm_request_pool);
     }
     if (ntlm_user_pool) {
 	assert(memPoolInUseCount(ntlm_user_pool) == 0);
-	memPoolDestroy(ntlm_user_pool);
-	ntlm_user_pool = NULL;
+	memPoolDestroy(&ntlm_user_pool);
     }
     debug(29, 2) ("authNTLMDone: NTLM authentication Shutdown.\n");
 }
Index: squid/src/fs/coss/store_dir_coss.c
diff -u squid/src/fs/coss/store_dir_coss.c:1.17 squid/src/fs/coss/store_dir_coss.c:1.11.8.4
--- squid/src/fs/coss/store_dir_coss.c:1.17	Tue Nov 13 14:19:34 2001
+++ squid/src/fs/coss/store_dir_coss.c	Fri Mar  8 01:44:34 2002
@@ -878,7 +878,8 @@
 static void
 storeCossDirDone(void)
 {
-    memPoolDestroy(coss_state_pool);
+    memPoolDestroy(&coss_state_pool);
+/* TODO: verify whether coss_index_pool should also be destroyed here: memPoolDestroy(&coss_index_pool); */
     coss_initialised = 0;
 }
 
Index: squid/src/fs/diskd/store_dir_diskd.c
diff -u squid/src/fs/diskd/store_dir_diskd.c:1.33 squid/src/fs/diskd/store_dir_diskd.c:1.21.6.6
--- squid/src/fs/diskd/store_dir_diskd.c:1.33	Tue Feb 19 18:48:29 2002
+++ squid/src/fs/diskd/store_dir_diskd.c	Fri Mar  8 01:44:34 2002
@@ -1945,7 +1945,7 @@
 static void
 storeDiskdDirDone(void)
 {
-    memPoolDestroy(diskd_state_pool);
+    memPoolDestroy(&diskd_state_pool);
     diskd_initialised = 0;
 }
 
Index: squid/src/fs/ufs/store_dir_ufs.c
diff -u squid/src/fs/ufs/store_dir_ufs.c:1.22 squid/src/fs/ufs/store_dir_ufs.c:1.16.6.4
--- squid/src/fs/ufs/store_dir_ufs.c:1.22	Tue Feb 19 18:48:29 2002
+++ squid/src/fs/ufs/store_dir_ufs.c	Fri Mar  8 01:44:34 2002
@@ -1669,7 +1669,7 @@
 static void
 storeUfsDirDone(void)
 {
-    memPoolDestroy(ufs_state_pool);
+    memPoolDestroy(&ufs_state_pool);
     ufs_initialised = 0;
 }
 
Index: squid/src/repl/lru/store_repl_lru.c
diff -u squid/src/repl/lru/store_repl_lru.c:1.9 squid/src/repl/lru/store_repl_lru.c:1.7.8.3
--- squid/src/repl/lru/store_repl_lru.c:1.9	Thu Aug 16 00:39:04 2001
+++ squid/src/repl/lru/store_repl_lru.c	Wed Aug 22 10:24:07 2001
@@ -281,8 +281,10 @@
     /* no arguments expected or understood */
     assert(!args);
     /* Initialize */
-    if (!lru_node_pool)
+    if (!lru_node_pool) {
 	lru_node_pool = memPoolCreate("LRU policy node", sizeof(LruNode));
+	memPoolSetChunkSize(lru_node_pool, 512 * 1024);
+    }
     /* Allocate the needed structures */
     lru_data = xcalloc(1, sizeof(*lru_data));
     policy = cbdataAlloc(RemovalPolicy);
squid-chunked_mempools-HEAD.new squid-chunked_mempools-HEAD differ: char 76, line 2