--------------------- PatchSet 934 Date: 2000/12/16 09:29:03 Author: adri Branch: modio Tag: (none) Log: the 'hash' fs type will be used to implement the next level of FS abstractions with. Right now it is simply a copy of src/fs/ufs/* with all the ufs references turned into hash references, but that will change.. Members: src/fs/hash/Makefile.in:1.1->1.1.2.1 src/fs/hash/store_dir_hash.c:1.1->1.1.2.1 src/fs/hash/store_hash.h:1.1->1.1.2.1 src/fs/hash/store_io_hash.c:1.1->1.1.2.1 --- /dev/null Wed Feb 14 00:45:56 2007 +++ squid/src/fs/hash/Makefile.in Wed Feb 14 00:47:18 2007 @@ -0,0 +1,56 @@ +# +# Makefile for the hash storage driver for the Squid Object Cache server +# +# $Id: Makefile.in,v 1.1.2.1 2000/12/16 09:29:03 adri Exp $ +# + +FS = hash + +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ + +CC = @CC@ +MAKEDEPEND = @MAKEDEPEND@ +AR_R = @AR_R@ +RANLIB = @RANLIB@ +AC_CFLAGS = @CFLAGS@ +SHELL = /bin/sh + +INCLUDE = -I../../../include -I$(top_srcdir)/include -I$(top_srcdir)/src/ +CFLAGS = $(AC_CFLAGS) $(INCLUDE) $(DEFINES) + +OUT = ../$(FS).a + +OBJS = \ + store_dir_hash.o \ + store_io_hash.o + + +all: $(OUT) + +$(OUT): $(OBJS) + @rm -f ../stamp + $(AR_R) $(OUT) $(OBJS) + $(RANLIB) $(OUT) + +$(OBJS): $(top_srcdir)/include/version.h ../../../include/autoconf.h + +.c.o: + @rm -f ../stamp + $(CC) $(CFLAGS) -c $< + +clean: + -rm -rf *.o *pure_* core ../$(FS).a + +distclean: clean + -rm -f Makefile + -rm -f Makefile.bak + -rm -f tags + +install: + +tags: + ctags *.[ch] $(top_srcdir)/src/*.[ch] $(top_srcdir)/include/*.h $(top_srcdir)/lib/*.[ch] + +depend: + $(MAKEDEPEND) $(INCLUDE) -fMakefile *.c --- /dev/null Wed Feb 14 00:45:56 2007 +++ squid/src/fs/hash/store_dir_hash.c Wed Feb 14 00:47:18 2007 @@ -0,0 +1,1698 @@ + +/* + * $Id: store_dir_hash.c,v 1.1.2.1 2000/12/16 09:29:03 adri Exp $ + * + * DEBUG: section 47 Store Directory Routines + * AUTHOR: Duane Wessels + * + * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ + * ---------------------------------------------------------- + * + * Squid is the result of efforts by numerous individuals from the + * Internet community. Development is led by Duane Wessels of the + * National Laboratory for Applied Network Research and funded by the + * National Science Foundation. Squid is Copyrighted (C) 1998 by + * Duane Wessels and the University of California San Diego. Please + * see the COPYRIGHT file for full details. Squid incorporates + * software developed and/or copyrighted by other sources. Please see + * the CREDITS file for full details. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. 
+ * + */ + +#include "squid.h" +#if HAVE_STATVFS +#if HAVE_SYS_STATVFS_H +#include +#endif +#endif + +#include "store_hash.h" + +#define DefaultLevelOneDirs 16 +#define DefaultLevelTwoDirs 256 +#define STORE_META_BHASHZ 4096 + +typedef struct _RebuildState RebuildState; +struct _RebuildState { + SwapDir *sd; + int n_read; + FILE *log; + int speed; + int curlvl1; + int curlvl2; + struct { + unsigned int need_to_validate:1; + unsigned int clean:1; + unsigned int init:1; + } flags; + int done; + int in_dir; + int fn; + struct dirent *entry; + DIR *td; + char fullpath[SQUID_MAXPATHLEN]; + char fullfilename[SQUID_MAXPATHLEN]; + struct _store_rebuild_data counts; +}; + +static int n_hash_dirs = 0; +static int *hash_dir_index = NULL; +MemPool *hash_state_pool = NULL; +static int hash_initialised = 0; + +static char *storeHashDirSwapSubDir(SwapDir *, int subdirn); +static int storeHashDirCreateDirectory(const char *path, int); +static int storeHashDirVerifyCacheDirs(SwapDir *); +static int storeHashDirVerifyDirectory(const char *path); +static void storeHashDirCreateSwapSubDirs(SwapDir *); +static char *storeHashDirSwapLogFile(SwapDir *, const char *); +static EVH storeHashDirRebuildFromDirectory; +static EVH storeHashDirRebuildFromSwapLog; +static int storeHashDirGetNextFile(RebuildState *, int *sfileno, int *size); +static StoreEntry *storeHashDirAddDiskRestore(SwapDir * SD, const cache_key * key, + int file_number, + size_t swap_file_sz, + time_t expires, + time_t timestamp, + time_t lastref, + time_t lastmod, + u_num32 refcount, + u_short flags, + int clean); +static void storeHashDirRebuild(SwapDir * sd); +static void storeHashDirCloseTmpSwapLog(SwapDir * sd); +static FILE *storeHashDirOpenTmpSwapLog(SwapDir *, int *, int *); +static STLOGOPEN storeHashDirOpenSwapLog; +static STINIT storeHashDirInit; +static STFREE storeHashDirFree; +static STLOGCLEANSTART storeHashDirWriteCleanStart; +static STLOGCLEANNEXTENTRY storeHashDirCleanLogNextEntry; +static STLOGCLEANWRITE storeHashDirWriteCleanEntry; +static STLOGCLEANDONE storeHashDirWriteCleanDone; +static STLOGCLOSE storeHashDirCloseSwapLog; +static STLOGWRITE storeHashDirSwapLog; +static STNEWFS storeHashDirNewfs; +static STDUMP storeHashDirDump; +static STMAINTAINFS storeHashDirMaintain; +static STCHECKOBJ storeHashDirCheckObj; +static STREFOBJ storeHashDirRefObj; +static STUNREFOBJ storeHashDirUnrefObj; +static QS rev_int_sort; +static int storeHashDirClean(int swap_index); +static EVH storeHashDirCleanEvent; +static int storeHashDirIs(SwapDir * sd); +static int storeHashFilenoBelongsHere(int fn, int F0, int F1, int F2); +static int storeHashCleanupDoubleCheck(SwapDir *, StoreEntry *); +static void storeHashDirStats(SwapDir *, StoreEntry *); +static void storeHashDirInitBitmap(SwapDir *); +static int storeHashDirValidFileno(SwapDir *, sfileno, int); + +/* + * These functions were ripped straight out of the heart of store_dir.c. + * They assume that the given filenum is on a hash partiton, which may or + * may not be true.. + * XXX this evilness should be tidied up at a later date! 
+ */ + +int +storeHashDirMapBitTest(SwapDir * SD, int fn) +{ + sfileno filn = fn; + hashinfo_t *hashinfo; + hashinfo = (hashinfo_t *) SD->fsdata; + return file_map_bit_test(hashinfo->map, filn); +} + +void +storeHashDirMapBitSet(SwapDir * SD, int fn) +{ + sfileno filn = fn; + hashinfo_t *hashinfo; + hashinfo = (hashinfo_t *) SD->fsdata; + file_map_bit_set(hashinfo->map, filn); +} + +void +storeHashDirMapBitReset(SwapDir * SD, int fn) +{ + sfileno filn = fn; + hashinfo_t *hashinfo; + hashinfo = (hashinfo_t *) SD->fsdata; + /* + * We have to test the bit before calling file_map_bit_reset. + * file_map_bit_reset doesn't do bounds checking. It assumes + * filn is a valid file number, but it might not be because + * the map is dynamic in size. Also clearing an already clear + * bit puts the map counter of-of-whack. + */ + if (file_map_bit_test(hashinfo->map, filn)) + file_map_bit_reset(hashinfo->map, filn); +} + +int +storeHashDirMapBitAllocate(SwapDir * SD) +{ + hashinfo_t *hashinfo = (hashinfo_t *) SD->fsdata; + int fn; + fn = file_map_allocate(hashinfo->map, hashinfo->suggest); + file_map_bit_set(hashinfo->map, fn); + hashinfo->suggest = fn + 1; + return fn; +} + +/* + * Initialise the hash bitmap + * + * If there already is a bitmap, and the numobjects is larger than currently + * configured, we allocate a new bitmap and 'grow' the old one into it. + */ +static void +storeHashDirInitBitmap(SwapDir * sd) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + + if (hashinfo->map == NULL) { + /* First time */ + hashinfo->map = file_map_create(); + } else if (hashinfo->map->max_n_files) { + /* it grew, need to expand */ + /* XXX We don't need it anymore .. */ + } + /* else it shrunk, and we leave the old one in place */ +} + +static char * +storeHashDirSwapSubDir(SwapDir * sd, int subdirn) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + + LOCAL_ARRAY(char, fullfilename, SQUID_MAXPATHLEN); + assert(0 <= subdirn && subdirn < hashinfo->l1); + snprintf(fullfilename, SQUID_MAXPATHLEN, "%s/%02X", sd->path, subdirn); + return fullfilename; +} + +static int +storeHashDirCreateDirectory(const char *path, int should_exist) +{ + int created = 0; + struct stat st; + getCurrentTime(); + if (0 == stat(path, &st)) { + if (S_ISDIR(st.st_mode)) { + debug(20, should_exist ? 3 : 1) ("%s exists\n", path); + } else { + fatalf("Swap directory %s is not a directory.", path); + } + } else if (0 == mkdir(path, 0755)) { + debug(20, should_exist ? 1 : 3) ("%s created\n", path); + created = 1; + } else { + fatalf("Failed to make swap directory %s: %s", + path, xstrerror()); + } + return created; +} + +static int +storeHashDirVerifyDirectory(const char *path) +{ + struct stat sb; + if (stat(path, &sb) < 0) { + debug(20, 0) ("%s: %s\n", path, xstrerror()); + return -1; + } + if (S_ISDIR(sb.st_mode) == 0) { + debug(20, 0) ("%s is not a directory\n", path); + return -1; + } + return 0; +} + +/* + * This function is called by storeHashDirInit(). 
If this returns < 0, + * then Squid exits, complains about swap directories not + * existing, and instructs the admin to run 'squid -z' + */ +static int +storeHashDirVerifyCacheDirs(SwapDir * sd) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + int j; + const char *path = sd->path; + + if (storeHashDirVerifyDirectory(path) < 0) + return -1; + for (j = 0; j < hashinfo->l1; j++) { + path = storeHashDirSwapSubDir(sd, j); + if (storeHashDirVerifyDirectory(path) < 0) + return -1; + } + return 0; +} + +static void +storeHashDirCreateSwapSubDirs(SwapDir * sd) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + int i, k; + int should_exist; + LOCAL_ARRAY(char, name, MAXPATHLEN); + for (i = 0; i < hashinfo->l1; i++) { + snprintf(name, MAXPATHLEN, "%s/%02X", sd->path, i); + if (storeHashDirCreateDirectory(name, 0)) + should_exist = 0; + else + should_exist = 1; + debug(47, 1) ("Making directories in %s\n", name); + for (k = 0; k < hashinfo->l2; k++) { + snprintf(name, MAXPATHLEN, "%s/%02X/%02X", sd->path, i, k); + storeHashDirCreateDirectory(name, should_exist); + } + } +} + +static char * +storeHashDirSwapLogFile(SwapDir * sd, const char *ext) +{ + LOCAL_ARRAY(char, path, SQUID_MAXPATHLEN); + LOCAL_ARRAY(char, pathtmp, SQUID_MAXPATHLEN); + LOCAL_ARRAY(char, digit, 32); + char *pathtmp2; + if (Config.Log.swap) { + xstrncpy(pathtmp, sd->path, SQUID_MAXPATHLEN - 64); + while (index(pathtmp, '/')) + *index(pathtmp, '/') = '.'; + while (strlen(pathtmp) && pathtmp[strlen(pathtmp) - 1] == '.') + pathtmp[strlen(pathtmp) - 1] = '\0'; + for (pathtmp2 = pathtmp; *pathtmp2 == '.'; pathtmp2++); + snprintf(path, SQUID_MAXPATHLEN - 64, Config.Log.swap, pathtmp2); + if (strncmp(path, Config.Log.swap, SQUID_MAXPATHLEN - 64) == 0) { + strcat(path, "."); + snprintf(digit, 32, "%02d", sd->index); + strncat(path, digit, 3); + } + } else { + xstrncpy(path, sd->path, SQUID_MAXPATHLEN - 64); + strcat(path, "/swap.state"); + } + if (ext) + strncat(path, ext, 16); + return path; +} + +static void +storeHashDirOpenSwapLog(SwapDir * sd) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + char *path; + int fd; + path = storeHashDirSwapLogFile(sd, NULL); + fd = file_open(path, O_WRONLY | O_CREAT); + if (fd < 0) { + debug(50, 1) ("%s: %s\n", path, xstrerror()); + fatal("storeHashDirOpenSwapLog: Failed to open swap log."); + } + debug(47, 3) ("Cache Dir #%d log opened on FD %d\n", sd->index, fd); + hashinfo->swaplog_fd = fd; + if (0 == n_hash_dirs) + assert(NULL == hash_dir_index); + n_hash_dirs++; + assert(n_hash_dirs <= Config.cacheSwap.n_configured); +} + +static void +storeHashDirCloseSwapLog(SwapDir * sd) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + if (hashinfo->swaplog_fd < 0) /* not open */ + return; + file_close(hashinfo->swaplog_fd); + debug(47, 3) ("Cache Dir #%d log closed on FD %d\n", + sd->index, hashinfo->swaplog_fd); + hashinfo->swaplog_fd = -1; + n_hash_dirs--; + assert(n_hash_dirs >= 0); + if (0 == n_hash_dirs) + safe_free(hash_dir_index); +} + +static void +storeHashDirInit(SwapDir * sd) +{ + static int started_clean_event = 0; + static const char *errmsg = + "\tFailed to verify one of the swap directories, Check cache.log\n" + "\tfor details. 
Run 'squid -z' to create swap directories\n" + "\tif needed, or if running Squid for the first time."; + storeHashDirInitBitmap(sd); + if (storeHashDirVerifyCacheDirs(sd) < 0) + fatal(errmsg); + storeHashDirOpenSwapLog(sd); + storeHashDirRebuild(sd); + if (!started_clean_event) { + eventAdd("storeDirClean", storeHashDirCleanEvent, NULL, 15.0, 1); + started_clean_event = 1; + } +} + +static void +storeHashDirRebuildFromDirectory(void *data) +{ + RebuildState *rb = data; + SwapDir *SD = rb->sd; + LOCAL_ARRAY(char, hdr_buf, SM_PAGE_SIZE); + StoreEntry *e = NULL; + StoreEntry tmpe; + cache_key key[MD5_DIGEST_CHARS]; + int sfileno = 0; + int count; + int size; + struct stat sb; + int swap_hdr_len; + int fd = -1; + tlv *tlv_list; + tlv *t; + assert(rb != NULL); + debug(20, 3) ("storeHashDirRebuildFromDirectory: DIR #%d\n", rb->sd->index); + for (count = 0; count < rb->speed; count++) { + assert(fd == -1); + fd = storeHashDirGetNextFile(rb, &sfileno, &size); + if (fd == -2) { + debug(20, 1) ("Done scanning %s swaplog (%d entries)\n", + rb->sd->path, rb->n_read); + store_dirs_rebuilding--; + storeHashDirCloseTmpSwapLog(rb->sd); + storeRebuildComplete(&rb->counts); + cbdataFree(rb); + return; + } else if (fd < 0) { + continue; + } + assert(fd > -1); + /* lets get file stats here */ + if (fstat(fd, &sb) < 0) { + debug(20, 1) ("storeHashDirRebuildFromDirectory: fstat(FD %d): %s\n", + fd, xstrerror()); + file_close(fd); + store_open_disk_fd--; + fd = -1; + continue; + } + if ((++rb->counts.scancount & 0xFFFF) == 0) + debug(20, 3) (" %s %7d files opened so far.\n", + rb->sd->path, rb->counts.scancount); + debug(20, 9) ("file_in: fd=%d %08X\n", fd, sfileno); + statCounter.syscalls.disk.reads++; + if (read(fd, hdr_buf, SM_PAGE_SIZE) < 0) { + debug(20, 1) ("storeHashDirRebuildFromDirectory: read(FD %d): %s\n", + fd, xstrerror()); + file_close(fd); + store_open_disk_fd--; + fd = -1; + continue; + } + file_close(fd); + store_open_disk_fd--; + fd = -1; + swap_hdr_len = 0; +#if USE_TRUNCATE + if (sb.st_size == 0) + continue; +#endif + tlv_list = storeSwapMetaUnpack(hdr_buf, &swap_hdr_len); + if (tlv_list == NULL) { + debug(20, 1) ("storeHashDirRebuildFromDirectory: failed to get meta data\n"); + /* XXX shouldn't this be a call to storeHashUnlink ? 
*/ + storeHashDirUnlinkFile(SD, sfileno); + continue; + } + debug(20, 3) ("storeHashDirRebuildFromDirectory: successful swap meta unpacking\n"); + memset(key, '\0', MD5_DIGEST_CHARS); + memset(&tmpe, '\0', sizeof(StoreEntry)); + for (t = tlv_list; t; t = t->next) { + switch (t->type) { + case STORE_META_KEY: + assert(t->length == MD5_DIGEST_CHARS); + xmemcpy(key, t->value, MD5_DIGEST_CHARS); + break; + case STORE_META_STD: + assert(t->length == STORE_HDR_METASIZE); + xmemcpy(&tmpe.timestamp, t->value, STORE_HDR_METASIZE); + break; + default: + break; + } + } + storeSwapTLVFree(tlv_list); + tlv_list = NULL; + if (storeKeyNull(key)) { + debug(20, 1) ("storeHashDirRebuildFromDirectory: NULL key\n"); + storeHashDirUnlinkFile(SD, sfileno); + continue; + } + tmpe.hash.key = key; + /* check sizes */ + if (tmpe.swap_file_sz == 0) { + tmpe.swap_file_sz = sb.st_size; + } else if (tmpe.swap_file_sz == sb.st_size - swap_hdr_len) { + tmpe.swap_file_sz = sb.st_size; + } else if (tmpe.swap_file_sz != sb.st_size) { + debug(20, 1) ("storeHashDirRebuildFromDirectory: SIZE MISMATCH %d!=%d\n", + tmpe.swap_file_sz, (int) sb.st_size); + storeHashDirUnlinkFile(SD, sfileno); + continue; + } + if (EBIT_TEST(tmpe.flags, KEY_PRIVATE)) { + storeHashDirUnlinkFile(SD, sfileno); + rb->counts.badflags++; + continue; + } + e = storeGet(key); + if (e && e->lastref >= tmpe.lastref) { + /* key already exists, current entry is newer */ + /* keep old, ignore new */ + rb->counts.dupcount++; + continue; + } else if (NULL != e) { + /* URL already exists, this swapfile not being used */ + /* junk old, load new */ + storeRelease(e); /* release old entry */ + rb->counts.dupcount++; + } + rb->counts.objcount++; + storeEntryDump(&tmpe, 5); + e = storeHashDirAddDiskRestore(SD, key, + sfileno, + tmpe.swap_file_sz, + tmpe.expires, + tmpe.timestamp, + tmpe.lastref, + tmpe.lastmod, + tmpe.refcount, /* refcount */ + tmpe.flags, /* flags */ + (int) rb->flags.clean); + storeDirSwapLog(e, SWAP_LOG_ADD); + } + eventAdd("storeRebuild", storeHashDirRebuildFromDirectory, rb, 0.0, 1); +} + +static void +storeHashDirRebuildFromSwapLog(void *data) +{ + RebuildState *rb = data; + SwapDir *SD = rb->sd; + StoreEntry *e = NULL; + storeSwapLogData s; + size_t ss = sizeof(storeSwapLogData); + int count; + int used; /* is swapfile already in use? */ + int disk_entry_newer; /* is the log entry newer than current entry? */ + double x; + assert(rb != NULL); + /* load a number of objects per invocation */ + for (count = 0; count < rb->speed; count++) { + if (fread(&s, ss, 1, rb->log) != 1) { + debug(20, 1) ("Done reading %s swaplog (%d entries)\n", + rb->sd->path, rb->n_read); + fclose(rb->log); + rb->log = NULL; + store_dirs_rebuilding--; + storeHashDirCloseTmpSwapLog(rb->sd); + storeRebuildComplete(&rb->counts); + cbdataFree(rb); + return; + } + rb->n_read++; + if (s.op <= SWAP_LOG_NOP) + continue; + if (s.op >= SWAP_LOG_MAX) + continue; + /* + * BC: during 2.4 development, we changed the way swap file + * numbers are assigned and stored. The high 16 bits used + * to encode the SD index number. There used to be a call + * to storeDirProperFileno here that re-assigned the index + * bits. Now, for backwards compatibility, we just need + * to mask it off. 
+ */ + s.swap_filen &= 0x00FFFFFF; + debug(20, 3) ("storeHashDirRebuildFromSwapLog: %s %s %08X\n", + swap_log_op_str[(int) s.op], + storeKeyText(s.key), + s.swap_filen); + if (s.op == SWAP_LOG_ADD) { + (void) 0; + } else if (s.op == SWAP_LOG_DEL) { + if ((e = storeGet(s.key)) != NULL) { + /* + * Make sure we don't unlink the file, it might be + * in use by a subsequent entry. Also note that + * we don't have to subtract from store_swap_size + * because adding to store_swap_size happens in + * the cleanup procedure. + */ + storeExpireNow(e); + storeReleaseRequest(e); + storeHashDirReplRemove(e); + if (e->swap_filen > -1) { + storeHashDirMapBitReset(SD, e->swap_filen); + e->swap_filen = -1; + e->swap_dirn = -1; + } + storeRelease(e); + rb->counts.objcount--; + rb->counts.cancelcount++; + } + continue; + } else { + x = log(++rb->counts.bad_log_op) / log(10.0); + if (0.0 == x - (double) (int) x) + debug(20, 1) ("WARNING: %d invalid swap log entries found\n", + rb->counts.bad_log_op); + rb->counts.invalid++; + continue; + } + if ((++rb->counts.scancount & 0xFFF) == 0) { + struct stat sb; + if (0 == fstat(fileno(rb->log), &sb)) + storeRebuildProgress(SD->index, + (int) sb.st_size / ss, rb->n_read); + } + if (!storeHashDirValidFileno(SD, s.swap_filen, 0)) { + rb->counts.invalid++; + continue; + } + if (EBIT_TEST(s.flags, KEY_PRIVATE)) { + rb->counts.badflags++; + continue; + } + e = storeGet(s.key); + used = storeHashDirMapBitTest(SD, s.swap_filen); + /* If this URL already exists in the cache, does the swap log + * appear to have a newer entry? Compare 'lastref' from the + * swap log to e->lastref. */ + disk_entry_newer = e ? (s.lastref > e->lastref ? 1 : 0) : 0; + if (used && !disk_entry_newer) { + /* log entry is old, ignore it */ + rb->counts.clashcount++; + continue; + } else if (used && e && e->swap_filen == s.swap_filen && e->swap_dirn == SD->index) { + /* swapfile taken, same URL, newer, update meta */ + if (e->store_status == STORE_OK) { + e->lastref = s.timestamp; + e->timestamp = s.timestamp; + e->expires = s.expires; + e->lastmod = s.lastmod; + e->flags = s.flags; + e->refcount += s.refcount; + storeHashDirUnrefObj(SD, e); + } else { + debug_trap("storeHashDirRebuildFromSwapLog: bad condition"); + debug(20, 1) ("\tSee %s:%d\n", __FILE__, __LINE__); + } + continue; + } else if (used) { + /* swapfile in use, not by this URL, log entry is newer */ + /* This is sorta bad: the log entry should NOT be newer at this + * point. If the log is dirty, the filesize check should have + * caught this. If the log is clean, there should never be a + * newer entry. */ + debug(20, 1) ("WARNING: newer swaplog entry for dirno %d, fileno %08X\n", + SD->index, s.swap_filen); + /* I'm tempted to remove the swapfile here just to be safe, + * but there is a bad race condition in the NOVM version if + * the swapfile has recently been opened for writing, but + * not yet opened for reading. Because we can't map + * swapfiles back to StoreEntrys, we don't know the state + * of the entry using that file. */ + /* We'll assume the existing entry is valid, probably because + * were in a slow rebuild and the the swap file number got taken + * and the validation procedure hasn't run. 
*/ + assert(rb->flags.need_to_validate); + rb->counts.clashcount++; + continue; + } else if (e && !disk_entry_newer) { + /* key already exists, current entry is newer */ + /* keep old, ignore new */ + rb->counts.dupcount++; + continue; + } else if (e) { + /* key already exists, this swapfile not being used */ + /* junk old, load new */ + storeExpireNow(e); + storeReleaseRequest(e); + storeHashDirReplRemove(e); + if (e->swap_filen > -1) { + /* Make sure we don't actually unlink the file */ + storeHashDirMapBitReset(SD, e->swap_filen); + e->swap_filen = -1; + e->swap_dirn = -1; + } + storeRelease(e); + rb->counts.dupcount++; + } else { + /* URL doesnt exist, swapfile not in use */ + /* load new */ + (void) 0; + } + /* update store_swap_size */ + rb->counts.objcount++; + e = storeHashDirAddDiskRestore(SD, s.key, + s.swap_filen, + s.swap_file_sz, + s.expires, + s.timestamp, + s.lastref, + s.lastmod, + s.refcount, + s.flags, + (int) rb->flags.clean); + storeDirSwapLog(e, SWAP_LOG_ADD); + } + eventAdd("storeRebuild", storeHashDirRebuildFromSwapLog, rb, 0.0, 1); +} + +static int +storeHashDirGetNextFile(RebuildState * rb, int *sfileno, int *size) +{ + SwapDir *SD = rb->sd; + hashinfo_t *hashinfo = (hashinfo_t *) SD->fsdata; + int fd = -1; + int used = 0; + int dirs_opened = 0; + debug(20, 3) ("storeHashDirGetNextFile: flag=%d, %d: /%02X/%02X\n", + rb->flags.init, + rb->sd->index, + rb->curlvl1, rb->curlvl2); + if (rb->done) + return -2; + while (fd < 0 && rb->done == 0) { + fd = -1; + if (0 == rb->flags.init) { /* initialize, open first file */ + rb->done = 0; + rb->curlvl1 = 0; + rb->curlvl2 = 0; + rb->in_dir = 0; + rb->flags.init = 1; + assert(Config.cacheSwap.n_configured > 0); + } + if (0 == rb->in_dir) { /* we need to read in a new directory */ + snprintf(rb->fullpath, SQUID_MAXPATHLEN, "%s/%02X/%02X", + rb->sd->path, + rb->curlvl1, + rb->curlvl2); + if (rb->flags.init && rb->td != NULL) + closedir(rb->td); + rb->td = NULL; + if (dirs_opened) + return -1; + rb->td = opendir(rb->fullpath); + dirs_opened++; + if (rb->td == NULL) { + debug(50, 1) ("storeHashDirGetNextFile: opendir: %s: %s\n", + rb->fullpath, xstrerror()); + } else { + rb->entry = readdir(rb->td); /* skip . and .. 
*/ + rb->entry = readdir(rb->td); + if (rb->entry == NULL && errno == ENOENT) + debug(20, 1) ("storeHashDirGetNextFile: directory does not exist!.\n"); + debug(20, 3) ("storeHashDirGetNextFile: Directory %s\n", rb->fullpath); + } + } + if (rb->td != NULL && (rb->entry = readdir(rb->td)) != NULL) { + rb->in_dir++; + if (sscanf(rb->entry->d_name, "%x", &rb->fn) != 1) { + debug(20, 3) ("storeHashDirGetNextFile: invalid %s\n", + rb->entry->d_name); + continue; + } + if (!storeHashFilenoBelongsHere(rb->fn, rb->sd->index, rb->curlvl1, rb->curlvl2)) { + debug(20, 3) ("storeHashDirGetNextFile: %08X does not belong in %d/%d/%d\n", + rb->fn, rb->sd->index, rb->curlvl1, rb->curlvl2); + continue; + } + used = storeHashDirMapBitTest(SD, rb->fn); + if (used) { + debug(20, 3) ("storeHashDirGetNextFile: Locked, continuing with next.\n"); + continue; + } + snprintf(rb->fullfilename, SQUID_MAXPATHLEN, "%s/%s", + rb->fullpath, rb->entry->d_name); + debug(20, 3) ("storeHashDirGetNextFile: Opening %s\n", rb->fullfilename); + fd = file_open(rb->fullfilename, O_RDONLY); + if (fd < 0) + debug(50, 1) ("storeHashDirGetNextFile: %s: %s\n", rb->fullfilename, xstrerror()); + else + store_open_disk_fd++; + continue; + } + rb->in_dir = 0; + if (++rb->curlvl2 < hashinfo->l2) + continue; + rb->curlvl2 = 0; + if (++rb->curlvl1 < hashinfo->l1) + continue; + rb->curlvl1 = 0; + rb->done = 1; + } + *sfileno = rb->fn; + return fd; +} + +/* Add a new object to the cache with empty memory copy and pointer to disk + * use to rebuild store from disk. */ +static StoreEntry * +storeHashDirAddDiskRestore(SwapDir * SD, const cache_key * key, + int file_number, + size_t swap_file_sz, + time_t expires, + time_t timestamp, + time_t lastref, + time_t lastmod, + u_num32 refcount, + u_short flags, + int clean) +{ + StoreEntry *e = NULL; + debug(20, 5) ("storeHashAddDiskRestore: %s, fileno=%08X\n", storeKeyText(key), file_number); + /* if you call this you'd better be sure file_number is not + * already in use! */ + e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL, NULL); + e->store_status = STORE_OK; + storeSetMemStatus(e, NOT_IN_MEMORY); + e->swap_status = SWAPOUT_DONE; + e->swap_filen = file_number; + e->swap_dirn = SD->index; + e->swap_file_sz = swap_file_sz; + e->lock_count = 0; + e->lastref = lastref; + e->timestamp = timestamp; + e->expires = expires; + e->lastmod = lastmod; + e->refcount = refcount; + e->flags = flags; + EBIT_SET(e->flags, ENTRY_CACHABLE); + EBIT_CLR(e->flags, RELEASE_REQUEST); + EBIT_CLR(e->flags, KEY_PRIVATE); + e->ping_status = PING_NONE; + EBIT_CLR(e->flags, ENTRY_VALIDATED); + storeHashDirMapBitSet(SD, e->swap_filen); + storeHashInsert(e, key); /* do it after we clear KEY_PRIVATE */ + storeHashDirReplAdd(SD, e); + return e; +} + +static void +storeHashDirRebuild(SwapDir * sd) +{ + RebuildState *rb = xcalloc(1, sizeof(*rb)); + int clean = 0; + int zero = 0; + FILE *fp; + EVH *func = NULL; + rb->sd = sd; + rb->speed = opt_foreground_rebuild ? 1 << 30 : 50; + /* + * If the swap.state file exists in the cache_dir, then + * we'll use storeHashDirRebuildFromSwapLog(), otherwise we'll + * use storeHashDirRebuildFromDirectory() to open up each file + * and suck in the meta data. 
+ */ + fp = storeHashDirOpenTmpSwapLog(sd, &clean, &zero); + if (fp == NULL || zero) { + if (fp != NULL) + fclose(fp); + func = storeHashDirRebuildFromDirectory; + } else { + func = storeHashDirRebuildFromSwapLog; + rb->log = fp; + rb->flags.clean = (unsigned int) clean; + } + if (!clean) + rb->flags.need_to_validate = 1; + debug(20, 1) ("Rebuilding storage in %s (%s)\n", + sd->path, clean ? "CLEAN" : "DIRTY"); + store_dirs_rebuilding++; + cbdataAdd(rb, cbdataXfree, 0); + eventAdd("storeRebuild", func, rb, 0.0, 1); +} + +static void +storeHashDirCloseTmpSwapLog(SwapDir * sd) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + char *swaplog_path = xstrdup(storeHashDirSwapLogFile(sd, NULL)); + char *new_path = xstrdup(storeHashDirSwapLogFile(sd, ".new")); + int fd; + file_close(hashinfo->swaplog_fd); +#ifdef _SQUID_OS2_ + if (unlink(swaplog_path) < 0) { + debug(50, 0) ("%s: %s\n", swaplog_path, xstrerror()); + fatal("storeHashDirCloseTmpSwapLog: unlink failed"); + } +#endif + if (xrename(new_path, swaplog_path) < 0) { + fatal("storeHashDirCloseTmpSwapLog: rename failed"); + } + fd = file_open(swaplog_path, O_WRONLY | O_CREAT); + if (fd < 0) { + debug(50, 1) ("%s: %s\n", swaplog_path, xstrerror()); + fatal("storeHashDirCloseTmpSwapLog: Failed to open swap log."); + } + safe_free(swaplog_path); + safe_free(new_path); + hashinfo->swaplog_fd = fd; + debug(47, 3) ("Cache Dir #%d log opened on FD %d\n", sd->index, fd); +} + +static FILE * +storeHashDirOpenTmpSwapLog(SwapDir * sd, int *clean_flag, int *zero_flag) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + char *swaplog_path = xstrdup(storeHashDirSwapLogFile(sd, NULL)); + char *clean_path = xstrdup(storeHashDirSwapLogFile(sd, ".last-clean")); + char *new_path = xstrdup(storeHashDirSwapLogFile(sd, ".new")); + struct stat log_sb; + struct stat clean_sb; + FILE *fp; + int fd; + if (stat(swaplog_path, &log_sb) < 0) { + debug(47, 1) ("Cache Dir #%d: No log file\n", sd->index); + safe_free(swaplog_path); + safe_free(clean_path); + safe_free(new_path); + return NULL; + } + *zero_flag = log_sb.st_size == 0 ? 1 : 0; + /* close the existing write-only FD */ + if (hashinfo->swaplog_fd >= 0) + file_close(hashinfo->swaplog_fd); + /* open a write-only FD for the new log */ + fd = file_open(new_path, O_WRONLY | O_CREAT | O_TRUNC); + if (fd < 0) { + debug(50, 1) ("%s: %s\n", new_path, xstrerror()); + fatal("storeDirOpenTmpSwapLog: Failed to open swap log."); + } + hashinfo->swaplog_fd = fd; + /* open a read-only stream of the old log */ + fp = fopen(swaplog_path, "r"); + if (fp == NULL) { + debug(50, 0) ("%s: %s\n", swaplog_path, xstrerror()); + fatal("Failed to open swap log for reading"); + } + memset(&clean_sb, '\0', sizeof(struct stat)); + if (stat(clean_path, &clean_sb) < 0) + *clean_flag = 0; + else if (clean_sb.st_mtime < log_sb.st_mtime) + *clean_flag = 0; + else + *clean_flag = 1; + safeunlink(clean_path, 1); + safe_free(swaplog_path); + safe_free(clean_path); + safe_free(new_path); + return fp; +} + +struct _clean_state { + char *cur; + char *new; + char *cln; + char *outbuf; + off_t outbuf_offset; + int fd; + RemovalPolicyWalker *walker; +}; + +#define CLEAN_BUF_SZ 16384 +/* + * Begin the process to write clean cache state. For HASH this means + * opening some log files and allocating write buffers. Return 0 if + * we succeed, and assign the 'func' and 'data' return pointers. 
+ */ +static int +storeHashDirWriteCleanStart(SwapDir * sd) +{ + struct _clean_state *state = xcalloc(1, sizeof(*state)); + struct stat sb; + sd->log.clean.write = NULL; + sd->log.clean.state = NULL; + state->new = xstrdup(storeHashDirSwapLogFile(sd, ".clean")); + state->fd = file_open(state->new, O_WRONLY | O_CREAT | O_TRUNC); + if (state->fd < 0) { + xfree(state->new); + xfree(state); + return -1; + } + state->cur = xstrdup(storeHashDirSwapLogFile(sd, NULL)); + state->cln = xstrdup(storeHashDirSwapLogFile(sd, ".last-clean")); + state->outbuf = xcalloc(CLEAN_BUF_SZ, 1); + state->outbuf_offset = 0; + state->walker = sd->repl->WalkInit(sd->repl); + unlink(state->new); + unlink(state->cln); + debug(20, 3) ("storeDirWriteCleanLogs: opened %s, FD %d\n", + state->new, state->fd); +#if HAVE_FCHMOD + if (stat(state->cur, &sb) == 0) + fchmod(state->fd, sb.st_mode); +#endif + sd->log.clean.write = storeHashDirWriteCleanEntry; + sd->log.clean.state = state; + return 0; +} + +/* + * Get the next entry that is a candidate for clean log writing + */ +const StoreEntry * +storeHashDirCleanLogNextEntry(SwapDir * sd) +{ + const StoreEntry *entry = NULL; + struct _clean_state *state = sd->log.clean.state; + if (state->walker) + entry = state->walker->Next(state->walker); + return entry; +} + +/* + * "write" an entry to the clean log file. + */ +static void +storeHashDirWriteCleanEntry(SwapDir * sd, const StoreEntry * e) +{ + storeSwapLogData s; + static size_t ss = sizeof(storeSwapLogData); + struct _clean_state *state = sd->log.clean.state; + memset(&s, '\0', ss); + s.op = (char) SWAP_LOG_ADD; + s.swap_filen = e->swap_filen; + s.timestamp = e->timestamp; + s.lastref = e->lastref; + s.expires = e->expires; + s.lastmod = e->lastmod; + s.swap_file_sz = e->swap_file_sz; + s.refcount = e->refcount; + s.flags = e->flags; + xmemcpy(&s.key, e->hash.key, MD5_DIGEST_CHARS); + xmemcpy(state->outbuf + state->outbuf_offset, &s, ss); + state->outbuf_offset += ss; + /* buffered write */ + if (state->outbuf_offset + ss > CLEAN_BUF_SZ) { + if (write(state->fd, state->outbuf, state->outbuf_offset) < 0) { + debug(50, 0) ("storeDirWriteCleanLogs: %s: write: %s\n", + state->new, xstrerror()); + debug(20, 0) ("storeDirWriteCleanLogs: Current swap logfile not replaced.\n"); + file_close(state->fd); + state->fd = -1; + unlink(state->new); + safe_free(state); + sd->log.clean.state = NULL; + sd->log.clean.write = NULL; + } + state->outbuf_offset = 0; + } +} + +static void +storeHashDirWriteCleanDone(SwapDir * sd) +{ + struct _clean_state *state = sd->log.clean.state; + if (NULL == state) + return; + if (state->fd < 0) + return; + state->walker->Done(state->walker); + if (write(state->fd, state->outbuf, state->outbuf_offset) < 0) { + debug(50, 0) ("storeDirWriteCleanLogs: %s: write: %s\n", + state->new, xstrerror()); + debug(20, 0) ("storeDirWriteCleanLogs: Current swap logfile " + "not replaced.\n"); + file_close(state->fd); + state->fd = -1; + unlink(state->new); + } + safe_free(state->outbuf); + /* + * You can't rename open files on Microsoft "operating systems" + * so we have to close before renaming. 
+ */ + storeHashDirCloseSwapLog(sd); + /* rename */ + if (state->fd >= 0) { +#ifdef _SQUID_OS2_ + file_close(state->fd); + state->fd = -1; + if (unlink(cur) < 0) + debug(50, 0) ("storeDirWriteCleanLogs: unlinkd failed: %s, %s\n", + xstrerror(), cur); +#endif + xrename(state->new, state->cur); + } + /* touch a timestamp file if we're not still validating */ + if (store_dirs_rebuilding) + (void) 0; + else if (state->fd < 0) + (void) 0; + else + file_close(file_open(state->cln, O_WRONLY | O_CREAT | O_TRUNC)); + /* close */ + safe_free(state->cur); + safe_free(state->new); + safe_free(state->cln); + if (state->fd >= 0) + file_close(state->fd); + state->fd = -1; + safe_free(state); + sd->log.clean.state = NULL; + sd->log.clean.write = NULL; +} + +static void +storeSwapLogDataFree(void *s) +{ + memFree(s, MEM_SWAP_LOG_DATA); +} + +static void +storeHashDirSwapLog(const SwapDir * sd, const StoreEntry * e, int op) +{ + hashinfo_t *hashinfo = (hashinfo_t *) sd->fsdata; + storeSwapLogData *s = memAllocate(MEM_SWAP_LOG_DATA); + s->op = (char) op; + s->swap_filen = e->swap_filen; + s->timestamp = e->timestamp; + s->lastref = e->lastref; + s->expires = e->expires; + s->lastmod = e->lastmod; + s->swap_file_sz = e->swap_file_sz; + s->refcount = e->refcount; + s->flags = e->flags; + xmemcpy(s->key, e->hash.key, MD5_DIGEST_CHARS); + file_write(hashinfo->swaplog_fd, + -1, + s, + sizeof(storeSwapLogData), + NULL, + NULL, + (FREE *) storeSwapLogDataFree); +} + +static void +storeHashDirNewfs(SwapDir * sd) +{ + debug(47, 3) ("Creating swap space in %s\n", sd->path); + storeHashDirCreateDirectory(sd->path, 0); + storeHashDirCreateSwapSubDirs(sd); +} + +static int +rev_int_sort(const void *A, const void *B) +{ + const int *i1 = A; + const int *i2 = B; + return *i2 - *i1; +} + +static int +storeHashDirClean(int swap_index) +{ + DIR *dp = NULL; + struct dirent *de = NULL; + LOCAL_ARRAY(char, p1, MAXPATHLEN + 1); + LOCAL_ARRAY(char, p2, MAXPATHLEN + 1); +#if USE_TRUNCATE + struct stat sb; +#endif + int files[20]; + int swapfileno; + int fn; /* same as swapfileno, but with dirn bits set */ + int n = 0; + int k = 0; + int N0, N1, N2; + int D0, D1, D2; + SwapDir *SD; + hashinfo_t *hashinfo; + N0 = n_hash_dirs; + D0 = hash_dir_index[swap_index % N0]; + SD = &Config.cacheSwap.swapDirs[D0]; + hashinfo = (hashinfo_t *) SD->fsdata; + N1 = hashinfo->l1; + D1 = (swap_index / N0) % N1; + N2 = hashinfo->l2; + D2 = ((swap_index / N0) / N1) % N2; + snprintf(p1, SQUID_MAXPATHLEN, "%s/%02X/%02X", + Config.cacheSwap.swapDirs[D0].path, D1, D2); + debug(36, 3) ("storeDirClean: Cleaning directory %s\n", p1); + dp = opendir(p1); + if (dp == NULL) { + if (errno == ENOENT) { + debug(36, 0) ("storeDirClean: WARNING: Creating %s\n", p1); + if (mkdir(p1, 0777) == 0) + return 0; + } + debug(50, 0) ("storeDirClean: %s: %s\n", p1, xstrerror()); + safeunlink(p1, 1); + return 0; + } + while ((de = readdir(dp)) != NULL && k < 20) { + if (sscanf(de->d_name, "%X", &swapfileno) != 1) + continue; + fn = swapfileno; /* XXX should remove this cruft ! 
*/ + if (storeHashDirValidFileno(SD, fn, 1)) + if (storeHashDirMapBitTest(SD, fn)) + if (storeHashFilenoBelongsHere(fn, D0, D1, D2)) + continue; +#if USE_TRUNCATE + if (!stat(de->d_name, &sb)) + if (sb.st_size == 0) + continue; +#endif + files[k++] = swapfileno; + } + closedir(dp); + if (k == 0) + return 0; + qsort(files, k, sizeof(int), rev_int_sort); + if (k > 10) + k = 10; + for (n = 0; n < k; n++) { + debug(36, 3) ("storeDirClean: Cleaning file %08X\n", files[n]); + snprintf(p2, MAXPATHLEN + 1, "%s/%08X", p1, files[n]); +#if USE_TRUNCATE + truncate(p2, 0); +#else + safeunlink(p2, 0); +#endif + statCounter.swap.files_cleaned++; + } + debug(36, 3) ("Cleaned %d unused files from %s\n", k, p1); + return k; +} + +static void +storeHashDirCleanEvent(void *unused) +{ + static int swap_index = 0; + int i; + int j = 0; + int n = 0; + /* + * Assert that there are HASH cache_dirs configured, otherwise + * we should never be called. + */ + assert(n_hash_dirs); + if (NULL == hash_dir_index) { + SwapDir *sd; + hashinfo_t *hashinfo; + /* + * Initialize the little array that translates HASH cache_dir + * number into the Config.cacheSwap.swapDirs array index. + */ + hash_dir_index = xcalloc(n_hash_dirs, sizeof(*hash_dir_index)); + for (i = 0, n = 0; i < Config.cacheSwap.n_configured; i++) { + sd = &Config.cacheSwap.swapDirs[i]; + if (!storeHashDirIs(sd)) + continue; + hash_dir_index[n++] = i; + hashinfo = (hashinfo_t *) sd->fsdata; + j += (hashinfo->l1 * hashinfo->l2); + } + assert(n == n_hash_dirs); + /* + * Start the storeHashDirClean() swap_index with a random + * value. j equals the total number of HASH level 2 + * swap directories + */ + swap_index = (int) (squid_random() % j); + } + if (0 == store_dirs_rebuilding) { + n = storeHashDirClean(swap_index); + swap_index++; + } + eventAdd("storeDirClean", storeHashDirCleanEvent, NULL, + 15.0 * exp(-0.25 * n), 1); +} + +static int +storeHashDirIs(SwapDir * sd) +{ + if (strncmp(sd->type, "hash", 3) == 0) + return 1; + return 0; +} + +/* + * Does swapfile number 'fn' belong in cachedir #F0, + * level1 dir #F1, level2 dir #F2? + */ +static int +storeHashFilenoBelongsHere(int fn, int F0, int F1, int F2) +{ + int D1, D2; + int L1, L2; + int filn = fn; + hashinfo_t *hashinfo; + assert(F0 < Config.cacheSwap.n_configured); + hashinfo = (hashinfo_t *) Config.cacheSwap.swapDirs[F0].fsdata; + L1 = hashinfo->l1; + L2 = hashinfo->l2; + D1 = ((filn / L2) / L2) % L1; + if (F1 != D1) + return 0; + D2 = (filn / L2) % L2; + if (F2 != D2) + return 0; + return 1; +} + +int +storeHashDirValidFileno(SwapDir * SD, sfileno filn, int flag) +{ + hashinfo_t *hashinfo = (hashinfo_t *) SD->fsdata; + if (filn < 0) + return 0; + /* + * If flag is set it means out-of-range file number should + * be considered invalid. + */ + if (flag) + if (filn > hashinfo->map->max_n_files) + return 0; + return 1; +} + +void +storeHashDirMaintain(SwapDir * SD) +{ + StoreEntry *e = NULL; + int removed = 0; + int max_scan; + int max_remove; + double f; + RemovalPurgeWalker *walker; + /* We can't delete objects while rebuilding swap */ + if (store_dirs_rebuilding) { + return; + } else { + f = (double) (SD->cur_size - SD->low_size) / (SD->max_size - SD->low_size); + f = f < 0.0 ? 0.0 : f > 1.0 ? 1.0 : f; + max_scan = (int) (f * 400.0 + 100.0); + max_remove = (int) (f * 70.0 + 10.0); + /* + * This is kinda cheap, but so we need this priority hack? 
+ */ + } + debug(20, 3) ("storeMaintainSwapSpace: f=%f, max_scan=%d, max_remove=%d\n", f, max_scan, max_remove); + walker = SD->repl->PurgeInit(SD->repl, max_scan); + while (1) { + if (SD->cur_size < SD->low_size) + break; + if (removed >= max_remove) + break; + e = walker->Next(walker); + if (!e) + break; /* no more objects */ + removed++; + storeRelease(e); + } + walker->Done(walker); + debug(20, (removed ? 2 : 3)) ("storeHashDirMaintain: %s removed %d/%d f=%.03f max_scan=%d\n", + SD->path, removed, max_remove, f, max_scan); +} + +/* + * storeHashDirCheckObj + * + * This routine is called by storeDirSelectSwapDir to see if the given + * object is able to be stored on this filesystem. HASH filesystems will + * happily store anything as long as the LRU time isn't too small. + */ +int +storeHashDirCheckObj(SwapDir * SD, const StoreEntry * e) +{ +#if OLD_UNUSED_CODE + if (storeHashDirExpiredReferenceAge(SD) < 300) { + debug(20, 3) ("storeHashDirCheckObj: NO: LRU Age = %d\n", + storeHashDirExpiredReferenceAge(SD)); + /* store_check_cachable_hist.no.lru_age_too_low++; */ + return -1; + } +#endif + /* Return 999 (99.9%) constant load */ + return 999; +} + +/* + * storeHashDirRefObj + * + * This routine is called whenever an object is referenced, so we can + * maintain replacement information within the storage fs. + */ +void +storeHashDirRefObj(SwapDir * SD, StoreEntry * e) +{ + debug(1, 3) ("storeHashDirRefObj: referencing %p %d/%d\n", e, e->swap_dirn, + e->swap_filen); + if (SD->repl->Referenced) + SD->repl->Referenced(SD->repl, e, &e->repl); +} + +/* + * storeHashDirUnrefObj + * This routine is called whenever the last reference to an object is + * removed, to maintain replacement information within the storage fs. + */ +void +storeHashDirUnrefObj(SwapDir * SD, StoreEntry * e) +{ + debug(1, 3) ("storeHashDirUnrefObj: referencing %p %d/%d\n", e, e->swap_dirn, + e->swap_filen); + if (SD->repl->Dereferenced) + SD->repl->Dereferenced(SD->repl, e, &e->repl); +} + +/* + * storeHashDirUnlinkFile + * + * This routine unlinks a file and pulls it out of the bitmap. + * It used to be in storeHashUnlink(), however an interface change + * forced this bit of code here. Eeek. + */ +void +storeHashDirUnlinkFile(SwapDir * SD, sfileno f) +{ + debug(79, 3) ("storeHashDirUnlinkFile: unlinking fileno %08X\n", f); + /* storeHashDirMapBitReset(SD, f); */ + unlinkdUnlink(storeHashDirFullPath(SD, f, NULL)); +} + +/* + * Add and remove the given StoreEntry from the replacement policy in + * use. 
+ */ + +void +storeHashDirReplAdd(SwapDir * SD, StoreEntry * e) +{ + debug(20, 4) ("storeHashDirReplAdd: added node %p to dir %d\n", e, + SD->index); + SD->repl->Add(SD->repl, e, &e->repl); +} + + +void +storeHashDirReplRemove(StoreEntry * e) +{ + SwapDir *SD = INDEXSD(e->swap_dirn); + debug(20, 4) ("storeHashDirReplRemove: remove node %p from dir %d\n", e, + SD->index); + SD->repl->Remove(SD->repl, e, &e->repl); +} + + + +/* ========== LOCAL FUNCTIONS ABOVE, GLOBAL FUNCTIONS BELOW ========== */ + +void +storeHashDirStats(SwapDir * SD, StoreEntry * sentry) +{ + hashinfo_t *hashinfo; +#if HAVE_STATVFS + struct statvfs sfs; +#endif + hashinfo = (hashinfo_t *) SD->fsdata; + storeAppendPrintf(sentry, "First level subdirectories: %d\n", hashinfo->l1); + storeAppendPrintf(sentry, "Second level subdirectories: %d\n", hashinfo->l2); + storeAppendPrintf(sentry, "Maximum Size: %d KB\n", SD->max_size); + storeAppendPrintf(sentry, "Current Size: %d KB\n", SD->cur_size); + storeAppendPrintf(sentry, "Percent Used: %0.2f%%\n", + 100.0 * SD->cur_size / SD->max_size); + storeAppendPrintf(sentry, "Filemap bits in use: %d of %d (%d%%)\n", + hashinfo->map->n_files_in_map, hashinfo->map->max_n_files, + percent(hashinfo->map->n_files_in_map, hashinfo->map->max_n_files)); +#if HAVE_STATVFS +#define fsbtoblk(num, fsbs, bs) \ + (((fsbs) != 0 && (fsbs) < (bs)) ? \ + (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs))) + if (!statvfs(SD->path, &sfs)) { + storeAppendPrintf(sentry, "Filesystem Space in use: %d/%d KB (%d%%)\n", + fsbtoblk((sfs.f_blocks - sfs.f_bfree), sfs.f_frsize, 1024), + fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024), + percent(sfs.f_blocks - sfs.f_bfree, sfs.f_blocks)); + storeAppendPrintf(sentry, "Filesystem Inodes in use: %d/%d (%d%%)\n", + sfs.f_files - sfs.f_ffree, sfs.f_files, + percent(sfs.f_files - sfs.f_ffree, sfs.f_files)); + } +#endif + storeAppendPrintf(sentry, "Flags:"); + if (SD->flags.selected) + storeAppendPrintf(sentry, " SELECTED"); + if (SD->flags.read_only) + storeAppendPrintf(sentry, " READ-ONLY"); + storeAppendPrintf(sentry, "\n"); +#if OLD_UNUSED_CODE +#if !HEAP_REPLACEMENT + storeAppendPrintf(sentry, "LRU Expiration Age: %6.2f days\n", + (double) storeHashDirExpiredReferenceAge(SD) / 86400.0); +#else + storeAppendPrintf(sentry, "Storage Replacement Threshold:\t%f\n", + heap_peepminkey(sd.repl.heap.heap)); +#endif +#endif /* OLD_UNUSED_CODE */ +} + +/* + * storeHashDirReconfigure + * + * This routine is called when the given swapdir needs reconfiguring + */ +void +storeHashDirReconfigure(SwapDir * sd, int index, char *path) +{ + char *token; + int i; + int size; + int l1; + int l2; + unsigned int read_only = 0; + + i = GetInteger(); + size = i << 10; /* Mbytes to kbytes */ + if (size <= 0) + fatal("storeHashDirReconfigure: invalid size value"); + i = GetInteger(); + l1 = i; + if (l1 <= 0) + fatal("storeHashDirReconfigure: invalid level 1 directories value"); + i = GetInteger(); + l2 = i; + if (l2 <= 0) + fatal("storeHashDirReconfigure: invalid level 2 directories value"); + if ((token = strtok(NULL, w_space))) + if (!strcasecmp(token, "read-only")) + read_only = 1; + + /* just reconfigure it */ + if (size == sd->max_size) + debug(3, 1) ("Cache dir '%s' size remains unchanged at %d KB\n", + path, size); + else + debug(3, 1) ("Cache dir '%s' size changed to %d KB\n", + path, size); + sd->max_size = size; + if (sd->flags.read_only != read_only) + debug(3, 1) ("Cache dir '%s' now %s\n", + path, read_only ? 
"Read-Only" : "Read-Write"); + sd->flags.read_only = read_only; + return; +} + +void +storeHashDirDump(StoreEntry * entry, const char *name, SwapDir * s) +{ + hashinfo_t *hashinfo = (hashinfo_t *) s->fsdata; + storeAppendPrintf(entry, "%s %s %s %d %d %d\n", + name, + "hash", + s->path, + s->max_size >> 10, + hashinfo->l1, + hashinfo->l2); +} + +/* + * Only "free" the filesystem specific stuff here + */ +static void +storeHashDirFree(SwapDir * s) +{ + hashinfo_t *hashinfo = (hashinfo_t *) s->fsdata; + if (hashinfo->swaplog_fd > -1) { + file_close(hashinfo->swaplog_fd); + hashinfo->swaplog_fd = -1; + } + filemapFreeMemory(hashinfo->map); + xfree(hashinfo); + s->fsdata = NULL; /* Will aid debugging... */ + +} + +char * +storeHashDirFullPath(SwapDir * SD, sfileno filn, char *fullpath) +{ + LOCAL_ARRAY(char, fullfilename, SQUID_MAXPATHLEN); + hashinfo_t *hashinfo = (hashinfo_t *) SD->fsdata; + int L1 = hashinfo->l1; + int L2 = hashinfo->l2; + if (!fullpath) + fullpath = fullfilename; + fullpath[0] = '\0'; + snprintf(fullpath, SQUID_MAXPATHLEN, "%s/%02X/%02X/%08X", + SD->path, + ((filn / L2) / L2) % L1, + (filn / L2) % L2, + filn); + return fullpath; +} + +/* + * storeHashCleanupDoubleCheck + * + * This is called by storeCleanup() if -S was given on the command line. + */ +static int +storeHashCleanupDoubleCheck(SwapDir * sd, StoreEntry * e) +{ + struct stat sb; + if (stat(storeHashDirFullPath(sd, e->swap_filen, NULL), &sb) < 0) { + debug(20, 0) ("storeHashCleanupDoubleCheck: MISSING SWAP FILE\n"); + debug(20, 0) ("storeHashCleanupDoubleCheck: FILENO %08X\n", e->swap_filen); + debug(20, 0) ("storeHashCleanupDoubleCheck: PATH %s\n", + storeHashDirFullPath(sd, e->swap_filen, NULL)); + storeEntryDump(e, 0); + return -1; + } + if (e->swap_file_sz != sb.st_size) { + debug(20, 0) ("storeHashCleanupDoubleCheck: SIZE MISMATCH\n"); + debug(20, 0) ("storeHashCleanupDoubleCheck: FILENO %08X\n", e->swap_filen); + debug(20, 0) ("storeHashCleanupDoubleCheck: PATH %s\n", + storeHashDirFullPath(sd, e->swap_filen, NULL)); + debug(20, 0) ("storeHashCleanupDoubleCheck: ENTRY SIZE: %d, FILE SIZE: %d\n", + e->swap_file_sz, (int) sb.st_size); + storeEntryDump(e, 0); + return -1; + } + return 0; +} + +/* + * storeHashDirParse + * + * Called when a *new* fs is being setup. 
+ */ +void +storeHashDirParse(SwapDir * sd, int index, char *path) +{ + char *token; + int i; + int size; + int l1; + int l2; + unsigned int read_only = 0; + hashinfo_t *hashinfo; + + i = GetInteger(); + size = i << 10; /* Mbytes to kbytes */ + if (size <= 0) + fatal("storeHashDirParse: invalid size value"); + i = GetInteger(); + l1 = i; + if (l1 <= 0) + fatal("storeHashDirParse: invalid level 1 directories value"); + i = GetInteger(); + l2 = i; + if (l2 <= 0) + fatal("storeHashDirParse: invalid level 2 directories value"); + if ((token = strtok(NULL, w_space))) + if (!strcasecmp(token, "read-only")) + read_only = 1; + + hashinfo = xmalloc(sizeof(hashinfo_t)); + if (hashinfo == NULL) + fatal("storeHashDirParse: couldn't xmalloc() hashinfo_t!\n"); + + sd->index = index; + sd->path = xstrdup(path); + sd->max_size = size; + sd->fsdata = hashinfo; + hashinfo->l1 = l1; + hashinfo->l2 = l2; + hashinfo->swaplog_fd = -1; + hashinfo->map = NULL; /* Debugging purposes */ + hashinfo->suggest = 0; + sd->flags.read_only = read_only; + sd->init = storeHashDirInit; + sd->newfs = storeHashDirNewfs; + sd->dump = storeHashDirDump; + sd->freefs = storeHashDirFree; + sd->dblcheck = storeHashCleanupDoubleCheck; + sd->statfs = storeHashDirStats; + sd->maintainfs = storeHashDirMaintain; + sd->checkobj = storeHashDirCheckObj; + sd->refobj = storeHashDirRefObj; + sd->unrefobj = storeHashDirUnrefObj; + sd->callback = NULL; + sd->sync = NULL; + sd->obj.create = storeHashCreate; + sd->obj.open = storeHashOpen; + sd->obj.close = storeHashClose; + sd->obj.read = storeHashRead; + sd->obj.write = storeHashWrite; + sd->obj.unlink = storeHashUnlink; + sd->log.open = storeHashDirOpenSwapLog; + sd->log.close = storeHashDirCloseSwapLog; + sd->log.write = storeHashDirSwapLog; + sd->log.clean.start = storeHashDirWriteCleanStart; + sd->log.clean.nextentry = storeHashDirCleanLogNextEntry; + sd->log.clean.done = storeHashDirWriteCleanDone; + + /* Initialise replacement policy stuff */ + sd->repl = createRemovalPolicy(Config.replPolicy); +} + +/* + * Initial setup / end destruction + */ +void +storeHashDirDone(void) +{ + memPoolDestroy(hash_state_pool); + hash_initialised = 0; +} + +void +storeFsSetup_hash(storefs_entry_t * storefs) +{ + assert(!hash_initialised); + storefs->parsefunc = storeHashDirParse; + storefs->reconfigurefunc = storeHashDirReconfigure; + storefs->donefunc = storeHashDirDone; + hash_state_pool = memPoolCreate("HASH IO State data", sizeof(hashstate_t)); + hash_initialised = 1; +} --- /dev/null Wed Feb 14 00:45:56 2007 +++ squid/src/fs/hash/store_hash.h Wed Feb 14 00:47:18 2007 @@ -0,0 +1,50 @@ +/* + * store_hash.h + * + * Internal declarations for the hash routines + */ + +#ifndef __STORE_HASH_H__ +#define __STORE_HASH_H__ + +struct _hashinfo_t { + int swaplog_fd; + int l1; + int l2; + fileMap *map; + int suggest; +}; + +struct _hashstate_t { + int fd; + struct { + unsigned int close_request:1; + unsigned int reading:1; + unsigned int writing:1; + } flags; +}; + +typedef struct _hashinfo_t hashinfo_t; +typedef struct _hashstate_t hashstate_t; + +/* The hash_state memory pool */ +extern MemPool *hash_state_pool; + +extern void storeHashDirMapBitReset(SwapDir *, sfileno); +extern int storeHashDirMapBitAllocate(SwapDir *); +extern char *storeHashDirFullPath(SwapDir * SD, sfileno filn, char *fullpath); +extern void storeHashDirUnlinkFile(SwapDir *, sfileno); +extern void storeHashDirReplAdd(SwapDir * SD, StoreEntry *); +extern void storeHashDirReplRemove(StoreEntry *); + +/* + * Store IO stuff + */ +extern 
STOBJCREATE storeHashCreate; +extern STOBJOPEN storeHashOpen; +extern STOBJCLOSE storeHashClose; +extern STOBJREAD storeHashRead; +extern STOBJWRITE storeHashWrite; +extern STOBJUNLINK storeHashUnlink; + +#endif --- /dev/null Wed Feb 14 00:45:56 2007 +++ squid/src/fs/hash/store_io_hash.c Wed Feb 14 00:47:18 2007 @@ -0,0 +1,268 @@ + +/* + * $Id: store_io_hash.c,v 1.1.2.1 2000/12/16 09:29:03 adri Exp $ + * + * DEBUG: section 79 Storage Manager HASH Interface + * AUTHOR: Duane Wessels + * + * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ + * ---------------------------------------------------------- + * + * Squid is the result of efforts by numerous individuals from the + * Internet community. Development is led by Duane Wessels of the + * National Laboratory for Applied Network Research and funded by the + * National Science Foundation. Squid is Copyrighted (C) 1998 by + * Duane Wessels and the University of California San Diego. Please + * see the COPYRIGHT file for full details. Squid incorporates + * software developed and/or copyrighted by other sources. Please see + * the CREDITS file for full details. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. + * + */ + +#include "squid.h" +#include "store_hash.h" + + +static DRCB storeHashReadDone; +static DWCB storeHashWriteDone; +static void storeHashIOCallback(storeIOState * sio, int errflag); +static void storeHashIOFreeEntry(void *, int); + +/* === PUBLIC =========================================================== */ + +storeIOState * +storeHashOpen(SwapDir * SD, StoreEntry * e, STFNCB * file_callback, + STIOCB * callback, void *callback_data) +{ + sfileno f = e->swap_filen; + char *path = storeHashDirFullPath(SD, f, NULL); + storeIOState *sio; + struct stat sb; + int fd; + debug(79, 3) ("storeHashOpen: fileno %08X\n", f); + fd = file_open(path, O_RDONLY); + if (fd < 0) { + debug(79, 3) ("storeHashOpen: got failure (%d)\n", errno); + return NULL; + } + debug(79, 3) ("storeHashOpen: opened FD %d\n", fd); + sio = memAllocate(MEM_STORE_IO); + cbdataAdd(sio, storeHashIOFreeEntry, MEM_STORE_IO); + sio->fsstate = memPoolAlloc(hash_state_pool); + + sio->swap_filen = f; + sio->swap_dirn = SD->index; + sio->mode = O_RDONLY; + sio->callback = callback; + sio->callback_data = callback_data; + cbdataLock(callback_data); + sio->e = e; + ((hashstate_t *) (sio->fsstate))->fd = fd; + ((hashstate_t *) (sio->fsstate))->flags.writing = 0; + ((hashstate_t *) (sio->fsstate))->flags.reading = 0; + ((hashstate_t *) (sio->fsstate))->flags.close_request = 0; + if (fstat(fd, &sb) == 0) + sio->st_size = sb.st_size; + store_open_disk_fd++; + + /* We should update the heap/dlink position here ! 
*/ + return sio; +} + +storeIOState * +storeHashCreate(SwapDir * SD, StoreEntry * e, STFNCB * file_callback, STIOCB * callback, void *callback_data) +{ + storeIOState *sio; + int fd; + int mode = (O_WRONLY | O_CREAT | O_TRUNC); + char *path; + hashinfo_t *hashinfo = (hashinfo_t *) SD->fsdata; + sfileno filn; + sdirno dirn; + + /* Allocate a number */ + dirn = SD->index; + filn = storeHashDirMapBitAllocate(SD); + hashinfo->suggest = filn + 1; + /* Shouldn't we handle a 'bitmap full' error here? */ + path = storeHashDirFullPath(SD, filn, NULL); + + debug(79, 3) ("storeHashCreate: fileno %08X\n", filn); + fd = file_open(path, mode); + if (fd < 0) { + debug(79, 3) ("storeHashCreate: got failure (%d)\n", errno); + return NULL; + } + debug(79, 3) ("storeHashCreate: opened FD %d\n", fd); + sio = memAllocate(MEM_STORE_IO); + cbdataAdd(sio, storeHashIOFreeEntry, MEM_STORE_IO); + sio->fsstate = memPoolAlloc(hash_state_pool); + + sio->swap_filen = filn; + sio->swap_dirn = dirn; + sio->mode = mode; + sio->callback = callback; + sio->callback_data = callback_data; + cbdataLock(callback_data); + sio->e = (StoreEntry *) e; + ((hashstate_t *) (sio->fsstate))->fd = fd; + ((hashstate_t *) (sio->fsstate))->flags.writing = 0; + ((hashstate_t *) (sio->fsstate))->flags.reading = 0; + ((hashstate_t *) (sio->fsstate))->flags.close_request = 0; + store_open_disk_fd++; + + /* now insert into the replacement policy */ + storeHashDirReplAdd(SD, e); + return sio; +} + +void +storeHashClose(SwapDir * SD, storeIOState * sio) +{ + hashstate_t *hashstate = (hashstate_t *) sio->fsstate; + + debug(79, 3) ("storeHashClose: dirno %d, fileno %08X, FD %d\n", + sio->swap_dirn, sio->swap_filen, hashstate->fd); + if (hashstate->flags.reading || hashstate->flags.writing) { + hashstate->flags.close_request = 1; + return; + } + storeHashIOCallback(sio, 0); +} + +void +storeHashRead(SwapDir * SD, storeIOState * sio, char *buf, size_t size, off_t offset, STRCB * callback, void *callback_data) +{ + hashstate_t *hashstate = (hashstate_t *) sio->fsstate; + + assert(sio->read.callback == NULL); + assert(sio->read.callback_data == NULL); + sio->read.callback = callback; + sio->read.callback_data = callback_data; + cbdataLock(callback_data); + debug(79, 3) ("storeHashRead: dirno %d, fileno %08X, FD %d\n", + sio->swap_dirn, sio->swap_filen, hashstate->fd); + sio->offset = offset; + hashstate->flags.reading = 1; + file_read(hashstate->fd, + buf, + size, + offset, + storeHashReadDone, + sio); +} + +void +storeHashWrite(SwapDir * SD, storeIOState * sio, char *buf, size_t size, off_t offset, FREE * free_func) +{ + hashstate_t *hashstate = (hashstate_t *) sio->fsstate; + debug(79, 3) ("storeHashWrite: dirn %d, fileno %08X, FD %d\n", sio->swap_dirn, sio->swap_filen, hashstate->fd); + hashstate->flags.writing = 1; + file_write(hashstate->fd, + offset, + buf, + size, + storeHashWriteDone, + sio, + free_func); +} + +void +storeHashUnlink(SwapDir * SD, StoreEntry * e) +{ + debug(79, 3) ("storeHashUnlink: fileno %08X\n", e->swap_filen); + storeHashDirReplRemove(e); + storeHashDirMapBitReset(SD, e->swap_filen); + storeHashDirUnlinkFile(SD, e->swap_filen); +} + +/* === STATIC =========================================================== */ + +static void +storeHashReadDone(int fd, const char *buf, int len, int errflag, void *my_data) +{ + storeIOState *sio = my_data; + hashstate_t *hashstate = (hashstate_t *) sio->fsstate; + STRCB *callback = sio->read.callback; + void *their_data = sio->read.callback_data; + ssize_t rlen; + + debug(79, 3) 
("storeHashReadDone: dirno %d, fileno %08X, FD %d, len %d\n", + sio->swap_dirn, sio->swap_filen, fd, len); + hashstate->flags.reading = 0; + if (errflag) { + debug(79, 3) ("storeHashReadDone: got failure (%d)\n", errflag); + rlen = -1; + } else { + rlen = (ssize_t) len; + sio->offset += len; + } + assert(callback); + assert(their_data); + sio->read.callback = NULL; + sio->read.callback_data = NULL; + if (cbdataValid(their_data)) + callback(their_data, buf, (size_t) rlen); + cbdataUnlock(their_data); +} + +static void +storeHashWriteDone(int fd, int errflag, size_t len, void *my_data) +{ + storeIOState *sio = my_data; + hashstate_t *hashstate = (hashstate_t *) sio->fsstate; + debug(79, 3) ("storeHashWriteDone: dirno %d, fileno %08X, FD %d, len %d\n", + sio->swap_dirn, sio->swap_filen, fd, len); + hashstate->flags.writing = 0; + if (errflag) { + debug(79, 0) ("storeHashWriteDone: got failure (%d)\n", errflag); + storeHashIOCallback(sio, errflag); + return; + } + sio->offset += len; + if (hashstate->flags.close_request) + storeHashIOCallback(sio, errflag); +} + +static void +storeHashIOCallback(storeIOState * sio, int errflag) +{ + hashstate_t *hashstate = (hashstate_t *) sio->fsstate; + debug(79, 3) ("storeHashIOCallback: errflag=%d\n", errflag); + if (hashstate->fd > -1) { + file_close(hashstate->fd); + store_open_disk_fd--; + } + if (cbdataValid(sio->callback_data)) + sio->callback(sio->callback_data, errflag, sio); + cbdataUnlock(sio->callback_data); + sio->callback_data = NULL; + sio->callback = NULL; + cbdataFree(sio); +} + + +/* + * We can't pass memFree() as a free function here, because we need to free + * the fsstate variable .. + */ +static void +storeHashIOFreeEntry(void *sio, int foo) +{ + memPoolFree(hash_state_pool, ((storeIOState *) sio)->fsstate); + memFree(sio, MEM_STORE_IO); +}