--------------------- PatchSet 3337 Date: 2001/11/20 06:48:42 Author: jkay Branch: push Tag: (none) Log: Hint cache handling of on-disk hint cache. Members: src/HintCacheDisk.c:1.1->1.1.2.1 --- /dev/null Wed Feb 14 00:55:47 2007 +++ squid/src/HintCacheDisk.c Wed Feb 14 00:56:31 2007 @@ -0,0 +1,828 @@ +/* + * $Date: 2001/11/20 06:48:42 $ $Id: HintCacheDisk.c,v 1.1.2.1 2001/11/20 06:48:42 jkay Exp $ + * + * AUTHOR: Mike Dahlin, UT Austin, 1998 + * DEBUG: section 82 On-disk hint cache + * + * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ + * -------------------------------------------------------- + * + * Squid is the result of efforts by numerous individuals from the + * Internet community. Development is led by Duane Wessels of the + * National Laboratory for Applied Network Research and funded by + * the National Science Foundation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +/* + *------------------------------------------------------------------ + * + * LESS Group + * Computer Sciences Department + * University of Texas at Austin + * + * HintCacheDisk --- + * The on-disk cache of hints. The key data structure + * is an mmapped array which is treated as a - + * associative cache. 
+ *
+ *------------------------------------------------------------------
+ */
+#include "squid.h"
+
+/* Debug section number for this module (see debug()) */
+#define HCDISK_DEBUG 82
+
+/* Scratch-buffer size for paths and header-file lines */
+#define BIG_ENOUGH 1024
+
+/* Suffix appended to the header path to name the on-disk data file */
+static const char *suffix = ".hints";
+
+static int readLine(int fd, char *buffer, int max);
+static void initializeFile(int dataFD, int nbuckets, int associativity);
+static int findKey(HintCacheDisk *d, int b, URLKey key, 
+		   HintCacheDiskEntry *foundRet);
+static int deleteKey(HintCacheDisk *d, int b, HintCacheDiskEntry *oldEntry, 
+		     HintCacheDiskEntry *survivingEntry);
+static void insertKey(HintCacheDisk *d, int b, HintCacheDiskEntry *newEntry);
+#ifdef MADV_RANDOM
+static caddr_t pageAddr(caddr_t e);
+#endif
+static void sanityCheckMatch(HintCacheDisk *d, HintCacheDiskEntry *bucketp, 
+			     HintCacheDiskEntry *entry, 
+			     int expectFullMatch, int expectKeyMatch);
+static int bucket(HintCacheDisk *d, URLKey key);
+static HintCacheDiskEntry *readBucket(HintCacheDisk *d, int b);
+static int writeBucket(HintCacheDisk *d, int b, HintCacheDiskEntry *bucketp);
+#ifdef DOTEST
+static void timeMadvise();
+#endif
+
+/* VM page size, set once in hintCacheDiskInit() via sysconf() */
+static long pageSize;
+
+/*
+ * Should we try to keep stuff we just read or wrote in the cache.
+ */
+/*
+ * You may not want to cache writes since they may or may not
+ * be about stuff you will later read. On the other hand, there
+ * may be temporal locality about stuff that gets updated --
+ * I may be likely to read or discard the object in the near
+ * future. Also, telling system to kick it out of cache costs
+ * about 1ms per call.
+ */
+static const int CACHE_RECENT_WRITES = 1;
+/*
+ * You may not want to cache reads since these are things
+ * that you will now have in your cache (so why would
+ * you look at the hints any time soon. On the other hand,
+ * telling system to kick out reads costs about 1ms per call
+ */
+static const int CACHE_RECENT_READS = 0;
+/*
+ * You may not want to prefetch at all (we can only prefetch
+ * on network updates, and those are not on critical
+ * path anywhere. 
Maybe better to sacrifice latency for
+ * reduced overhead.
+ *
+ * No point in trying to pipeline if there are not many
+ * updates to pipeline. Otherwise the overhead of the
+ * system calls outweighs the reduced latency for disk
+ * reads. In the best case, madvise (tight loop of madvise
+ * about same location), madvise appears to take about 1ms per call.
+ */
+static const int DO_PREFETCH = 0;
+
+/*
+ * HintCacheDisk constructor: parse the hint-cache header file at
+ * diskPath (VERSION / DATA_FILE / SIZE_BYTES / BUCKETS /
+ * ASSOCIATIVITY key-value lines; '#' lines are comments), then open
+ * the data file.  On any failure, hint caching is disabled by
+ * setting the global HCDisk to NULL and returning.
+ */
+void
+hintCacheDiskInit(HintCacheDisk *d, char *diskPath)
+{
+    int header;
+    int dataF;
+    char buffer[BIG_ENOUGH], token[BIG_ENOUGH], dataFile[BIG_ENOUGH];
+    int version;
+    int size, buckets, associativity;
+    int gotVersion = 0, gotData = 0, gotAssoc = 0, 
+	gotSize = 0, gotBuckets = 0;
+
+    if((pageSize = sysconf(_SC_PAGESIZE)) < 0){
+	perror("Cannot determine page size\n");
+	exit(-1);
+    }
+
+    header = open(diskPath, O_RDONLY);
+    if(header < 0){
+	debug(HCDISK_DEBUG, 1)
+	    ("warning: Can\'t open %s, hint caching disabled\n", diskPath);
+	HCDisk = NULL;
+	return;
+    }
+
+    /* Parse each header line; unknown keys are silently ignored */
+    while(readLine(header, buffer, BIG_ENOUGH)){
+	if(buffer[0] != '#'){
+	    sscanf(buffer, "%s", token);
+	    if(!strcmp(token, "VERSION")){
+		gotVersion = sscanf(buffer, "VERSION %d\n", &version);
+		assert(gotVersion == 1);
+		assert(version == HCD_VERSION);
+	    }
+	    else if(!strcmp(token, "DATA_FILE")){
+		gotData = sscanf(buffer, "DATA_FILE %s\n", dataFile);
+		assert(gotData == 1);
+	    }
+	    else if(!strcmp(token, "SIZE_BYTES")){
+		gotSize = sscanf(buffer, "SIZE_BYTES %d\n", &size);
+		assert(gotSize == 1);
+	    }
+	    else if(!strcmp(token, "BUCKETS")){
+		gotBuckets = sscanf(buffer, "BUCKETS %d\n", &buckets);
+		assert(gotBuckets == 1);
+	    }
+	    else if(!strcmp(token, "ASSOCIATIVITY")){
+		gotAssoc = sscanf(buffer, "ASSOCIATIVITY %d\n", &associativity);
+		assert(gotAssoc == 1);
+	    }
+	    else{
+		/* Ignore what we don't understand */
+	    }
+	}
+    }
+    close(header);
+    /* All five keys are mandatory and must agree with each other */
+    assert(gotVersion && gotData && gotAssoc && gotSize && gotBuckets);
+    assert(size == buckets * associativity * 
sizeof(HintCacheDiskEntry)); + d->nbuckets = buckets; + d->entriesPerBucket = associativity; + dataF = open(dataFile, O_RDWR); + if(dataF < 0){ + debug(HCDISK_DEBUG, 1) + ("warning: Can\'t open %s, hint caching disabled\n", dataFile); + HCDisk = NULL; + return; + } + if (Config.Hints.usemmap) { + d->mmappedArray = (HintCacheDiskEntry *)mmap(0, size, PROT_READ|PROT_WRITE, + MAP_SHARED, dataF, 0); + if(!d->mmappedArray){ + debug(HCDISK_DEBUG, 1) + ("warning: Can\'t mmap %s, hint caching disabled\n", dataFile); + HCDisk = NULL; + return; + } +#ifdef MADV_RANDOM + if(madvise((char *)d->mmappedArray, size, MADV_RANDOM)){ + debug(HCDISK_DEBUG, 1) + ("warning: madvise on hint cache data failed"); + } +#endif + close(dataF); + d->fd = -1; + } + else { + d->fd = dataF; + } +} + +/* + * Read one line of input from file. We really want + * to use fgets(), but the fopen() etc. routines + * don't work if you have large numbers of + * files open (60 or 255 are the max for some implementations). + * So, we use the basic open/read/close routines + * instead, which let us open up to the max number + * of allowed file descriptors. + * + * Store a line of text ending with \n\0 in buffer + * and return nonzero if we succeeded in getting + * a well-formed line. + */ +static int +readLine(int fd, char *buffer, int max) +{ + ssize_t got; + int count = 0; + + assert(buffer); + assert(fd >= 0); + assert(fd <= 10000); + assert(max > 2); /* need room for \n\0 */ + + while(count < max-2){ + got = read(fd, &buffer[count], 1); + if(got != 1){ + return 0; + } + if(buffer[count] == '\n'){ + buffer[count+1] = '\0'; + return 1; + } + count++; + } + return 0; +} + +/* + * Does the hard work of setting up a new empty file. 
+ *
+ * Note: we use raw open() and write() rather than
+ * fopen() and fwrite() because the f-routines
+ * get unhappy in programs that open lots of files
+ * (they often die if you have more than 60 or 255
+ * files open, even if you are allowed to
+ * have more open file descriptors than that).
+ */
+void
+hintCacheDiskCreateFile(char *hcPath, int size, int associativity)
+{
+    char *dataPath;
+    int buckets;
+    int headerF, dataF;
+    char buffer[BIG_ENOUGH];
+    int len;
+
+    len = strlen(hcPath);
+    dataPath = (char *)xmalloc(len + strlen(suffix) + 1);
+    assert(dataPath);
+    /*
+     * BUG FIX: the snprintf bound must cover path + suffix + NUL.
+     * Passing just "len" truncated the data file name, losing the
+     * ".hints" suffix (and the final path character).
+     */
+    snprintf(dataPath, len + strlen(suffix) + 1, "%s%s", hcPath, suffix);
+
+    /* Round size down to a whole number of fixed-size buckets */
+    buckets = (size / associativity) / sizeof(HintCacheDiskEntry);
+    assert(buckets * associativity * sizeof(HintCacheDiskEntry) <= size);
+    size = buckets * associativity * sizeof(HintCacheDiskEntry);
+
+    headerF = open(hcPath, O_WRONLY|O_CREAT|O_TRUNC, 0744);
+    if(headerF < 0){
+	perror("Opening Hint Cache header");
+	exit(-1);
+    }
+
+    snprintf(buffer, BIG_ENOUGH, 
+	     "# *** Header for hint cache. 
Do not modify ***\n"); + write(headerF, buffer, strlen(buffer)); + snprintf(buffer, BIG_ENOUGH, "VERSION %d\n", HCD_VERSION); + write(headerF, buffer, strlen(buffer)); + snprintf(buffer, BIG_ENOUGH, "DATA_FILE %s\n", dataPath); + write(headerF, buffer, strlen(buffer)); + snprintf(buffer, BIG_ENOUGH, "SIZE_BYTES %d\n", size); + write(headerF, buffer, strlen(buffer)); + snprintf(buffer, BIG_ENOUGH, "BUCKETS %d\n", buckets); + write(headerF, buffer, strlen(buffer)); + snprintf(buffer, BIG_ENOUGH, "ASSOCIATIVITY %d\n", associativity); + write(headerF, buffer, strlen(buffer)); + + close(headerF); + + /* Open data file*/ + dataF = open(dataPath, O_WRONLY|O_CREAT|O_TRUNC, 0744); + if(dataF < 0){ + perror("Opening Hint Cache data"); + exit(-1); + } + + /* Need to pre-extend file to 'size' */ + lseek(dataF, size - 1, 0); + write(dataF, "\0", 1); + lseek(dataF, 0, 0); + + initializeFile(dataF, buckets, associativity); + close(dataF); + xfree(dataPath); +} + +/* Set a file to empty by invalidating all entries. */ +static void +initializeFile(int fd, int nbuckets, int associativity) +{ + HintCacheDiskEntry invalidEntry; + int ibucket, ientry; + int count; + + HCE_INVALIDATE(&invalidEntry.entry); + + for(ibucket = 0; ibucket < nbuckets; ibucket++){ + for(ientry = 0; ientry < associativity; ientry++){ + count = write(fd, &invalidEntry, sizeof(HintCacheDiskEntry)); + if(count != sizeof(HintCacheDiskEntry)){ + perror("ERROR Creating HintCacheCache file"); + exit(-1); + } + } + } +} + +/* Unmap the file. */ +void hintCacheDiskClose(HintCacheDisk *d) +{ + munmap((char *)d->mmappedArray, d->nbuckets * d->entriesPerBucket); + d->mmappedArray = NULL; +} + +/* Update data structures to reflect the fact a copy is cached locally. 
*/ +void hintCacheDiskInformLocal(HintCacheDisk *d, URLKey key, unsigned mtime) +{ + int b; + HintCacheDiskEntry new, dummyRet, old; + int foundOld; + + assert(d); + + if(!URLKEY_COMPARE(key, INVALID_URL_KEY)){ + return; + } + + b = bucket(d, key); + foundOld = findKey(d, b, key, &old); + if(foundOld){ + /* Delete old instead of replace to support multiversioning */ + deleteKey(d, b, &old, &dummyRet); + } + hintCacheEntryInit(&new.entry, key, &Config.Sockaddr.http->s, mtime); + new.rtime = squid_curtime; + insertKey(d, b, &new); +#ifdef MADV_RANDOM + if(!CACHE_RECENT_WRITES){ + if(madvise(pageAddr((caddr_t) (d->mmappedArray + b)), + pageSize, MADV_DONTNEED)){ + debug(HCDISK_DEBUG, 1) ("madvise DONTNEED failed\n"); + } + } +#endif +} + + + +/* Update local data structures to reflect the fact + * that we no longer have a copy locally. */ +void hintCacheDiskInvalLocal(HintCacheDisk *d, URLKey key, + unsigned mtime) +{ + HintCacheDiskEntry myent, dummyRet; + int b; + + debug(HCDISK_DEBUG, 2) ("HintCacheDiskinvalLocalCopy: deleting %ll\n", key.key); + + if(!URLKEY_COMPARE(key, INVALID_URL_KEY)){ + return; + } + + hintCacheEntryInit(&myent.entry, key, &Config.Sockaddr.http->s, mtime); + myent.rtime = squid_curtime; + b = bucket(d, key); + deleteKey(d, b, &myent, &dummyRet); +#ifdef MADV_RANDOM + if (Config.Hints.usemmap) { + if(madvise(pageAddr((caddr_t)(d->mmappedArray + b)), + pageSize, MADV_DONTNEED)){ + debug(HCDISK_DEBUG, 1) ("Madvise DONTNEED failed\n"); + } + } +#endif +} + +/* Warn vm system that certain pages will be needed. These pages + * hold or will hold object mentioned in this hint update. 
*/
+void
+hintCacheDiskprefetch(HintCacheDisk *d, 
+		      HintCacheUpdate *uArray, 
+		      int nupdates)
+{
+#ifdef MADV_RANDOM
+    int b;
+    int ii;
+    if(DO_PREFETCH && Config.Hints.usemmap) {
+	for(ii = 0; ii < nupdates; ii++){
+	    /* Only inform/inval updates name an object whose bucket we touch */
+	    if(uArray[ii].action == HC_InvalToParent ||
+	       uArray[ii].action == HC_InvalToChild ||
+	       uArray[ii].action == HC_InformToParent ||
+	       uArray[ii].action == HC_InformToChild){
+		b = bucket(d, uArray[ii].entry.key);
+		if(madvise(pageAddr((caddr_t) (d->mmappedArray + b)),
+			   pageSize, MADV_WILLNEED)){
+		    debug(HCDISK_DEBUG, 1) ("madvise WILLNEED failed\n");
+		}
+	    }
+	}
+    }
+#endif
+}
+
+/*
+ * The network told us that <key, node> is
+ * no longer a valid mapping. If we knew of a
+ * different copy than the one that was invalidated
+ * then report that back by returning 1 and putting
+ * a copy of the survivor in survivor.
+ *
+ * The function takes HCEntrys as args rather than
+ * HintCacheDiskEntrys because it is called exclusively
+ * from outside HintCacheDisk.c.
+ */
+int
+hintCacheDisknetInvalRecord(HintCacheDisk *d, 
+			    HintCacheEntry *entry, 
+			    HintCacheEntry *survivor)
+{
+    HintCacheDiskEntry dentry, dsurv;
+    int remainingCopy;
+    int b;
+
+    /* Invalid entry: report "no survivor" without touching the cache */
+    if(!HCE_VALID(entry)) {
+	survivor->key.key = 0;
+	return 0;
+    }
+
+    dentry.entry = *entry;
+    dentry.rtime = squid_curtime;
+
+    b = bucket(d, entry->key);
+    remainingCopy = deleteKey(d, b, &dentry, &dsurv);
+    *survivor = dsurv.entry;
+#ifdef MADV_RANDOM
+    if (Config.Hints.usemmap)
+	if(madvise(pageAddr((caddr_t)(d->mmappedArray + b)),
+		   pageSize, MADV_DONTNEED)){
+	    debug(HCDISK_DEBUG, 1) ("madvise DONTNEED failed\n");
+	}
+#endif
+
+    return remainingCopy;
+}
+
+/*
+ * The network told us that <key, node> is a valid
+ * mapping. Install it if it is better than the one
+ * we have.
+ *
+ * Takes HintCacheEntrys as args because it's exported. 
+ */ +int +hintCacheDiskUpdateIfCloser(HintCacheDisk *d, HintCacheEntry *new) +{ + HintCacheDiskEntry old, dummyRet, dnew, *todelete; + URLKey key = new->key; + int dochange = 0; + StoreEntry *e; + int ret = 0; + int foundOld; + int b; + + if(!HCE_VALID(new)){ + return 0; + } + + dnew.entry = *new; + dnew.rtime = squid_curtime; + + b = bucket(d, key); + foundOld = findKey(d, b, key, &old); + if (!foundOld) { + insertKey(d, b, &dnew); + todelete = &dnew; + ret = 1; + } + else + todelete = &old; + + e = storeGet(key); + if (e && e->lastmod < new->mtime) { + /* This hint is about a new version. Invalidate the old version */ + storeRelease(e); + dochange = 1; + } + else if (foundOld && squid_curtime >= old.rtime + Config.Hints.holddown) { + /* Don't propagate hints about the same version more often + that once every fifteen minutes. */ + dochange = 1; + } + else if (hintCacheHierCompareDistance(new->ipaddr, old.entry.ipaddr) < 0) { + /* This hint is for cache closer than what we got */ + dochange = 1; + } + + if (dochange) { + deleteKey(d, b, todelete, &dummyRet); + insertKey(d, b, &dnew); + ret = 1; + } +#ifdef MADV_RANDOM + if(!CACHE_RECENT_WRITES && Config.Hints.usemmap){ + if(madvise(pageAddr((caddr_t)(d->mmappedArray + b)), + pageSize, MADV_DONTNEED)){ + perror("Madvise DONTNEED failed\n"); + } + } +#endif + return ret; +} + +/* + * Find the record for the key if one exists. Return 1 + * if matching record found and a copy in *match. + * Return 0 if no match found. + * + * Note: we can't just return a pointer to match + * because its location in the bucket can change + * at any time. + * + * Takes HintCacheEntry as arg because it is exported. 
+ */ +int +hintCacheDiskFindNearest(HintCacheDisk *d, URLKey key, + HintCacheEntry *match) +{ + int b; + int found; + HintCacheDiskEntry dent; + + assert(match != 0); + if(!URLKEY_COMPARE(key, INVALID_URL_KEY)){ + return 0; + } + + b = bucket(d, key); + found = findKey(d, b, key, &dent); + if (found) + *match = dent.entry; +#ifdef MADV_RANDOM + if(!CACHE_RECENT_READS && Config.Hints.usemmap){ + if(madvise(pageAddr((caddr_t)(d->mmappedArray + b)), + pageSize, MADV_DONTNEED)){ + perror("Madvise DONTNEED failed\n"); + } + } +#endif + return found; +} + + +/* Return pointer to first entry in bucket for key. */ +static int +bucket(HintCacheDisk *d, URLKey key) +{ + int b; + + b = key.key % d->nbuckets; + return b * d->entriesPerBucket; +} + +#ifdef MADV_RANDOM +/* + * Return the address of the first byte of the + * page that holds an address. + */ +static caddr_t +pageAddr(caddr_t e) +{ + caddr_t ret; + ret = e - ((unsigned)e % pageSize); + return ret; +} +#endif /* MADV_RANDOM */ + +/* + * Delete the record associated with the specified + * pair. + * + * If there is a record for the key that survives the + * delete (e.g. the node doesn't match) return it + * in the HintCacheEntry *surviver field and return 1. If + * no record for the key survives the delete, return 0. + * + * Note: we can't just return a pointer to survivor + * because its location in the bucket can change + * at any time. 
+ */ +static int +deleteKey(HintCacheDisk *d, int b, HintCacheDiskEntry *entry, HintCacheDiskEntry *survivor) +{ + HintCacheDiskEntry *bucketp = d->mmappedArray + b; + int ii; + + assert(survivor != NULL); + assert(entry != survivor); + assert(HintCacheE_VALID(&entry->entry)); + + bucketp = readBucket(d, b); + for(ii = 0; ii < d->entriesPerBucket; ii++){ + if(!URLKEY_COMPARE(bucketp[ii].entry.key, entry->entry.key)){ + if(!HCEntry_Compare(&entry->entry, &bucketp[ii].entry)){ + HCE_INVALIDATE(&bucketp[ii].entry); + sanityCheckMatch(d, bucketp, entry, 0, 0); + break; + } + else{ + /* + * Matched key but not NodeId. + */ + *survivor = bucketp[ii]; + sanityCheckMatch(d, bucketp, entry, 0, 1); + sanityCheckMatch(d, bucketp, survivor, 1, 1); + return 1; + } + } + } + writeBucket(d, b, bucketp); + /* + * No match key + */ + return 0; +} + + +/* + * Insert an entry in a given bucket in hash table. + * The table is n-way associative. The new entry + * is always inserted into entry 0, and the remaining + * entries are shifted up. If there is an empty + * entry, we don't have to shift any higher entries. + * If all entries are full, entry d->entriesPerBucket - 1 + * (the last entry) is lost. + */ +static void insertKey(HintCacheDisk *d, int b, HintCacheDiskEntry *entry) +{ + HintCacheDiskEntry *bucketp; + int from, to; + + /* Get bucket */ + bucketp = readBucket(d, b); + + /* + * Caller responsible for deleting old entry before inserting + * a new one. + */ + sanityCheckMatch(d, bucketp, entry, 0, 0); + + /* + * First, see if there are any empty entries. All entries + * below the first empty one get shifted up to make + * room in entry 0. The last entry by definition is + * emtpy if all earlier ones are empty (you are always + * allowed to shift over the last entry.) 
+ */ + for(to = 0; to < d->entriesPerBucket - 1; to++){ + if(!HCE_VALID(&bucketp[to].entry)){ + break; + } + } + assert(to <= d->entriesPerBucket -1 ); + assert(!HCE_VALID(&bucketp[to].entry) || (to == d->entriesPerBucket - 1)); + + /* + * Shift entry up to entry to make entry 0 empty. + * The last bucket we shift is entry (from==entriesPerBucket-2) + * which shifts into the last bucket (entriesPerBucket-1). + * Don't go any farther, or we clobber the next bucket. + */ + for(; to > 0; to--){ + from = to - 1; + assert(from >= 0); + bucketp[to] = bucketp[from]; + } + bucketp[0] = *entry; + sanityCheckMatch(d, bucketp, entry, 1, 1); + writeBucket(d, b, bucketp); +} + +/* + * Check to see that the expected number of entries in bucket + * match specified key. + */ +static void +sanityCheckMatch(HintCacheDisk *d, HintCacheDiskEntry *bucketp, HintCacheDiskEntry *entry, + int expectFullMatch, int expectKeyMatch) +{ + int ii; + int fullMatch = 0, keyMatch = 0; + + for(ii = 0; ii < d->entriesPerBucket; ii++){ + if(!hintCacheEntryCompare(&bucketp[ii].entry, &entry->entry)){ + fullMatch++; + } + if(!URLKEY_COMPARE(bucketp[ii].entry.key, entry->entry.key)){ + keyMatch++; + } + } + assert(fullMatch == expectFullMatch); + assert(keyMatch == expectKeyMatch); +} + +/* + * Find an entry that matches the key. Return + * true if found and put the result in foundRet. + * Since this is a read, + * update the LRU list by removing key and + * then re-inserting key. + * + * Note: we can't just return a pointer to copy + * because its location in the bucket can change + * at any time. 
+ */ +static int +findKey(HintCacheDisk *d, int b, URLKey key, HintCacheDiskEntry *copy) +{ + HintCacheDiskEntry *bucketp; + int ii; + + bucketp = readBucket(d, b); + assert(copy != NULL); + assert(copy < bucketp || copy > &bucketp[d->entriesPerBucket-1]); + assert(URLKEY_COMPARE(key, INVALID_URL_KEY)); + for(ii = 0; ii < d->entriesPerBucket; ii++){ + if(!URLKEY_COMPARE(bucketp[ii].entry.key, key)){ + *copy = bucketp[ii]; + assert(!URLKEY_COMPARE(copy->entry.key, key)); + if(ii != 0){ + /* + * Move to front of LRU list by deleting entry and inserting + */ + HCE_INVALIDATE(&bucketp[ii].entry); + writeBucket(d, b, &bucketp[ii]); + insertKey(d, b, copy); + } + return 1; + } + } + HCE_INVALIDATE(©->entry); + return 0; +} + +static HintCacheDiskEntry * +readBucket(HintCacheDisk *d, int b) +{ + static HintCacheDiskEntry *bucketp = 0; + int bucketlen; + + /* If using mmap, just return the pointer */ + if (Config.Hints.usemmap) + return(d->mmappedArray + b); + + /* Get a static buffer to put stuff in */ + bucketlen = sizeof(HintCacheDiskEntry) * d->entriesPerBucket; + if (bucketp == 0) { + bucketp = (HintCacheDiskEntry *) xmalloc(bucketlen); + } + + /* Fetch hint from disk by Unix I/O */ + if (lseek(d->fd, b * sizeof(HintCacheDiskEntry), 0) == -1) { + debug(HCDISK_DEBUG, 1) ("warning: can't seek to hint at offset %d\n", + b * sizeof(HintCacheDiskEntry)); + return 0; + } + if (read(d->fd, bucketp, bucketlen) == -1) { + debug(HCDISK_DEBUG, 1) + ("warning: can't read hint at offset %d\n", + b * sizeof(HintCacheDiskEntry)); + return 0; + } + + return bucketp; +} + + +static int +writeBucket(HintCacheDisk *d, int b, HintCacheDiskEntry *bucketp) +{ + /* If using mmap, no action is necessary - model is that + you modify stuff via direct pointer you got from read */ + if (Config.Hints.usemmap) + return 0; + + /* Write bucket to disk by Unix I/O */ + if (lseek(d->fd, b * sizeof(HintCacheDiskEntry), 0) == -1) { + debug(HCDISK_DEBUG, 1) ("warning: can't seek to hint at offset %d\n", 
+	      b * sizeof(HintCacheDiskEntry));
+	return -1;
+    }
+    if (write(d->fd, bucketp, sizeof(HintCacheDiskEntry) * d->entriesPerBucket) == -1) {
+	debug(HCDISK_DEBUG, 1) ("warning: can't write hint at offset %d\n",
+	      b * sizeof(HintCacheDiskEntry));
+	return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Remove the hint cache header file and its ".theCache" companion
+ * file from disk (used when discarding the hint cache entirely).
+ * No-op when no cache file is configured.
+ */
+void
+hcdiskDeleteHintCache()
+{
+    char hcname[256];
+
+    if (!Config.Hints.cachefile[0])
+	return;
+
+    unlink(Config.Hints.cachefile);
+    /*
+     * BUG FIX: the old strcpy/strcat pair could overflow hcname[256]
+     * for a long configured path; snprintf bounds the copy and
+     * guarantees NUL termination.
+     */
+    snprintf(hcname, sizeof(hcname), "%s.theCache", Config.Hints.cachefile);
+    unlink(hcname);
+}
+