Example No. 1
struct addrinfo *addToCache(
    struct addrinfo *pAddr,
    const char      *host)

    {
    if ((pAddr->ai_family != AF_INET) ||
        (cacheDestroyed == TRUE))
      return(NULL);

    struct addrinfo    *pTmpAddr = NULL;
    char                key[65];

    pTmpAddr = in_cache(pAddr, key);

    if (pTmpAddr != NULL)
      {
      if (pTmpAddr != pAddr)
        freeaddrinfo(pAddr);

      return(pTmpAddr);
      }

    pthread_mutex_lock(&cacheMutex);

    int   index = addrs.size();

    try
      {
      addrs.push_back(pAddr);
      }
    catch(...)
      {
      pthread_mutex_unlock(&cacheMutex);
      return(NULL);
      }

    try
      {
      hosts.push_back(host);
      }
    catch(...)
      {
      addrs.pop_back();
    
      pthread_mutex_unlock(&cacheMutex);

      return(NULL);
      }

    addrToName.lock();
    nameToAddr.lock();
    addrToName.insert(index, key);
    nameToAddr.insert(index, host);
    nameToAddr.unlock();
    addrToName.unlock();

    pthread_mutex_unlock(&cacheMutex);
    return pAddr;
    }
Example No. 2
//----------------------------------------------------------------
bool Cache::read_block(Block block,int index, Cacheable *rt)
{
	int c_ind;

	index++;	// external numbering --> internal numbering
	if(index <= rt->file->get_num_of_blocks() && index>0)
	{
    	if((c_ind = in_cache(index,rt))>=0)
	   		memcpy(block, cache[c_ind], blocklength);
    	else // not in Cache
    	{
    	    page_faults++;
    		c_ind = next();
    		if (c_ind >= 0) // a block has been freed in cache
    		{
        		rt -> file -> read_block(cache[c_ind],index-1); // external numbering
        		cache_cont[c_ind] = index;
        		cache_tree[c_ind] = rt;
        		fuf_cont[c_ind] = used;
        		LRU_indicator[c_ind] = 0;
        		memcpy(block, cache[c_ind], blocklength);
    		}
    		else
    		    rt -> file -> read_block(block,index - 1); // read-through (external numbering)
    	}
    	return true;
	}
	else
	{
		printf("Requested block %d is illegal.", index - 1);  error("\n", true);
    }

	return false;
}
Example No. 3
/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
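The comment above describes push() routing an entry either into the pre_cache or into one of the two cache queues, depending on in_cache(mq, e) and the dirty flag. Below is a minimal stand-in sketch of that routing decision with simplified types; the in_cache flag and the queue objects are illustrative assumptions, not the real policy structures.

#include <stdbool.h>

/* Illustration only: simplified stand-ins for the policy structures. */
struct entry_sk { bool in_cache; bool dirty; };
struct queue_sk { const char *name; };

static struct queue_sk pre_cache_q   = { "pre_cache"   };
static struct queue_sk cache_clean_q = { "cache_clean" };
static struct queue_sk cache_dirty_q = { "cache_dirty" };

/* Mirrors the branch in push(): an entry already backed by a cache block
 * goes to the clean or dirty cache queue, everything else stays in the
 * pre_cache, where only its hit statistics are tracked. */
static struct queue_sk *pick_queue(const struct entry_sk *e)
{
	if (e->in_cache)	/* assumed equivalent of in_cache(mq, e) */
		return e->dirty ? &cache_dirty_q : &cache_clean_q;
	return &pre_cache_q;
}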
Example No. 4
Node *
canonical(Node *n)
{	Node *m;	/* assumes input is right_linked */

	if (!n) return n;
	if ((m = in_cache(n)) != ZN)
		return m;

	n->rgt = canonical(n->rgt);
	n->lft = canonical(n->lft);

	return cached(n);
}
Example No. 5
/* assumes input is right_linked */
Node *canonical(Node *n, Miscellaneous *miscell, int *cnt, char *uform , int *tl_yychar)
{	
	Node *m;	

	if (!n) 
		return n;

	if ((m = in_cache(n, cnt, uform, tl_yychar, miscell)) != NULL)
		return m;

	n->rgt = canonical(n->rgt, miscell, cnt, uform, tl_yychar);
	n->lft = canonical(n->lft, miscell, cnt, uform, tl_yychar);

	return cached(n, miscell, cnt, uform, tl_yychar);
}
Example No. 6
/* assumes input is right_linked */
Node *canonical(Node *n)
{	
	Node *m;	

	if (!n) 
		return n;

	if ((m = in_cache(n)) != NULL)
		return m;

	n->rgt = canonical(n->rgt);
	n->lft = canonical(n->lft);

	return cached(n);
}
Example No. 7
int main(int argc, char* argv[]) {
  int low, up;
  long long result = 0;
  for (low = 1; low < 10000; low++) {
    int sum = low * low;
    for (up = low + 1; up < 10000; up++) {
      sum += up * up;
      if (sum > 10000 * 10000) break;
      if (!in_cache(sum) && is_palindromic(sum)) {
        add_cache(sum);
        result += sum;
      }
    }
  }
  printf("%lli\n", result);
  return 0;
}
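The duplicate guard above relies on in_cache(), add_cache() and is_palindromic(), which are not shown on this page. Here is a minimal sketch of what they could look like, assuming the cache only has to answer whether a sum was already counted (a growable array with linear search; the original helpers may well use something faster).

#include <stdlib.h>

/* Hypothetical helpers, illustration only. */
static int *seen = NULL;
static int seen_len = 0, seen_cap = 0;

static int in_cache(int v) {
  for (int i = 0; i < seen_len; i++)
    if (seen[i] == v) return 1;
  return 0;
}

static void add_cache(int v) {
  if (seen_len == seen_cap) {
    seen_cap = seen_cap ? 2 * seen_cap : 64;
    int *grown = realloc(seen, seen_cap * sizeof *grown);
    if (!grown) exit(1);          /* out of memory: give up */
    seen = grown;
  }
  seen[seen_len++] = v;
}

static int is_palindromic(int v) {
  int rev = 0;
  for (int tmp = v; tmp > 0; tmp /= 10)
    rev = rev * 10 + tmp % 10;
  return rev == v;
}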
Example No. 8
struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }
  
  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}
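Here, and again in Example No. 14 below, in_cache() is used as a positional lookup: a non-negative result is the index of an already-known peer, a negative result means the entry is new. Below is a self-contained sketch of that contract with simplified stand-in types; the field names and the string comparison are assumptions, while the real code compares opaque node IDs.

#include <string.h>

/* Illustration only: simplified stand-ins for the peer cache structures. */
struct entry_sk { char id[32]; int timestamp; };
struct cache_sk { int current_size; struct entry_sk entries[64]; };

/* Positional lookup: index of the entry with a matching id, -1 on a miss. */
static int in_cache_sk(const struct cache_sk *c, const struct entry_sk *e)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (strcmp(c->entries[i].id, e->id) == 0) {
      return i;
    }
  }

  return -1;
}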
Example No. 9
//----------------------------------------------------------------
bool Cache::write_block(Block block, int index, Cacheable *rt)
{
	int c_ind;

	index++;	// external numbering --> internal numbering
	if(index <= rt->file->get_num_of_blocks() && index > 0)
	{
    	c_ind = in_cache(index, rt);
    	if(c_ind >= 0)	
		{
			memcpy(cache[c_ind], block, blocklength);
			dirty_indicator[c_ind] = true;
		}
    	else		// not in Cache
    	{
    		c_ind = next();
    		if (c_ind >= 0)
    		{
        		memcpy(cache[c_ind],block,blocklength);
        		cache_cont[c_ind] = index;
        		cache_tree[c_ind] = rt;
        		fuf_cont[c_ind] = used;
        		LRU_indicator[c_ind] = 0;
				dirty_indicator[c_ind] = true;
    		}
    		else
			{
		  		rt -> file -> write_block(block,index - 1);
				//*****line added by TAO Yufei*****
				page_faults++;
				//*********************************
			}
	    }
	    return true;
	}
	else
	{
		printf("Requested block %d is illegal.", index - 1);  error("\n", true);
    }

	return false;
}
Example No. 10
// Try to get the sector in cache.
// If it's in cache, get this cache, otherwise create a new cache.
struct cache_block *
cache_get(block_sector_t sector, bool dirty)
{
  struct cache_block *cb = in_cache(sector);
  if (cb)
  {
    cb->access = true;
    cb->dirty |= dirty;
  }
  else
  {
    cb = cache_new();
    cb->sector_no = sector;
    cb->access = true;
    cb->dirty = dirty;
    block_read(fs_device, cb->sector_no, &cb->data);
  }
  //cache_read_ahead(sector);  //failed to read ahead
  return cb;
}
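cache_get() above treats in_cache() as "return the block that already holds this sector, or NULL". Below is a stand-in sketch under that assumption; the slot count, the valid flag and the array layout are illustrative, not the original buffer cache.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustration only: simplified stand-ins for the buffer cache. */
typedef uint32_t sector_sk_t;

struct cache_block_sk {
  bool        valid;      /* slot currently holds a sector */
  sector_sk_t sector_no;
  bool        access;
  bool        dirty;
  uint8_t     data[512];
};

#define CACHE_SLOTS_SK 64
static struct cache_block_sk blocks_sk[CACHE_SLOTS_SK];

/* Find the slot already caching this sector, or NULL so the caller
 * allocates a fresh one, as cache_get() does above on a miss. */
static struct cache_block_sk *in_cache_sk(sector_sk_t sector)
{
  size_t i;

  for (i = 0; i < CACHE_SLOTS_SK; i++)
    if (blocks_sk[i].valid && blocks_sk[i].sector_no == sector)
      return &blocks_sk[i];
  return NULL;
}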
Example No. 11
Node *
cached(Node *n)
{	Cache *d;
	Node *m;

	if (!n) return n;
	if ((m = in_cache(n)) != ZN)
		return m;

	Caches++;
	d = (Cache *) tl_emalloc(sizeof(Cache));
	d->before = dupnode(n);
	d->after  = Canonical(n); /* n is released */

	if (ismatch(d->before, d->after))
	{	d->same = 1;
		releasenode(1, d->after);
		d->after = d->before;
	}
	d->nxt = stored;
	stored = d;
	return dupnode(d->after);
}
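Examples No. 4, 5, 6 and 11 use in_cache() and cached() as a memoization pair over formula syntax trees: in_cache() returns a previously computed canonical form for a structurally equal node, and cached() records the (before, after) pair on the stored list for later hits. Below is a self-contained sketch of that pattern with simplified stand-ins; same_tree, lookup_sk and remember_sk are illustrative, not the original tl helpers.

#include <stdlib.h>

/* Illustration only: simplified stand-ins for Node and the cache entries. */
typedef struct CNode CNode;
struct CNode { int op; CNode *lft, *rgt; };

typedef struct CEntry CEntry;
struct CEntry { CNode *before; CNode *after; CEntry *nxt; };

static CEntry *stored_sk = NULL;

/* Structural equality of two trees. */
static int same_tree(const CNode *a, const CNode *b)
{
	if (a == b) return 1;
	if (!a || !b) return 0;
	return a->op == b->op
	    && same_tree(a->lft, b->lft)
	    && same_tree(a->rgt, b->rgt);
}

/* in_cache-style lookup: the canonical form of a structurally equal
 * node seen before, or NULL on a miss. */
static CNode *lookup_sk(const CNode *n)
{
	CEntry *e;
	for (e = stored_sk; e; e = e->nxt)
		if (same_tree(e->before, n))
			return e->after;
	return NULL;
}

/* cached-style store: remember the (before, after) pair so later
 * structurally equal nodes hit the cache. */
static void remember_sk(CNode *before, CNode *after)
{
	CEntry *e = (CEntry *) malloc(sizeof *e);
	if (!e) return;
	e->before = before;
	e->after  = after;
	e->nxt    = stored_sk;
	stored_sk = e;
}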
Example No. 12
//----------------------------------------------------------------
bool Cache::fix_block(int index, Cacheable *rt)
{
	int c_ind;

	index++;	// external numbering --> internal numbering
	if (index <= rt -> file -> get_num_of_blocks() && index>0)
	{
    	if((c_ind = in_cache(index, rt)) >= 0)
		{
			fuf_cont[c_ind] = fixed;
			return true;
		}
		/*
    	else		// not in the cache
    		if((c_ind=next())>=0)	// is a cache slot free?
    		{
        		rt->file->read_block(cache[c_ind],index-1); // external numbering
        		cache_cont[c_ind]=index;
        		cache_tree[c_ind]=rt;
        		fuf_cont[c_ind]=fixed;
    		}
    		else	// no cache slot available
    		    return FALSE;
		*/ /*lines commented by TAO Yufei.  this code will read a block from
		    the disk without incrementing the page_faults.  On the other hand,
			we choose not to fetch the page if it is not in memory*/
		else
			return false;
	}
	else
	{
		printf("Requested block %d is illegal.", index - 1);  error("\n", true);
    }

	return false;
}
Example No. 13
/* $begin doit */
void doit(int fd_client)
{
    rio_t rio_client;
    rio_t rio_server;
    
    int fd_server=0;
    int is_cached=0;
    
    char buf[MAXLINE],method[MAXLINE],uri[MAXLINE],version[MAXLINE];
    char pathname[MAXLINE],hostname[MAXLINE],hdr_server[MAXLINE];
    char *port=(char*)malloc(MAXLINE);  /* sizeof(port) only allocated the size of a pointer */
    
    strcpy(port,PORT_DEFAULT);
    
    rio_readinitb(&rio_client,fd_client);
    rio_readlineb(&rio_client,buf,MAXLINE);
    
    sscanf(buf,"%s %s %s",method,uri,version);
    
    if (strcasecmp(method, "GET")) {
        clienterror(fd_client, method, "501", "Not Implemented",
                    "Tiny does not implement this method");
        return;
    }
    
    is_cached = in_cache(mycache, uri);
    if (is_cached) {
        printf("Cache hit\n");
        doit_cached(mycache, uri, fd_client);
    }
    
    else {
    printf("Cache miss\n");
    parse_uri(uri,pathname,hostname,port);
    
    
    fd_server=Open_clientfd(hostname,port);
    hdr_concat(&rio_client,hdr_server,hostname,pathname);
    // printf("%s",hdr_server);
    rio_writen(fd_server, hdr_server, strlen(hdr_server));
    rio_readinitb(&rio_server,fd_server);
    
    char buf_server[MAXLINE],file_buf[MAX_OBJECT_SIZE];
    int n=0;
    int size=0;
    memset(buf_server, 0, MAXLINE);
        
    while((n=rio_readlineb(&rio_server,buf_server,MAXLINE))!=0){
        if (size+n <= MAX_OBJECT_SIZE) {
            memcpy(file_buf+size, buf_server, n);
            size += n;
        }
        rio_writen(fd_client, buf_server, n);
        memset(buf_server, 0, n);
    }
        if (size <= MAX_OBJECT_SIZE) {
            insert(mycache, uri, file_buf, size);
        }
    close(fd_server);
    }
    free(port);     /* freed on both the cache-hit and cache-miss paths */
}
Example No. 14
struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}
Example No. 15
// ---------------------------------------------------------------------------
void __fastcall FtpControl::Execute()
{
	DWORD nopwait = 0;
	DWORD nopcmdtm = GetTickCount();
	DWORD secondtm = GetTickCount();
	NameThreadForDebugging("FtpControl");

	fdeb("FtpControl thread %x", GetCurrentThreadId());

	// Priority = tpIdle;

	fce = new TEvent(false);

	while (true)
	{
		Sleep(50);

		if (GetTickCount() - secondtm >= 1000)
		{
			secondtm = GetTickCount();
			call_events(E_SECOND, (LPVOID)rand());
		}
//
//		if (GetTickCount() - nopcmdtm >= nopwait)
//		{
//			nopwait = 23400 + (rand() % 13400);
//
//			if (!FtpSocket || processing_disk_event)
//				continue;
//
//			char *cmds[] =
//			{
//				"NOp", "fEAT", "NooP", "STAT", "HELP", "TYPE", "SYST", "PWD"
//			};
//
//			int num = rand() % 8;
//
//			ftplock();
//
//			ftpcmd(cmds[num], 0, 0);
//
//			char b[11111];
//			ftprl(b, timeoutst, 0);
//
//			while (ftphasdata())
//				ftprl(b, timeoutst, 0);
//
//			ftpunlock();
//
//			// ftpunlock();
//
//			nopcmdtm = GetTickCount();
//		 }

		if (!ccs)
			continue;

		ccs->Enter();

		for (vector<CACHE>::iterator it = cache.begin(); it != cache.end(); it++)
		{

			if (GetTickCount() - (*it).lastaccessed >= 10000)
			{
				try
				{
					if ((SIZE_T)HeapSize(GetProcessHeap(), 0, (*it).buf) != (SIZE_T) - 1)
					{
						// fdeb(E_CACHEMSG, "cache: freeing %7lu bytes (%7lu in records) of '%s' @ %08X",
						// HeapSize(GetProcessHeap(), 0, (*it).buf), (*it).size, (*it).fn, (*it).buf);

						int numcaches = ((int)cache.size()) - 1;
						unsigned long heapsiz;
						heapsiz = (unsigned long)HeapSize(GetProcessHeap(), 0, (*it).buf);

						// report the element that is about to be freed here, before
						// erase(); the old code dereferenced the iterator again after
						// erase(), when it already pointed to the next element (or end())
						call_events(E_CACHEFREED, (LPVOID)(*it).buf, (LPVOID)(*it).fn, (LPVOID)heapsiz,
							(LPVOID)(*it).offset, (LPVOID)numcaches);

						HeapFree(GetProcessHeap(), 0, (*it).buf);
						it = cache.erase(it);

						if (it == cache.end())
							break;
					}
					else
					{
						deb(E_FTPCONTROL, "cache block @ %p for '%s' size: %lu is not heap allocated!",
							(*it).buf, (*it).fn, (*it).size);
					}
				}
				catch(...)
				{
					exp;
				}
			}
			//
			// // decrease blocks
			// unsigned long maxcachebufsize = precacheminbytes; // 8 * 1024 * 1024;
			// if ((*it).bufsize > maxcachebufsize)
			// {
			// unsigned long newsiz = maxcachebufsize - 512000;
			// memcpy((*it).buf, (void*)((unsigned long)(*it).buf + (unsigned long)(*it).size -
			// (unsigned long)maxcachebufsize), newsiz);
			//
			// (*it).buf = HeapReAlloc(GetProcessHeap(), 0, (*it).buf, newsiz);
			// cache_list();
			// deb(E_CACHEMSG, "cache block %p truncated from %lu to %lu", (*it).buf, (*it).bufsize,
			// newsiz);
			//
			//
			// (*it).offset += ((*it).bufsize - newsiz);
			// if (!(*it).buf)
			// ds;
			// (*it).size = newsiz;
			// (*it).bufsize = newsiz;
			// }

		}
		ccs->Leave();

		// fdeb(E_CACHEMSG,"check for cache");

		files_sort_diskevent();

		int seqreads;
		seqreads = cache_seq_reads();

		for (vector<FATFILE>::iterator it = files.begin(); it != files.end(); it++)
		{

			if (GetTickCount() - (*it).lastaccessed >= 2000)
			{
				(*it).sequential_reads = 0;

			}

			if ((*it).lastlen && GetTickCount() - (*it).lastdisktime <= 1500)
			{
				// fdeb("checking %s", (*it).fn);
				unsigned long precachesize = 0;
				unsigned long readoffset = 0;
				unsigned long flen = 0;

				unsigned long stofs = 0; // (*it).lastoffset + (*it).lastlen;

				if (!(*it).sequential_reads)
				{
					// fdeb(E_CACHEMSG, "have another %lu bytes @ %lu", (*it).lastlen * 10,
					// stofs - (*it).lastoffset + (*it).lastlen);
					// fdeb( "skip sequential_read");
					continue;

				}
				else
				{
					// fdeb(E_CACHEMSG, "sequential_reads %lu, cache %lu @ %lu", (*it).sequential_reads,
					// (*it).nextcachelen, (*it).nextcacheoffset);

					if (in_cache((*it).dosfn, (*it).lastoffset, (*it).nextcachelen))
					{
						// fdeb("skip precache");
						continue;
					}
				}

				// stofs = highcacheoffset((*it).dosfn);
				// stofs = seqcacheoffset((*it).dosfn);
				stofs = (*it).nextcacheoffset;
				if (stofs <= 65535)
					stofs = 0;
				deb("nextcachelen=%lu", (*it).nextcachelen);
				flen = (*it).nextcachelen;
				if (!flen)	// re-reading nextcachelen here was redundant
					flen = 512000;
				(*it).nowcaching = true;
				(*it).nowcachingoffset = stofs;
				(*it).nowcachinglen = flen;
				strcpy(nowcachingdosfn, (*it).dosfn);
				// fdeb(E_CACHEMSG, "seqcacheoffset %lu", stofs);

				if (!ftptrylock())
					continue;

				char curdir[255];
				int code = 250;
				strncpy(curdir, ftpcurdir, sizeof(curdir));
				fdeb("allocating %lu bytes", flen);
				char *buf = (char*)HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, flen);
				fdeb("flen %lu, buf @ %p %lu", flen, buf, HeapSize(GetProcessHeap(), 0, buf));
				// ftpcs->Enter();
				// fdeb("

				if (strcmpi((*it).ftppath, ftpcurdir) != 0)
				{

					ftpcd("/");
					code = 250;

					if (strlen((*it).ftppath))
						code = ftpcd((*it).ftppath);

					if (code != 250)
					{
						ftpcd("/");
						// ecs->Leave();
						// processing_disk_event = false;
						// ftpcs->Leave();
						ftpunlock();
						continue;
					}

					strcpy(ftpcurdir, (*it).ftppath);
				}

				fdeb("caching %lu '%s' @ %lu  ", flen, (*it).fn, stofs);

				call_events(E_STARTPRECACHE, (LPVOID)(*it).fn, (LPVOID)flen, (LPVOID)stofs);

				fdeb("ftpgetfile(%-15s, %8lu, %8p, %8lu)", (*it).fn, (unsigned long)stofs, buf,
					(unsigned long)flen);

				call_events(E_FTPPREGETFILE, (LPVOID)(*it).ftppath, (LPVOID)(*it).fn, (LPVOID)stofs,
					(LPVOID)buf, (LPVOID)flen);

				DWORD ftpgettm = GetTickCount();

				unsigned long nrd = ftpgetfile((*it).fn, (unsigned long)stofs, (char*)buf, flen,
					(*it).dosfn, 65535, stofs);

				ftpunlock(); // ftpcs->Leave();

				HeapFree(GetProcessHeap(), 0, buf);

				(*it).nowcaching = false;

				ftpgettm = GetTickCount() - ftpgettm;

				// fdeb(E_CACHEMSG, "precacheclstrs [%lu]", (*it).precacheclstrs);
				fdeb("ftpgetfile(%-15s, %8lu, %8p, %8lu) = %lu", (*it).fn, (unsigned long)stofs, buf,
					(unsigned long)flen, nrd);

				call_events(E_FTPGETFILE, (LPVOID)(*it).ftppath, (LPVOID)(*it).fn, (LPVOID)stofs,
					(LPVOID)buf, (LPVOID)flen, (LPVOID)ftpcontroltime, (LPVOID)ftpdatatime,
					(LPVOID)nrd);

				if (nrd > 0)
				{
					// cache_save((*it).ftppath, (*it).dosfn, ((unsigned long)stofs), nrd, buf);

					(*it).nextcacheoffset = stofs + flen;
					// (*it).lastoffset = stofs;

				}
				else
				{
					(*it).lastlen = 0;
					fdeb("zero download %lu @ %lu fsize %lu!", flen, stofs, (*it).size);
					// ds;
				}

				nopcmdtm = GetTickCount();

			}
		}

	}
}
Example No. 16
/* Takes a request and forwards it to its destination server by opening a client
 * connection. Returns the response of the destination server.
 * This function frees memory allocated for the request also using free_req() 
 */
void forward_request(int fd, req_t request){
    int server;
    size_t n, total_read;
    cache_obj* entry;
    char *name, *portstr, http[1024], buf[MAXLINE], cachebuf[MAX_OBJECT_SIZE];
    rio_t rio;

    cachebuf[0] = '\0';
    name = strtok(request.domain, ":");
    portstr = strtok(NULL, ":");
    if(name == NULL){ 
        free_req(request);
        return;
    }
    if(portstr == NULL) portstr = "80";
    
    // checking the cache is still updating it (age)
    P(&w);
    if((entry = in_cache(request.path, num_entries, cache)) != NULL){
        V(&w);
        Rio_writen(fd, entry->buf, entry->obj_size);
    } else {
        V(&w);
         server = Open_clientfd_r(name, atoi(portstr));
         if(server != -1){
            sprintf(http, "GET /%s HTTP/1.0\r\n", request.path);
            strcat(http, request.hdrs);
            Rio_writen(server, http, strlen(http));
            Rio_writen(server, "\r\n", 2);
        } else {
            reparse(&request);
            char *wdpath;
            wdpath = getcwd(NULL,0);
            wdpath = Realloc(wdpath, strlen(wdpath) + strlen(request.path) +1);
            strcat(wdpath, request.path);
            server = open(wdpath, O_RDONLY);
            Free(wdpath);
            if(server == -1){
                not_found(fd);
                free_req(request);
                return;
            }
        }
    	Rio_readinitb(&rio, server);

    	total_read = 0;
    	while((n = Rio_readlineb(&rio, buf, MAXLINE)) > 0){
            if(total_read+n <= MAX_OBJECT_SIZE){
                strcat(cachebuf, buf);
            }
            total_read += n;
            Rio_writen(fd, buf, n);
    	}
        // cache update, critical section
        if(total_read <= MAX_OBJECT_SIZE){
            P(&w);
            cache = cache_write(request.path, cachebuf, num_entries, cache);
            num_entries++;
            V(&w);
        } 
    }
    free_req(request);
}
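The lookup forward_request() branches on is assumed here to scan the cached objects by request path and, on a hit, return the entry whose buffered response is written back verbatim. Below is a stand-in sketch of that contract; cache_obj_sk and its fields are illustrative, not the original cache implementation.

#include <stddef.h>
#include <string.h>

/* Illustration only: simplified stand-in for the proxy's cache entries. */
struct cache_obj_sk {
    char   path[1024];     /* request path used as the cache key */
    char  *buf;            /* buffered response                  */
    size_t obj_size;       /* number of bytes in buf             */
};

/* Returns the entry whose path matches, or NULL on a miss, mirroring the
 * way forward_request() branches on the result of in_cache(). */
static struct cache_obj_sk *
in_cache_sk(const char *path, int num_entries, struct cache_obj_sk *cache)
{
    int i;

    for (i = 0; i < num_entries; i++) {
        if (strcmp(cache[i].path, path) == 0) {
            return &cache[i];
        }
    }

    return NULL;
}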
Example No. 17
errstat
sp_commit()
{
    struct sp_table *st;
    struct sp_save  *firstmod;
    objnum  obj;
    errstat err;
    
    /* When we get an error writing to the (probably local) bulletsvr,
     * we can do nothing but panic().  We cannot abort the transaction
     * because the other members will (presumably?) not have this problem
     * and hence have accepted the modification.
     */

    /* First see if we have multiple modifications as a result of the last
     * command. (This may happen as a result of sp_install()).
     * If so, we must use the intentions module rather than the modlist module
     * in order to guarantee atomicity.
     */
    st = sp_first_update();
    firstmod = (st != NULL) ? st->st_dir->sd_mods : NULL;

    if ((firstmod != NULL && firstmod->ss_next == NULL) /* just 1 mod */
	&& (st->st_dir->sd_update == NULL)		/* just 1 dir */)
    {
	obj = st - _sp_table;

	assert(sp_in_use(st));

	if (st->st_flags & SF_DESTROYING) {
	    MON_EVENT("destroying dir");
	    (void) remove_dir(obj);
	} else {
	    /* Note: we don't increase the current seqno of the directory
	     * itself, but use as the new seqnr the (already incremented)
	     * *global* sequence number.
	     * This avoids the possibility of having different incarnations
	     * of the same directory at the same time.
	     */
	    get_global_seqno(&st->st_dir->sd_seq_no);

	    err = ml_store(obj);
	    if (err != STD_OK && err != STD_NOSPACE) {
		/* Command not handled by modlist module, or no space in the
		 * modlist itself, or not enough memory.
		 */
		if (!ml_in_use()) {
		    /* Try to modify it, when possible and efficient.
		     * Otherwise write it out as a whole.
		     */
		    err = write_new_dir(obj, 1);
		} else {
		    /* Write it as a whole and remove it from the mod list */
		    err = ml_write_dir(obj);
		}
	    }

	    switch (err) {
	    case STD_OK:
		sp_free_mods(st);
		break;

	    case STD_NOSPACE:
		/* The directory would become too big.  This error should be
		 * consistent to all members, because they have the same
		 * data and use the same buffer sizes.
		 */
		scream("dir %ld would become too big", (long) obj); 
		sp_abort();	/* undoes the modification */
		return err;

	    default:
		/* must panic; see above */
		panic("sp_commit: cannot write dir %ld (%s)",
		      (long) obj, err_why(err));
	    }
	}
    } else {
	/* There are multiple updates or no updates at all */
	capability curcaps[NBULLETS];
	capability newcaps[NBULLETS];
	capability filesvrs[NBULLETS];
	struct sp_table *next_st;
	int nintents, avail;

	/* First make sure we have room for all of the intentions. */
	avail = intent_avail();
	nintents = 0;
	for (st = sp_first_update(); st != NULL; st = sp_next_update()) {
	    nintents++;
	}
	if (nintents > avail) {
	    scream("too many intentions: %d (max %d)", nintents, avail);
	    MON_EVENT("too many intentions");
	    sp_abort();
	    return STD_NOSPACE;
	}

	fsvr_get_svrs(filesvrs, NBULLETS);

	for (st = sp_first_update(); st != NULL; st = next_st ) {
	    next_st = sp_next_update();
	    obj = st - _sp_table;

	    /* Add an updated version of this dir to the intentions list.
	     * All dirs for which we have modifications should be in memory:
	     */
	    assert(sp_in_use(st));
	    assert(in_cache(obj));
    
	    /* Note: all directories modified as a result of the current
	     * modification get the same seqno!
	     */
	    get_global_seqno(&st->st_dir->sd_seq_no);

	    if (!ml_in_use()) {
		/* Try to modify it, when possible and efficient.
		 * Otherwise write it out as a whole.
		 */
		get_dir_caps(obj, curcaps);
		err = dirf_modify(obj, NBULLETS, curcaps, newcaps, filesvrs);
	    } else {
		err = dirf_write(obj, NBULLETS, newcaps, filesvrs);
	    }

	    switch (err) {
	    case STD_OK:
		/* Put the new cap(s) in the intentions list.  Don't install
		 * them in the supertable yet: that'll be done after we have
		 * written the intention.  It'll also give us the opportunity
		 * to destroy the old versions afterwards.
		 */
		intent_add(obj, newcaps);
		MON_EVENT("added intention");
		break;

	    case STD_NOSPACE:
		/* The directory would become too big.  This error should be
		 * consistent to all members, because they have the same
		 * data and use the same buffer sizes.
		 * Remove the intentions and let the transaction fail.
		 */
		intent_undo();
		scream("dir %ld would become too big", (long) obj); 
		sp_abort();	/* undoes the modifications */
		return err;

	    default:	    /* must panic; see above */
		panic("sp_commit: cannot write dir %ld (%s)",
		      (long) obj, err_why(err));
	    }
	}

	if (nintents > 0) {
	    /* Write the intentions created to disk */
	    super_store_intents();

	    /* Install the modified directories in the super table and remove
	     * the intention again.  If we happen to crash between storing the
	     * intention and executing it, we'll complete it the next time
	     * during recovery.
	     */
	    intent_make_permanent();
	    MON_EVENT("executed intention");
	}
    }

    sp_clear_updates();
    sp_end();
    return STD_OK;
}
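The comments in sp_commit() describe an intentions-list (write-ahead) scheme for the multi-directory case: the new capabilities are first queued as intentions, written to stable storage with super_store_intents(), and only then installed, so a crash in between is repaired during recovery by re-executing the stored intention. Below is a generic, self-contained sketch of that idea; the types and the persist/install hooks are hypothetical and not the original server code.

/* Illustration only: a generic intentions list. */
struct intent_sk { long obj; int new_version; };

#define MAX_INTENTS_SK 32
static struct intent_sk intents_sk[MAX_INTENTS_SK];
static int n_intents_sk = 0;

/* Queue one modified object; the caller aborts the transaction when the
 * list is full, just as sp_commit() aborts when nintents > avail. */
static int intent_add_sk(long obj, int new_version)
{
    if (n_intents_sk == MAX_INTENTS_SK)
        return -1;
    intents_sk[n_intents_sk].obj = obj;
    intents_sk[n_intents_sk].new_version = new_version;
    n_intents_sk++;
    return 0;
}

/* Commit: persist the whole list first, then install each entry; a crash
 * after the persist step is repaired later by replaying the stored list. */
static void intents_commit_sk(void (*persist)(const struct intent_sk *, int),
                              void (*install)(const struct intent_sk *))
{
    int i;

    persist(intents_sk, n_intents_sk);
    for (i = 0; i < n_intents_sk; i++)
        install(&intents_sk[i]);
    n_intents_sk = 0;
}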