Example #1
0
// AG main loop: crawl the dataset, using the crawler 
// AG main loop: crawl the dataset, using the crawler.
// cls is a struct AG_state* (pthread-style thread entry point).
// Runs until the global g_running flag clears, the crawler reports
// completion (rc > 0), or the crawler process goes away (-ENOTCONN).
// Always returns NULL.
static void* AG_crawl_loop( void* cls ) {

   int rc = 0;
   struct AG_state* ag = (struct AG_state*)cls;

   while( g_running ) {

      // poll the crawler for the next entry
      rc = AG_crawl_next_entry( ag );
      if( rc < 0 ) {

         if( rc == -ENOTCONN ) {

            // crawler process died; nothing more to do
            SG_warn("%s", "Crawler process is no longer running\n");
            break;
         }

         // transient failure: log it and back off before retrying
         SG_error("AG_crawl_next_entry rc = %d\n", rc );
         sleep(1);
      }
      else if( rc > 0 ) {

         // done crawling
         break;
      }
   }

   SG_debug("%s", "Crawler thread exit\n");
   return NULL;
}
Example #2
0
// update a file's manifest, in response to a remote call 
// return 0 on success
// return -ENOENT if not found
// return -ESTALE if not local
// return -errno on error 
// NOTE: the permissions will already have been checked by the server
// update a file's manifest, in response to a remote call 
// return 0 on success
// return -ENOENT if not found
// return -ESTALE if not local
// return -errno on error 
// NOTE: the permissions will already have been checked by the server
static int UG_impl_manifest_patch( struct SG_gateway* gateway, struct SG_request_data* reqdat, struct SG_manifest* write_delta, void* cls ) {
   
   int rc = 0;
   int ref_rc = 0;
   struct fskit_entry* fent = NULL;
   struct UG_state* ug = (struct UG_state*)SG_gateway_cls( gateway );
   
   struct fskit_core* fs = UG_state_fs( ug );
   struct UG_inode* inode = NULL;
   
   struct ms_client* ms = SG_gateway_ms( gateway );
   uint64_t volume_id = ms_client_get_volume_id( ms );

   // reload path metadata if stale, before we try to patch
   rc = UG_consistency_path_ensure_fresh( gateway, reqdat->fs_path );
   if( rc != 0 ) {
      SG_error("UG_consistency_path_ensure_fresh('%s') rc = %d\n", reqdat->fs_path, rc );
      return rc;
   }

   // reload the manifest if stale, so the patch applies to current data
   rc = UG_consistency_manifest_ensure_fresh( gateway, reqdat->fs_path );
   if( rc != 0 ) {
      SG_error("UG_consistency_manifest_ensure_fresh('%s') rc = %d\n", reqdat->fs_path, rc );
      return rc;
   }
   
   // look up; returns fent write-locked on success
   fent = fskit_entry_resolve_path( fs, reqdat->fs_path, reqdat->user_id, volume_id, true, &rc );
   if( fent == NULL ) {
      
      // rc is -ENOENT (or -EACCES, etc.) from the resolver
      return rc;
   }
   
   inode = (struct UG_inode*)fskit_entry_get_user_data( fent );
   
   // must be coordinated by us 
   if( UG_inode_coordinator_id( inode ) != SG_gateway_id( gateway ) ) {
      
      fskit_entry_unlock( fent );
      return -ESTALE;
   }
   
   // update the manifest; hold a ref so fent outlives the unlock below
   fskit_entry_ref_entry( fent );
   rc = UG_write_patch_manifest( gateway, reqdat, inode, write_delta );
   
   fskit_entry_unlock( fent );

   ref_rc = fskit_entry_unref( fs, reqdat->fs_path, fent );
   if( ref_rc != 0 ) {
      // log the unref's own error code, not the patch result
      SG_warn("fskit_entry_unref('%s') rc = %d\n", reqdat->fs_path, ref_rc );
   }
   
   return rc;
}
Example #3
0
// evict blocks, according to their LRU ordering and whether or not they are requested to be eagerly evicted
// NOTE: we assume that only one thread calls this at a time, for a given cache
// return 0 on success
// return the last eviction-related error on failure (i.e. due to bad I/O) (see md_cache_evict_block_internal)
// evict blocks, according to their LRU ordering and whether or not they are requested to be eagerly evicted
// NOTE: we assume that only one thread calls this at a time, for a given cache
// return 0 on success
// return the last eviction-related error on failure (i.e. due to bad I/O) (see md_cache_evict_block_internal)
int md_cache_evict_blocks( struct md_syndicate_cache* cache, md_cache_lru_t* new_writes ) {
   
   md_cache_lru_t* promotes = NULL;
   md_cache_lru_t* evicts = NULL;
   int worst_rc = 0;
   
   // swap promotes: take the currently-filling promote/evict buffers for
   // ourselves and point writers at the alternate (double-buffering), so we
   // can walk them below without holding the promotes lock.
   md_cache_promotes_wlock( cache );
   
   promotes = cache->promotes;
   if( cache->promotes == cache->promotes_1 ) {
      cache->promotes = cache->promotes_2;
   }
   else {
      cache->promotes = cache->promotes_1;
   }
   
   evicts = cache->evicts;
   if( cache->evicts == cache->evicts_1 ) {
      cache->evicts = cache->evicts_2;
   }
   else {
      cache->evicts = cache->evicts_1;
   }
   
   md_cache_promotes_unlock( cache );
   
   // safe access to the promote and evicts buffers, as long as no one performs the above swap
   
   md_cache_lru_wlock( cache );
   
   // merge in the new writes, as the most-recently-used
   // (splice moves the entries; *new_writes is left empty)
   if( new_writes ) {
      cache->cache_lru->splice( cache->cache_lru->end(), *new_writes );
   }
   
   // process promotions
   md_cache_promote_blocks( cache->cache_lru, promotes );
   
   // process demotions 
   md_cache_demote_blocks( cache->cache_lru, evicts );
   
   // NOTE: all blocks scheduled for eager eviction are at the beginning of cache_lru.
   // we will evict them here, even if the cache is not full.
   
   // see if we should start erasing blocks
   int num_blocks_written = cache->num_blocks_written;   // snapshot; adjusted atomically at the end
   int blocks_removed = 0;
   int eager_evictions = evicts->size();        // number of blocks to eagerly evict
   
   // work to do?  (either over the soft size limit, or eager evictions pending)
   if( cache->cache_lru->size() > 0 && ((unsigned)num_blocks_written > cache->soft_max_size || eager_evictions > 0) ) {
      
      // start evicting
      do { 
         
         // least-recently-used block
         struct md_cache_entry_key c = cache->cache_lru->front();
         cache->cache_lru->pop_front();
         
         int rc = md_cache_evict_block_internal( cache, c.file_id, c.file_version, c.block_id, c.block_version );
         
         if( rc != 0 && rc != -ENOENT ) {
            
            // remember the most recent hard failure; -ENOENT (already gone) is benign
            // NOTE(review): eager_evictions is NOT decremented on failure, so a failing
            // eager-evict block can cause one extra non-eager block to be evicted — confirm intended
            SG_warn("Failed to evict %" PRIX64 ".%" PRId64 "[%" PRIu64 ".%" PRId64 "], rc = %d\n", c.file_id, c.file_version, c.block_id, c.block_version, rc );
            worst_rc = rc;
         }
         else {
            // successfully evicted a block
            SG_debug("Cache EVICT %" PRIX64 ".%" PRId64 "[%" PRIu64 ".%" PRId64 "]\n", c.file_id, c.file_version, c.block_id, c.block_version );
            blocks_removed ++;
            eager_evictions --;
         }
         
      } while( cache->cache_lru->size() > 0 && ((unsigned)num_blocks_written - (unsigned)blocks_removed > cache->soft_max_size || eager_evictions > 0) );
      
      // blocks evicted!  publish the new count atomically (other threads increment it)
      __sync_fetch_and_sub( &cache->num_blocks_written, blocks_removed );
      
      SG_debug("Cache now has %d blocks\n", cache->num_blocks_written );
   }
   
   md_cache_lru_unlock( cache );
   
   // done with this: reset our halves of the double buffers for the next swap
   promotes->clear();
   evicts->clear();
   
   return worst_rc;
}
Example #4
0
// finish up getting directory metadata, and free up the download handle
// return 0 on success, and set *batch_id to this download's batch
//   *ret_num_children to the number of children downloaded, and *max_gen to be the largest generation number seen.
// return -ENOMEM on OOM 
static int ms_client_get_dir_metadata_end( struct ms_client* client, uint64_t parent_id, struct md_download_context* dlctx, ms_client_dir_listing* dir_listing, int64_t* batch_id, size_t* ret_num_children, int64_t* max_gen ) {
   
   int rc = 0;
   int listing_error = 0;
   struct md_entry* children = NULL;
   size_t num_children = 0;
   size_t num_unique_children = 0;
   CURL* curl = NULL;
   
   int64_t biggest_generation = 0;
   
   struct ms_client_get_dir_download_state* dlstate = (struct ms_client_get_dir_download_state*)md_download_context_get_cls( dlctx );
   md_download_context_set_cls( dlctx, NULL );

   // download status?
   rc = ms_client_download_parse_errors( dlctx );
   
   if( rc != 0 ) {
      
      if( rc != -EAGAIN) {
         // fatal 
         SG_error("ms_client_download_parse_errors( %p ) rc = %d\n", dlctx, rc );
      }
      
      md_download_context_unref_free( dlctx, &curl );
      if( curl != NULL ) {
          curl_easy_cleanup( curl );
      }

      ms_client_get_dir_download_state_free( dlstate );
      dlstate = NULL;
      
      return rc;
   }
   
   // collect the data 
   rc = ms_client_listing_read_entries( client, dlctx, &children, &num_children, &listing_error );
   
   // done with the download
   md_download_context_unref_free( dlctx, &curl );
   if( curl != NULL ) {
      curl_easy_cleanup( curl );
   }

   ms_client_get_dir_download_state_free( dlstate );
   dlstate = NULL;
   
   // did we get valid data?
   if( rc != 0 ) {
      
      SG_error("ms_client_listing_read_entries(%p) rc = %d\n", dlctx, rc );
      return rc;
   }
   
   if( listing_error != MS_LISTING_NEW ) {
      
      // somehow we didn't get data.  shouldn't happen in listdir
      SG_error("BUG: failed to get listing data for %" PRIX64 ", listing_error = %d\n", parent_id, listing_error );
      return -ENODATA;
   }
   
   // merge children in 
   for( unsigned int i = 0; i < num_children; i++ ) {
      
      uint64_t file_id = children[i].file_id;
      
      SG_debug("%p: %" PRIX64 "\n", dlctx, file_id );
      
      if( dir_listing->count( file_id ) > 0 ) {
         
         SG_warn("Duplicate child %" PRIX64 "\n", file_id );
         md_entry_free( &children[i] );
         continue;
      }
      
      try {
         
         (*dir_listing)[ file_id ] = children[i];
      }
      catch( bad_alloc& ba ) {
         rc = -ENOMEM;
         break;
      }
      
      // generation?
      if( children[i].generation > biggest_generation ) {
         
         biggest_generation = children[i].generation;
      }

      num_unique_children ++;
   }
   
   // NOTE: shallow free--we've copied the children into dir_listing
   SG_safe_free( children );
   
   *ret_num_children = num_unique_children;
   *max_gen = biggest_generation;
   
   return 0;
}