Example #1
File: AcDc.c Project: haiwei624/data
void gencode(Program prog, FILE * target,HashTable *Htable)//changed
{
    Statements *stmts = prog.statements;
    Statement stmt;

    while(stmts != NULL){
        stmt = stmts->first;
        switch(stmt.type){
            case Print:
                fprintf(target,"l%c\n",HashName(stmt.stmt.variable, Htable));
                fprintf(target,"p\n");
                break;
            case Assignment:
                fprint_expr(target, stmt.stmt.assign.expr, Htable);
                /*
                   if(stmt.stmt.assign.type == Int){
                   fprintf(target,"0 k\n");
                   }
                   else if(stmt.stmt.assign.type == Float){
                   fprintf(target,"5 k\n");
                   }*/
                fprintf(target,"s%c\n",HashName(stmt.stmt.assign.id, Htable));
                fprintf(target,"0 k\n");
                break;
        }
        stmts=stmts->rest;
    }

}
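In the AcDc.c examples (#1, #3, #7, and #8), HashName maps a multi-character identifier to a single letter that serves both as a dc register name and, via HashName(c, Htable) - 'a', as a symbol-table index. The project's actual HashTable is not shown in these snippets; the sketch below is only a minimal illustration of that contract, with an invented table layout.

#include <string>
#include <vector>

// Hypothetical sketch only: the real AcDc.c HashTable is not shown in these
// snippets. This just illustrates the contract implied by the call sites:
// each distinct identifier maps to one letter starting at 'a'.
struct HashTable {
    std::vector<std::string> names;   // identifiers in insertion order
};

char HashName(const char name[], HashTable *table)
{
    for (size_t i = 0; i < table->names.size(); i++)
        if (table->names[i] == name)
            return (char)('a' + i);              // identifier already registered
    table->names.push_back(name);                // register a new identifier
    return (char)('a' + table->names.size() - 1);
}

Under such a mapping, lookup_table and add_table in examples #7 and #8 stay within their fixed-size table as long as no more than 26 distinct identifiers are declared.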
Example #2
CondorResource *CondorResource::FindOrCreateResource( const char * resource_name,
													  const char *pool_name,
													  const Proxy *proxy )
{
	int rc;
	CondorResource *resource = NULL;

	rc = ResourcesByName.lookup( HashKey( HashName( resource_name,
													pool_name,
													proxy ? proxy->subject->fqan : NULL ) ),
								 resource );
	if ( rc != 0 ) {
		resource = new CondorResource( resource_name, pool_name,
									   proxy );
		ASSERT(resource);
		resource->Reconfig();
		ResourcesByName.insert( HashKey( HashName( resource_name,
												   pool_name,
												   proxy ? proxy->subject->fqan : NULL ) ),
								resource );
	} else {
		ASSERT(resource);
	}

	return resource;
}
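Examples #2, #4, #9, #11, and #14 all follow the same find-or-create pattern: HashName flattens a resource's identifying fields into one string, HashKey wraps that string for the hash table, and the resource object is either looked up or created and inserted under the same key. The HTCondor HashName implementations themselves are not shown in these snippets; the sketch below (with the hypothetical name BuildResourceKey) only illustrates the key-building step, treating NULL fields as empty components.

#include <string>

// Hypothetical sketch of a HashName-style key builder: join the identifying
// fields into one string, treating NULL as empty. The real HTCondor HashName
// variants differ per resource type and are not shown in these snippets.
static std::string BuildResourceKey(const char *resource_name,
                                    const char *pool_name,
                                    const char *proxy_fqan)
{
    std::string key;
    key += resource_name ? resource_name : "";
    key += ' ';
    key += pool_name ? pool_name : "";
    key += ' ';
    key += proxy_fqan ? proxy_fqan : "";
    return key;
}

Keying on all three fields is what lets CondorResource::FindOrCreateResource share one object per (resource, pool, proxy FQAN) combination while keeping resources that use different proxies separate.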
Example #3
File: AcDc.c Project: haiwei624/data
void fprint_expr( FILE *target, Expression *expr,HashTable *Htable)//changed
{

    if(expr->leftOperand == NULL){
        switch( (expr->v).type ){
            case Identifier:
                fprintf(target,"l%c\n",HashName((expr->v).val.id, Htable));
                break;
            case IntConst:
                fprintf(target,"%d\n",(expr->v).val.ivalue);
                break;
            case FloatConst:
                fprintf(target,"%f\n", (expr->v).val.fvalue);
                break;
            default:
                fprintf(target,"Error In fprint_left_expr. (expr->v).type=%d\n",(expr->v).type);
                break;
        }
    }
    else{
        fprint_expr(target, expr->leftOperand, Htable);
        if(expr->rightOperand == NULL){
            fprintf(target,"5k\n");
        }
        else{
            //	fprint_right_expr(expr->rightOperand);
            fprint_expr(target, expr->rightOperand ,Htable);
            fprint_op(target, (expr->v).type);
        }
    }
}
Example #4
GlobusResource *GlobusResource::FindOrCreateResource( const char *resource_name,
													  const Proxy *proxy,
													  bool is_gt5 )
{
	int rc;
	GlobusResource *resource = NULL;

	const char *canonical_name = CanonicalName( resource_name );
	ASSERT(canonical_name);

	const char *hash_name = HashName( canonical_name, proxy->subject->fqan );
	ASSERT(hash_name);

	rc = ResourcesByName.lookup( HashKey( hash_name ), resource );
	if ( rc != 0 ) {
		resource = new GlobusResource( canonical_name, proxy, is_gt5 );
		ASSERT(resource);
		if ( resource->Init() == false ) {
			delete resource;
			resource = NULL;
		} else {
			ResourcesByName.insert( HashKey( hash_name ), resource );
		}
	} else {
		ASSERT(resource);
	}

	return resource;
}
Example #5
EC2Resource::~EC2Resource()
{
	ResourcesByName.remove( HashKey( HashName( resourceName, m_public_key_file,
											   m_private_key_file ) ) );
	if ( gahp ) delete gahp;
	if (m_public_key_file) free(m_public_key_file);
	if (m_private_key_file) free(m_private_key_file);
}
Example #6
DCloudResource::~DCloudResource()
{
	ResourcesByName.remove( HashKey( HashName( resourceName, m_username,
											   m_password ) ) );
	delete gahp;
	free( m_username );
	free( m_password );
}
Example #7
File: AcDc.c Project: haiwei624/data
DataType lookup_table( SymbolTable *table, char c[] ,HashTable *Htable)//changed
{
    //int id = c-'a';
	int id = HashName(c, Htable)-'a';
    if( table->table[id] != Int && table->table[id] != Float)
        printf("Error : identifier %s is not declared\n", c);//error
    return table->table[id];
}
Example #8
File: AcDc.c Project: haiwei624/data
void add_table( SymbolTable *table, char c[], DataType t ,HashTable *Htable)//changed
{
    //int index = (int)(c - 'a');
	int index = HashName(c, Htable)-'a';
    if(table->table[index] != Notype)
        printf("Error : id %s has been declared\n", c);//error
    table->table[index] = t;
}
Example #9
EC2Resource* EC2Resource::FindOrCreateResource(const char * resource_name, 
	const char * public_key_file, const char * private_key_file )
{
	int rc;
	EC2Resource *resource = NULL;

	rc = ResourcesByName.lookup( HashKey( HashName( resource_name, public_key_file, private_key_file ) ), resource );
	if ( rc != 0 ) {
		resource = new EC2Resource( resource_name, public_key_file, private_key_file );
		ASSERT(resource);
		resource->Reconfig();
		ResourcesByName.insert( HashKey( HashName( resource_name, public_key_file, private_key_file ) ), resource );
	} else {
		ASSERT(resource);
	}

	return resource;
}
Example #10
CondorResource::~CondorResource()
{
	ResourcesByName.remove( HashKey( HashName( resourceName,
											   poolName,
											   proxyFQAN ) ) );

		// Make sure we don't leak a ScheddPollInfo. If there are other
		// CondorResources that still want to use it, they'll recreate it.
		// Don't delete it if we know another CondorResource is doing a
		// poll of the remote schedd right now.
		// TODO Track how many CondorResources are still using this
		//   ScheddPollInfo and delete it only if we're the last one.
	ScheddPollInfo *poll_info = NULL;
	PollInfoByName.lookup( HashKey( HashName( scheddName, poolName, NULL ) ),
						   poll_info );
	if ( poll_info && ( poll_info->m_pollActive == false ||
		 scheddStatusActive == true ) ) {
		PollInfoByName.remove( HashKey( HashName( scheddName, poolName,
												  NULL ) ) );
		delete poll_info;
	}
	if ( proxySubject != NULL ) {
		free( proxySubject );
	}
	free( proxyFQAN );
	if ( scheddPollTid != TIMER_UNSET ) {
		daemonCore->Cancel_Timer( scheddPollTid );
	}
	if ( gahp != NULL ) {
		delete gahp;
	}
	if ( ping_gahp != NULL ) {
		delete ping_gahp;
	}
	if ( lease_gahp != NULL ) {
		delete lease_gahp;
	}
	if ( scheddName != NULL ) {
		free( scheddName );
	}
	if ( poolName != NULL ) {
		free( poolName );
	}
}
Example #11
DCloudResource* DCloudResource::FindOrCreateResource(const char *resource_name,
													 const char *username,
													 const char *password )
{
	int rc;
	MyString resource_key;
	DCloudResource *resource = NULL;

	rc = ResourcesByName.lookup( HashKey( HashName( resource_name, username, password ) ), resource );
	if ( rc != 0 ) {
		resource = new DCloudResource( resource_name, username, password );
		ASSERT(resource);
		resource->Reconfig();
		ResourcesByName.insert( HashKey( HashName( resource_name, username, password ) ), resource );
	} else {
		ASSERT(resource);
	}

	return resource;
}
Example #12
void DumpActorLook(json::Value& value, uint32 aid) {
  SnoFile<Actor> actor(Actor::name(aid));
  if (!actor) return;
  SnoFile<Appearance> app(actor->x014_AppearanceSno.name());
  if (!app) return;
  auto& val = value[fmtstring("%d", aid)]["looks"];
  uint32 index = 0;
  for (auto& look : app->x1C0_AppearanceLooks) {
    val[fmtstring("%u", HashName(look.x00_Text))] = index++;
  }
}
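In example #12, HashName(look.x00_Text) takes a single string and yields a 32-bit value that is printed with %u and used as a JSON key. The project's actual hash (presumably the game's own name-hashing scheme) is not shown here; the sketch below is only a stand-in with the same shape, using FNV-1a.

#include <cstdint>

// Hypothetical stand-in with the same shape as the HashName call in example
// #12 (C string in, 32-bit value out). The project's real hash function is
// not shown in these snippets; FNV-1a is used purely as an illustration.
uint32_t HashName32(const char *text)
{
    uint32_t hash = 2166136261u;            // FNV-1a offset basis
    for (; *text != '\0'; ++text) {
        hash ^= (uint8_t)*text;
        hash *= 16777619u;                  // FNV-1a prime
    }
    return hash;
}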
Example #13
void CondorResource::UnregisterJob( CondorJob *job )
{
	ScheddPollInfo *poll_info = NULL;
	PollInfoByName.lookup( HashKey( HashName( scheddName, poolName, NULL ) ),
						   poll_info );
	if ( poll_info ) {
		poll_info->m_submittedJobs.Delete( job );
	}

		// This may call delete, so don't put anything after it!
	BaseResource::UnregisterJob( job );
}
Example #14
INFNBatchResource* INFNBatchResource::FindOrCreateResource(const char * batch_type, 
	const char * resource_name )
{
	int rc;
	INFNBatchResource *resource = NULL;

	if ( resource_name == NULL ) {
		resource_name = "";
	}

	rc = ResourcesByName.lookup( HashKey( HashName( batch_type, resource_name ) ), resource );
	if ( rc != 0 ) {
		resource = new INFNBatchResource( batch_type, resource_name );
		ASSERT(resource);
		resource->Reconfig();
		ResourcesByName.insert( HashKey( HashName( batch_type, resource_name ) ), resource );
	} else {
		ASSERT(resource);
	}

	return resource;
}
Example #15
NordugridResource::~NordugridResource()
{
	ResourcesByName.remove( HashKey( HashName( resourceName, proxyFQAN ) ) );
	free( proxyFQAN );
	if ( proxySubject ) {
		free( proxySubject );
	}
	if ( gahp ) {
		delete gahp;
	}
	delete m_statusGahp;
	if ( m_jobStatusTid != TIMER_UNSET ) {
		daemonCore->Cancel_Timer( m_jobStatusTid );
	}
}
Example #16
std::string TextureReplacer::LookupHashFile(u64 cachekey, u32 hash, int level) {
	ReplacementAliasKey key(cachekey, hash, level);
	auto alias = aliases_.find(key);
	if (alias == aliases_.end()) {
		// Also check for a few more aliases with zeroed portions:
		// Only clut hash (very dangerous in theory, in practice not more than missing "just" data hash)
		key.cachekey = cachekey & 0xFFFFFFFFULL;
		key.hash = 0;
		alias = aliases_.find(key);

		if (alias == aliases_.end()) {
			// No data hash.
			key.cachekey = cachekey;
			key.hash = 0;
			alias = aliases_.find(key);
		}

		if (alias == aliases_.end()) {
			// No address.
			key.cachekey = cachekey & 0xFFFFFFFFULL;
			key.hash = hash;
			alias = aliases_.find(key);
		}

		if (alias == aliases_.end()) {
			// Address, but not clut hash (in case of garbage clut data.)
			key.cachekey = cachekey & ~0xFFFFFFFFULL;
			key.hash = hash;
			alias = aliases_.find(key);
		}

		if (alias == aliases_.end()) {
			// Anything with this data hash (a little dangerous.)
			key.cachekey = 0;
			key.hash = hash;
			alias = aliases_.find(key);
		}
	}

	if (alias != aliases_.end()) {
		// Note: this will be blank if explicitly ignored.
		return alias->second;
	}

	return HashName(cachekey, hash, level) + ".png";
}
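In example #16, HashName(cachekey, hash, level) produces the base filename for a replacement texture, to which ".png" is appended when no alias matches. The exact naming format is not shown in this snippet; the sketch below (with the hypothetical name MakeReplacementName) assumes a hex encoding of the cache key and data hash plus a mip-level suffix.

#include <cstdint>
#include <cstdio>
#include <string>

// Hypothetical sketch for example #16: encode the cache key and data hash as
// hex and tag non-zero mip levels. The real TextureReplacer naming format is
// an assumption here, not taken from the snippet.
std::string MakeReplacementName(uint64_t cachekey, uint32_t hash, int level)
{
    char buf[64];
    if (level > 0) {
        snprintf(buf, sizeof(buf), "%016llx%08x_%d",
                 (unsigned long long)cachekey, hash, level);
    } else {
        snprintf(buf, sizeof(buf), "%016llx%08x",
                 (unsigned long long)cachekey, hash);
    }
    return std::string(buf);   // caller appends ".png", as LookupHashFile does
}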
Example #17
GlobusResource::~GlobusResource()
{
	ResourcesByName.remove( HashKey( HashName( resourceName, proxyFQAN ) ) );
	if ( checkMonitorTid != TIMER_UNSET ) {
		daemonCore->Cancel_Timer( checkMonitorTid );
	}
	CleanupMonitorJob();
	if ( gahp != NULL ) {
		delete gahp;
	}
	if ( monitorGahp != NULL ) {
		delete monitorGahp;
	}
	if ( proxySubject ) {
		free( proxySubject );
	}
	free( proxyFQAN );
}
Example #18
//---------------------------------------------
// nsZipArchive::GetItem
//---------------------------------------------
nsZipItem*  nsZipArchive::GetItem(const char * aEntryName)
{
  if (aEntryName) {
    //-- If the request is for a directory, make sure that synthetic entries 
    //-- are created for the directories without their own entry.
    if (!mBuiltSynthetics) {
        PRUint32 len = strlen(aEntryName);
        if ((len > 0) && (aEntryName[len-1] == '/')) {
            if (BuildSynthetics() != ZIP_OK)
                return 0;
        }
    }

    nsZipItem* item = mFiles[ HashName(aEntryName) ];
    while (item) {
      if (!strcmp(aEntryName, item->name))
        return item; //-- found it
      item = item->next;
    }
  }
  return 0;
}
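Examples #18, #24, and #26 use HashName(item->name) as a direct index into the mFiles bucket array, so the function must return a value below the table size (ZIP_TABSIZE in example #26) and must be stable for a given name, since GetItem then walks the bucket's collision chain with strcmp. A minimal sketch of a hash with those properties follows; the bucket count and hash formula are assumptions, not nsZipArchive's actual ones.

#include <cstdint>

// Hypothetical sketch matching the usage mFiles[HashName(aEntryName)]: a
// simple polynomial string hash reduced modulo the bucket count. The constant
// below is an assumption; nsZipArchive's real ZIP_TABSIZE value and hash
// formula are not shown in these snippets.
static const uint32_t kZipTableSizeGuess = 256;

uint32_t HashEntryName(const char *name)
{
    uint32_t hash = 0;
    for (; *name != '\0'; ++name)
        hash = hash * 37 + (uint8_t)*name;   // accumulate over the entry name
    return hash % kZipTableSizeGuess;        // index into the bucket array
}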
Example #19
CreamResource::~CreamResource()
{
dprintf(D_FULLDEBUG,"*** ~CreamResource\n");
	CreamProxyDelegation *next_deleg;
	delegatedProxies.Rewind();
	while ( (next_deleg = delegatedProxies.Next()) != NULL ) {
dprintf(D_FULLDEBUG,"    deleting %s\n",next_deleg->deleg_uri?next_deleg->deleg_uri:"(undelegated)");
		delegatedProxies.DeleteCurrent();
		free( next_deleg->deleg_uri );
		ReleaseProxy( next_deleg->proxy,
					  (TimerHandlercpp)&CreamResource::ProxyCallback, this );
		delete next_deleg;
	}
	if ( delegationServiceUri != NULL ) {
		free( delegationServiceUri );
	}

	if ( serviceUri != NULL ) {
		free( serviceUri );
	}

	ResourcesByName.remove( HashKey( HashName( resourceName, proxySubject, proxyFirstFQAN ) ) );

	daemonCore->Cancel_Timer( delegationTimerId );
	if ( gahp != NULL ) {
		delete gahp;
	}
	if ( deleg_gahp != NULL ) {
		delete deleg_gahp;
	}
	delete status_gahp;
	delete m_leaseGahp;
	if ( proxySubject ) {
		free( proxySubject );
	}
	free( proxyFQAN );
	free( proxyFirstFQAN );
}
Example #20
const char *NordugridResource::GetHashName()
{
	return HashName( resourceName, proxyFQAN );
}
Example #21
void CondorResource::DoScheddPoll()
{
	int rc;
	ScheddPollInfo *poll_info = NULL;

	if ( ( registeredJobs.IsEmpty() || resourceDown ) &&
		 scheddStatusActive == false ) {
			// No jobs or we can't talk to the schedd, so no point
			// in polling
		daemonCore->Reset_Timer( scheddPollTid, BatchStatusInterval() );
		return;
	}

	if ( gahp->isStarted() == false ) {
		// The gahp isn't started yet. Wait a few seconds for a CondorJob
		// object to start it (and possibly initialize x509 credentials).
		daemonCore->Reset_Timer( scheddPollTid, 5 );
		return;
	}

	PollInfoByName.lookup( HashKey( HashName( scheddName, poolName, NULL ) ),
						   poll_info );

	daemonCore->Reset_Timer( scheddPollTid, TIMER_NEVER );

	if ( scheddStatusActive == false ) {

			// We share polls across all CondorResource objects going to
			// the same schedd. If another object has done a poll
			// recently, then don't bother doing one ourselves.
		if ( poll_info  == NULL ) {
			poll_info = new ScheddPollInfo;
			poll_info->m_lastPoll = 0;
			poll_info->m_pollActive = false;
			PollInfoByName.insert( HashKey( HashName( scheddName, poolName,
													  NULL ) ),
								   poll_info );
		}

		if ( poll_info->m_pollActive == true ||
			 poll_info->m_lastPoll + BatchStatusInterval() > time(NULL) ) {
			daemonCore->Reset_Timer( scheddPollTid, BatchStatusInterval() );
			return;
		}

			// start schedd status command
		dprintf( D_FULLDEBUG, "Starting collective poll: %s\n",
				 scheddName );
		std::string constraint;

			// create a list of jobs we expect to hear about in our
			// status command
			// Since we're sharing the results of this status command with
			// all CondorResource objects going to the same schedd, look
			// for their jobs as well.
		poll_info->m_submittedJobs.Rewind();
		while ( poll_info->m_submittedJobs.Next() ) {
			poll_info->m_submittedJobs.DeleteCurrent();
		}
		CondorResource *next_resource;
		BaseJob *job;
		std::string job_id;
		ResourcesByName.startIterations();
		while ( ResourcesByName.iterate( next_resource ) != 0 ) {
			if ( strcmp( scheddName, next_resource->scheddName ) ||
				 strcmp( poolName ? poolName : "",
						 next_resource->poolName ? next_resource->poolName : "" ) ) {
				continue;
			}

			next_resource->registeredJobs.Rewind();
			while ( ( job = next_resource->registeredJobs.Next() ) ) {
				if ( job->jobAd->LookupString( ATTR_GRID_JOB_ID, job_id ) ) {
					poll_info->m_submittedJobs.Append( (CondorJob *)job );
				}
			}
		}

		formatstr( constraint, "(%s)", submitter_constraint.c_str() );

		rc = gahp->condor_job_status_constrained( scheddName,
												  constraint.c_str(),
												  NULL, NULL );

		if ( rc != GAHPCLIENT_COMMAND_PENDING ) {
			dprintf( D_ALWAYS,
					 "gahp->condor_job_status_constrained returned %d for remote schedd: %s\n",
					 rc, scheddName );
			EXCEPT( "condor_job_status_constrained failed!" );
		}
		scheddStatusActive = true;
		poll_info->m_pollActive = true;

	} else {

			// finish schedd status command
		int num_status_ads;
		ClassAd **status_ads = NULL;

		ASSERT( poll_info );

		rc = gahp->condor_job_status_constrained( NULL, NULL,
												  &num_status_ads,
												  &status_ads );

		if ( rc == GAHPCLIENT_COMMAND_PENDING ) {
			return;
		} else if ( rc != 0 ) {
			dprintf( D_ALWAYS,
					 "gahp->condor_job_status_constrained returned %d for remote schedd %s\n",
					 rc, scheddName );
			dprintf( D_ALWAYS, "Requesting ping of resource\n" );
			RequestPing( NULL );
		}

		if ( rc == 0 ) {
			for ( int i = 0; i < num_status_ads; i++ ) {
				int cluster, proc;
				int rc2;
				std::string job_id_string;
				BaseJob *base_job = NULL;
				CondorJob *job;

				if( status_ads[i] == NULL ) {
					dprintf(D_ALWAYS, "DoScheddPoll was given null pointer for classad #%d\n", i);
					continue;
				}

				status_ads[i]->LookupInteger( ATTR_CLUSTER_ID, cluster );
				status_ads[i]->LookupInteger( ATTR_PROC_ID, proc );

				formatstr( job_id_string, "condor %s %s %d.%d", scheddName,
									   poolName, cluster, proc );

				rc2 = BaseJob::JobsByRemoteId.lookup( HashKey( job_id_string.c_str() ),
													  base_job );
				job = dynamic_cast<CondorJob*>( base_job );
				if ( rc2 == 0 ) {
					job->NotifyNewRemoteStatus( status_ads[i] );
					poll_info->m_submittedJobs.Delete( job );
				} else {
					delete status_ads[i];
				}
			}

			poll_info->m_lastPoll = time(NULL);
		}
		poll_info->m_pollActive = false;

		if ( status_ads != NULL ) {
			free( status_ads );
		}

			// Check if any jobs were missing from the status result
		if ( rc == 0 ) {
			CondorJob *job;
			std::string job_id;
			poll_info->m_submittedJobs.Rewind();
			while ( ( job = poll_info->m_submittedJobs.Next() ) ) {
				if ( job->jobAd->LookupString( ATTR_GRID_JOB_ID, job_id ) ) {
						// We should have gotten a status ad for this job,
						// but didn't. Tell the job that there may be
						// something wrong by giving it a NULL status ad.
					job->NotifyNewRemoteStatus( NULL );
				}
				poll_info->m_submittedJobs.DeleteCurrent();
			}
		}

		scheddStatusActive = false;

		dprintf( D_FULLDEBUG, "Collective poll complete: %s\n", scheddName );

		daemonCore->Reset_Timer( scheddPollTid, BatchStatusInterval() );
	}
}
Example #22
const char *CondorResource::GetHashName()
{
	return HashName( resourceName, poolName, proxyFQAN );
}
Example #23
INFNBatchResource::~INFNBatchResource()
{
	ResourcesByName.remove( HashKey( HashName( m_batchType.c_str(), resourceName ) ) );
	if ( gahp ) delete gahp;
}
Example #24
//---------------------------------------------
//  nsZipArchive::BuildFileList
//---------------------------------------------
nsresult nsZipArchive::BuildFileList()
{
  PRUint8   buf[4*BR_BUF_SIZE];

  //-----------------------------------------------------------------------
  // locate the central directory via the End record
  //-----------------------------------------------------------------------

  //-- get archive size using end pos
  PRInt32  pos = PR_Seek(mFd, 0, PR_SEEK_END);
#ifndef STANDALONE
  if (pos <= 0)
#else
  if (pos || ((pos = ftell(mFd)) <= 0))
#endif
    return ZIP_ERR_CORRUPT;

  PRBool bEndsigFound = PR_FALSE;
  while (!bEndsigFound)
  {
    //-- read backwards in 1K-sized chunks (unless file is less than 1K)
    PRInt32  bufsize = pos > BR_BUF_SIZE ? BR_BUF_SIZE : pos;
    pos -= bufsize;

    if (!ZIP_Seek(mFd, pos, PR_SEEK_SET))
      return ZIP_ERR_CORRUPT;

    if (PR_Read(mFd, buf, bufsize) != (READTYPE)bufsize)
      return ZIP_ERR_CORRUPT;

    //-- scan for ENDSIG
    PRUint8 *endp = buf + bufsize;
    for (endp -= ZIPEND_SIZE; endp >= buf; endp--)
    {
      if (xtolong(endp) == ENDSIG)
      { 
        //-- Seek to start of central directory
        PRInt32 central = xtolong(((ZipEnd *) endp)->offset_central_dir);
        if (!ZIP_Seek(mFd, central, PR_SEEK_SET))
          return ZIP_ERR_CORRUPT;

        bEndsigFound = PR_TRUE;
        break;
      }
    }

    if (bEndsigFound)
      break;

    if (pos <= 0)
      //-- We're at the beginning of the file, and still no sign
      //-- of the end signature.  File must be corrupted!
      return ZIP_ERR_CORRUPT;

    //-- backward read must overlap ZipEnd length
    pos += ZIPEND_SIZE;

  } /* while looking for end signature */


  //-------------------------------------------------------
  // read the central directory headers
  //-------------------------------------------------------
  PRInt32 byteCount = PR_Read(mFd, &buf, sizeof(buf));
  pos = 0;
  PRUint32 sig = xtolong(buf);
  while (sig == CENTRALSIG) {
    //-- make sure we've read enough
    if (byteCount - pos < ZIPCENTRAL_SIZE)
      return ZIP_ERR_CORRUPT;

    //-------------------------------------------------------
    // read the fixed-size data
    //-------------------------------------------------------
    ZipCentral* central = (ZipCentral*)(buf+pos);

    PRUint16 namelen = xtoint(central->filename_len);
    PRUint16 extralen = xtoint(central->extrafield_len);
    PRUint16 commentlen = xtoint(central->commentfield_len);

    //-- sanity check variable sizes and refuse to deal with
    //-- anything too big: it's likely a corrupt archive
    if (namelen > BR_BUF_SIZE || extralen > BR_BUF_SIZE || commentlen > 2*BR_BUF_SIZE)
      return ZIP_ERR_CORRUPT;

    nsZipItem* item = CreateZipItem(namelen);
    if (!item)
      return ZIP_ERR_MEMORY;

    item->headerOffset  = xtolong(central->localhdr_offset);
    item->dataOffset    = 0;
    item->size          = xtolong(central->size);
    item->realsize      = xtolong(central->orglen);
    item->crc32         = xtolong(central->crc32);
    item->time          = xtoint(central->time);
    item->date          = xtoint(central->date);
    item->isSynthetic   = PR_FALSE;
    item->hasDataOffset = PR_FALSE;

    PRUint16 compression = xtoint(central->method);
    item->compression   = (compression < UNSUPPORTED) ? (PRUint8)compression
                                                      : UNSUPPORTED;

    item->mode = ExtractMode(central->external_attributes);
#if defined(XP_UNIX) || defined(XP_BEOS)
    // Check if item is a symlink
    item->isSymlink = IsSymlink(central->external_attributes);
#endif

    pos += ZIPCENTRAL_SIZE;

    //-------------------------------------------------------
    // Make sure that remainder of this record (name, comments, extra)
    // and the next ZipCentral is all in the buffer
    //-------------------------------------------------------
    PRInt32 leftover = byteCount - pos;
    if (leftover < (namelen + extralen + commentlen + ZIPCENTRAL_SIZE)) {
      //-- not enough data left to process at top of loop.
      //-- move leftover and read more
      memcpy(buf, buf+pos, leftover);
      byteCount = leftover + PR_Read(mFd, buf+leftover, sizeof(buf)-leftover);
      pos = 0;

      if (byteCount < (namelen + extralen + commentlen + sizeof(sig))) {
        // truncated file
        return ZIP_ERR_CORRUPT;
      }
    }

    //-------------------------------------------------------
    // get the item name
    //-------------------------------------------------------
    memcpy(item->name, buf+pos, namelen);
    item->name[namelen] = 0;

    //-- an item whose name ends with '/' is a directory
    item->isDirectory = ('/' == item->name[namelen - 1]);

    //-- add item to file table
    //-- note that an explicit entry for a directory will override
    //-- a fake entry created for that directory (as in the case
    //-- of processing foo/bar.txt and then foo/) -- this will
    //-- preserve an explicit directory's metadata at the cost of
    //-- an extra nsZipItem (and that only happens if we process a
    //-- file inside that directory before processing the directory
    //-- entry itself)
    PRUint32 hash = HashName(item->name);
    item->next = mFiles[hash];
    mFiles[hash] = item;

    //-------------------------------------------------------
    // set up to process the next item at the top of loop
    //-------------------------------------------------------
    pos += namelen + extralen + commentlen;
    sig = xtolong(buf+pos);
  } /* while reading central directory records */

  if (sig != ENDSIG)
    return ZIP_ERR_CORRUPT;

  return ZIP_OK;
}
Example #25
const char *INFNBatchResource::GetHashName()
{
	return HashName( m_batchType.c_str(), resourceName );
}
Example #26
//---------------------------------------------
//  nsZipArchive::BuildSynthetics
//---------------------------------------------
nsresult nsZipArchive::BuildSynthetics()
{
  if (mBuiltSynthetics)
    return ZIP_OK;
  mBuiltSynthetics = PR_TRUE;

  // Create synthetic entries for any missing directories.
  // Do this when all ziptable has scanned to prevent double entries.
  for (int i = 0; i < ZIP_TABSIZE; ++i)
  {
    for (nsZipItem* item = mFiles[i]; item != 0; item = item->next)
    {
      if (item->isSynthetic)
        continue;
    
      //-- add entries for directories in the current item's path
      //-- go from end to beginning, because then we can stop trying
      //-- to create diritems if we find that the diritem we want to
      //-- create already exists
      //-- start just before the last char so as to not add the item
      //-- twice if it's a directory
      PRUint16 namelen = strlen(item->name);
      for (char* p = item->name + namelen - 2; p >= item->name; p--)
      {
        if ('/' != *p)
          continue;

        // See whether we need to create any more implicit directories,
        // because if we don't we can avoid a lot of work.
        // We can even avoid (de)allocating space for a bogus dirname with
        // a little trickery -- save the char at item->name[dirnamelen],
        // set it to 0, compare the strings, and restore the saved
        // char when done
        const PRUint32 dirnamelen = p + 1 - item->name;
        const char savedChar = item->name[dirnamelen];
        item->name[dirnamelen] = 0;

        // Is the directory in the file table?
        PRUint32 hash = HashName(item->name);
        PRBool found = PR_FALSE;
        for (nsZipItem* zi = mFiles[hash]; zi != NULL; zi = zi->next)
        {
          if (0 == strcmp(item->name, zi->name))
          {
            // we've already added this dir and all its parents
            found = PR_TRUE;
            break;
          }
        }

        // restore the char immediately
        item->name[dirnamelen] = savedChar;

        // if the directory was found, break out of the directory
        // creation loop now that we know all implicit directories
        // are there -- otherwise, start creating the zip item
        if (found)
          break;

        nsZipItem* diritem = CreateZipItem(dirnamelen);
        if (!diritem)
          return ZIP_ERR_MEMORY;

        memcpy(diritem->name, item->name, dirnamelen);
        diritem->name[dirnamelen] = 0;

        diritem->isDirectory = PR_TRUE;
        diritem->isSynthetic = PR_TRUE;
        diritem->compression = STORED;
        diritem->size = diritem->realsize = 0;
        diritem->crc32 = 0;
        diritem->mode = 0755;

        // Set an obviously wrong last-modified date/time, because
        // finding something more accurate like the most recent
        // last-modified date/time of the dir's contents is a lot
        // of effort.  The date/time corresponds to 1980-01-01 00:00.
        diritem->time = 0;
        diritem->date = 1 + (1 << 5) + (0 << 9);

        // add diritem to the file table
        diritem->next = mFiles[hash];
        mFiles[hash] = diritem;
      } /* end processing of dirs in item's name */
    }
  }
  return ZIP_OK;
}
Example #27
const char *GlobusResource::GetHashName()
{
	return HashName( resourceName, proxyFQAN );
}
Example #28
const char *CreamResource::GetHashName()
{
	return HashName( resourceName, proxySubject, proxyFirstFQAN );
}
Example #29
const char *EC2Resource::GetHashName()
{
	return HashName( resourceName, m_public_key_file, m_private_key_file );
}
Example #30
const char *DCloudResource::GetHashName()
{
	return HashName( resourceName, m_username, m_password );
}