// TODO give this a better name.  It gets used outside this file now.
void do_url(
    SG_context* pCtx,
    const char* pszUrl,
    const char* psz_method,
    const char* psz_data,
    const char* psz_username,
    const char* psz_password,
    SG_string** ppstr,
    SG_pathname* pPath,
    SG_bool b_progress
)
{
    SG_string* pstr = NULL;
    CFHTTPMessageRef myRequest = NULL;
    CFHTTPMessageRef myResponse = NULL;
    CFStringRef s_username = NULL;
    CFStringRef s_password = NULL;

    if (pPath && ppstr)
    {
        SG_ERR_RESET_THROW2(SG_ERR_INVALIDARG, (pCtx, "do_url() returns into a string or a file, but not both"));
    }

    if (!pPath && !ppstr)
    {
        SG_ERR_RESET_THROW2(SG_ERR_INVALIDARG, (pCtx, "do_url() returns into a string or a file, one or the other"));
    }

    SG_ERR_CHECK(  make_request(pCtx, pszUrl, psz_method, psz_data, &myRequest)  );

#if 0
    {
        CFDataRef d = CFHTTPMessageCopySerializedMessage(myRequest);
        fprintf(stderr, "%s\n", CFDataGetBytePtr(d));
        CFRelease(d);
    }
#endif

    if (pPath)
    {
        SG_ERR_CHECK(  perform_request__file(pCtx, myRequest, &myResponse, pPath, b_progress)  );
    }
    else
    {
        SG_ERR_CHECK(  perform_request__string(pCtx, myRequest, &myResponse, &pstr)  );
    }

    if (!myResponse)
    {
        SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "No response from server"));
    }

    //fprintf(stderr, "\n%s\n", SG_string__sz(pstr));
    UInt32 statusCode = CFHTTPMessageGetResponseStatusCode(myResponse);
#if 0
    {
        CFDataRef d = CFHTTPMessageCopySerializedMessage(myResponse);
        fprintf(stderr, "%s\n", CFDataGetBytePtr(d));
        CFRelease(d);
    }
#endif

    if (
        psz_username
        && psz_password
        && (statusCode == 401 || statusCode == 407)
    )
    {
        s_username = CFStringCreateWithCString(kCFAllocatorDefault, psz_username, kCFStringEncodingUTF8);
        s_password = CFStringCreateWithCString(kCFAllocatorDefault, psz_password, kCFStringEncodingUTF8);
        if (!CFHTTPMessageAddAuthentication(myRequest, myResponse, s_username, s_password, kCFHTTPAuthenticationSchemeDigest, FALSE))
        {
            SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "CFHTTPMessageAddAuthentication failed"));
        }

#if 0
        {
            CFDataRef d = CFHTTPMessageCopySerializedMessage(myRequest);
            fprintf(stderr, "%s\n", CFDataGetBytePtr(d));
            CFRelease(d);
        }
#endif

        CFRelease(s_username);
        s_username = NULL;

        CFRelease(s_password);
        s_password = NULL;

        CFRelease(myResponse);
        myResponse = NULL;

        if (pPath)
        {
            SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath)  );
            SG_ERR_CHECK(  perform_request__file(pCtx, myRequest, &myResponse, pPath, b_progress)  );
        }
        else
        {
            SG_STRING_NULLFREE(pCtx, pstr);
            SG_ERR_CHECK(  perform_request__string(pCtx, myRequest, &myResponse, &pstr)  );
        }
#if 0
        {
            CFDataRef d = CFHTTPMessageCopySerializedMessage(myResponse);
            fprintf(stderr, "%s\n", CFDataGetBytePtr(d));
            CFRelease(d);
        }
#endif
        statusCode = CFHTTPMessageGetResponseStatusCode(myResponse);
    }

    if (statusCode != 200)
    {
        if (401 == statusCode)
        {
            SG_ERR_THROW(SG_ERR_HTTP_401);
        }
        else if (404 == statusCode)
        {
            SG_ERR_THROW(SG_ERR_HTTP_404);
        }
        else if (502 == statusCode)
        {
            SG_ERR_THROW(SG_ERR_HTTP_502);
        }
        else
        {
            SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", (int) statusCode));
        }
    }

    if (ppstr)
    {
        *ppstr = pstr;
        pstr = NULL;
    }

    /* fall through */

fail:
    if (s_username)
    {
        CFRelease(s_username);
        s_username = NULL;
    }

    if (s_password)
    {
        CFRelease(s_password);
        s_password = NULL;
    }

    if (myRequest)
    {
        CFRelease(myRequest);
        myRequest = NULL;
    }

    if (myResponse)
    {
        CFRelease(myResponse);
        myResponse = NULL;
    }

    SG_STRING_NULLFREE(pCtx, pstr);
}
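
A minimal call sketch (the URL, credentials, and download path here are hypothetical, not from the Veracity sources) showing the two mutually exclusive output modes that the argument checks above enforce: pass ppstr to receive the response body in a string, or pPath to stream it to a file.

static void example__fetch(SG_context* pCtx, SG_pathname* pPathDownload)
{
    SG_string* pstrBody = NULL;

    // GET into a string: ppstr set, pPath NULL, no progress meter
    SG_ERR_CHECK(  do_url(pCtx, "https://example.com/repo", "GET", NULL,
                          "user", "secret", &pstrBody, NULL, SG_FALSE)  );

    // GET into a file: ppstr NULL, pPath set, progress meter on
    SG_ERR_CHECK(  do_url(pCtx, "https://example.com/bundle", "GET", NULL,
                          NULL, NULL, NULL, pPathDownload, SG_TRUE)  );

fail:
    SG_STRING_NULLFREE(pCtx, pstrBody);
}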
Code example #2
/**
 * Load the SuperRoot TreeNode from the Repo and completely
 * populate the tbl_L[k] table.
 *
 * WE DO NOT DROP/RECREATE THE TABLE BEFORE STARTING.
 * The caller should have prep'd the table if they want that.
 *
 * We DO NOT put the super-root in the tne_L0 table; we
 * start the tne_L0 table with the actual root "@/" (aka "@b/").
 * (Info on the super-root can be found in the tbl_csets.)
 * 
 */
void sg_wc_db__tne__process_super_root(SG_context * pCtx,
									   sg_wc_db * pDb,
									   const sg_wc_db__cset_row * pCSetRow,
									   sqlite3_stmt * pStmt,
									   const char * pszHidSuperRoot)
{
	SG_treenode * pTreenodeSuperRoot = NULL;
	const char * pszGidRoot = NULL;							// we don't own this
	const SG_treenode_entry * pTreenodeEntryRoot = NULL;	// we don't own this
	SG_uint64 uiAliasGidNull = 0;
	SG_uint64 uiAliasGidRoot = 0;

	SG_ERR_CHECK(  SG_treenode__load_from_repo(pCtx,
											   pDb->pRepo,
											   pszHidSuperRoot,
											   &pTreenodeSuperRoot)  );

#if defined(DEBUG)
	{
		// verify we have a well-formed super-root.  that is, the super-root treenode
		// should have exactly 1 treenode-entry -- the "@" (aka "@b/") directory.

		SG_uint32 nrEntries;
		
		SG_ERR_CHECK(  SG_treenode__count(pCtx, pTreenodeSuperRoot, &nrEntries)  );
		if (nrEntries != 1)
			SG_ERR_THROW(  SG_ERR_MALFORMED_SUPERROOT_TREENODE  );
	}
#endif

	SG_ERR_CHECK(  SG_treenode__get_nth_treenode_entry__ref(pCtx,
															pTreenodeSuperRoot,0,
															&pszGidRoot,
															&pTreenodeEntryRoot)  );
#if defined(DEBUG)
	{
		const char * pszEntrynameRoot = NULL;					// we don't own this
		SG_treenode_entry_type tneTypeRoot;

		SG_ERR_CHECK(  SG_treenode_entry__get_entry_name(pCtx, pTreenodeEntryRoot, &pszEntrynameRoot)  );
		SG_ERR_CHECK(  SG_treenode_entry__get_entry_type(pCtx, pTreenodeEntryRoot, &tneTypeRoot)  );
		// we set the root's entryname to "@" (not "@b") for
		// historical reasons and because that is what is in
		// the treenode. (We don't want it to look like
		// a rename if/when the corresponding row is created
		// in the tbl_PC table (where it should have "@").)
		if ( (strcmp(pszEntrynameRoot,"@") != 0) || (tneTypeRoot != SG_TREENODEENTRY_TYPE_DIRECTORY) )
			SG_ERR_THROW(  SG_ERR_MALFORMED_SUPERROOT_TREENODE  );
	}
#endif

	// alias for null-root has already been added.
	SG_ERR_CHECK(  sg_wc_db__gid__insert(pCtx, pDb, pszGidRoot)  );
	SG_ERR_CHECK(  sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, SG_WC_DB__GID__NULL_ROOT, &uiAliasGidNull)  );
	SG_ERR_CHECK(  sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, pszGidRoot,               &uiAliasGidRoot)  );

	SG_ERR_CHECK(  sg_wc_db__tne__insert_recursive(pCtx, pDb, pCSetRow, pStmt,
												   uiAliasGidNull, uiAliasGidRoot,
												   pTreenodeEntryRoot)  );

fail:
	SG_TREENODE_NULLFREE(pCtx, pTreenodeSuperRoot);
}
Code example #3
File: sg_dagfrag.c  Project: avar/veracity
static void _process_work_queue_cb(SG_context * pCtx,
								   const char * szHid, SG_UNUSED_PARAM(void * pAssocData), void * pVoidCallerData)
{
	// we are given a random item in the work_queue.
	//
	// lookup the corresponding DATA node in the Cache, if it has one.
	//
	// and then evaluate where this node belongs:

	struct _work_queue_data * pWorkQueueData = (struct _work_queue_data *)pVoidCallerData;
	_my_data * pDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_bool bPresent = SG_FALSE;
	SG_UNUSED(pAssocData);

	SG_ERR_CHECK(  _cache__lookup(pCtx, pWorkQueueData->pFrag,szHid,&pDataCached,&bPresent)  );
	if (!bPresent)
	{
		// dagnode is not present in the cache.  therefore, we've never visited this
		// dagnode before.  add it to the cache with proper settings and maybe add
		// all of the parents to the work queue.

		SG_int32 myGeneration;

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pWorkQueueData->pRepo, szHid,&pDagnodeAllocated)  );

		SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeAllocated,&myGeneration)  );

        if ((myGeneration > pWorkQueueData->generationEnd))
        {
            SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
												pWorkQueueData->pFrag,
												myGeneration,
												pDagnodeAllocated,SG_DFS_INTERIOR_MEMBER,
												&pDataCached)  );
            pDagnodeAllocated = NULL;	// cache takes ownership of dagnode
			SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pDataCached->m_pDagnode, pWorkQueueData->prb_WorkQueue)  );
        }
        else
        {
            SG_ERR_CHECK(  _cache__add__fringe(pCtx, pWorkQueueData->pFrag, szHid)  );
            SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
        }
	}
	else
	{
		// dagnode already present in the cache. therefore, we have already visited it
		// before.  we can change our minds about the state of this dagnode if something
		// has changed (such as the fragment bounds being widened).

		switch (pDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pDataCached->m_state,szHid)  );

		case SG_DFS_START_MEMBER:
			// a dagnode has a parent that we are considering a START node.
			// this can happen when we were started from a non-leaf node and
			// then a subsequent call to __load is given a true leaf node or
			// a node deeper in the tree that has our original start node as
			// a parent.
			//
			// clear the start bit.  (we only want true fragment-terminal
			// nodes marked as start nodes.)

			pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
			// FALL-THRU-INTENDED

		case SG_DFS_INTERIOR_MEMBER:
			// a dagnode that we have already visited is being re-visited.
			// this happens for a number of reasons, such as when we hit
			// the parent of a branch/fork.  we might get visited because
			// we are a parent of each child.
			//
			// we also get revisited when the caller expands the scope of
			// the fragment.

			if (pWorkQueueData->generationEnd < pDataCached->m_genDagnode)
			{
				// the caller has expanded the scope of the fragment to include
				// older generations than the last time we visited this node.
				// this doesn't affect the state of this node, but it could mean
				// that older ancestors of this node should be looked at.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx,pDataCached->m_pDagnode,pWorkQueueData->prb_WorkQueue)  );
			}
			break;

		case SG_DFS_END_FRINGE:
			// a dagnode that was on the end-fringe is being re-evaluated.

			if (pDataCached->m_genDagnode > pWorkQueueData->generationEnd)
			{
				// it looks like the bounds of the fragment were expanded and
				// now includes this dagnode.
				//
				// move it from END-FRINGE to INCLUDE state.
				// and re-eval all of its parents.

				pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx,pDataCached->m_pDagnode,pWorkQueueData->prb_WorkQueue)  );
			}
			break;
		}
	}

	// we have completely dealt with this dagnode, so remove it from the work queue
	// and cause our caller to restart the iteration (because we changed the queue).

	SG_ERR_CHECK(  SG_rbtree__remove(pCtx,pWorkQueueData->prb_WorkQueue,szHid)  );
	SG_ERR_THROW(  SG_ERR_RESTART_FOREACH  );

fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
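
The SG_ERR_RESTART_FOREACH thrown above is not a failure: it tells the driver that the callback modified the rbtree being iterated. The driving loop, shown verbatim in SG_dagfrag__load_from_repo__one further down, resets that error and restarts the iteration until the queue drains:

	while (1)
	{
		_process_work_queue_item(pCtx, pFrag, prb_WorkQueue, generationEnd, pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// queue drained; we are done

		if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;					// a real error; propagate it

		SG_context__err_reset(pCtx);		// queue changed; restart iteration
	}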
Code example #4
void SG_cmd_util__get_username_and_password(
	SG_context *pCtx,
	const char *szWhoami,
	SG_bool force_whoami,
	SG_bool bHadSavedCredentials,
	SG_uint32 kAttempt,
	SG_string **ppUsername,
	SG_string **ppPassword
	)
{
	SG_string * pUsername = NULL;
	SG_string * pPassword = NULL;

	SG_NULLARGCHECK_RETURN(ppPassword);
	SG_NULLARGCHECK_RETURN(ppUsername);

	if (kAttempt == 0)
	{
		if (bHadSavedCredentials)
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR,
									  "\nAuthorization required.  Saved username/password not valid.\n")  );
		}
		else
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "\nAuthorization required.")  );
			if (SG_password__supported())
				SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, " Use --remember to save this password.")  );
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "\n")  );
		}
	}
	else if (kAttempt >= 3)
	{
		SG_ERR_THROW(  SG_ERR_AUTHORIZATION_TOO_MANY_ATTEMPTS  );
	}
	else
	{
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "\nInvalid username or password. Please try again.\n")  );
	}

	if(szWhoami!=NULL && force_whoami)
	{
		SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pUsername, szWhoami)  );

		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Enter password for %s: ", szWhoami)  );
		SG_ERR_CHECK(  SG_console__get_password(pCtx, &pPassword)  );
	}
	else
	{
		if(szWhoami)
		{
			SG_bool bAllWhitespace = SG_FALSE;
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Enter username [%s]: ", szWhoami)  );
			SG_ERR_CHECK(  SG_console__readline_stdin(pCtx, &pUsername)  );
			SG_ERR_CHECK(  SG_sz__is_all_whitespace(pCtx, SG_string__sz(pUsername), &bAllWhitespace)  );
			if(bAllWhitespace)
				SG_ERR_CHECK(  SG_string__set__sz(pCtx, pUsername, szWhoami)  );
		}
		else
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Enter username: ")  );
			SG_ERR_CHECK(  SG_console__readline_stdin(pCtx, &pUsername)  );
		}

		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Enter password: ")  );
		SG_ERR_CHECK(  SG_console__get_password(pCtx, &pPassword)  );
	}

	*ppUsername = pUsername;
	*ppPassword = pPassword;

	return;
fail:
	SG_STRING_NULLFREE(pCtx, pUsername);
	SG_STRING_NULLFREE(pCtx, pPassword);
}
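
A sketch of the retry loop a caller might wrap around this prompt (try_authenticate is a hypothetical helper, not in Veracity). The kAttempt counter drives the messages above, and the function itself throws SG_ERR_AUTHORIZATION_TOO_MANY_ATTEMPTS on the third retry:

SG_bool try_authenticate(SG_context* pCtx, SG_string* pU, SG_string* pP);	// hypothetical helper

static void example__login(SG_context* pCtx, SG_bool bHadSavedCredentials)
{
	SG_string* pUsername = NULL;
	SG_string* pPassword = NULL;
	SG_uint32 kAttempt = 0;

	while (1)	// the prompt throws once kAttempt reaches 3
	{
		SG_ERR_CHECK(  SG_cmd_util__get_username_and_password(pCtx, NULL, SG_FALSE,
															  bHadSavedCredentials, kAttempt,
															  &pUsername, &pPassword)  );
		if (try_authenticate(pCtx, pUsername, pPassword))
			break;		// use the credentials, then free them on the fail path below

		SG_STRING_NULLFREE(pCtx, pUsername);
		SG_STRING_NULLFREE(pCtx, pPassword);
		kAttempt++;
	}

fail:
	SG_STRING_NULLFREE(pCtx, pUsername);
	SG_STRING_NULLFREE(pCtx, pPassword);
}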
Code example #5
void SG_tag__add_tags(SG_context * pCtx, SG_repo * pRepo, SG_pendingtree * pPendingTree, const char* psz_spec_cs, SG_bool bRev, SG_bool bForce, const char** ppszTags, SG_uint32 count_args)
{
	SG_pathname* pPathCwd = NULL;
	char* psz_hid_cs = NULL;
	SG_audit q;
	SG_uint32 i = 0;
	char * psz_current_hid_with_that_tag = NULL;
	SG_bool bFreePendingTree = SG_FALSE;

	SG_ERR_CHECK(  SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS)  );

	// TODO 4/21/10 pendingtree contains a pRepo inside it.  we should
	// TODO 4/21/10 refactor this to alloc the pendingtree first and then
	// TODO 4/21/10 just borrow the pRepo from it.



	if (psz_spec_cs)
	{
		if (bRev)
		{
			SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_spec_cs, &psz_hid_cs)  );
		}
		else
		{
			SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, psz_spec_cs, &psz_hid_cs)  );
			if (psz_hid_cs == NULL)
				SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
		}
	}
	else
	{
		// tag the current baseline.
		//
		// when we have an uncommitted merge, we will have more than one parent.
		// what does this command mean then?  It feels like we should throw
		// an error and say that you have to commit first.

		const SG_varray * pva_wd_parents;		// we do not own this
		const char * psz_hid_parent_0;			// we do not own this
		SG_uint32 nrParents;

		if (pPendingTree == NULL)
		{

			SG_ERR_CHECK(  SG_pendingtree__alloc_from_cwd(pCtx, SG_TRUE, &pPendingTree)  );
			bFreePendingTree = SG_TRUE;
		}
		SG_ERR_CHECK(  SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pva_wd_parents)  );
		SG_ERR_CHECK(  SG_varray__count(pCtx, pva_wd_parents, &nrParents)  );
		if (nrParents > 1)
			SG_ERR_THROW(  SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE  );

		SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pva_wd_parents, 0, &psz_hid_parent_0)  );
		SG_ERR_CHECK(  SG_strdup(pCtx, psz_hid_parent_0, &psz_hid_cs)  );
	}

	if (!bForce)
	{
		//Go through and check all tags to make sure that they are not already applied.
		for (i = 0; i < count_args; i++)
		{
			const char * pszTag = ppszTags[i];
			SG_ERR_IGNORE(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag)  );
			if (psz_current_hid_with_that_tag != NULL && 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs)) //The tag has been applied, but not to the given changeset.
				SG_ERR_THROW(SG_ERR_TAG_ALREADY_EXISTS);
			SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
		}
	}
	for (i = 0; i < count_args; i++)
	{
		const char * pszTag = ppszTags[i];
		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag)  );
		if (psz_current_hid_with_that_tag == NULL || 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs))
		{
			//The tag has not been applied, or it's been applied to a different dagnode.
			if ( psz_current_hid_with_that_tag != NULL && bForce)  //Remove it, if it's already there
					SG_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, 1, &pszTag)  );
			SG_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, pszTag, &q)  );
		}
		SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	}

fail:
	SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	if (bFreePendingTree == SG_TRUE)
		SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_NULLFREE(pCtx, psz_hid_cs);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
}
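
A call sketch (the tag names are hypothetical; pCtx and pRepo are assumed in scope): with psz_spec_cs NULL and no pendingtree supplied, the function allocates one from the current working directory and tags the current baseline.

const char* apszTags[] = { "v1.0", "release-candidate" };	// hypothetical tags
SG_ERR_CHECK(  SG_tag__add_tags(pCtx, pRepo, NULL, NULL, SG_FALSE, SG_FALSE,
								apszTags, 2)  );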
Code example #6
void SG_unzip__locate_file(SG_context* pCtx, SG_unzip* s, const char* psz_filename, SG_bool* pb, SG_uint64* piLength)
{
    SG_bool b = SG_FALSE;

    /* We remember the 'current' position in the file so that we can jump
     * back there if we fail.
     */
    unz_file_info cur_file_infoSaved;
    unz_file_info_internal cur_file_info_internalSaved;
    SG_uint32 num_fileSaved;
    SG_uint32 pos_in_central_dirSaved;

    SG_NULLARGCHECK_RETURN( s );

	SG_ARGCHECK_RETURN((strlen(psz_filename) < UNZ_MAXFILENAMEINZIP), psz_filename);

    /* TODO hmmm.  why do we require the current file state to be "ok" here ? */
    if (!s->current_file_ok)
    {
        *pb = SG_FALSE;
        return;
    }

    /* Save the current state */
    num_fileSaved = s->num_file;
    pos_in_central_dirSaved = s->pos_in_central_dir;
    cur_file_infoSaved = s->cur_file_info;
    cur_file_info_internalSaved = s->cur_file_info_internal;

    SG_ERR_CHECK(  SG_unzip__goto_first_file(pCtx, s, &b, NULL, NULL)  );

    while (b)
    {
        if (strcmp(s->cur_file_name, psz_filename) == 0)
        {
            break;
        }

        SG_ERR_CHECK(  SG_unzip__goto_next_file(pCtx, s, &b, NULL, NULL)  );
    }

    if (b)
    {
        if (pb)
        {
            *pb = SG_TRUE;
        }
        if (piLength)
        {
            *piLength = s->cur_file_info.uncompressed_size;
        }
    }
    else
    {
        if (pb)
        {
            *pb = SG_FALSE;
            goto fail;
        }
        else
        {
            SG_ERR_THROW(  SG_ERR_NOT_FOUND  );
        }
    }

    return;

fail:
    /* We failed, so restore the state of the 'current file' to where we
     * were.
     */
    s->num_file = num_fileSaved ;
    s->pos_in_central_dir = pos_in_central_dirSaved ;
    s->cur_file_info = cur_file_infoSaved;
    s->cur_file_info_internal = cur_file_info_internalSaved;

}
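
A sketch pairing the lookup with the open call from the next example (the member name is hypothetical). Passing a non-NULL pb turns a miss into a quiet SG_FALSE instead of an SG_ERR_NOT_FOUND throw:

    SG_bool bFound = SG_FALSE;
    SG_uint64 uLength = 0;

    SG_ERR_CHECK(  SG_unzip__locate_file(pCtx, s, "manifest.json", &bFound, &uLength)  );
    if (bFound)
    {
        SG_ERR_CHECK(  SG_unzip__currentfile__open(pCtx, s)  );
        /* ... read uLength uncompressed bytes, then SG_unzip__currentfile__close ... */
    }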
Code example #7
void SG_unzip__currentfile__open(SG_context* pCtx, SG_unzip* s)
{
    int zerr=Z_OK;
    SG_uint32 iSizeVar;
    file_in_zip_read_info_s* pfile_in_zip_read_info = NULL;
    SG_uint32 offset_local_extrafield;  /* offset of the local extra field */
    SG_uint32  size_local_extrafield;    /* size of the local extra field */

    SG_NULLARGCHECK_RETURN( s );
	SG_NULLARGCHECK_RETURN(s->current_file_ok);

    if (s->pfile_in_zip_read)
    {
        SG_unzip__currentfile__close(pCtx, s);
    }

    SG_ERR_CHECK(  sg_unzip__check_coherency(pCtx, s,&iSizeVar, &offset_local_extrafield,&size_local_extrafield)  );

	SG_ERR_CHECK(  SG_malloc(pCtx, sizeof(file_in_zip_read_info_s), &pfile_in_zip_read_info)  );

	SG_ERR_CHECK(  SG_malloc(pCtx, UNZ_BUFSIZE, &pfile_in_zip_read_info->read_buffer)  );
    pfile_in_zip_read_info->offset_local_extrafield = offset_local_extrafield;
    pfile_in_zip_read_info->size_local_extrafield = size_local_extrafield;
    pfile_in_zip_read_info->pos_local_extrafield=0;

    pfile_in_zip_read_info->stream_initialised=0;

    if ((s->cur_file_info.compression_method!=0) &&
        (s->cur_file_info.compression_method!=Z_DEFLATED))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    pfile_in_zip_read_info->crc32_wait = s->cur_file_info.crc;
    pfile_in_zip_read_info->crc32=0;
    pfile_in_zip_read_info->compression_method = s->cur_file_info.compression_method;
    pfile_in_zip_read_info->pFile = s->pFile;
    pfile_in_zip_read_info->byte_before_the_zipfile=s->byte_before_the_zipfile;

    pfile_in_zip_read_info->stream.total_out = 0;

    if (s->cur_file_info.compression_method==Z_DEFLATED)
    {
      pfile_in_zip_read_info->stream.zalloc = (alloc_func)0;
      pfile_in_zip_read_info->stream.zfree = (free_func)0;
      pfile_in_zip_read_info->stream.opaque = (voidpf)0;
      pfile_in_zip_read_info->stream.next_in = (voidpf)0;
      pfile_in_zip_read_info->stream.avail_in = 0;

      zerr=inflateInit2(&pfile_in_zip_read_info->stream, -MAX_WBITS);
      if (zerr == Z_OK)
      {
          pfile_in_zip_read_info->stream_initialised=1;
      }
      else
      {
          SG_ERR_THROW(  SG_ERR_ZLIB(zerr)  );
      }

        /* windowBits is passed < 0 to tell that there is no zlib header.
         * Note that in this case inflate *requires* an extra "dummy" byte
         * after the compressed stream in order to complete decompression and
         * return Z_STREAM_END.
         * In unzip, i don't wait absolutely Z_STREAM_END because I known the
         * size of both compressed and uncompressed data
         */
    }
    pfile_in_zip_read_info->rest_read_compressed =
            s->cur_file_info.compressed_size ;
    pfile_in_zip_read_info->rest_read_uncompressed =
            s->cur_file_info.uncompressed_size ;


    pfile_in_zip_read_info->pos_in_zipfile = s->cur_file_info_internal.offset_curfile + SIZEZIPLOCALHEADER + iSizeVar;

    pfile_in_zip_read_info->stream.avail_in = (SG_uint32)0;

    s->pfile_in_zip_read = pfile_in_zip_read_info;

    return;

fail:
    SG_NULLFREE(pCtx, pfile_in_zip_read_info);
}
Code example #8
void SG_repo__dag__find_direct_path_from_root(
        SG_context * pCtx,
        SG_repo* pRepo,
        SG_uint64 dagnum,
        const char* psz_csid,
        SG_varray** ppva
        )
{
    SG_varray* new_pva = NULL;
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_varray* old_pva = NULL;
    SG_dagnode* pdn = NULL;
    char* psz_cur = NULL;
    SG_string* pstr1 = NULL;
    SG_string* pstr2 = NULL;
#endif

    SG_ERR_CHECK(  SG_repo__find_dag_path(pCtx, pRepo, dagnum, NULL, psz_csid, &new_pva)  );

#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &old_pva)  );
    SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_csid, &psz_cur)  );
    while (1)
    {
        SG_uint32 count_parents = 0;
        const char** a_parents = NULL;

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_cur, &pdn)  );
        SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, psz_cur)  );
        SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pdn, &count_parents, &a_parents)  );
        if (0 == count_parents)
        {
            break;
        }
        SG_NULLFREE(pCtx, psz_cur);
        SG_ERR_CHECK(  SG_STRDUP(pCtx, a_parents[0], &psz_cur)  );
        SG_DAGNODE_NULLFREE(pCtx, pdn);
    }
    SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, "")  );

    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr1)  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr2)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, old_pva, pstr1)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, new_pva, pstr2)  );
    if (0 != strcmp(SG_string__sz(pstr1), SG_string__sz(pstr2)))
    {
        // a failure here isn't actually ALWAYS bad.  there can be more than one path
        // to root.

        fprintf(stderr, "old way:\n");
        SG_VARRAY_STDERR(old_pva);
        fprintf(stderr, "new way:\n");
        SG_VARRAY_STDERR(new_pva);

        SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
    }
#endif

    *ppva = new_pva;
    new_pva = NULL;

fail:
    SG_VARRAY_NULLFREE(pCtx, new_pva);
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_STRING_NULLFREE(pCtx, pstr1);
    SG_STRING_NULLFREE(pCtx, pstr2);
    SG_VARRAY_NULLFREE(pCtx, old_pva);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_NULLFREE(pCtx, psz_cur);
#endif
}
static void _merge__compute_target_hid(SG_context * pCtx,
									   SG_mrg * pMrg)
{
	const SG_rev_spec * pRevSpec = ((pMrg->pMergeArgs) ? pMrg->pMergeArgs->pRevSpec : NULL);
	SG_stringarray * psaHids = NULL;
	SG_stringarray * psaMissingHids = NULL;
	SG_rev_spec * pRevSpec_Allocated = NULL;
	SG_bool bRequestedAttachedBranch = SG_FALSE;
	SG_stringarray * psaBranchesRequested = NULL;
	const char * pszBranchNameRequested = NULL;
	SG_uint32 nrMatched = 0;
	SG_uint32 nrMatchedExcludingParent = 0;

	if (pRevSpec)
	{
		SG_uint32 uTotal = 0u;
		SG_uint32 uBranches = 0u;

		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &uTotal)  );
		SG_ERR_CHECK(  SG_rev_spec__count_branches(pCtx, pRevSpec, &uBranches)  );
		if (uTotal == 0u)
		{
			// if the rev spec is empty, just pretend it doesn't exist
			pRevSpec = NULL;
		}
		else if (uTotal > 1u)
		{
			// we can only handle a single specification
			SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "Merge can accept at most one revision/tag/branch specifier."));
		}
		else if (uTotal == 1u && uBranches == 1u)
		{
			SG_ERR_CHECK(  SG_rev_spec__branches(pCtx, (/*const*/ SG_rev_spec *)pRevSpec, &psaBranchesRequested)  );
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaBranchesRequested, 0, &pszBranchNameRequested)  );

			if (pMrg->pszBranchName_Starting)
				bRequestedAttachedBranch = (strcmp(pszBranchNameRequested, pMrg->pszBranchName_Starting) == 0);
		}
	}

	if (!pRevSpec)
	{
        if (!pMrg->pszBranchName_Starting)
            SG_ERR_THROW(  SG_ERR_NOT_TIED  );

		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated)  );
		SG_ERR_CHECK(  SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pMrg->pszBranchName_Starting)  );
		pRevSpec = pRevSpec_Allocated;
		pszBranchNameRequested = pMrg->pszBranchName_Starting;
		bRequestedAttachedBranch = SG_TRUE;
	}

	// Lookup the given (or synthesized) --rev/--tag/--branch
	// and see how many csets it refers to.  Disregard/filter-out
	// any that are not present in the local repo.

	SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pMrg->pWcTx->pDb->pRepo, pRevSpec, SG_TRUE,
											  &psaHids, &psaMissingHids)  );
	SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatched)  );
	if (nrMatched == 0)
	{
		SG_uint32 nrMissing = 0;
		SG_ASSERT_RELEASE_FAIL(  (psaMissingHids != NULL)  );
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaMissingHids, &nrMissing)  );
		if (nrMissing == 1)
		{
			const char * psz_0;
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaMissingHids, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
							(pCtx, "Branch '%s' refers to changeset '%s'. Consider pulling.",
							 pszBranchNameRequested, psz_0)  );
		}
		else
		{
			SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
							(pCtx, "Branch '%s' refers to %d changesets that are not present. Consider pulling.",
							 pszBranchNameRequested, nrMissing)  );
		}
	}
	else if (nrMatched == 1)
	{
		// We found a single unique match for our request.
		// We ***DO NOT*** disqualify the current baseline
		// in this case.  We let routines like do_cmd_merge_preview()
		// report that.

		const char * psz_0;
		SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
	}
	else
	{
		// We can only get here if pRevSpec contained a "--branch ..."
		// reference (because the "--rev" lookup throws when given a
		// non-unique prefix and "--tag" can only be bound to a single
		// cset).
		//
		// If they referenced the attached branch (and the baseline is
		// pointing at a head), we'll get our baseline in the result set,
		// so get rid of it.
		SG_ERR_CHECK(  SG_stringarray__remove_all(pCtx, psaHids, pMrg->pszHid_StartingBaseline, NULL)  );
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatchedExcludingParent)  );

		if (nrMatchedExcludingParent == 1)
		{
			// parent may or may not be a head of this branch, but
			// we found a single head or single other head.
			const char * psz_0;
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
		}
		else if (nrMatchedExcludingParent < nrMatched)
		{
			// There were at least 3 heads of this branch and the baseline
			// is one of them.  Throwing a generic 'needs merge' message is
			// not helpful.
			SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
							(pCtx, "Branch '%s' has %d heads (excluding the baseline). Consider merging one of the other heads using --rev/--tag.",
							 pszBranchNameRequested, nrMatchedExcludingParent)  );
		}
		else //if (nrMatchedExcludingParent == nrMatched)
		{
			// The requested branch has multiple heads and the current
			// baseline is NOT one of them.  The current baseline MAY OR MAY NOT
			// be in that branch.  (And independently, we may or may not be
			// attached to that branch.)
			//
			// See how the heads are related to the current baseline.
			const char * pszDescendant0 = NULL;
			const char * pszAncestor0 = NULL;
			SG_uint32 nrDescendants = 0;
			SG_uint32 nrAncestors = 0;
			SG_uint32 k;
			for (k=0; k<nrMatched; k++)
			{
				const char * psz_k;
				SG_dagquery_relationship dqRel;
				SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, k, &psz_k)  );
				SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pMrg->pWcTx->pDb->pRepo,
																	 SG_DAGNUM__VERSION_CONTROL,
																	 psz_k, pMrg->pszHid_StartingBaseline,
																	 SG_FALSE, SG_FALSE, &dqRel)  );
				if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
				{
					pszDescendant0 = psz_k;
					nrDescendants++; // target[k] is descendant of baseline
				}
				else if (dqRel == SG_DAGQUERY_RELATIONSHIP__ANCESTOR)
				{
					pszAncestor0 = psz_k;
					nrAncestors++;	// target[k] is ancestor of baseline
				}
			}
			SG_ASSERT(  ((nrDescendants == 0) || (nrAncestors == 0))  );
			if (nrDescendants == 1)
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, just not a head.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it and then merging the branch.",
									 pszBranchNameRequested, nrMatched, pszDescendant0)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pszDescendant0, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, pszDescendant0)  );
			}
			else if (nrDescendants > 1)					// nrDescendants may or may not be equal to nrMatched since there may be peers too.
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, just not a head.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them and then merging the branch.",
									 pszBranchNameRequested, nrMatched, nrDescendants)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, nrDescendants, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, nrDescendants)  );
			}
			else if (nrAncestors == 1)
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, but the head pointer is not pointing at us.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward and then merging the branch.",
									 pszBranchNameRequested, nrMatched, pszAncestor0)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pszAncestor0, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, pszAncestor0)  );
			}
			else if (nrAncestors > 1)					// nrAncestors may or may not be equal to nrMatched since there may be peers too.
			{
				SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
								(pCtx, "Branch '%s' has %d heads. All of them are ancestors of the current baseline. Consider moving one of the heads forward and removing the others.",
								 pszBranchNameRequested, nrMatched)  );
			}
			else										// All of the heads are peers of the current baseline.
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, but the head pointer is not pointing at us.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag.",
									 pszBranchNameRequested, nrMatched)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched)  );
			}
		}
	}

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psaBranchesRequested);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
	SG_STRINGARRAY_NULLFREE(pCtx, psaMissingHids);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
}
Code example #10
void SG_password__get(
	SG_context *pCtx,
	const char *szRepoSpec,
	const char *username,
	SG_string **ppstrPassword)
{
	SG_string *password = NULL;
	SG_string *path = NULL;
	SG_string *server = NULL;
	SG_string *proto = NULL;
	SG_uint32 port;
	SG_bool isValid = SG_FALSE;
	GnomeKeyringResult saveRes = 0;
	GList *results = NULL;
	guint count = 0;

	SG_NULLARGCHECK(username);
	SG_NULLARGCHECK(ppstrPassword);
	SG_NULLARGCHECK(szRepoSpec);

	if (! SG_password__supported())
		goto fail;

	SG_ERR_CHECK(  _sg_password__parse_url(pCtx, szRepoSpec,
		&isValid, &proto, &server, &path, &port)  );

	if (! isValid)
		SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED);

	saveRes = gnome_keyring_find_network_password_sync(
		username, NULL,
		SG_string__sz(server), SG_string__sz(path),
		SG_string__sz(proto), NULL, (guint32)port,
		&results);

	if ((saveRes != GNOME_KEYRING_RESULT_OK) && (saveRes != GNOME_KEYRING_RESULT_NO_MATCH) && (saveRes != GNOME_KEYRING_RESULT_CANCELLED))
		_SG_THROW_LINUX_SEC_ERROR(saveRes);

	if (results != NULL)
		count = g_list_length(results);

	if (count > 0)
	{
		const char *pw = "";
		GnomeKeyringNetworkPasswordData *entry = g_list_nth_data(results, 0);

		SG_ASSERT(entry != NULL);

		if (entry->password)
			pw = entry->password;

		SG_ERR_CHECK(  SG_string__alloc__sz(pCtx, &password, pw)  );
	}

	*ppstrPassword = password;
	password = NULL;

fail:
	SG_STRING_NULLFREE(pCtx, path);
	SG_STRING_NULLFREE(pCtx, server);
	SG_STRING_NULLFREE(pCtx, proto);
	SG_STRING_NULLFREE(pCtx, password);
	if (results)
		gnome_keyring_network_password_list_free(results);
}
Code example #11
static void SG_db__make_delta_from_path(
    SG_context* pCtx,
    SG_repo* pRepo,
    SG_uint64 dagnum,
    SG_varray* pva_path,
    SG_uint32 flags,
    SG_vhash* pvh_add,
    SG_vhash* pvh_remove
    )
{
#if SG_DOUBLE_CHECK__CALC_DELTA
    SG_int64 t1 = -1;
    SG_int64 t2 = -1;
    SG_vhash* new_pvh_add = NULL;
    SG_vhash* new_pvh_remove = NULL;
    SG_vhash* old_pvh_add = NULL;
    SG_vhash* old_pvh_remove = NULL;
    SG_string* old_pstr = NULL;
    SG_string* new_pstr = NULL;

    SG_ERR_CHECK(  SG_vhash__alloc__copy(pCtx, &new_pvh_add, pvh_add)  );
    SG_ERR_CHECK(  SG_vhash__alloc__copy(pCtx, &new_pvh_remove, pvh_remove)  );
    SG_ERR_CHECK(  SG_vhash__alloc__copy(pCtx, &old_pvh_add, pvh_add)  );
    SG_ERR_CHECK(  SG_vhash__alloc__copy(pCtx, &old_pvh_remove, pvh_remove)  );

    SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &t1)  );
    SG_ERR_CHECK(  old_SG_db__make_delta_from_path(
                pCtx,
                pRepo,
                dagnum,
                pva_path,
                old_pvh_add,
                old_pvh_remove
                )  );
    SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &t2)  );
    {
        SG_uint32 path_length = 0;
        SG_ERR_CHECK(  SG_varray__count(pCtx, pva_path, &path_length)  );
        fprintf(stderr, "make_delta_from_path (%d)\n", path_length);
    }
    fprintf(stderr, "  time old %d ms\n", (int) (t2 - t1));

    SG_ERR_CHECK(  SG_vhash__sort(pCtx, old_pvh_add, SG_FALSE, SG_vhash_sort_callback__increasing)  );
    SG_ERR_CHECK(  SG_vhash__sort(pCtx, old_pvh_remove, SG_FALSE, SG_vhash_sort_callback__increasing)  );

    SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &t1)  );
    SG_ERR_CHECK(  SG_repo__dbndx__make_delta_from_path(
                pCtx,
                pRepo,
                dagnum,
                pva_path,
                0,
                new_pvh_add,
                new_pvh_remove
                )  );
    SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &t2)  );
    fprintf(stderr, "  time new %d ms\n", (int) (t2 - t1));
    SG_ERR_CHECK(  SG_vhash__sort(pCtx, new_pvh_add, SG_FALSE, SG_vhash_sort_callback__increasing)  );
    SG_ERR_CHECK(  SG_vhash__sort(pCtx, new_pvh_remove, SG_FALSE, SG_vhash_sort_callback__increasing)  );

    SG_ERR_CHECK(  SG_string__alloc(pCtx, &old_pstr)  );
    SG_ERR_CHECK(  SG_vhash__to_json(pCtx, old_pvh_add, old_pstr)  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &new_pstr)  );
    SG_ERR_CHECK(  SG_vhash__to_json(pCtx, new_pvh_add, new_pstr)  );

    if (0 != strcmp(SG_string__sz(old_pstr), SG_string__sz(new_pstr)))
    {
        fprintf(stderr, "oldway:\n");
        SG_VHASH_STDERR(old_pvh_add);
        fprintf(stderr, "new:\n");
        SG_VHASH_STDERR(new_pvh_add);

        SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
    }

    SG_STRING_NULLFREE(pCtx, old_pstr);
    SG_STRING_NULLFREE(pCtx, new_pstr);

    SG_ERR_CHECK(  SG_string__alloc(pCtx, &old_pstr)  );
    SG_ERR_CHECK(  SG_vhash__to_json(pCtx, old_pvh_remove, old_pstr)  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &new_pstr)  );
    SG_ERR_CHECK(  SG_vhash__to_json(pCtx, new_pvh_remove, new_pstr)  );

    if (0 != strcmp(SG_string__sz(old_pstr), SG_string__sz(new_pstr)))
    {
        fprintf(stderr, "oldway:\n");
        SG_VHASH_STDERR(old_pvh_remove);
        fprintf(stderr, "new:\n");
        SG_VHASH_STDERR(new_pvh_remove);

        SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
    }
#endif

#if SG_DOUBLE_CHECK__CALC_DELTA
    SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &t1)  );
#endif
    SG_ERR_CHECK(  SG_repo__dbndx__make_delta_from_path(
                pCtx,
                pRepo,
                dagnum,
                pva_path,
                flags,
                pvh_add,
                pvh_remove
                )  );
#if SG_DOUBLE_CHECK__CALC_DELTA
    SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &t2)  );
    fprintf(stderr, "  time NEW %d ms\n", (int) (t2 - t1));
#endif

fail:
#if SG_DOUBLE_CHECK__CALC_DELTA
    SG_STRING_NULLFREE(pCtx, old_pstr);
    SG_STRING_NULLFREE(pCtx, new_pstr);

    SG_VHASH_NULLFREE(pCtx, old_pvh_add);
    SG_VHASH_NULLFREE(pCtx, old_pvh_remove);
    SG_VHASH_NULLFREE(pCtx, new_pvh_add);
    SG_VHASH_NULLFREE(pCtx, new_pvh_remove);
#endif
    ;
}
Code example #12
void SG_password__set(
	SG_context *pCtx,
	const char *szRepoSpec,
	SG_string *pUserName,
	SG_string *pPassword)
{
	const char *username, *password;
	SG_string *path = NULL;
	SG_string *server = NULL;
	SecProtocolType proto;
	SG_uint32 port;
	SG_bool isValid = SG_FALSE;

	OSStatus saveRes, findRes;
	SecKeychainItemRef item = NULL;

	SG_NULLARGCHECK(pUserName);
	SG_NULLARGCHECK(pPassword);
	SG_NULLARGCHECK(szRepoSpec);

	username = SG_string__sz(pUserName);
	password = SG_string__sz(pPassword);

	SG_ERR_CHECK(  _sg_password__parse_url(pCtx, szRepoSpec,
		&isValid, &proto, &server, &path, &port)  );

	if (! isValid)
		SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED);

	findRes = SecKeychainFindInternetPassword(
		NULL,
		SG_STRLEN( SG_string__sz(server) ), SG_string__sz(server),
		0, NULL,
		SG_STRLEN(username), username,
		SG_STRLEN( SG_string__sz(path) ), SG_string__sz(path),
		port, proto, kSecAuthenticationTypeDefault,
		NULL, NULL,
		&item);

	if (findRes == errSecSuccess)
	{
		saveRes = SecKeychainItemModifyAttributesAndData(item, NULL, SG_STRLEN(password), password);
	}
	else
	{
		saveRes = SecKeychainAddInternetPassword(
			NULL,
			SG_STRLEN( SG_string__sz(server) ), SG_string__sz(server),
			0, NULL,
			SG_STRLEN(username), username,
			SG_STRLEN( SG_string__sz(path) ), SG_string__sz(path),
			port, proto, kSecAuthenticationTypeDefault,
			SG_STRLEN(password), password,
			NULL);
	}

	if (saveRes != errSecSuccess)
		_SG_THROW_MAC_SEC_ERROR(saveRes);

fail:
	if (item)
		CFRelease(item);

	SG_STRING_NULLFREE(pCtx, path);
	SG_STRING_NULLFREE(pCtx, server);
}
Code example #13
void SG_password__get(
	SG_context *pCtx,
	const char *szRepoSpec,
	const char *szUsername,
	SG_string **ppstrPassword)
{
	SG_string* pstrTarget = NULL;
	SG_string* pstrPassword = NULL;
	LPWSTR pwszTarget = NULL;
	SG_byte* pbPassword = NULL;
	PCREDENTIAL pCred = NULL;
	BOOL result = FALSE;

	SG_NULLARGCHECK_RETURN(szRepoSpec);
	SG_NULLARGCHECK_RETURN(szUsername);
	SG_NULLARGCHECK_RETURN(ppstrPassword);

	_get_key(pCtx, szRepoSpec, szUsername, &pstrTarget);
	if (SG_CONTEXT__HAS_ERR(pCtx))
	{
		SG_error err, err2;
		err2 = SG_context__get_err(pCtx, &err);
		if (SG_IS_ERROR(err2))
		{
			SG_ERR_DISCARD;
			SG_ERR_THROW(err2);
		}

		if (err & __SG_ERR__GETLASTERROR__)
			SG_ERR_DISCARD;
		else
			SG_ERR_RETHROW;
	}

	if (pstrTarget)
	{
		SG_ERR_CHECK(  SG_utf8__extern_to_os_buffer__wchar(pCtx, SG_string__sz(pstrTarget), &pwszTarget, NULL)  );

		result = CredReadW(pwszTarget, CRED_TYPE_GENERIC, 0, &pCred);
		if (!result)
		{
			DWORD err = GetLastError();
			if (err != ERROR_NOT_FOUND && err != ERROR_NO_SUCH_LOGON_SESSION)
				SG_ERR_THROW2( SG_ERR_GETLASTERROR(GetLastError()), (pCtx, "%s", "unable to retrieve saved credentials") );
		}
		else
		{
			SG_uint32 size = pCred->CredentialBlobSize;
			SG_ERR_CHECK(  SG_allocN(pCtx, size+sizeof(wchar_t), pbPassword)  );
			memcpy(pbPassword, pCred->CredentialBlob, size);	// copy only the blob; the extra wchar_t holds the null terminator
			SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstrPassword)  );
			SG_ERR_CHECK(  SG_utf8__intern_from_os_buffer__wchar(pCtx, pstrPassword, (const LPWSTR)pbPassword)  );

			*ppstrPassword = pstrPassword;
			pstrPassword = NULL;
		}
	}

	/* fall through */
fail:
	SG_STRING_NULLFREE(pCtx, pstrTarget);
	SG_STRING_NULLFREE(pCtx, pstrPassword);
	SG_NULLFREE(pCtx, pwszTarget);
	SG_NULLFREE(pCtx, pbPassword);
	if (pCred)
		CredFree(pCred);
}
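
The GNOME keyring, Mac keychain, and Windows credential-store variants above share one contract. A usage sketch (the repo spec and username are hypothetical); note that a missing credential comes back as a NULL string rather than an error, so the caller must initialize the pointer:

	SG_string* pstrPassword = NULL;

	SG_ERR_CHECK(  SG_password__get(pCtx, "http://server/repo", "builder", &pstrPassword)  );
	if (!pstrPassword)
	{
		// nothing saved for this URL/user; prompt, then optionally SG_password__set
	}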
Code example #14
File: sg_dagfrag.c  Project: avar/veracity
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
									 SG_dagfrag * pFrag,
									 SG_repo* pRepo,
									 const char * szHidStart,
									 SG_int32 nGenerations)
{
	// load a fragment of the dag starting with the given dagnode
	// for nGenerations of parents.
	//
	// we add this portion of the graph to whatever we already
	// have in our fragment.  this may either augment (give us
	// a larger connected piece) or it may be an independent
	// subset.
	//
	// if nGenerations <= 0, load everything from this starting point
	// back to the NULL/root.
	//
	// generationStart is the generation of the starting dagnode.
	//
	// the starting dagnode *MAY* be in the final start-fringe.
	// normally, it will be.  but if we are called multiple times
	// (and have more than one start point), it may be the case
	// that this node is a parent of one of the other start points.
	//
	// we compute generationEnd as the generation that we will NOT
	// include in the fragment; nodes of that generation will be in
	// the end-fringe.  that is, we include [start...end) like most
	// C++ iterators.

	_my_data * pMyDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_dagnode * pDagnodeStart;
	SG_int32 generationStart, generationEnd;
	SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHidStart);

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.

	SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
	if (!bPresent)
	{
		if (!pRepo)
			SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );

		pDagnodeStart = pDagnodeAllocated;
	}
	else
	{
		pDagnodeStart = pMyDataCached->m_pDagnode;
	}

	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
	SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
					  (pCtx,"Invalid generation value [%d] for dagnode [%s]",
					   generationStart,szHidStart)  );
	if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
		generationEnd = 0;
	else
		generationEnd = generationStart - nGenerations;

	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then add all of its parents to the work queue.

		SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
											pFrag,
											generationStart,
											pDagnodeAllocated,SG_DFS_START_MEMBER,
											&pMyDataCached)  );
		pDagnodeAllocated = NULL;

		SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.

		switch (pMyDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pMyDataCached->m_state,szHidStart)  );

		case SG_DFS_INTERIOR_MEMBER:				// already in fragment
		case SG_DFS_START_MEMBER:	// already in fragment, duplicated leaf?
			if (generationEnd < pMyDataCached->m_genDagnode)
			{
				// they've expanded the bounds of the fragment since we
				// last visited this dagnode.  keep this dagnode in the
				// fragment and revisit the ancestors in case any were
				// put in the end-fringe that should now be included.
				//
				// we leave the state as INCLUDE or INCLUDE_AND_START
				// because a duplicate start point should remain a
				// start point.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			else
			{
				// the current end-generation requested is >= the previous
				// end-generation, then we've completely explored this dagnode
				// already.  that is, a complete walk from this node for nGenerations
				// would not reveal any new information.
			}
			break;

		case SG_DFS_END_FRINGE:
			{
				// they want to start at a dagnode that we put in the
				// end-fringe.  this can happen if they need to expand
				// the bounds of the fragment to include older ancestors.
				//
				// we do not mark this as a start node because someone
				// else already has it as a parent.

				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			break;
		}
	}

	// we optionally put the parents of the current node into the work queue.
	//
	// service the work queue until it is empty.  this allows us to walk the graph without
	// recursion.  that is, as we decide what to do with a node, we add the parents
	// to the queue.  we then iterate thru the work queue until we have dealt with
	// everything -- that is, until all parents have been properly placed.
	//
	// we cannot use a standard iterator to drive this loop because we
	// modify the queue.

	while (1)
	{
		_process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// we processed everything in the queue and are done

		if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);		// queue changed, restart iteration
	}

	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

	/*
	** we have loaded a piece of the dag (starting with the given start node
	** and tracing all parent edges back n generations).  we leave with everything
	** in our progress queues so that other start nodes can be added to the
	** fragment.  this allows the processing of subsequent start nodes to
	** override some of the decisions that we made.  for example:
	**
	**           Q_15
	**             |
	**             |
	**           Z_16
	**           /  \
	**          /    \
	**      Y_17      A_17
	**          \    /   \
	**           \  /     \
	**           B_18     C_18
	**             |
	**             |
	**           D_19
	**             |
	**             |
	**           E_20
	**
	** if we started with the leaf E_20 and requested 3 generations, we would have:
	**     start_set := { E }
	**     include_set := { B, D, E }
	**     end_set := { Y, A }
	**
	** after a subsequent call with the leaf C_18 and 3 generations, we would have:
	**     start_set := { C, E }
	**     include_set := { Z, A, B, C, D, E }
	**     end_set := { Q, Y }
	**
	*/

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
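
A sketch reproducing the worked example from the comment above (the two HID variables are hypothetical stand-ins for the E_20 and C_18 changesets):

	SG_ERR_CHECK(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid_E, 3)  );
	// start_set := { E }, include_set := { B, D, E }, end_set := { Y, A }

	SG_ERR_CHECK(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid_C, 3)  );
	// start_set := { C, E }, include_set := { Z, A, B, C, D, E }, end_set := { Q, Y }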
Code example #15
void SG_time__parse(SG_context* pCtx,const char* pszInputString, SG_int64 * pTime, SG_bool bParseToMaximumValidValue)
{
	SG_string * pWorkingStr = NULL;
	char** splitOnSpaces = NULL;
	char** splitOnColons = NULL;
	char** splitOnDashes = NULL;
	SG_uint32 nCountAfterSplitOnSpaces = 0;
	SG_uint32 nCountAfterSplitOnDashes = 0;
	SG_uint32 nCountAfterSplitOnColons = 0;
	SG_string * pWorkingStringDate = NULL;
	SG_string * pWorkingStringTime = NULL;
	time_t resultTicks = 0;
	struct tm result;
	SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pWorkingStr, pszInputString)  );
	//This understands exactly two formats
	//YYYY-MM-DD
	//
	//and
	//YYYY-MM-DD hh:mm:ss
	//The local time zone is always assumed.
	SG_ERR_CHECK(  SG_string__split__asciichar(pCtx, pWorkingStr, ' ', 2, &splitOnSpaces, &nCountAfterSplitOnSpaces) );
	if (nCountAfterSplitOnSpaces == 1)
	{
		//YYYY-MM-DD

		SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pWorkingStringDate, splitOnSpaces[0])  );
		SG_ERR_CHECK(  SG_string__split__asciichar(pCtx, pWorkingStringDate, '-', 3, &splitOnDashes, &nCountAfterSplitOnDashes) );

		if(nCountAfterSplitOnDashes == 3)
		{
			SG_uint32 year = 0;
			SG_uint32 month = 0;
			SG_uint32 dayOfMonth = 0;

			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &year, splitOnDashes[0])  );
			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &month, splitOnDashes[1])  );
			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &dayOfMonth, splitOnDashes[2])  );
			if (month<= 0 || month > 12)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			if (dayOfMonth<= 0 || dayOfMonth > 31)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			result.tm_year = year - 1900;
			result.tm_mon = month - 1;
			result.tm_mday = dayOfMonth;
			if (bParseToMaximumValidValue == SG_TRUE)
			{
				result.tm_hour = 23;
				result.tm_min = 59;
				result.tm_sec = 59;
			}
			else
			{
				result.tm_hour = 0;
				result.tm_min = 0;
				result.tm_sec = 0;
			}
		}
		else
			SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
	}
	else if (nCountAfterSplitOnSpaces == 2)
	{
		//YYYY-MM-DD hh:mm:ss

		SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pWorkingStringDate, splitOnSpaces[0])  );
		SG_ERR_CHECK(  SG_string__split__asciichar(pCtx, pWorkingStringDate, '-', 3, &splitOnDashes, &nCountAfterSplitOnDashes) );

		if(nCountAfterSplitOnDashes == 3)
		{
			SG_uint32 year = 0;
			SG_uint32 month = 0;
			SG_uint32 dayOfMonth = 0;

			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &year, splitOnDashes[0])  );
			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &month, splitOnDashes[1])  );
			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &dayOfMonth, splitOnDashes[2])  );
			if (month<= 0 || month > 12)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			if (dayOfMonth<= 0 || dayOfMonth > 31)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			result.tm_year = year - 1900;
			result.tm_mon = month - 1;
			result.tm_mday = dayOfMonth;

		}
		else
			SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
		SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pWorkingStringTime, splitOnSpaces[1])  );
		SG_ERR_CHECK(  SG_string__split__asciichar(pCtx, pWorkingStringTime, ':', 3, &splitOnColons, &nCountAfterSplitOnColons) );

		if(nCountAfterSplitOnColons == 3)
		{
			SG_uint32 hour = 0;
			SG_uint32 minute = 0;
			SG_uint32 second = 0;

			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &hour, splitOnColons[0])  );
			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &minute, splitOnColons[1])  );
			SG_ERR_CHECK(  SG_uint32__parse(pCtx, &second, splitOnColons[2])  );
			if (hour > 23)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			if (minute > 59)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			if (second > 59)
				SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
			result.tm_hour = hour;
			result.tm_min = minute;
			result.tm_sec = second;
		}
		else
			SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
	}
	else
	{
		//Invalid date.
		SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);
	}


	result.tm_isdst = -1;
	resultTicks = mktime(&result);
	if (resultTicks < 0)
		SG_ERR_THROW(SG_ERR_DATE_PARSING_ERROR);	// mktime rejected the date; don't leave *pTime unset
	*pTime = resultTicks;
	*pTime = *pTime * MILLISECONDS_PER_SECOND;
	if (bParseToMaximumValidValue == SG_TRUE)
	{
		*pTime += 999;
	}

	SG_STRING_NULLFREE(pCtx, pWorkingStringTime);
	SG_STRING_NULLFREE(pCtx, pWorkingStringDate);
	if (splitOnColons != NULL)
		SG_ERR_CHECK(  SG_freeStringList(pCtx, (const char ***)&splitOnColons, nCountAfterSplitOnColons)  );
	if (splitOnSpaces != NULL)
		SG_ERR_CHECK(  SG_freeStringList(pCtx, (const char ***)&splitOnSpaces, nCountAfterSplitOnSpaces)  );
	if (splitOnDashes != NULL)
		SG_ERR_CHECK(  SG_freeStringList(pCtx, (const char ***)&splitOnDashes, nCountAfterSplitOnDashes)  );
	SG_STRING_NULLFREE(pCtx, pWorkingStr);
	return;
fail:
	SG_STRING_NULLFREE(pCtx, pWorkingStringTime);
	SG_STRING_NULLFREE(pCtx, pWorkingStringDate);

	if (splitOnColons != NULL && nCountAfterSplitOnColons > 0)
		SG_ERR_IGNORE(  SG_freeStringList(pCtx, (const char ***)&splitOnColons, nCountAfterSplitOnColons)  );
	if (splitOnSpaces != NULL && nCountAfterSplitOnSpaces > 0)
		SG_ERR_IGNORE(  SG_freeStringList(pCtx, (const char ***)&splitOnSpaces, nCountAfterSplitOnSpaces)  );
	if (splitOnDashes != NULL)
		SG_ERR_IGNORE(  SG_freeStringList(pCtx, (const char ***)&splitOnDashes, nCountAfterSplitOnDashes)  );
	SG_STRING_NULLFREE(pCtx, pWorkingStr);
	return;
}
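
The code above is the tail of Veracity's informal local-date parser; its signature sits outside this excerpt, but the locals imply a date string in, an SG_int64* pTime out-parameter in milliseconds since the epoch, and a bParseToMaximumValidValue flag that rounds missing fields up instead of down. A minimal usage sketch, assuming the entry point is SG_time__parse__informal__local (the name is an assumption; only the body appears here):

// Hedged sketch: bracket a calendar day in milliseconds since the epoch.
// SG_time__parse__informal__local is an assumed name for the function whose
// body appears above.
void example_parse_day_range(SG_context * pCtx)
{
    SG_int64 iStartOfDay = 0;
    SG_int64 iEndOfDay = 0;

    // SG_FALSE: missing time fields default down to 00:00:00(.000)
    SG_ERR_CHECK(  SG_time__parse__informal__local(pCtx, "2010-07-16", &iStartOfDay, SG_FALSE)  );

    // SG_TRUE: missing time fields are maximized to 23:59:59.999
    SG_ERR_CHECK(  SG_time__parse__informal__local(pCtx, "2010-07-16", &iEndOfDay, SG_TRUE)  );

    // [iStartOfDay, iEndOfDay] now spans the whole local day.

fail:
    ;
}
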
Code example #16
void do_cmd_merge_preview(SG_context * pCtx, SG_option_state * pOptSt)
{
	SG_repo * pRepo = NULL;
	
	SG_uint32 countRevSpecs = 0;
	SG_stringarray * psaRevSpecs = NULL;
	const char * const * ppszRevSpecs = NULL;
	
	SG_stringarray * psaNewChangesets = NULL;
	const char * const * ppszNewChangesets = NULL;
	SG_uint32 countNewChangesets = 0;
	
	char * pszHidBaseline = NULL;
	char * pszHidMergeTarget = NULL;
	SG_dagquery_relationship relationship;
	
	SG_vhash * pvhPileOfCleanBranches = NULL;
	SG_uint32 i = 0;
	
	countRevSpecs = 0;
	if (pOptSt->pRevSpec)
	{
		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pOptSt->pRevSpec, &countRevSpecs)  );
		if(countRevSpecs>2)
			SG_ERR_THROW(SG_ERR_USAGE);
	}
	
	if(pOptSt->psz_repo!=NULL)
	{
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pOptSt->psz_repo, &pRepo)  );
			SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_ERR_THROW2(SG_ERR_USAGE, (pCtx, "When using the --repo option, you must provide both the BASELINE-REVSPEC and the OTHER-REVSPEC."));
		}
	}
	else
	{
		SG_ERR_CHECK(  SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL)  );
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_uint32 countBaselines = 0;
			SG_ERR_CHECK(  SG_wc__get_wc_parents__stringarray(pCtx, NULL, &psaRevSpecs)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaRevSpecs, &ppszRevSpecs, &countBaselines)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			if(countBaselines==2)
			{
				SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			}
			else
			{
				SG_wc_merge_args merge_args;
				merge_args.pRevSpec = pOptSt->pRevSpec;
				merge_args.bNoAutoMergeFiles = SG_TRUE;	// doesn't matter
				merge_args.bComplainIfBaselineNotLeaf = SG_FALSE;	// doesn't matter
				SG_ERR_CHECK(  SG_wc__merge__compute_preview_target(pCtx, NULL, &merge_args, &pszHidMergeTarget)  );
			}
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
	}
	
	SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
		pszHidMergeTarget, pszHidBaseline,
		SG_FALSE, SG_FALSE,
		&relationship)  );
	if(relationship==SG_DAGQUERY_RELATIONSHIP__ANCESTOR || relationship==SG_DAGQUERY_RELATIONSHIP__SAME)
	{
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "The baseline already includes the merge target. No merge is needed.\n")  );
	}
	else
	{
		SG_ERR_CHECK(  SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszHidBaseline, pszHidMergeTarget, &psaNewChangesets)  );
		SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaNewChangesets, &ppszNewChangesets, &countNewChangesets)  );
		
		SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, &pvhPileOfCleanBranches)  );
		for(i=0; i<countNewChangesets; ++i)
		{
			SG_ERR_CHECK(  SG_cmd_util__dump_log(pCtx, SG_CS_STDOUT, pRepo, ppszNewChangesets[i], pvhPileOfCleanBranches, SG_TRUE, SG_FALSE)  );
		}
		
		if(relationship==SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "\nFast-Forward Merge to '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s"))  );
		}
		else
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "\nMerge with '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s"))  );
		}
	}

	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_REPO_NULLFREE(pCtx, pRepo);

	return;
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
}
Code example #17
void SG_unzip__open(SG_context* pCtx, const SG_pathname* pPath, SG_unzip** ppResult)
{
    SG_unzip us;
    SG_unzip *s;
    SG_uint64 central_pos = 0;
    SG_uint32 uL;

    SG_uint16 number_disk = 0;          /* number of the current disk, used for
                                   spanning ZIP, unsupported, always 0*/
    SG_uint16 number_disk_with_CD = 0;  /* number of the disk with the central dir,
                                   used for spanning ZIP, unsupported, always 0*/
    SG_uint16 number_entry_CD = 0;      /* total number of entries in
                                   the central dir
                                   (same as number_entry on nospan) */


    SG_ERR_CHECK(  SG_file__open__pathname(pCtx, pPath, SG_FILE_RDONLY | SG_FILE_OPEN_EXISTING, SG_FSOBJ_PERMS__UNUSED, &us.pFile)  );

    SG_ERR_CHECK(  sg_unzip__locate_central_dir(pCtx, us.pFile, &central_pos)  );

    SG_ERR_CHECK(  SG_file__seek(pCtx, us.pFile, central_pos)  );

    /* the signature, already checked */
    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, us.pFile,&uL)  );

    /* number of this disk */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile,&number_disk)  );

    /* number of the disk with the start of the central directory */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile,&number_disk_with_CD)  );

    /* total number of entries in the central dir on this disk */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile,&us.gi.number_entry)  );

    /* total number of entries in the central dir */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile,&number_entry_CD)  );

    if ((number_entry_CD!=us.gi.number_entry) ||
        (number_disk_with_CD!=0) ||
        (number_disk!=0))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    /* size of the central directory */
    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, us.pFile,&us.size_central_dir)  );

    /* offset of start of central directory with respect to the
          starting disk number */
    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, us.pFile,&us.offset_central_dir)  );

    /* zipfile comment length */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile,&us.gi.size_comment)  );

    if (central_pos<us.offset_central_dir+us.size_central_dir)
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    us.byte_before_the_zipfile = (SG_uint32) (central_pos - (us.offset_central_dir+us.size_central_dir));
    us.central_pos = central_pos;
    us.pfile_in_zip_read = NULL;

    us.current_file_ok = SG_FALSE;

    SG_ERR_CHECK(  SG_malloc(pCtx, sizeof(SG_unzip), &s)  );
    *s=us;

    *ppResult = s;

    return;

fail:
    /* TODO free stuff */
    SG_FILE_NULLCLOSE(pCtx, us.pFile);
}
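
A minimal sketch of using SG_unzip__open to inspect an archive. The matching close routine is hedged as SG_unzip__nullclose, an assumed name that does not appear in this excerpt, and the sketch assumes the SG_unzip struct fields are visible, as they are inside this module:

// Hedged sketch: open a zipfile and report its central-directory entry count.
void example_count_zip_entries(SG_context * pCtx, const SG_pathname * pPath)
{
    SG_unzip * punzip = NULL;

    SG_ERR_CHECK(  SG_unzip__open(pCtx, pPath, &punzip)  );
    SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "%i entries\n", (int)punzip->gi.number_entry)  );

fail:
    // SG_unzip__nullclose is an assumed name for the close/free routine.
    SG_ERR_IGNORE(  SG_unzip__nullclose(pCtx, &punzip)  );
}
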
Code example #18
static void _sg_workingdir__get_entry2(SG_context * pCtx,
									   SG_repo * pRepo,
									   const SG_pathname * pPathSub,
									   const char * pszGid,
									   SG_treenode_entry_type type,
									   const char * pszidHidContent,
									   const char * pszidHidXattrs,
									   SG_int64 iAttributeBits,
									   SG_vhash * pvhTimestamps)
{
	SG_file* pFile = NULL;
	SG_string* pstrLink = NULL;
	SG_byte* pBytes = NULL;
	SG_vhash * pvhGid = NULL;

    if (SG_TREENODEENTRY_TYPE_DIRECTORY == type)
    {
        /* create the directory and then recurse into it */
        SG_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathSub)  );
        SG_ERR_CHECK(  _sg_workingdir__get_dir(pCtx, pRepo, pPathSub, pszidHidContent, pvhTimestamps)  );
    }
    else if (SG_TREENODEENTRY_TYPE_REGULAR_FILE == type)
    {
        SG_ERR_CHECK(  SG_file__open__pathname(pCtx, pPathSub, SG_FILE_RDWR | SG_FILE_CREATE_NEW, SG_FSOBJ_PERMS__MASK, &pFile)  );
        SG_ERR_CHECK(  SG_repo__fetch_blob_into_file(pCtx, pRepo, pszidHidContent, pFile, NULL)  );
        SG_ERR_CHECK(  SG_file__close(pCtx, &pFile)  );
    }
    else if (SG_TREENODEENTRY_TYPE_SYMLINK == type)
    {
        SG_uint64 iLenBytes = 0;

        SG_ERR_CHECK(  SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszidHidContent, &pBytes, &iLenBytes)  );
        SG_ERR_CHECK(  SG_STRING__ALLOC__BUF_LEN(pCtx, &pstrLink, pBytes, (SG_uint32) iLenBytes)  );
        SG_ERR_CHECK(  SG_fsobj__symlink(pCtx, pstrLink, pPathSub)  );
        SG_NULLFREE(pCtx, pBytes);
        SG_STRING_NULLFREE(pCtx, pstrLink);
    }
    else
    {
        SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED);
    }

    if (pszidHidXattrs)
    {
#ifdef SG_BUILD_FLAG_FEATURE_XATTR
        SG_ERR_CHECK(  _sg_workingdir__set_xattrs(pCtx, pRepo, pPathSub, pszidHidXattrs)  );
#else
		// TODO do we need to stuff something into the pendingtree to remind us
		// TODO that the entry originally had an XAttr and we just didn't restore
		// TODO it when we populated the WD on this Windows system?
#endif
    }

    SG_ERR_CHECK(  SG_attributes__bits__apply(pCtx, pPathSub, iAttributeBits)  );

	if (pvhTimestamps && (SG_TREENODEENTRY_TYPE_REGULAR_FILE == type))
	{
		SG_fsobj_stat stat;
		SG_int64 iTimeNow;

		SG_ERR_CHECK(  SG_fsobj__stat__pathname(pCtx, pPathSub, &stat)  );
		SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &iTimeNow)  );

		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhGid)  );
		SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhGid, "mtime_ms", stat.mtime_ms)  );
		SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhGid, "clock_ms", iTimeNow)  );

		SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pvhTimestamps, pszGid, &pvhGid)  );	// this steals our vhash
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvhGid);
}
Code example #19
/*
  Read the local header of the current zipfile
  Check the coherency of the local header and info in the end of central
        directory about this file
  store in *piSizeVar the size of extra info in local header
        (filename and size of extra field data)
*/
static void sg_unzip__check_coherency(
        SG_context* pCtx,
        SG_unzip* s,
        SG_uint32* piSizeVar,
        SG_uint32* poffset_local_extrafield,
        SG_uint32* psize_local_extrafield
        )
{
    /* TODO uFlags is read in the checks below but never set from the file,
       so with its 0 initializer those checks currently always apply. */
    SG_uint32 uMagic,uData,uFlags = 0;
    SG_uint16 u16 = 0;
    SG_uint16 size_filename = 0;
    SG_uint16 size_extra_field = 0;

    *piSizeVar = 0;
    *poffset_local_extrafield = 0;
    *psize_local_extrafield = 0;

    SG_ERR_CHECK(  SG_file__seek(pCtx, s->pFile, s->cur_file_info_internal.offset_curfile + s->byte_before_the_zipfile)  );

    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, s->pFile,&uMagic)  );

    if (uMagic!=0x04034b50)
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, s->pFile,&u16)  );

    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, s->pFile,&u16)  );

    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, s->pFile,&u16)  );
    if (u16 != s->cur_file_info.compression_method)
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }
    if ((s->cur_file_info.compression_method!=0) && (s->cur_file_info.compression_method != Z_DEFLATED))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, s->pFile,&uData)  ); /* date/time */

    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, s->pFile,&uData)  ); /* crc */
    if ((uData!=s->cur_file_info.crc) && ((uFlags & 8)==0))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, s->pFile,&uData)  ); /* size compr */
    if ((uData!=s->cur_file_info.compressed_size) && ((uFlags & 8)==0))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, s->pFile,&uData)  ); /* size uncompr */
    if ((uData!=s->cur_file_info.uncompressed_size) && ((uFlags & 8)==0))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }


    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, s->pFile,&size_filename)  );
    if (size_filename!=s->cur_file_info.size_filename)
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    *piSizeVar += (SG_uint32)size_filename;

    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, s->pFile,&size_extra_field)  );

    *poffset_local_extrafield= s->cur_file_info_internal.offset_curfile +
                                    SIZEZIPLOCALHEADER + size_filename;
    *psize_local_extrafield = (SG_uint32)size_extra_field;

    *piSizeVar += (SG_uint32)size_extra_field;

fail:
    return;
}
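
Restating what the three out-parameters describe: *piSizeVar is the combined size of the filename and extra field that follow the fixed-size local header, *poffset_local_extrafield points at the extra field, and the entry's data begins right after both. A small sketch of the arithmetic, using only values the function above reads or stores (SIZEZIPLOCALHEADER is the fixed local-header size it already relies on):

// Hedged sketch: where the compressed data of the current entry starts,
// derived purely from what sg_unzip__check_coherency() stores.  When seeking
// in the physical file, add s->byte_before_the_zipfile as the seek above does.
static SG_uint64 example_data_offset(const SG_unzip * s, SG_uint32 iSizeVar)
{
    // local header | filename | extra field | compressed data ...
    return s->cur_file_info_internal.offset_curfile
         + SIZEZIPLOCALHEADER
         + iSizeVar;      /* size_filename + size_extra_field */
}
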
Code example #20
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
    const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;
	SG_bool b = SG_FALSE;
    char* psz_hid_cs_baseline = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo)  );


	if (psz_spec_hid_cs_baseline)
	{
		SG_ERR_CHECK(  SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline)  );
	}
	else
    {
        const char* psz_hid = NULL;
        /*
         * If you do not specify a hid to be the baseline, then this routine
         * currently only works if there is exactly one leaf in the repo.
         */
        SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&pIdsetLeaves)  );
        SG_ERR_CHECK(  SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves)  );

		if (count_leaves != 1)
			SG_ERR_THROW(  SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE  );

        SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL)  );

        SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline)  );
    }

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs)  );
	SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot)  );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhTimestamps)  );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps)  );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK(  SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL)  );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree)  );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK(  SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline)  );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		SG_ERR_CHECK(  SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps)  );	// this steals our vhash

		SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pPendingTree)  );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL)  );
	}


fail:
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
    SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
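
A minimal sketch of the two modes of SG_workingdir__create_and_get: with bCreateDrawer it behaves like a checkout (drawer, pendingtree parent, timestamp cache), without it like an export. "myrepo" and the target paths are placeholders; passing NULL for the baseline hid uses the repo's single leaf, as the body above shows:

// Hedged sketch: checkout vs. export of the same repo.
void example_checkout_and_export(SG_context * pCtx,
                                 const SG_pathname * pPathCheckout,
                                 const SG_pathname * pPathExport)
{
    // Checkout: creates the drawer, repo.json, wd.json, and seeds the
    // timestamp cache; NULL => baseline is the single leaf.
    SG_ERR_CHECK(  SG_workingdir__create_and_get(pCtx, "myrepo", pPathCheckout, SG_TRUE, NULL)  );

    // Export: same tree contents, no drawer, no timestamps recorded.
    SG_ERR_CHECK(  SG_workingdir__create_and_get(pCtx, "myrepo", pPathExport, SG_FALSE, NULL)  );

fail:
    ;
}
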
Code example #21
void SG_unzip__currentfile__read(SG_context* pCtx, SG_unzip* s, SG_byte* pBuf, SG_uint32 iLenBuf, SG_uint32* piBytesRead)
{
    int zerr=Z_OK;
    SG_uint32 iRead = 0;
    file_in_zip_read_info_s* pfile_in_zip_read_info;

    SG_NULLARGCHECK_RETURN( s );

    pfile_in_zip_read_info = s->pfile_in_zip_read;

    SG_NULLARGCHECK_RETURN( pfile_in_zip_read_info );

    if (!pfile_in_zip_read_info->read_buffer)
    {
        SG_ERR_THROW_RETURN( SG_ERR_UNSPECIFIED );
    }

    if (!iLenBuf)
    {
        return;
    }

    pfile_in_zip_read_info->stream.next_out = pBuf;

    pfile_in_zip_read_info->stream.avail_out = iLenBuf;

    if (iLenBuf > pfile_in_zip_read_info->rest_read_uncompressed)
    {
        pfile_in_zip_read_info->stream.avail_out = (SG_uint32)pfile_in_zip_read_info->rest_read_uncompressed;
    }

    while (pfile_in_zip_read_info->stream.avail_out>0)
    {
        if ((pfile_in_zip_read_info->stream.avail_in==0) &&
            (pfile_in_zip_read_info->rest_read_compressed>0))
        {
            SG_uint32 uReadThis = UNZ_BUFSIZE;

            if (pfile_in_zip_read_info->rest_read_compressed<uReadThis)
            {
                uReadThis = (SG_uint32)pfile_in_zip_read_info->rest_read_compressed;
            }
            if (uReadThis == 0)
            {
				//TODO - maybe we should change this
                SG_ERR_THROW(  SG_ERR_EOF  );
            }

            SG_ERR_CHECK(  SG_file__seek(pCtx, s->pFile, pfile_in_zip_read_info->pos_in_zipfile + pfile_in_zip_read_info->byte_before_the_zipfile)  );
            SG_ERR_CHECK(  SG_file__read(pCtx, s->pFile, uReadThis, (SG_byte*) pfile_in_zip_read_info->read_buffer, NULL)  );

            pfile_in_zip_read_info->pos_in_zipfile += uReadThis;

            pfile_in_zip_read_info->rest_read_compressed-=uReadThis;

            pfile_in_zip_read_info->stream.next_in = (Bytef*)pfile_in_zip_read_info->read_buffer;
            pfile_in_zip_read_info->stream.avail_in = (SG_uint32)uReadThis;
        }

        if (pfile_in_zip_read_info->compression_method==0)
        {
            SG_uint32 uDoCopy,i ;

            if ((pfile_in_zip_read_info->stream.avail_in == 0) &&
                (pfile_in_zip_read_info->rest_read_compressed == 0))
            {
                if (iRead == 0)
                {
                    SG_ERR_THROW(  SG_ERR_EOF  );
                }
                goto done;
            }

            if (pfile_in_zip_read_info->stream.avail_out <
                            pfile_in_zip_read_info->stream.avail_in)
            {
                uDoCopy = pfile_in_zip_read_info->stream.avail_out ;
            }
            else
            {
                uDoCopy = pfile_in_zip_read_info->stream.avail_in ;
            }

            for (i=0;i<uDoCopy;i++)
            {
                *(pfile_in_zip_read_info->stream.next_out+i) =
                        *(pfile_in_zip_read_info->stream.next_in+i);
            }

            pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,
                                pfile_in_zip_read_info->stream.next_out,
                                uDoCopy);

            pfile_in_zip_read_info->rest_read_uncompressed-=uDoCopy;
            pfile_in_zip_read_info->stream.avail_in -= uDoCopy;
            pfile_in_zip_read_info->stream.avail_out -= uDoCopy;
            pfile_in_zip_read_info->stream.next_out += uDoCopy;
            pfile_in_zip_read_info->stream.next_in += uDoCopy;
            pfile_in_zip_read_info->stream.total_out += uDoCopy;
            iRead += uDoCopy;
        }
        else
        {
            SG_uint32 uTotalOutBefore,uTotalOutAfter;
            const Bytef *bufBefore;
            SG_uint32 uOutThis;
            int flush=Z_SYNC_FLUSH;

            uTotalOutBefore = pfile_in_zip_read_info->stream.total_out;
            bufBefore = pfile_in_zip_read_info->stream.next_out;

            /*
            if ((pfile_in_zip_read_info->rest_read_uncompressed ==
                     pfile_in_zip_read_info->stream.avail_out) &&
                (pfile_in_zip_read_info->rest_read_compressed == 0))
                flush = Z_FINISH;
            */
            zerr = inflate(&pfile_in_zip_read_info->stream,flush);

            if ((zerr>=0) && (pfile_in_zip_read_info->stream.msg))
            {
                SG_ERR_THROW(  SG_ERR_ZLIB(zerr)  );
            }

            uTotalOutAfter = pfile_in_zip_read_info->stream.total_out;
            uOutThis = uTotalOutAfter-uTotalOutBefore;

            pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,bufBefore, (SG_uint32)(uOutThis));

            pfile_in_zip_read_info->rest_read_uncompressed -= uOutThis;

            iRead += (SG_uint32)(uTotalOutAfter - uTotalOutBefore);

            if (zerr == Z_STREAM_END)
            {
            //    return (iRead==0) ? UNZ_EOF : iRead;
            //    break;
            }
        }
    }

done:
    *piBytesRead = iRead;

fail:
    return;
}
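
A hedged consumption loop for SG_unzip__currentfile__read. It assumes the current entry has already been located and opened by this module's companion goto/open routines (not shown in this excerpt), and treats SG_ERR_EOF as the normal end-of-entry signal, since that is what the reader above throws:

// Hedged sketch: drain the currently-open entry in 4 KB chunks.
void example_drain_current_entry(SG_context * pCtx, SG_unzip * punzip)
{
    SG_byte buf[4096];
    SG_uint32 iBytesRead = 0;

    do
    {
        SG_unzip__currentfile__read(pCtx, punzip, buf, sizeof(buf), &iBytesRead);
        if (SG_context__err_equals(pCtx, SG_ERR_EOF))
        {
            SG_context__err_reset(pCtx);    // EOF just ends the loop
            break;
        }
        SG_ERR_CHECK_CURRENT;

        // ... consume iBytesRead bytes of buf here ...
    } while (iBytesRead > 0);

fail:
    ;
}
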
static void read_entire_stream__file(
    SG_context* pCtx,
    CFReadStreamRef myReadStream,
    SG_pathname* pPath,
    CFHTTPMessageRef* pp,
    SG_bool b_progress
)
{
    CFHTTPMessageRef myResponse = NULL;
    CFIndex numBytesRead = 0;
    SG_file* pFile = NULL;
    SG_int64 content_length = 0;
    SG_int64 so_far = 0;

    SG_ERR_CHECK(  SG_file__open__pathname(pCtx, pPath, SG_FILE_CREATE_NEW | SG_FILE_WRONLY, 0644, &pFile)  );
    do
    {
        UInt8 buf[8192]; // TODO
        numBytesRead = CFReadStreamRead(myReadStream, buf, sizeof(buf));
        if( numBytesRead > 0 )
        {
            SG_ERR_CHECK(  SG_file__write(pCtx, pFile, numBytesRead, buf, NULL)  );
            if (!myResponse)
            {
                myResponse = (CFHTTPMessageRef)CFReadStreamCopyProperty(myReadStream, kCFStreamPropertyHTTPResponseHeader);
                CFStringRef contentLengthString = myResponse ? CFHTTPMessageCopyHeaderFieldValue(myResponse, CFSTR("Content-Length")) : NULL;
                if (contentLengthString)
                {
                    // TODO 32 bit limit problem here
                    content_length = CFStringGetIntValue(contentLengthString);
                    CFRelease(contentLengthString);
                }
                if (b_progress)
                {
                    SG_ERR_CHECK(  SG_log__set_steps(pCtx, content_length / 1024, "KB")  );
                }
            }
            so_far += (SG_uint32) numBytesRead;
            if (b_progress)
            {
                SG_ERR_CHECK(  SG_log__set_finished(pCtx, so_far / 1024)  );
            }
        }
        else if( numBytesRead < 0 )
        {
            CFStreamError myErr = CFReadStreamGetError(myReadStream);
            // TODO clean this up
            if (myErr.domain == kCFStreamErrorDomainPOSIX)
            {
                if (ETIMEDOUT == myErr.error)
                {
                    usleep(5000);
                    numBytesRead = 0;
                }
                else
                {
                    // Interpret myErr.error as a UNIX errno.
                    SG_ERR_THROW(  SG_ERR_ERRNO(myErr.error)  );
                }
            }
            else if (myErr.domain == kCFStreamErrorDomainMacOSStatus)
            {
                // Interpret myErr.error as a MacOS error code.
                // TODO SG_ERR_THROW(  SG_ERR_MAC((OSStatus) myErr.error)  );
                SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
            }
        }
    } while( numBytesRead > 0 );

    SG_ERR_CHECK(  SG_file__close(pCtx, &pFile)  );

    if (!myResponse)
    {
        myResponse = (CFHTTPMessageRef)CFReadStreamCopyProperty(myReadStream, kCFStreamPropertyHTTPResponseHeader);
    }

    *pp = myResponse;
    myResponse = NULL;

fail:
    if (pFile)
    {
        SG_ERR_IGNORE(  SG_file__close(pCtx, &pFile)  );
        // TODO delete it too
    }
    if (myResponse)
    {
        CFRelease(myResponse);
    }
}
Code example #23
File: sg__do_cmd_resolve.c  Project: avar/veracity
/**
 * Handle the RESOLVE command.
 *
 *
 */
void do_cmd_resolve(SG_context * pCtx,
					SG_option_state * pOptSt,
					SG_uint32 count_args, const char** paszArgs)
{
	struct _resolve_data data;
	SG_uint32 sum = 0;
	SG_bool bAll = SG_FALSE;
	SG_bool bWantResolved = SG_FALSE;
	SG_bool bWantUnresolved = SG_FALSE;
	SG_bool bReqArg = SG_FALSE;

	memset(&data, 0, sizeof(data));
	data.pPathCwd = NULL;
	data.pPendingTree = NULL;
	data.psaGids = NULL;
	data.bIgnoreWarnings = SG_TRUE;			// TODO what should this be?

	// allow at most ONE of the command options.
	//
	// the --{List,Mark,Unmark}All options do not allow ARGs.
	// 
	// the --{Mark,Unmark} require at least one ARG.
	// the --List allows 0 or more ARGs.
	//
	// if no command option, allow 0 or more ARGs.
	//
	// most commands do not require there to be issues; rather
	// they just don't do anything.
	//
	// WARNING: We set sg_cl_options[].has_arg to 0 for all of
	//          our commands options so that we get all of the
	//          pathnames in ARGs rather than bound to the option.
	//          That is, I want to be able to say:
	//               vv resolve --mark foo bar
	//          rather than:
	//               vv resolve --mark foo --mark bar
	//
	//          It also allows me to have:
	//               vv resolve --list
	//          and
	//               vv resolve --list foo

	if (pOptSt->bListAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	if (pOptSt->bMarkAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
//	if (pOptSt->bUnmarkAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	if (pOptSt->bList)
	{
		if (count_args == 0)	{ sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
		else					{ sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	}
	if (pOptSt->bMark)			{ sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_TRUE;  }
//	if (pOptSt->bUnmark)		{ sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_FALSE; bReqArg = SG_TRUE;  }
	if (sum == 0)				{        bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }

	if (sum > 1)
		SG_ERR_THROW(  SG_ERR_USAGE  );

	if (bReqArg && (count_args == 0))
		SG_ERR_THROW(  SG_ERR_USAGE  );

	if (bAll && (count_args > 0))
		SG_ERR_THROW(  SG_ERR_USAGE  );

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &data.pPathCwd)  );
	SG_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, data.pPathCwd)  );

	// Do a complete scan first.  This ensures that the pendingtree knows
	// about everything that is dirty in the WD and helps ensure that every
	// issue in the issues list has a ptnode in the pendingtree.
	// 
	// TODO 2010/07/16 Technically, this should NOT be required.  But it
	// TODO            helps.  The problem is that when a file is edited
	// TODO            we don't automatically get the notification, rather
	// TODO            we do a status aka scan (and/or use the timestamp
	// TODO            cache) when various commands start which detect
	// TODO            file content changes.  So the fact that the MERGE
	// TODO            may have written a bunch of merged/edited files
	// TODO            doesn't necessarily mean that they are listed in
	// TODO            the pendingtree -- because the user may have edited
	// TODO            them again (or edited other files) since the merge
	// TODO            completed.  So we scan.
	// TODO
	// TODO            See also the comment in sg.c:do_cmd_commit() for sprawl-809.
	// TODO
	// TODO            What this scan is helping to hide is a problem where
	// TODO            we're hitting the issues list for GIDs and then
	// TODO            using SG_pendingtree__find_repo_path_by_gid() to
	// TODO            dynamically convert it into a "live/current" repo-path.
	// TODO            and it assumes that it is only called for dirty entries
	// TODO            (or rather, for entries that have a ptnode).  We need
	// TODO            to fix that.

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__scan(pCtx, data.pPendingTree, SG_TRUE, NULL, 0, NULL, 0)  );
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);

	// Now load the pendingtree for real.

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree)  );

	if (count_args > 0)
		SG_ERR_CHECK(  _resolve__map_args_to_gids(pCtx, &data, count_args, paszArgs, bWantResolved, bWantUnresolved)  );
	else
		SG_ERR_CHECK(  _resolve__get_all_issue_gids(pCtx, &data, bWantResolved, bWantUnresolved)  );

	//////////////////////////////////////////////////////////////////

	if (pOptSt->bListAll || pOptSt->bList)
	{
		SG_ERR_CHECK(  _resolve__do_list(pCtx, &data)  );
	}
	else if (pOptSt->bMarkAll || pOptSt->bMark)
	{
		SG_ERR_CHECK(  _resolve__do_mark(pCtx, &data, SG_TRUE)  );
	}
//	else if (pOptSt->bUnmarkAll || pOptSt->bUnmark)
//	{
//		SG_ERR_CHECK(  _resolve__do_mark(pCtx, &data, SG_FALSE)  );
//	}
	else // no command option given -- assume we want to FIX the issues
	{
		SG_ERR_CHECK(  _resolve__do_fix(pCtx, &data)  );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, data.pPathCwd);
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);
	SG_STRINGARRAY_NULLFREE(pCtx, data.psaGids);
}
static void read_entire_stream__string(
    SG_context* pCtx,
    CFReadStreamRef myReadStream,
    SG_string** ppstr,
    CFHTTPMessageRef* pp
)
{
    SG_string* pstr = NULL;
    CFHTTPMessageRef myResponse = NULL;
    CFIndex numBytesRead = 0;

    SG_ERR_CHECK(  SG_string__alloc__sz(pCtx, &pstr, "")  );
    do
    {
        UInt8 buf[8192]; // TODO
        numBytesRead = CFReadStreamRead(myReadStream, buf, sizeof(buf));
        if( numBytesRead > 0 )
        {
            SG_ERR_CHECK(  SG_string__append__buf_len(pCtx, pstr, buf, numBytesRead)  );

            if (!myResponse)
            {
                myResponse = (CFHTTPMessageRef)CFReadStreamCopyProperty(myReadStream, kCFStreamPropertyHTTPResponseHeader);
            }
        }
        else if( numBytesRead < 0 )
        {
            CFStreamError myErr = CFReadStreamGetError(myReadStream);
            // TODO clean this up
            if (myErr.domain == kCFStreamErrorDomainPOSIX)
            {
                if (ETIMEDOUT == myErr.error)
                {
                    usleep(5000);
                    numBytesRead = 0;
                }
                else
                {
                    // Interpret myErr.error as a UNIX errno.
                    SG_ERR_THROW(  SG_ERR_ERRNO(myErr.error)  );
                }
            }
            else if (myErr.domain == kCFStreamErrorDomainMacOSStatus)
            {
                // Interpret myErr.error as a MacOS error code.
                // TODO SG_ERR_THROW(  SG_ERR_MAC((OSStatus) myErr.error)  );
                SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
            }
        }
    } while( numBytesRead > 0 );

    if (!myResponse)
    {
        myResponse = (CFHTTPMessageRef)CFReadStreamCopyProperty(myReadStream, kCFStreamPropertyHTTPResponseHeader);
    }

    *pp = myResponse;
    myResponse = NULL;

    *ppstr = pstr;
    pstr = NULL;

fail:
    if (myResponse)
    {
        CFRelease(myResponse);
    }
    SG_STRING_NULLFREE(pCtx, pstr);
}
Code example #25
void SG_server__pull_request_fragball(SG_context* pCtx,
									  SG_repo* pRepo,
									  SG_vhash* pvhRequest,
									  const SG_pathname* pFragballDirPathname,
									  char** ppszFragballName,
									  SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
    SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

	SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Dagnodes were requested.

				SG_uint32 generations = 0;
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
				if (found)
					SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
					SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

						// Get each node listed for the dag
						SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
						if (iMissingNodeCount > 0)
						{
							SG_uint32 j;
							const SG_variant* pvVal;

							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
							for (j=0; j<iMissingNodeCount; j++)
							{
								SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal)  );

								if (pvVal)
								{
									const char* pszVal;
									SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}
								
								SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );
								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							SG_bool found;
							const char* hid;
							
							SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
							while (found)
							{
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
								SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
	}
	
	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
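
The shape of pvhRequest is implied by the reads above: SG_SYNC_STATUS_KEY__CLONE for a full clone, SG_SYNC_STATUS_KEY__DAGS mapping decimal dagnum strings to per-dag vhashes whose keys are dagnode hids (a value may mark the key as a hid prefix or a tag), optional SG_SYNC_STATUS_KEY__GENERATIONS, and SG_SYNC_STATUS_KEY__BLOBS. A hedged sketch building a simple non-clone request; SG_vhash__add__string__sz is assumed from the vhash API, and "1" plus "a1b2c3" are placeholders for a real decimal dagnum and hid prefix:

// Hedged sketch: request one dag's node (by hid prefix) plus two extra
// generations of ancestors.
static void example_build_pull_request(SG_context * pCtx, SG_vhash ** ppvhRequest)
{
    SG_vhash * pvhRequest = NULL;
    SG_vhash * pvhDags = NULL;
    SG_vhash * pvhNodes = NULL;

    SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhRequest)  );

    // Stored as int64; the server above reads it back as a uint32.
    SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, 2)  );

    // The value marks "a1b2c3" as a hid *prefix* for the server to resolve.
    SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhNodes)  );
    SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhNodes, "a1b2c3", SG_SYNC_REQUEST_VALUE_HID_PREFIX)  );

    SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhDags)  );
    SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pvhDags, "1", &pvhNodes)  );    // steals pvhNodes
    SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );    // steals pvhDags

    *ppvhRequest = pvhRequest;
    pvhRequest = NULL;

fail:
    SG_VHASH_NULLFREE(pCtx, pvhNodes);
    SG_VHASH_NULLFREE(pCtx, pvhDags);
    SG_VHASH_NULLFREE(pCtx, pvhRequest);
}
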
static void perform_upload_request__string(
    SG_context* pCtx,
    CFHTTPMessageRef myRequest,
    SG_pathname* pPath,
    CFHTTPMessageRef* pmyResponse,
    SG_string** ppstr
)
{
    CFReadStreamRef myReadStream = NULL;
    CFHTTPMessageRef myResponse = NULL;
    SG_string* pstr = NULL;
    CFReadStreamRef upload = NULL;
    CFURLRef upload_file_url = NULL;

    // set the content-length header
    {
        SG_uint64 len = 0;
        SG_ERR_CHECK(  SG_fsobj__length__pathname(pCtx, pPath, &len, NULL)  );

        CFStringRef headerFieldName = CFSTR("Content-Length");
        CFStringRef headerFieldValue = CFStringCreateWithFormat(kCFAllocatorDefault, NULL, CFSTR("%llu"), (unsigned long long)len);
        CFHTTPMessageSetHeaderFieldValue(myRequest, headerFieldName, headerFieldValue);
        CFRelease(headerFieldValue);
    }

    upload_file_url = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, (UInt8*) SG_pathname__sz(pPath), SG_STRLEN(SG_pathname__sz(pPath)), SG_FALSE);
    upload = CFReadStreamCreateWithFile(kCFAllocatorDefault, upload_file_url);
    CFRelease(upload_file_url);
    if (!CFReadStreamOpen(upload))
    {
        CFStreamError myErr = CFReadStreamGetError(upload);

        if (myErr.domain == kCFStreamErrorDomainPOSIX)
        {
            // Interpret myErr.error as a UNIX errno.
            SG_ERR_THROW(  SG_ERR_ERRNO(myErr.error)  );
        }
        else if (myErr.domain == kCFStreamErrorDomainMacOSStatus)
        {
            // Interpret myErr.error as a MacOS error code.
            // TODO SG_ERR_THROW(  SG_ERR_MAC((OSStatus) myErr.error)  );
            SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
        }
    }

    SG_ERR_CHECK(  send_upload_request(pCtx, myRequest, upload, &myReadStream)  );
    SG_ERR_CHECK(  read_entire_stream__string(pCtx, myReadStream, &pstr, &myResponse)  );

    *ppstr = pstr;
    pstr = NULL;

    *pmyResponse = myResponse;
    myResponse = NULL;

fail:
    if (upload)
    {
        CFRelease(upload);
    }
    if (myReadStream)
    {
        CFReadStreamClose(myReadStream);
        CFRelease(myReadStream);
        myReadStream = NULL;
    }

    if (myResponse)
    {
        CFRelease(myResponse);
        myResponse = NULL;
    }

    SG_STRING_NULLFREE(pCtx, pstr);
}
Code example #27
// Helper function for _tree__init__continuation()
static void _tree__add_subtoken(SG_context * pCtx, _tree_t * pTree, _node_t * pParent, SG_varray * pSubtoken)
{
	SG_int64 revno = 0;
	char * szHid = NULL;
	_node_t * pNodeRef = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_uint16 lastElementType = 0;
	SG_varray * pSubsubtoken = NULL;
	SG_stringarray * psa = NULL;

	++pTree->indentLevel;

	SG_ERR_CHECK(  SG_varray__get__int64(pCtx, pSubtoken, 0, &revno)  );
	SG_ERR_CHECK(  SG_repo__find_dagnode_by_rev_id(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, (SG_uint32)revno, &szHid)  );
	SG_ERR_CHECK(  _tree__add_new_node(pCtx, pTree, pParent, szHid, &pNodeRef)  );
	pNodeRef->isPending = SG_FALSE;
	SG_NULLFREE(pCtx, szHid);

	SG_ERR_CHECK(  SG_varray__count(pCtx, pSubtoken, &count)  );
	SG_ERR_CHECK(  SG_varray__typeof(pCtx, pSubtoken, count-1, &lastElementType)  );
	if(lastElementType==SG_VARIANT_TYPE_VARRAY)
	{
		SG_ERR_CHECK(  SG_varray__get__varray(pCtx, pSubtoken, count-1, &pSubsubtoken)  );
		--count;
	}

	for(i=1; i<count; ++i)
	{
		_node_t * pRefChildNode = NULL;
		SG_ERR_CHECK(  SG_varray__get__int64(pCtx, pSubtoken, i, &revno)  );
		if(i==1 && revno==0)
		{
			// Revno 0 as the first child means "Use the LCA for the baseline".
			SG_uint32 j = 0;

			if(pSubsubtoken!=NULL)
			{
				// LCA would not include nodes in the sub-token. Just error out.
				SG_ERR_THROW(SG_ERR_INVALIDARG);
			}

			if(count-2<2)
			{
				// This would be interpreted as merging a node with itself or with nothing.
				SG_ERR_THROW(SG_ERR_INVALIDARG);
			}

			SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa, count-2)  );
			for(j=2; j<count; ++j)
			{
				SG_ERR_CHECK(  SG_varray__get__int64(pCtx, pSubtoken, j, &revno)  );
				SG_ERR_CHECK(  SG_repo__find_dagnode_by_rev_id(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, (SG_uint32)revno, &szHid)  );
				SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa, szHid)  );
				SG_NULLFREE(pCtx, szHid);
			}
			SG_ERR_CHECK(  SG_dagquery__highest_revno_common_ancestor(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, psa, &szHid)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psa);
		}
		else
		{
			SG_ERR_CHECK(  SG_repo__find_dagnode_by_rev_id(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, (SG_uint32)revno, &szHid)  );
		}
		SG_ERR_CHECK(  _tree__add_new_node(pCtx, pTree, pNodeRef, szHid, &pRefChildNode)  );
		pTree->pNextResult = pRefChildNode;
		SG_ERR_CHECK(  _node_list__append(pCtx, &pTree->pending, &pRefChildNode)  );
		SG_NULLFREE(pCtx, szHid);
	}

	if(pSubsubtoken!=NULL)
	{
		SG_ERR_CHECK(  _tree__add_subtoken(pCtx, pTree, pNodeRef, pSubsubtoken)  );
	}

	return;
fail:
	SG_NULLFREE(pCtx, szHid);
	SG_STRINGARRAY_NULLFREE(pCtx, psa);
}
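
Restating the token layout decoded above: element 0 is the revno of the node to add, elements 1..n-1 are revnos of its pending children (a leading revno of 0 means "use the LCA of the rest as the baseline"), and an optional trailing varray is a nested sub-token rooted under the element-0 node. A hedged sketch building the token [3, 5, [7, 9]], i.e. revision 3 with pending child 5, plus a nested branch where revision 7 (also under 3) has pending child 9; SG_varray__append__int64 and SG_varray__append__varray are assumed from the varray API:

// Hedged sketch: build the sub-token [3, 5, [7, 9]].
static void example_build_token(SG_context * pCtx, SG_varray ** ppvaToken)
{
    SG_varray * pvaToken = NULL;
    SG_varray * pvaSub = NULL;

    SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaToken)  );
    SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaToken, 3)  );  // node to add
    SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaToken, 5)  );  // pending child

    // Nested sub-token: revision 7 under node 3, with pending child 9.
    SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaSub)  );
    SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 7)  );
    SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 9)  );
    SG_ERR_CHECK(  SG_varray__append__varray(pCtx, pvaToken, &pvaSub)  );  // assumed to steal pvaSub

    *ppvaToken = pvaToken;
    pvaToken = NULL;

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaSub);
    SG_VARRAY_NULLFREE(pCtx, pvaToken);
}
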
Code example #28
/**
 * Request to UNLOCK on one or more files.
 *
 * WARNING: This routine deviates from the model of most
 * WARNING: of the SG_wc__ level-8 and/or SG_wc_tx level-7
 * WARNING: API routines because we cannot just "queue" an
 * WARNING: unlock like we do a RENAME with everything
 * WARNING: contained within the pWcTx; we actually have
 * WARNING: to update the locks dag (which is outside of
 * WARNING: the scope of the WC TX).
 * WARNING:
 * WARNING: So we only have a level-8 API
 * WARNING: so that we can completely control/bound the TX.
 *
 * We also deviate in that we don't take a --test
 * nor --verbose option.  Which means we don't have a
 * JOURNAL to mess with.
 *
 */
void SG_wc__unlock(SG_context * pCtx,
                   const SG_pathname* pPathWc,
                   const SG_stringarray * psaInputs,
                   SG_bool bForce,
                   const char * psz_username,
                   const char * psz_password,
                   const char * psz_repo_upstream)
{
    SG_wc_tx * pWcTx = NULL;
    SG_audit q;
    SG_uint32 nrInputs = 0;
    SG_uint32 k;
    char * psz_tied_branch_name = NULL;
    char * psz_repo_upstream_allocated = NULL;
    SG_vhash * pvh_gids = NULL;
    const char * pszRepoDescriptorName = NULL;	// we do not own this

    if (psaInputs)
        SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaInputs, &nrInputs)  );
    if (nrInputs == 0)
        SG_ERR_THROW2(  SG_ERR_INVALIDARG,
                        (pCtx, "Nothing to unlock")  );

    // psz_username is optional (assume no auth required)
    // psz_password is optional (assume no auth required)
    // psz_server is optional (assume default server)

    // Begin a WC TX so that we get all of the good stuff
    // (like mapping the CWD into a REPO handle and mapping
    // the inputs into GIDs).
    //
    // At this point I don't believe that releasing a lock
    // will actually make any changes in WC.DB, so I'm
    // making it a READ-ONLY TX.
    //
    // My assumption is that the unlock actually gets
    // written to the Locks DAG and shared with the server.
    // But I still want a TX handle for all of the other stuff.

    SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPathWc, SG_FALSE)  );

    // We need the repo descriptor name later for the push/pull
    // and to optionally look up the default destination for
    // this repo.  The pRepo stores this *IFF* it was properly
    // opened (using a name).

    SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pWcTx->pDb->pRepo,
                   &pszRepoDescriptorName)  );
    SG_ASSERT_RELEASE_FAIL2(  (pszRepoDescriptorName && *pszRepoDescriptorName),
                              (pCtx, "SG_wc__unlock: Could not get repo descriptor name.")  );

    // now we need to know what branch we are tied to.
    // if we're not tied, fail
    SG_ERR_CHECK(  SG_wc_tx__branch__get(pCtx, pWcTx, &psz_tied_branch_name)  );
    if (!psz_tied_branch_name)
        SG_ERR_THROW(  SG_ERR_NOT_TIED  );

    SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_gids)  );
    for (k=0; k<nrInputs; k++)
    {
        const char * pszInput_k;

        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaInputs, k, &pszInput_k)  );
        SG_ERR_CHECK(  _map_input(pCtx, pWcTx, pvh_gids, pszInput_k)  );
    }

    if (!psz_repo_upstream)
    {
        SG_localsettings__descriptor__get__sz(pCtx, pszRepoDescriptorName,
                                              "paths/default",
                                              &psz_repo_upstream_allocated);
        if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
            SG_ERR_REPLACE_ANY_RETHROW(  SG_ERR_NO_SERVER_SPECIFIED  );
        else
            SG_ERR_CHECK_CURRENT;

        psz_repo_upstream = psz_repo_upstream_allocated;
    }

    SG_ERR_CHECK(  SG_audit__init(pCtx, &q, pWcTx->pDb->pRepo,
                                  SG_AUDIT__WHEN__NOW,
                                  SG_AUDIT__WHO__FROM_SETTINGS)  );

    // OK, we have all the pieces.  Time to call the unlock code
    SG_ERR_CHECK(  SG_vc_locks__unlock(
                       pCtx,
                       pszRepoDescriptorName,
                       psz_repo_upstream,
                       psz_username,
                       psz_password,
                       psz_tied_branch_name,
                       pvh_gids,
                       bForce,
                       &q
                   )  );

    // Fall through and let the normal fail code discard/cancel
    // the read-only WC TX.  This will not affect the Locks DAG
    // nor the server.

fail:
    SG_ERR_IGNORE(  SG_wc_tx__cancel(pCtx, pWcTx)  );
    SG_WC_TX__NULLFREE(pCtx, pWcTx);
    SG_NULLFREE(pCtx, psz_tied_branch_name);
    SG_NULLFREE(pCtx, psz_repo_upstream_allocated);
    SG_VHASH_NULLFREE(pCtx, pvh_gids);
}
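
A minimal calling sketch for SG_wc__unlock, matching the signature above. The file name is a placeholder; NULL username/password/upstream fall back to "no auth" and the repo's paths/default setting, as the body documents:

// Hedged sketch: release the lock on one file, no --force, default server.
void example_unlock_one_file(SG_context * pCtx, const SG_pathname * pPathWc)
{
    SG_stringarray * psaInputs = NULL;

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaInputs, 1)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaInputs, "src/foo.c")  );

    SG_ERR_CHECK(  SG_wc__unlock(pCtx, pPathWc, psaInputs,
                                 SG_FALSE,      // bForce
                                 NULL, NULL,    // username/password: no auth
                                 NULL)  );      // upstream: use paths/default

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaInputs);
}
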