void FindMCOwnerFeatures(const GoBoard& bd, SgPoint move,
                         FeBasicFeatureSet& features)
{
    // TODO run 63 simulations
    SG_UNUSED(bd);
    SG_UNUSED(move);

    FeBasicFeature f = FE_NONE;
    int n = NuWins() / 8;
    switch(n)
    {
        case 0: f = FE_MC_OWNER_1; break;
        case 1: f = FE_MC_OWNER_2; break;
        case 2: f = FE_MC_OWNER_3; break;
        case 3: f = FE_MC_OWNER_4; break;
        case 4: f = FE_MC_OWNER_5; break;
        case 5: f = FE_MC_OWNER_6; break;
        case 6: f = FE_MC_OWNER_7; break;
        case 7: f = FE_MC_OWNER_8; break;
        default: SG_ASSERT(false); break;
    }
    if (f != FE_NONE)
        features.set(f);
}
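/*
 * A standalone sketch of the bucket mapping above, assuming the TODO's
 * 63 simulations so that the win count stays in [0, 63] (plain C, purely
 * illustrative; not part of the Fuego code).
 */
#include <stdio.h>

int main(void)
{
    /* wins/8 yields bucket 0..7, i.e. FE_MC_OWNER_1 .. FE_MC_OWNER_8. */
    int samples[] = { 0, 7, 8, 31, 32, 63 };
    unsigned i;
    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
        printf("wins = %2d -> FE_MC_OWNER_%d\n", samples[i], samples[i] / 8 + 1);
    return 0;
}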
/**
 * Return true if the entryname collides (or potentially
 * collides) with the .sgdrawer and/or .sgcloset (or has
 * a prefix that does).
 *
 * The user can set $SGCLOSET but we don't look at it.
 * S9718.
 *
 */
void sg_wc_db__path__is_reserved_entryname(SG_context * pCtx,
                                           sg_wc_db * pDb,
                                           const char * pszEntryname,
                                           SG_bool * pbIsReserved)
{
    const char * pszDrawerName = ".sgdrawer";
    const char * pszDefaultClosetName = ".sgcloset";

    SG_UNUSED( pCtx );
    SG_UNUSED( pDb );

    // It's OK to use stricmp/strnicmp rather than a
    // portability collider because the ".sgdrawer" and
    // ".sgcloset" prefixes we are looking for are 7-bit clean.
    //
    // However I suppose it could give the wrong answer
    // if they pass us something that includes some NFD
    // ignorable characters or something.
    //
    // I'm mainly avoiding using a portability collider
    // because I only want a prefix check.

    if (SG_strnicmp(pszEntryname, pszDrawerName, SG_STRLEN(pszDrawerName)) == 0)
    {
        *pbIsReserved = SG_TRUE;
    }
    else if (SG_strnicmp(pszEntryname, pszDefaultClosetName, SG_STRLEN(pszDefaultClosetName)) == 0)
    {
        *pbIsReserved = SG_TRUE;
    }
    else
    {
        *pbIsReserved = SG_FALSE;
    }
}
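/*
 * A minimal standalone illustration of the same prefix test, using
 * strncasecmp in place of SG_strnicmp (plain C; the sample entrynames
 * and the helper name is_reserved are illustrative assumptions, not
 * part of the wc.db API).
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>

static int is_reserved(const char * pszName)
{
    /* Case-insensitive prefix match against the two reserved names. */
    return (strncasecmp(pszName, ".sgdrawer", strlen(".sgdrawer")) == 0)
        || (strncasecmp(pszName, ".sgcloset", strlen(".sgcloset")) == 0);
}

int main(void)
{
    const char * aszSamples[] = { ".sgdrawer", ".SgCloset-temp", "readme.txt" };
    unsigned i;
    for (i = 0; i < sizeof(aszSamples) / sizeof(aszSamples[0]); ++i)
        printf("%-16s -> %s\n", aszSamples[i],
               is_reserved(aszSamples[i]) ? "reserved" : "ok");
    return 0;
}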
static void _dagwalk_callback(SG_context* pCtx,
                              SG_UNUSED_PARAM(SG_repo* pRepo),
                              void* pData,
                              SG_dagnode* pCurrentNode,
                              SG_UNUSED_PARAM(SG_rbtree* pDagnodeCache),
                              SG_bool* pbContinue)
{
    _dagwalk_data* pDagWalkData = (_dagwalk_data*)pData;
    const char* pszCurrentNodeHid = NULL;
    SG_int32 genCurrentNode;

    SG_UNUSED(pRepo);
    SG_UNUSED(pDagnodeCache);

    SG_ERR_CHECK_RETURN( SG_dagnode__get_generation(pCtx, pCurrentNode, &genCurrentNode) );
    if (genCurrentNode < pDagWalkData->genLimit)
    {
        *pbContinue = SG_FALSE;
        return;
    }

    SG_ERR_CHECK_RETURN( SG_dagnode__get_id_ref(pCtx, (const SG_dagnode*)pCurrentNode, &pszCurrentNodeHid) );

    if (!strcmp(pDagWalkData->pszStartNodeHid, (const char*)pszCurrentNodeHid))
        return;

    SG_ERR_CHECK_RETURN( SG_rbtree__update(pCtx, pDagWalkData->prbVisitedNodes, (const char*)pszCurrentNodeHid) );

    // TODO: Stop walking when this node and all its siblings are already in prbVisitedNodes?
}
/**
 * Insert/Replace a TNE ROW from the tne_L0 table in the wc.db.
 *
 * The existing row (if it exists) is a copy of the TNE
 * as it existed in the current baseline. This item
 * will be present in the future baseline, but it has
 * one or more changed fields. So we want the TNE ROW
 * to be updated as we transition the tne_L0 table.
 *
 */
void sg_wc_tx__apply__insert_tne(SG_context * pCtx,
                                 SG_wc_tx * pWcTx,
                                 const SG_vhash * pvh)
{
#if TRACE_WC_TX_APPLY
    const char * pszRepoPath;

    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvh, "src", &pszRepoPath) );
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              ("sg_wc_tx__apply__insert_tne: '%s'\n"),
                              pszRepoPath) );
#else
    SG_UNUSED( pCtx );
    SG_UNUSED( pvh );
#endif

    SG_UNUSED( pWcTx );

    // we don't actually have anything here.
    // the journal record was more for the verbose log.
    // the actual work of updating the SQL will be done
    // in the parallel journal-stmt.
}
void SG_curl__set__read_string(SG_context* pCtx, SG_curl* pCurl, SG_string* pString)
{
    SG_UNUSED(pCtx);
    SG_UNUSED(pCurl);
    SG_UNUSED(pString);
    SG_ERR_THROW_RETURN(SG_ERR_NOTIMPLEMENTED);
}
bool SgSearchControl::StartNextIteration(int depth, double elapsedTime,
                                         int numNodes)
{
    SG_UNUSED(depth);
    SG_UNUSED(elapsedTime);
    SG_UNUSED(numNodes);
    return true;
}
void SG_password__get(
    SG_context *pCtx,
    const char *szRepoSpec,
    const char *szUsername,
    SG_string **ppstrPassword)
{
    SG_UNUSED(szRepoSpec);
    SG_UNUSED(szUsername);
    SG_UNUSED(ppstrPassword);

    SG_ERR_THROW_RETURN(SG_ERR_NOTIMPLEMENTED);
}
static JSBool
Help(JSContext *cx, uintN argc, jsval *vp)
{
    uintN i;

    SG_UNUSED(cx);
    SG_UNUSED(argc);

    fprintf(gOutFile, "%s\n", JS_GetImplementationVersion());
    fputs(shell_help_header, gOutFile);
    for (i = 0; shell_functions[i].name; i++)
        fprintf(gOutFile, "%s\n", shell_help_messages[i]);

    JS_SET_RVAL(cx, vp, JSVAL_VOID);
    return JS_TRUE;
}
static void _sg_cmd_util__dump_log__revspec__cb(
    SG_context* pCtx,
    SG_repo* pRepo,
    SG_rev_spec_type specType,
    const char* pszGiven,   /* The value as added to the SG_rev_spec. */
    const char* pszFullHid, /* The full HID of the looked-up spec. */
    void* ctx)
{
    struct _sg_cmd_util__dump_log__revspec__baton * b =
        (struct _sg_cmd_util__dump_log__revspec__baton *)ctx;

    SG_UNUSED(specType);
    SG_UNUSED(pszGiven);

    SG_ERR_CHECK_RETURN( SG_cmd_util__dump_log(pCtx, b->cs, pRepo, pszFullHid,
                                               b->pvhCleanPileOfBranches,
                                               b->bShowOnlyOpenBranchNames,
                                               b->bShowFullComments) );
}
void SG_group__list_members(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_group_name,
    SG_varray** ppva
    )
{
    SG_UNUSED(pCtx);
    SG_UNUSED(pRepo);
    SG_UNUSED(psz_group_name);
    SG_UNUSED(ppva);

    SG_ERR_THROW_RETURN( SG_ERR_NOTIMPLEMENTED );
}
static void _deserialize_data_ver_1_cb(SG_context * pCtx,
                                       void * pVoidDeserializeData,
                                       SG_UNUSED_PARAM(const SG_varray * pva),
                                       SG_UNUSED_PARAM(SG_uint32 ndx),
                                       const SG_variant * pVariant)
{
    struct _deserialize_data * pDeserializeData = (struct _deserialize_data *)pVoidDeserializeData;
    SG_vhash * pvhMyData;
    SG_vhash * pvhDagnode;
    SG_int64 gen64, state64;
    _my_data * pMyData;
    SG_dagnode * pDagnode = NULL;
    const char* psz_id = NULL;

    SG_UNUSED(pva);
    SG_UNUSED(ndx);

    SG_ERR_CHECK( SG_variant__get__vhash(pCtx,pVariant,&pvhMyData) );

#if DEBUG && TRACE_DAGFRAG && 0
    SG_ERR_CHECK( SG_vhash_debug__dump_to_console(pCtx, pvhMyData) );
#endif

    SG_ERR_CHECK( SG_vhash__get__int64(pCtx,pvhMyData,KEY_DFS_STATE,&state64) );

    if (SG_DFS_END_FRINGE == state64)
    {
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx,pvhMyData,KEY_DAGNODE_ID,&psz_id) );
        SG_ERR_CHECK( _cache__add__fringe(pCtx,pDeserializeData->pFrag, psz_id) );
    }
    else
    {
        SG_ERR_CHECK( SG_vhash__get__vhash(pCtx,pvhMyData,KEY_ACTUAL_DAGNODE,&pvhDagnode) );
        SG_ERR_CHECK( SG_vhash__get__int64(pCtx,pvhDagnode,KEY_GEN,&gen64) );
        SG_ERR_CHECK( SG_dagnode__alloc__from_vhash(pCtx, &pDagnode, pvhDagnode) );
        SG_ERR_CHECK( _cache__add__dagnode(pCtx,
                                           pDeserializeData->pFrag,
                                           (SG_int32)gen64,
                                           pDagnode,
                                           (SG_uint32)state64,
                                           &pMyData) );
        pDagnode = NULL; // cache owns it now.
    }

    return;

fail:
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
void SG_group__remove_subgroups(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_group_name,
    const char** paszMemberNames,
    SG_uint32 count_names
    )
{
    SG_UNUSED(pCtx);
    SG_UNUSED(pRepo);
    SG_UNUSED(psz_group_name);
    SG_UNUSED(paszMemberNames);
    SG_UNUSED(count_names);

    SG_ERR_THROW_RETURN( SG_ERR_NOTIMPLEMENTED );
}
void GoRegionBoard::RemoveBlock(GoBlock* b, bool isExecute,
                                bool removeFromRegions)
{
    SgBlackWhite color = b->Color();
    for (SgSetIterator it(b->Stones()); it; ++it)
        m_block[*it] = 0;
    bool found = m_allBlocks[color].Exclude(b);
    SG_UNUSED(found);
    SG_ASSERT(found);
    const int size = Board().Size();
    // remove from regions.
    SgPointSet area(b->Stones().Border(size));
    SgVectorOf<GoRegion> regions;
    if (removeFromRegions)
    {
        RegionsAt(area, color, &regions);
        for (SgVectorIteratorOf<GoRegion> it(regions); it; ++it)
        {
            (*it)->RemoveBlock(b);
            if (isExecute)
                m_stack.PushPtr(*it);
        }
    }
    if (isExecute)
    {
        m_stack.PushInt(regions.Length()); // 0 if ! removeFromRegions
        PushBlock(REGION_REMOVE_BLOCK, b);
    }
    else
        delete b;
}
/**
 * Is this object a candidate for short-circuit evaluation? That is, can we
 * avoid diving into this pair of folders and recursively comparing everything
 * within?
 *
 * We must have a pair of unscanned folders with equal content HIDs. If one has
 * already been scanned, we must scan the other so that the children don't look
 * like peerless objects. (We either scan neither or both.)
 *
 * If we have pendingtree data, we don't know how much of the tree it populated.
 * If the dsFlags in the pendingtree version indicate a change *on* the folder
 * (such as a rename/move), we can still allow the short-circuit; only if it
 * indicates a change in the stuff *within* the folder do we force it to continue.
 * This is another instance of the accidental peerless problem.
 */
void sg_vv2__status__can_short_circuit_from_work_queue(SG_context * pCtx,
                                                       const sg_vv2status_od * pOD,
                                                       SG_bool * pbCanShortCircuit)
{
    SG_bool bEqual;
    const char * pszHid_orig;
    const char * pszHid_dest;

    SG_UNUSED( pCtx );

    if (!pOD->apInst[SG_VV2__OD_NDX_ORIG])
        goto no;
    if (!pOD->apInst[SG_VV2__OD_NDX_DEST])
        goto no;

    if (pOD->apInst[SG_VV2__OD_NDX_ORIG]->typeInst != SG_VV2__OD_TYPE_UNSCANNED_FOLDER)
        goto no;
    if (pOD->apInst[SG_VV2__OD_NDX_DEST]->typeInst != SG_VV2__OD_TYPE_UNSCANNED_FOLDER)
        goto no;

    SG_ERR_CHECK( SG_treenode_entry__get_hid_blob(pCtx,
                                                  pOD->apInst[SG_VV2__OD_NDX_ORIG]->pTNE,
                                                  &pszHid_orig) );
    SG_ERR_CHECK( SG_treenode_entry__get_hid_blob(pCtx,
                                                  pOD->apInst[SG_VV2__OD_NDX_DEST]->pTNE,
                                                  &pszHid_dest) );
    bEqual = (strcmp(pszHid_orig, pszHid_dest) == 0);
    if (!bEqual)
        goto no;

    *pbCanShortCircuit = SG_TRUE;
    return;

no:
    *pbCanShortCircuit = SG_FALSE;

fail:
    return;
}
void _dispatch__todo(SG_context * pCtx,
                     _request_headers * pRequestHeaders,
                     SG_repo **ppRepo,
                     const char ** ppUriSubstrings,
                     SG_uint32 uriSubstringsCount,
                     _response_handle ** ppResponseHandle)
{
    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(pRequestHeaders);
    SG_NULLARGCHECK_RETURN(ppUriSubstrings);
    SG_NULLARGCHECK_RETURN(ppResponseHandle);
    SG_ASSERT(*ppResponseHandle==NULL);
    SG_UNUSED(ppRepo);

    if(uriSubstringsCount==0)
    {
        if(eq(pRequestHeaders->pRequestMethod,"GET"))
        {
            SG_ERR_CHECK_RETURN( _create_response_handle_for_template(pCtx, pRequestHeaders,
                                                                      SG_HTTP_STATUS_OK, "todo.xhtml",
                                                                      _todo_replacer, ppResponseHandle) );
        }
        else
            SG_ERR_THROW_RETURN(SG_ERR_URI_HTTP_405_METHOD_NOT_ALLOWED);
    }
    else
        SG_ERR_THROW_RETURN(SG_ERR_URI_HTTP_404_NOT_FOUND);
}
static curlioerr _my_ioctl__file(CURL *handle, int cmd, void *clientp)
{
    _sg_curl* pMe = (_sg_curl*)clientp;

    SG_UNUSED(handle);

    if (cmd == CURLIOCMD_NOP)
    {
        SG_context__push_level(pMe->pCtx);
        SG_log__report_verbose(pMe->pCtx, "SG_curl handling CURLIOCMD_NOP.");
        SG_context__pop_level(pMe->pCtx);
        return CURLIOE_OK;
    }
    else if (cmd == CURLIOCMD_RESTARTREAD)
    {
        SG_context__push_level(pMe->pCtx);
        SG_log__report_verbose(pMe->pCtx, "SG_curl handling CURLIOCMD_RESTARTREAD.");
        SG_context__pop_level(pMe->pCtx);

        SG_file__seek(pMe->pCtx, pMe->readState.pFile, 0);
        if(SG_context__has_err(pMe->pCtx))
            return CURLIOE_FAILRESTART;

        pMe->readState.pos = 0;
        pMe->readState.finished = SG_FALSE;
        return CURLIOE_OK;
    }
    else
    {
        return CURLIOE_UNKNOWNCMD;
    }
}
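/*
 * For context, a sketch of how an ioctl callback like the one above is
 * typically wired into a libcurl easy handle so libcurl can request a
 * rewind of the upload stream (e.g. on an authentication retry). The
 * helper name attach_ioctl is an illustrative assumption; CURLOPT_IOCTLFUNCTION
 * and CURLOPT_IOCTLDATA are standard (if since-deprecated) libcurl options.
 */
#include <curl/curl.h>

static void attach_ioctl(CURL * handle, _sg_curl * pMe)
{
    /* libcurl passes pMe back to _my_ioctl__file as clientp. */
    curl_easy_setopt(handle, CURLOPT_IOCTLFUNCTION, _my_ioctl__file);
    curl_easy_setopt(handle, CURLOPT_IOCTLDATA, pMe);
}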
static int _compare_path(SG_context * pCtx,
                         const void * pVoid_ppv1, // const SG_variant ** ppv1
                         const void * pVoid_ppv2, // const SG_variant ** ppv2
                         void * pVoidData)
{
    const SG_variant** ppv1 = (const SG_variant **)pVoid_ppv1;
    const SG_variant** ppv2 = (const SG_variant **)pVoid_ppv2;
    SG_vhash * pvh1;
    SG_vhash * pvh2;
    const char * psz1;
    const char * psz2;
    int result = 0;

    SG_UNUSED( pVoidData );

    if (*ppv1 == NULL && *ppv2 == NULL)
        return 0;
    if (*ppv1 == NULL)
        return -1;
    if (*ppv2 == NULL)
        return 1;

    SG_variant__get__vhash(pCtx, *ppv1, &pvh1);
    SG_variant__get__vhash(pCtx, *ppv2, &pvh2);

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh1, "path", &psz1) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh2, "path", &psz2) );

    SG_ERR_CHECK( SG_repopath__compare(pCtx, psz1, psz2, &result) );

fail:
    return result;
}
/**
 * Given an issue that has a file-content conflict and has just been resolved,
 * go thru all of the steps in the merge plan and delete the ~mine files.
 */
static void _resolve__delete_temp_files(SG_context * pCtx,
                                        struct _resolve_data * pData,
                                        const char * pszGid,
                                        const SG_vhash * pvhIssue)
{
    _resolve__step_pathnames * pStepPathnames = NULL;
    const SG_varray * pvaPlan;
    SG_uint32 kStep, nrSteps;

    SG_UNUSED( pszGid );

    SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan",
                                        (SG_varray **)&pvaPlan) );
    SG_ERR_CHECK( SG_varray__count(pCtx, pvaPlan, &nrSteps) );

    for (kStep=0; kStep<nrSteps; kStep++)
    {
        const SG_vhash * pvhStep;

        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaPlan, kStep, (SG_vhash **)&pvhStep) );
        SG_ERR_CHECK( _resolve__step_pathnames__compute(pCtx, pData, pvhIssue, pvhStep,
                                                        NULL, &pStepPathnames) );
        SG_ERR_CHECK( _resolve__step_pathnames__delete_temp_files(pCtx, pStepPathnames) );

        _RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
    }

fail:
    _RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
void GoRegionBoard::MergeAdjacentAndAddBlock(SgPoint move,
                                             SgBlackWhite capturedColor)
{
    SgVector<SgPoint> nb;
    for (GoNbIterator it(Board(), move); it; ++it)
        if (Board().IsEmpty(*it))
            nb.PushBack(*it);

    SgVectorOf<GoBlock> captures;
    PreviousBlocksAt(nb, capturedColor, &captures);
    SG_ASSERT(captures.NonEmpty());

    SgPointSet captured;
    {
        for (SgVectorIteratorOf<GoBlock> it(captures); it; ++it)
            captured |= (*it)->Stones();
    }
    SgVectorOf<GoRegion> adj;
    const int size = Board().Size();
    RegionsAt(captured.Border(size), capturedColor, &adj);
    SG_ASSERT(adj.NonEmpty());
    GoRegion* r = MergeAll(adj, captured, capturedColor);
    SG_UNUSED(r);

    for (SgVectorIteratorOf<GoBlock> it(captures); it; ++it)
        RemoveBlock(*it, true, false);
    // don't remove from regions; already gone.
}
/**
 * QUEUE a "SPECIAL-ADD". This is used by MERGE, for example,
 * to add things that were added in the other branch to the
 * final result.
 *
 * TODO 2011/01/31 This routine really only needs the LVD of the
 * TODO            parent directory and can dynamically compute
 * TODO            the repo-path from it. I'm just passing it
 * TODO            in here because the first caller just happened
 * TODO            to have it on hand.
 *
 */
void sg_wc_tx__queue__add_special(SG_context * pCtx,
                                  SG_wc_tx * pWcTx,
                                  const SG_string * pStringLiveRepoPath_Parent,
                                  sg_wc_liveview_dir * pLVD_Parent,
                                  const char * pszEntryname,
                                  SG_uint64 uiAliasGid,
                                  SG_treenode_entry_type tneType,
                                  const char * pszHidBlob,
                                  SG_int64 attrbits,
                                  SG_wc_status_flags statusFlagsAddSpecialReason)
{
    sg_wc_liveview_item * pLVI_Entry; // we do not own this

    SG_UNUSED( pStringLiveRepoPath_Parent );

    SG_ERR_CHECK( sg_wc_liveview_dir__add_special(pCtx, pWcTx, pLVD_Parent,
                                                  pszEntryname,
                                                  uiAliasGid,
                                                  tneType,
                                                  pszHidBlob,
                                                  attrbits,
                                                  statusFlagsAddSpecialReason,
                                                  &pLVI_Entry) );

    SG_ERR_CHECK( sg_wc_tx__journal__add_special(pCtx, pWcTx,
                                                 pLVI_Entry,
                                                 pszHidBlob,
                                                 attrbits) );

fail:
    return;
}
void u0026_jcb(SG_UNUSED_PARAM(SG_context* pCtx), void* ctx, int type,
               const SG_jsonparser_value* value)
{
    struct u0026_ctx* p = (struct u0026_ctx*) ctx;

    SG_UNUSED(pCtx);

    switch(type)
    {
    case SG_JSONPARSER_TYPE_ARRAY_BEGIN:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_ARRAY_END:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_OBJECT_BEGIN:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_OBJECT_END:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_INTEGER:
        if (0 == strcmp(p->key, "x"))
        {
            VERIFY_COND("x", 5 == value->vu.integer_value);
        }
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_FLOAT:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_NULL:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_TRUE:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_FALSE:
        p->key[0] = 0;
        break;
    case SG_JSONPARSER_TYPE_KEY:
        strcpy(p->key, value->vu.str.value);
        break;
    case SG_JSONPARSER_TYPE_STRING:
        if (0 == strcmp(p->key, "hello"))
        {
            VERIFY_COND("hello:world", 0 == strcmp("world", value->vu.str.value));
        }
        else if (0 == strcmp(p->key, "messy"))
        {
            VERIFY_COND("messy", 0 == strcmp(U0026_MESSY, value->vu.str.value));
        }
        p->key[0] = 0;
        break;
    default:
        SG_ASSERT(0);
        break;
    }
}
/**
 * Find the appropriate external tool to let the user perform a TEXT MERGE
 * on a file.
 *
 * TODO 2010/07/13 For now, this is hard-coded to use DiffMerge.
 * TODO            Later we want to allow them to have multiple
 * TODO            tools configured and/or to use the file suffix
 * TODO            and so on.
 */
static void _resolve__external_tool__lookup(SG_context * pCtx,
                                            struct _resolve_data * pData,
                                            const char * pszGid,
                                            const SG_vhash * pvhIssue,
                                            SG_string * pStrRepoPath,
                                            _resolve__external_tool ** ppET)
{
    _resolve__external_tool * pET = NULL;
    SG_repo * pRepo;

    SG_UNUSED( pszGid );
    SG_UNUSED( pvhIssue );

    SG_ERR_CHECK( SG_pendingtree__get_repo(pCtx, pData->pPendingTree, &pRepo) );

    SG_ERR_CHECK( SG_alloc1(pCtx, pET) );

    // TODO 2010/07/13 Use localsettings to determine WHICH external tool we should use.
    // TODO            (This could be based upon suffixes and/or whatever.)
    // TODO            Then -- for THAT tool -- lookup the program executable path and
    // TODO            the argument list.
    // TODO            Substitute the given pathnames into the argument list.
    // TODO
    // TODO            For now, we just hard-code DiffMerge.

    SG_ERR_CHECK( SG_strdup(pCtx, "DiffMerge", &pET->pszName) );

    SG_localsettings__get__sz(pCtx, "merge/diffmerge/program", pRepo, &pET->pszExe, NULL);
    if (SG_context__has_err(pCtx) || (!pET->pszExe) || (!*pET->pszExe))
    {
        SG_context__err_reset(pCtx);
        SG_ERR_THROW2( SG_ERR_NO_MERGE_TOOL_CONFIGURED,
                       (pCtx, "'%s' Use 'vv localsetting set merge/diffmerge/program' and retry -or- manually merge content and then use 'vv resolve --mark'.",
                        SG_string__sz(pStrRepoPath)) );
    }

    // TODO 2010/07/13 Get argvec.

    *ppET = pET;
    return;

fail:
    _RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
}
void sg_wc_tx__apply__resolve_issue__sr(SG_context * pCtx,
                                        SG_wc_tx * pWcTx,
                                        const SG_vhash * pvh)
{
#if TRACE_WC_RESOLVE
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              ("sg_wc_tx__apply__resolve_issue__sr:\n")) );
#else
    SG_UNUSED( pCtx );
#endif

    SG_UNUSED( pWcTx );
    SG_UNUSED( pvh );

    // we don't actually have anything here.
    // the journal record was more for the verbose log.
    // the actual work of updating the SQL will be done
    // in the parallel journal-stmt.
}
void SG_time__parseRFC850(SG_context* pCtx,
                          const char* pszInputString,
                          SG_int64 * pTime,
                          SG_int64 * pReturnedLocalTime)
{
    int scanOK = 0;
    char trash[4];
    int day = 0;
    int month = 0;
    int year = 0;
    int hour = 0;
    int minute = 0;
    int second = 0;
    char monthstr[4];
    char tz[4];
    struct tm t;

    SG_UNUSED(pCtx);
    SG_UNUSED(pszInputString);

#if defined(WINDOWS)
    scanOK = sscanf_s(pszInputString, "%3s, %d %3s %d %d:%d:%d %3s",
                      trash, sizeof(trash), &day, monthstr, sizeof(monthstr),
                      &year, &hour, &minute, &second, tz, sizeof(tz));
#else
    scanOK = sscanf(pszInputString, "%3s, %d %3s %d %d:%d:%d %3s",
                    trash, &day, monthstr, &year, &hour, &minute, &second, tz);
#endif

    if ((scanOK != 8) || (strcmp("GMT", tz) != 0))
    {
        SG_ERR_THROW_RETURN(SG_ERR_ARGUMENT_OUT_OF_RANGE);
    }

    SG_ERR_CHECK_RETURN( _lookupMonth(pCtx, monthstr, &month) );

    t.tm_min = minute;
    t.tm_hour = hour;
    t.tm_sec = second;
    t.tm_year = year - 1900;
    t.tm_mon = month;
    t.tm_mday = day;

    *pTime = (SG_int64)utc_mktime(&t) * 1000;
    *pReturnedLocalTime = (SG_int64)mktime(&t) * 1000;
}
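/*
 * A minimal standalone sketch of the same sscanf pattern applied to a
 * typical GMT timestamp (plain C; the sample string is illustrative).
 * On success all eight fields are filled in, matching the scanOK == 8
 * check above.
 */
#include <stdio.h>

int main(void)
{
    char trash[4], monthstr[4], tz[4];
    int day, year, hour, minute, second;
    int fields = sscanf("Sun, 06 Nov 1994 08:49:37 GMT",
                        "%3s, %d %3s %d %d:%d:%d %3s",
                        trash, &day, monthstr, &year,
                        &hour, &minute, &second, tz);
    printf("fields=%d day=%d month=%s year=%d %02d:%02d:%02d %s\n",
           fields, day, monthstr, year, hour, minute, second, tz);
    return 0;
}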
void SG_localsettings__foreach(
    SG_context* pCtx,
    const char* szPattern,
    SG_bool bIncludeDefaults,
    SG_localsettings_foreach_callback* pCallback,
    void* pCallerData
    )
{
    SG_vhash* pValues = NULL;
    SG_vhash* pDefaults = NULL;
    provide_matching_values__data ProvideMatchingValuesData = {NULL, NULL, NULL, NULL};

    SG_UNUSED(pCallback);
    SG_UNUSED(pCallerData);

    // get the settings
    SG_ERR_CHECK( SG_localsettings__list__vhash(pCtx, &pValues) );

    // if defaults were requested, get those too
    if (bIncludeDefaults)
    {
        SG_ERR_CHECK( SG_localsettings__factory__list__vhash(pCtx, &pDefaults) );
        SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pValues, SG_LOCALSETTING__SCOPE__DEFAULT + 1, &pDefaults) ); // +1 to remove the slash at the beginning
    }

    // sort the settings
    SG_ERR_CHECK( SG_vhash__sort(pCtx, pValues, SG_TRUE, SG_vhash_sort_callback__increasing) );

    // setup our callback data
    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &(ProvideMatchingValuesData.pPrefix)) );
    ProvideMatchingValuesData.szPattern = szPattern;
    ProvideMatchingValuesData.pCallback = pCallback;
    ProvideMatchingValuesData.pCallerData = pCallerData;

    // iterate through the vhash
    SG_ERR_CHECK( SG_vhash__foreach(pCtx, pValues, provide_matching_values, &ProvideMatchingValuesData) );

fail:
    SG_VHASH_NULLFREE(pCtx, pValues);
    SG_VHASH_NULLFREE(pCtx, pDefaults);
    SG_STRING_NULLFREE(pCtx, ProvideMatchingValuesData.pPrefix);
}
void SG_thread__threads_equal(
    SG_context* pCtx,
    SG_thread_id cThread1,
    SG_thread_id cThread2,
    SG_bool* pEqual
    )
{
    SG_UNUSED(pCtx);

    *pEqual = (cThread1 == cThread2) ? SG_TRUE : SG_FALSE;
}
void SgIncrementalStack::AddPoints(SgPointSet* set)
{
    int nu = PopInt();
    SgEmptyBlackWhite col = PopInt();
    SG_UNUSED(col);
    for (int i = 1; i <= nu; ++i)
    {
        SgPoint p = PopPoint();
        set->Include(p);
    }
}
static void _dump_marks__dir__current__cb(SG_context * pCtx,
                                          SG_uint64 uiAliasGid,
                                          void * pVoid_LVI,
                                          void * pVoid_Data)
{
    struct dump_data * pDumpData = (struct dump_data *)pVoid_Data;
    sg_wc_liveview_item * pLVI = (sg_wc_liveview_item *)pVoid_LVI;

    SG_UNUSED( uiAliasGid );

    SG_ERR_CHECK_RETURN( _dump_marks__lvi(pCtx, pDumpData->pWcTx, pLVI,
                                          pDumpData->indent, " ", SG_TRUE) );
}
/**
 * A predicate that matches any value whose index is evenly divisible by a given number.
 */
void MyFn(remove__if__predicate)(
    SG_context* pCtx,      //< [in] [out] Error and context info.
    SG_uint32   uIndex,    //< [in] The index of the current item.
    void*       pValue,    //< [in] The value of the current item.
    void*       pUserData, //< [in] The number (SG_uint32*) to divide the current index by.
    SG_bool*    pMatch     //< [out] Whether or not the item matches.
    )
{
    SG_uint32* pData = (SG_uint32*)pUserData;

    SG_UNUSED(pCtx);
    SG_UNUSED(pValue);

    if ( (uIndex % *pData) == 0u )
    {
        *pMatch = SG_TRUE;
    }
    else
    {
        *pMatch = SG_FALSE;
    }
}
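/*
 * A self-contained sketch of the same divisibility test outside the
 * SG_vector test machinery (plain C; purely illustrative). With a
 * divisor of 3, indexes 0, 3, 6, ... match and would be removed by a
 * remove-if pass over the container.
 */
#include <stdio.h>

int main(void)
{
    unsigned uDivisor = 3u;
    unsigned uIndex;
    for (uIndex = 0u; uIndex < 7u; ++uIndex)
        printf("index %u -> %s\n", uIndex,
               (uIndex % uDivisor == 0u) ? "match (remove)" : "keep");
    return 0;
}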
static void _modulesInstalled(SG_context *pCtx, JSContext *cx, JSObject *glob,
                              SG_bool *installed)
{
    jsval jv = JSVAL_VOID;

    SG_UNUSED(pCtx);

    *installed = SG_FALSE;

    if (JS_GetProperty(cx, glob, "vvModulesInstalled", &jv) && JSVAL_IS_BOOLEAN(jv))
    {
        *installed = JSVAL_TO_BOOLEAN(jv);
    }
}