void IfShape::clearNodes() { #define CLEAR_NODE(name) if (name != NULL) { name->unref(); name = NULL; } #define CLEAR_LIST(name) if (name != NULL) { delete name; name = NULL; } CLEAR_NODE(camera); CLEAR_NODE(environment); CLEAR_NODE(lightModel); CLEAR_NODE(texture); CLEAR_NODE(drawStyle); CLEAR_NODE(shapeHints); CLEAR_NODE(material); CLEAR_NODE(complexity); CLEAR_NODE(coords); CLEAR_NODE(font); CLEAR_NODE(materialBinding); CLEAR_NODE(normals); CLEAR_NODE(normalBinding); CLEAR_NODE(profileCoords); CLEAR_NODE(texCoords); CLEAR_NODE(texCoordBinding); CLEAR_NODE(shape); CLEAR_LIST(lights); CLEAR_LIST(clipPlanes); CLEAR_LIST(other); CLEAR_LIST(profiles); #undef CLEAR_NODE #undef CLEAR_LIST }
void MainWindow::on_jobs_finished(){ ui->progressBar->hide(); delete iso; QString text= "<table style='border-collapse: collapse;'>" "<tr style='background: #efefef;'>" "<td style='padding:0 0.1em 0 0.1em;'>File name</td>" "<td style='padding:0 0.1em 0 0.1em;'>Job type</td>" "<td style='padding:0 0.1em 0 0.1em;'>Result</td>" "</tr>"; int counts[5]={0,}; const char *colors[]={ "#efe", // JOB_STATE_OK "#fee", // JOB_STATE_FAILED "#ffe", // JOB_STATE_WARNINGS "#eee", // JOB_STATE_SKIPPED "#f2f8f2" // JOB_STATE_AUTO }; int succ=0,failed=0; for(int i=0;i<jobs.count();i++){ GenericJob *j=jobs.at(i); counts[j->state]++; if(j->state!=JOB_STATE_OK && j->state!=JOB_STATE_AUTO) failed++; else succ++; QString comment=j->issue; if(comment.isEmpty()){ if(!j->output.isEmpty()) comment=j->output.join("<br />"); else switch(j->state){ case JOB_STATE_OK: comment="ok!"; break; case JOB_STATE_FAILED: comment="failed!"; break; case JOB_STATE_WARNINGS: comment="completed with warnings"; break; case JOB_STATE_SKIPPED: comment="skipped"; break; case JOB_STATE_AUTO: comment="auto"; break; default: comment="MISSINGNO"; break; // f**k yeah stack broken by counts array! 
} } text+=QString( "<tr style='background: %1;'>" "<td style='padding:0 0.1em 0 0.1em;'>%2</td>" "<td style='padding:0 0.1em 0 0.1em;'>%3</td>" "<td style='padding:0 0.1em 0 0.1em;'>%4</td>" "</tr>" ).arg(colors[j->state]) .arg(j->desc) .arg(j->type) .arg(comment); } text+="</table>"; QString title; if(failed==0){ title="All ok!"; text.prepend("<h1>All jobs completed succesfully.</h1><hr />"); } else{ title=QString("%1 jobs completed successfully; ").arg(succ); QStringList list; if(counts[JOB_STATE_FAILED]!=0) list.append(QString("%1 errors").arg(counts[JOB_STATE_FAILED])); if(counts[JOB_STATE_WARNINGS]!=0) list.append(QString("%1 warnings").arg(counts[JOB_STATE_WARNINGS])); if(counts[JOB_STATE_SKIPPED]!=0) list.append(QString("%1 skipped").arg(counts[JOB_STATE_SKIPPED])); title.append(list.join(", ")); text.prepend(QString("<h1>%1</h1><hr />").arg(title)); title=QString("Completed %1 out of %2 jobs.").arg(succ).arg(succ+failed); } report(title,text,&resources,failed==0); CLEAR_LIST(resources); CLEAR_LIST(jobs); ui->doItButton->setEnabled(true); }
//venu changed from int to long for 64 bit
// JNI entry point backing SQLMXCallableStatement.executeCall() in the Type-2
// JDBC driver: marshals the Java parameters into a SQL value list, runs the
// stored-procedure CALL inside transaction control, then maps the server
// outcome either to Java-side outputs or to a thrown SQLException.
JNIEXPORT void JNICALL Java_org_trafodion_jdbc_t2_SQLMXCallableStatement_executeCall
	(JNIEnv *jenv, jobject jobj, jstring server, jlong dialogueId, jint txid,
	 jboolean autoCommit, jint txnMode, jlong stmtId, jint paramCount,
	 jobject paramValues, jint queryTimeout, jstring iso88591Encoding)
{
	FUNCTION_ENTRY("Java_org_trafodion_jdbc_t2_SQLMXCallableStatement_executeCall",("..."));

	SQLValueList_def outputSqlValueList;   // OUT parameter values produced by the call
	ERROR_DESC_LIST_def sqlWarning;        // warnings returned by the server
	jint currentTxid = txid;
	jint externalTxid = 0;
	short returnResultSet;                 // set by odbc_SQLSvc_ExecuteCall_sme_ below
	long sqlcode;
	short txn_status;

	SQLValueList_def inputSqlValueList;
	CLEAR_LIST(inputSqlValueList);
	ExceptionStruct exception_;
	CLEAR_EXCEPTION(exception_);
	SRVR_STMT_HDL *pSrvrStmt;

	// Resolve the server-side statement handle; bail out with an
	// invalid-handle SQLException if it cannot be found.
	if ((pSrvrStmt = getSrvrStmt(dialogueId, stmtId, &sqlcode)) == NULL)
	{
		throwSQLException(jenv, INVALID_HANDLE_ERROR, NULL, "HY000", sqlcode);
		FUNCTION_RETURN_VOID(("getSrvrStmt() Failed"));
	}
	CLI_DEBUG_SHOW_SERVER_STATEMENT(pSrvrStmt);

	// NOTE(review): redundant with CLEAR_LIST(inputSqlValueList) above.
	inputSqlValueList._buffer = NULL;
	inputSqlValueList._length = 0;

	// Marshal the Java parameter array into the input SQL value list.
	if (fillInSQLValues(jenv, jobj, pSrvrStmt, 0, 1, paramCount, paramValues, iso88591Encoding))
		FUNCTION_RETURN_VOID(("fillInSQLValues() Failed"));

	// Enter transaction control for this statement.
	if ((txn_status = beginTxnControl(jenv, currentTxid, externalTxid, txnMode, -1)) != 0)
	{
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwTransactionException(jenv, txn_status);
		FUNCTION_RETURN_VOID(("beginTxnControl() failed"));
	}

	// Execute the CALL on the server.
	odbc_SQLSvc_ExecuteCall_sme_(NULL, NULL, &exception_, dialogueId, stmtId,
		&inputSqlValueList, FALSE, queryTimeout, &outputSqlValueList,
		&returnResultSet, &sqlWarning);

	// Leave transaction control. On failure the transaction exception is
	// thrown, but execution still falls through to the switch below —
	// presumably so the call's own outcome is also reported; TODO confirm.
	if ((txn_status = endTxnControl(jenv, currentTxid, txid, autoCommit,
		exception_.exception_nr, pSrvrStmt->isSPJRS, txnMode, externalTxid)) != 0)
	{
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwTransactionException(jenv, txn_status);
		DEBUG_OUT(DEBUG_LEVEL_ENTRY,("endTxnControl() Failed"));
	}

	// Map the server-side outcome to the Java side.
	switch (exception_.exception_nr)
	{
	case CEE_SUCCESS:
		setExecuteCallOutputs(jenv, jobj, pSrvrStmt, returnResultSet, currentTxid);
		DEBUG_OUT(DEBUG_LEVEL_STMT,("RSMax: %d  RSIndex: %d  isSPJResultSet: %d",
			pSrvrStmt->RSMax, pSrvrStmt->RSIndex, pSrvrStmt->isSPJRS));
		if (sqlWarning._length > 0)
			setSQLWarning(jenv, jobj, &sqlWarning);
		break;
	case odbc_SQLSvc_ExecuteCall_SQLQueryCancelled_exn_:
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwSQLException(jenv, QUERY_CANCELLED_ERROR, NULL, "HY008", exception_.u.SQLQueryCancelled.sqlcode);
		break;
	case odbc_SQLSvc_ExecuteCall_SQLError_exn_:
	case odbc_SQLSvc_ExecuteCall_SQLRetryCompile_exn_:
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwSQLException(jenv, &exception_.u.SQLError);
		break;
	case odbc_SQLSvc_ExecuteCall_ParamError_exn_:
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwSQLException(jenv, PROGRAMMING_ERROR, exception_.u.ParamError.ParamDesc, "HY000");
		break;
	case odbc_SQLSvc_ExecuteCall_SQLInvalidHandle_exn_:
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwSQLException(jenv, INVALID_HANDLE_ERROR, NULL, "HY000", exception_.u.SQLInvalidHandle.sqlcode);
		break;
	case odbc_SQLSvc_ExecuteCall_SQLStillExecuting_exn_:
	case odbc_SQLSvc_ExecuteCall_InvalidConnection_exn_:
	case odbc_SQLSvc_ExecuteCall_TransactionError_exn_:
	case odbc_SQLSvc_ExecuteCall_SQLNeedData_exn_:
	default:
		// TFDS - These exceptions should not happen
		jenv->CallVoidMethod(jobj, gJNICache.setCurrentTxidStmtMethodId, currentTxid);
		throwSQLException(jenv, PROGRAMMING_ERROR, NULL, "HY000", exception_.exception_nr);
		break;
	}
	FUNCTION_RETURN_VOID((NULL));
}
/*
 * Synchronous method function prototype for
 * operation 'odbc_SQLSvc_GetSQLCatalogs'
 *
 * Serves an ODBC catalog-API request (the concrete operation is selected by
 * APIType) by running the matching metadata query through
 * executeAndFetchSMDQuery() and returning the fetched results via the Out
 * parameters. On a NULL first value it retries once against SYSTEM_SCHEMA
 * before reporting an invalid-schema-version ParamError.
 */
void odbc_SQLSvc_GetSQLCatalogs_sme_(
  /* In */ void * objtag_
  , /* In */ const CEE_handle_def *call_id_
  , /* Out */ ExceptionStruct *exception_
  , /* In */ long dialogueId
  , /* In */ short APIType
  , /* In */ const char *catalogNm
  , /* In */ const char *schemaNm
  , /* In */ const char *tableNm
  , /* In */ const char *tableTypeList
  , /* In */ const char *columnNm
  , /* In */ long columnType
  , /* In */ long rowIdScope
  , /* In */ long nullable
  , /* In */ long uniqueness
  , /* In */ long accuracy
  , /* In */ short sqlType
  , /* In */ unsigned long metadataId
  , /* Out */ char *catStmtLabel
  , /* Out */ SQLItemDescList_def *outputDesc
  , /* Out */ ERROR_DESC_LIST_def *sqlWarning
  , /* Out */ long *rowsAffected
  , /* Out */ SQLValueList_def *outputValueList
  , /* Out */ long *stmtId
  , /* In */ const char *fkcatalogNm
  , /* In */ const char *fkschemaNm
  , /* In */ const char *fktableNm)
{
	FUNCTION_ENTRY("odbc_SQLSvc_GetSQLCatalogs_sme_",(""));
	DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" %#x, %#x, %#x, %#x, %d, %s, %s, %s, %s, %s, %ld, %ld, %ld, %ld, %ld, %d, %#x, %#x, %#x",
		objtag_, call_id_, exception_, dialogueId, APIType,
		catalogNm, schemaNm, tableNm, tableTypeList, columnNm,
		columnType, rowIdScope, nullable, uniqueness, accuracy,
		sqlType, catStmtLabel, outputDesc, sqlWarning));
	SRVRTRACE_ENTER(FILE_SME+14);

	// Index names for the system-metadata tables listed below.
	enum CATAPI_TABLE_INDEX {
		COLUMNS = 0, DEFAULTS, INDEXES, KEYS, OBJECTS, OBJECTUID, TABLES, VIEWS, VIEWS_USAGE, VERSIONS
	};
	char *smdCatAPITablesList[] = {
		"COLUMNS", "DEFAULTS", "INDEXES", "KEYS", "OBJECTS", "OBJECTUID", "TABLES", "VIEWS", "VIEWS_USAGE", "VERSIONS"
	};
	const char *inputParam[16];   // parameter strings substituted into the metadata query
	const char *tableParam[20];   // table-name substitutions for the metadata query
	short retCode;
	char tmpBuf[20];
	char *odbcAppVersion = "3";
	char *translationId = "3";
	ExceptionStruct prepareException;
	CLEAR_EXCEPTION(prepareException);
	ExceptionStruct executeException;
	CLEAR_EXCEPTION(executeException);
	ExceptionStruct fetchException;
	CLEAR_EXCEPTION(fetchException);
	ExceptionStruct closeException;
	CLEAR_EXCEPTION(closeException);
	// Wildcard-expanded ("exp") and escape-stripped ("NoEsc") copies of the
	// incoming name filters.
	char expCatalogNm[MAX_ANSI_NAME_LEN+1];
	char expSchemaNm[MAX_ANSI_NAME_LEN+1];
	char expTableNm[MAX_ANSI_NAME_LEN+1];
	char expColumnNm[MAX_ANSI_NAME_LEN+1];
	char expProcNm[MAX_ANSI_NAME_LEN+1];
	char catalogNmNoEsc[MAX_ANSI_NAME_LEN+1];
	char schemaNmNoEsc[MAX_ANSI_NAME_LEN+1];
	char tableNmNoEsc[MAX_ANSI_NAME_LEN+1];
	char columnNmNoEsc[MAX_ANSI_NAME_LEN+1];
	char procNmNoEsc[MAX_ANSI_NAME_LEN+1];
	// Three-part-name buffers: catalog + '.' + schema + '.' + table + '\0'.
	char tableName1[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName2[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName3[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName4[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName5[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName6[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName7[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char tableName8[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char SQLObjType[2];
	char inParam1[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char inParam2[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
	char inParam3[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3]; // catalog len + '.' + schema len + '.' + table len +'\0'
	char inParam4[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3]; // catalog len + '.' + schema len + '.' + table len +'\0'
	char schemaVersion[10]; // Holds SQL schema version from SQL_SCHEMAVERSION_ANSI_Q1 module call
	char fkstmtLabel[MAX_STMT_LABEL_LEN+1]; // Used for FK methods
	long rowsMPFetched; // # of tables to check for MP metadata
	long rowsFKFetched; // # of rows fetched from foreign keys method query 1
	BOOL queryMP = FALSE; // Flag whether to pull MP metadata
	SQLValueList_def tempOutputValueList; // Intermediate and temp output value lists
	// Null out tempOutputValueList
	CLEAR_LIST(tempOutputValueList);
	long curRowNo = 0;
	long numOfCols = 0;
	long curColNo = 0;
	char schemaNmAct[MAX_ANSI_NAME_LEN+1];
	char tableNmAct[MAX_ANSI_NAME_LEN+1];
	char colNmAct[MAX_ANSI_NAME_LEN+1];
	char ordinalAct[10];
	char obuidAct[MAX_ANSI_NAME_LEN+1];
	char riuidAct[MAX_ANSI_NAME_LEN+1];
	short sqlStmtType;
	SQLItemDescList_def lc_outputDesc;
	BOOL tableViewGiven = FALSE;
	BOOL systemTableGiven = FALSE;
	short namelen;
	SQLValue_def *SQLValue;
	char userCatalogNm[MAX_ANSI_NAME_LEN+1];
	char guardianNm[36]; // 8+1+8+1+8+1+8+1
	char inParam[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3]; // catalog len + '.' + schema len + '.' + table len +'\0'
	char MapDataType[2] = "0";

	tmpBuf[0] = '\0';
	fkstmtLabel[0] = '\0';
	userCatalogNm[0] = '\0';
	char catStmtLabelNew[128] = {'\0'}; // Trying to support max module name length

	// Build the input parameter list for the metadata query.
	inputParam[0] = srvrGlobal->SystemCatalog;
	if (catalogNm == NULL)
		strcpy(catalogNmNoEsc,"");
		//strcpy(catalogNmNoEsc,srvrGlobal->DefaultCatalog);
		// There is an OR condition with the catalog so it can be ""
	else
		strcpy(catalogNmNoEsc, catalogNm);
	inputParam[1] = catalogNmNoEsc;

	if (schemaNm == NULL )
		strcpy(schemaNmNoEsc,"%");
	else
		strcpy(schemaNmNoEsc, schemaNm);
	if (schemaNm != NULL )
	{
		convertWildcardNoEsc(metadataId, FALSE, schemaNm, schemaNmNoEsc);
		convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
	}
	inputParam[2] = schemaNmNoEsc;
	// NOTE(review): when schemaNm is NULL, expSchemaNm is never written and is
	// published here uninitialized — TODO confirm callers always pass schemaNm.
	inputParam[3] = expSchemaNm;
	inputParam[4] = NULL;
	sqlStmtType = TYPE_SELECT;

	// NOTE(review): tableParam[0]/tableParam[1] are read here without ever
	// having been assigned — passing them to %s is undefined behavior.
	DEBUG_OUT(DEBUG_LEVEL_METADATA,("SQL_SCHEMAVERSION_NEW_ANSI_Q1 tableParams= |%s|%s| inputParams= |%s|",
		tableParam[0],tableParam[1],
		inputParam[0]));

	if (APIType != SQL_TXN_ISOLATION)
	{
		// First attempt: run the catalog-API metadata query.
		retCode = executeAndFetchSMDQuery(objtag_, call_id_, dialogueId, APIType, "SQL_CATALOG_API", sqlStmtType,
			&tableParam[0], &inputParam[0],
			catalogNm, schemaNm, tableNm, columnNm, tableTypeList, metadataId, outputDesc,
			&executeException, &fetchException, sqlWarning, rowsAffected, outputValueList, stmtId);
		if (retCode != CEE_SUCCESS && writeServerException(retCode,exception_,&prepareException,&executeException,&fetchException) != TRUE)
		{
			odbc_SQLSvc_Close_sme_(objtag_, call_id_, &closeException, dialogueId, *stmtId, SQL_DROP, rowsAffected, sqlWarning);
			FUNCTION_RETURN_VOID(("executeAndFetchSMDQuery() and writeServerException() Failed"));
		}
		if (retCode == FETCH_EXCEPTION && fetchException.exception_nr == odbc_SQLSvc_FetchN_SQLNoDataFound_exn_)
		{
			odbc_SQLSvc_Close_sme_(objtag_, call_id_, &closeException, dialogueId, *stmtId, SQL_DROP, rowsAffected, sqlWarning);
			FUNCTION_RETURN_VOID(("executeAndFetchSMDQuery() FETCH_EXCEPTION - SQLNoDataFound Expected"));
		}
		SQLValue = (SQLValue_def *)outputValueList->_buffer;
		if (SQLValue->dataInd == -1) //does not come here
		{
			// First value was NULL: retry the schema-version query against
			// the system schema.
			inputParam[0] = srvrGlobal->SystemCatalog;
			inputParam[2] = "SYSTEM_SCHEMA";
			// NOTE(review): NULL used as memset's int fill value (works, but
			// reads oddly; value 0 is what is meant).
			memset(outputValueList, NULL, sizeof(SQLValueList_def));
			retCode = executeAndFetchSMDQuery(objtag_, call_id_, dialogueId, APIType, "SQL_SCHEMAVERSION_NEW_ANSI_Q1", sqlStmtType,
				&tableParam[0], &inputParam[0],
				catalogNm, schemaNm, tableNm, columnNm, tableTypeList, metadataId, outputDesc,
				&executeException, &fetchException, sqlWarning, rowsAffected, outputValueList, stmtId);
			if (retCode != CEE_SUCCESS && writeServerException(retCode,exception_,&prepareException,&executeException,&fetchException) != TRUE)
			{
				odbc_SQLSvc_Close_sme_(objtag_, call_id_, &closeException, dialogueId, *stmtId, SQL_DROP, rowsAffected, sqlWarning);
				FUNCTION_RETURN_VOID(("executeAndFetchSMDQuery() and writeServerException() Failed"));
			}
			if (retCode == FETCH_EXCEPTION && fetchException.exception_nr == odbc_SQLSvc_FetchN_SQLNoDataFound_exn_)
			{
				odbc_SQLSvc_Close_sme_(objtag_, call_id_, &closeException, dialogueId, *stmtId, SQL_DROP, rowsAffected, sqlWarning);
				FUNCTION_RETURN_VOID(("executeAndFetchSMDQuery() and writeServerException() Failed"));
			}
			SQLValue = (SQLValue_def *)outputValueList->_buffer;
			if (SQLValue->dataInd == -1)
			{
				// Still NULL: report an invalid schema version to the caller.
				exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
				exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_SCHEMA_VERSION;
				FUNCTION_RETURN_VOID(("SQLValue->dataInd == -1"));
			}
		}
	}
	FUNCTION_RETURN_VOID((NULL));
}
// Super-paramagnetic-style hierarchical clustering of n d-dimensional points.
//
// V            flattened coordinates of the n points
// k            neighbor count for the mutual-KNN graph
// minClusSize  smallest cluster size worth reporting
// Nmc          number of largest clusters tracked per evaluated temperature
// Outputs (allocated here, owned by the caller):
//   *Nc  number of clusters found
//   *Cn  size of each cluster
//   *C   per-cluster arrays of member point ids
//   *P   parent cluster id of each cluster (the hierarchy)
void SPC_HIERARCHICAL(edge_t * V, nid_t n, int d, int k, nid_t minClusSize, int Nmc, int * Nc, nid_t ** Cn, nid_t *** C, nid_t ** P) {
	// Declare variables
	FS_TREE * FST;
	Graph * D, * MSG, * Tb;
	edge_t tmin, tmax, aKi, aWs, ival, b_thresh, tval, maxval, cval, Tsp;
	edge_t minTempStep, minTempStep_end, ta, tb, tc, Tspa;
	edge_t * Teval;
	nid_t tid, maxid, mcid, Sma, Smb, Smc, cid, cid2, tcs;
	nid_t * maxCor, * Ct, * MCS, * MCI, * newC;
	List * S;
	int i, j, init_temps, ei, scan_temps, scan_print, ilc, ncf, Njumps;
	int p, jj, maxPosClusts, llci, hier_print, plt, nsplits;
	// NOTE(review): fixed capacity, but sortTemps() is called with ei, which
	// can exceed 1000 after repeated doubleArraySizes() — confirm the bound.
	int SI[1000];
	int ** Ch;
	int * ChN;

	//0. Find the fair-split tree
	//mexPrintf("Computing fair-split tree...\n"); mexEvalString("drawnow;");
	FST = FAIR_SPLIT(V, n, d); //compute the fair split tree
	//mexPrintf(" Finished.\n"); mexEvalString("drawnow;");

	// 1. Find mutual K Nearest Neighbors
	mexPrintf("Finding mutual K Nearest Neighbors...\n"); mexEvalString("drawnow;");
	D = NEW_ALGRAPH(n,0); //graph of distances between mutual KNN
	FS_AMKNN(FST, D, n, k); //compute the approx MKNN using the fair-split tree
	mexPrintf(" Finished.\n"); mexEvalString("drawnow;");

	// 2. Find Approximate Minimum Spanning Graph
	//mexPrintf("Approximating the minimum spanning graph...\n"); mexEvalString("drawnow;");
	MSG = NEW_ALGRAPH(n,0); //allocate new graph for min span graph
	FS_AEMSG(FST, MSG); //compute approx. min spanning graph using fair-split tree
	//mexPrintf(" Finished.\n"); mexEvalString("drawnow;");
	CLEAR_FS_TREE(FST); //don't need the tree anymore

	// 3. Superimpose KNN and MST
	ALG_ORIP(D, MSG); //OR the edges of each graph, store in D
	ALG_CLEAR(MSG); //free the MST from mem
	// NOTE(review): D itself is never released anywhere in this function —
	// possible leak; confirm whether ALG_CLEAR(D) belongs in the cleanup.

	// 4. Calculate interaction values between each neighbor
	b_thresh = 0.5; //probability threshold for edge
	aKi = 1.0/(2.0*((float) D->ne)/((float) D->n)); //inverse of avg. number of neighbors
	aWs = ALG_AVG_EDGE_WEIGHT(D); //the average distance between neighbors
	aWs = 2.0*aWs*aWs; //twice the squared average distance between neighbors
	Tb = NEW_ALGRAPH(n, 0); //allocate new graph for break temps
	// BUGFIX: tmax/tmin were read uninitialized in the loop below, so
	// Tsp = tmax (and every temperature derived from it) was garbage.
	// Every ival computed below is strictly positive, so 0 is a safe floor
	// for tmax. (tmin is never read after this loop.)
	tmax = 0;
	tmin = 0;
	for (i=0; i<D->n; i++) { //for each node
		for (j=0; j<LIST_LENGTH(D->nodes[i]); j++) { //for each edge from that node
			tid = LIST_GET_ID(D->nodes[i], j); //id of edge's endpoint
			if (i < tid) { //undir graph, so only worry about lesser->larger id edges
				tval = LIST_GET_VAL(D->nodes[i], j); //distance between i and tid
				ival = aKi*exp(-tval*tval/aWs); //this interaction strength
				ival = -ival/log(1.0-b_thresh); //the temp above which this edge breaks
				ival = 1e3*ival*ival*ival; //cubing it and *1k seems to work well just to get a more linear eval
				if (ival > tmax) tmax = ival; //store max edge val
				if (ival < tmin) tmin = ival; //store min val
				ALG_ADD_EDGE(Tb, i, tid, ival); //add the interaction to the graph
			}
		}
	}

	// Get the max-correlation neighbor of each point
	maxCor = malloc(Tb->n * sizeof(nid_t)); //id of the max-cor point for each point
	for (i=0; i<Tb->n; i++) { //for each node
		maxid = 0; maxval = 0;
		// NOTE(review): scan starts at j=1, skipping edge 0 — confirm intentional.
		for (j=1; j<LIST_LENGTH(Tb->nodes[i]); j++) { //for each edge
			mcid = LIST_GET_ID(Tb->nodes[i], j); //get this child's id
			cval = LIST_GET_VAL(Tb->nodes[i], j); //get child's value
			if (cval > maxval) { maxval = cval; maxid = mcid; }
		}
		maxCor[i] = maxid;
	}

	// 5. Guestimate SPM-to-Paramagnetic phase transition temperature
	Tsp = tmax;

	// 6. Evaluate at the theoretical SPM->PM temp
	init_temps = 100;
	ei = 0; //index of current evaluation
	S = NEW_LIST(10); //dfs stack of nodes to visit during subgraphs search
	Teval = malloc(init_temps*sizeof(edge_t)); //temps which were evaluated
	Ct = malloc(n*init_temps*sizeof(nid_t)); //clusterids at each temp
	MCS = malloc(Nmc*init_temps*sizeof(nid_t)); //max cluster sizes
	MCI = malloc(Nmc*init_temps*sizeof(nid_t)); //ids of the max clusters
	THRESH_SUBGRAPHS_SIZES(Tb, maxCor, Tsp, Nmc, S, Ct, MCS, MCI); //find the cluster ids and sizes of the largest clusters at the theoretical temp
	*Teval = Tsp; //store where we evaluated at
	ei++; //increment evaluation index

	// 6.5 "Evaluate" at T=0 (stored in evaluation slot 1)
	for (i=0; i<n; i++) { *(Ct+n+i) = 1; } //all points are of the same cluster
	for (i=1; i<Nmc; i++) { *(MCI+Nmc+i) = -1; } //all points are of the same cluster
	for (i=1; i<Nmc; i++) { *(MCS+Nmc+i) = 0; } //all other cluster sizes are 0
	*(MCS+Nmc) = n; //at lowest temp, largest cluster consists of all points
	*(MCI+Nmc) = 1; //and it has id of 1
	*(Teval+1) = 0.0;
	ei++;

	// 7. Find the true SPM->PM phase transition temperature
	//mexPrintf("Finding true SPM->PM temp...\n");
	minTempStep_end = Tsp/100;
	ta=0; tb=Tsp; //bound temps for binary search
	Sma=n; Smb=*MCS; //max cluster size at bound temps
	while ( tb-ta > minTempStep_end ) {
		if ( Smb > minClusSize ) { //SPM->PM temp is above our bracket
			ta = tb; //set A to B
			Sma = Smb; //set A to B
			tb = 2*tb; //extend bracket range
			*(Teval+ei) = tb; //evaluate at new T
			THRESH_SUBGRAPHS_SIZES(Tb, maxCor, tb, Nmc, S, Ct+ei*n, MCS+ei*Nmc, MCI+ei*Nmc);
			Smb = *(MCS+ei*Nmc); //store the max cluster size @ that temp
		} else { //SPM->PM temp is below top bracket
			tc = (ta+tb)/2; //midpoint temperature between brackets
			*(Teval+ei) = tc; //evaluate at midpoint
			THRESH_SUBGRAPHS_SIZES(Tb, maxCor, tc, Nmc, S, Ct+ei*n, MCS+ei*Nmc, MCI+ei*Nmc);
			Smc = *(MCS+ei*Nmc); //store the max cluster size @ that temp
			if ( Smc < minClusSize ) { //SPM->PM is between A and C
				tb = tc; //set B to C
				Smb = Smc;
			} else { //SPM->PM is between C and B
				ta = tc; //set A to C
			}
		}
		ei++; //increment evaluation index
		if (ei >= init_temps) { //double array sizes if we need more space
			doubleArraySizes(&Teval, &Ct, &MCS, &MCI, &init_temps, n, Nmc);
		}
	}
	Tspa = tb; //the actual SPM->PM transtion temperature
	//mexPrintf(" Finished.\n");

	// 8. Do an initial scan across temperatures
	scan_temps = 50;
	scan_print = scan_temps/10;
	mexPrintf("Performing initial scan over temperatures...\n"); mexEvalString("drawnow;");
	for (i=1; i<scan_temps; i++) {
		tc = i*Tspa/scan_temps; //linspace from 0 to SPM->PM
		*(Teval+ei) = tc; //evaluate at each temp
		THRESH_SUBGRAPHS_SIZES(Tb, maxCor, tc, Nmc, S, Ct+ei*n, MCS+ei*Nmc, MCI+ei*Nmc);
		ei++;
		if (ei >= init_temps) { //double array sizes if we need more space
			doubleArraySizes(&Teval, &Ct, &MCS, &MCI, &init_temps, n, Nmc);
		}
		scan_print = scan_temps/10;
		if (i%scan_print==0) { //print progress
			//mexPrintf(" %.1f percent complete\n", 100 * (float) i / (float) scan_temps);
			//mexEvalString("drawnow;");
		}
	}
	mexPrintf(" Finished.\n"); mexEvalString("drawnow;");

	// 9. Find the jump points for each i-th largest cluster
	minTempStep = Tspa/800;
	Njumps = 0; //number of jumps so far
	printf("Zooming in on cluster splits...\n"); mexEvalString("drawnow;");
	for (ilc=1; ilc<Nmc; ilc++) { //for each of the i-th largest clusters
		//printf(" Looking for %d-largest cluster jumps (out of %d)...\n", ilc+1, Nmc);
		ncf = 1;
		//mexEvalString("drawnow;");
		sortTemps(Teval, ei, SI); //get the order of Teval in SI
		i = 1;
		while ( Teval[SI[i]] < Tspa ) { //while we haven't reached the SPM->PM temp
			if ( ( *(MCS+Nmc*SI[i]+ilc) > *(MCS+Nmc*SI[i-1]+ilc) ) //if i-th largest cluster increased in size since last timestep
			  && ( *(MCS+Nmc*SI[i]+ilc) > minClusSize ) // and it's not too small,
			  && ( Teval[SI[i]]-Teval[SI[i-1]] > minTempStep ) ) { // and we haven't already zoomed in here
				// Then do a binary search for jump point between these two points!
				//printf(" Found total of %d, this one at T=%f\n", ncf++, Teval[SI[i]]);
				//mexEvalString("drawnow;");
				ta = Teval[SI[i-1]]; tb = Teval[SI[i]]; //bracket the jump point
				Sma = *(MCS+Nmc*SI[i-1]+ilc); Smb = *(MCS+Nmc*SI[i]+ilc);
				while ( tb-ta > minTempStep ) {
					tc = (ta+tb)/2; //midpoint
					*(Teval+ei) = tc; //evaluate at midpoint
					THRESH_SUBGRAPHS_SIZES(Tb, maxCor, tc, Nmc, S, Ct+ei*n, MCS+ei*Nmc, MCI+ei*Nmc);
					Smc = *(MCS+ei*Nmc+ilc); //store the max cluster size @ that temp
					ei++;
					if (ei >= init_temps) { //double array sizes if we need more space
						doubleArraySizes(&Teval, &Ct, &MCS, &MCI, &init_temps, n, Nmc);
					}
					if (Sma < Smc && Smc > minClusSize) { //there is a jump between A and C and C is above size threshold
						tb = tc; //set B to C
						Smb = Smc;
					} else { //jump point is between C and B
						ta = tc; //set A to C
						Sma = Smc;
					}
				}
				sortTemps(Teval, ei, SI); //get the order of Teval in SI
			}
			i++; //move on to next temp step
		}
	}
	printf(" Finished.\n"); mexEvalString("drawnow;");
	printf("\nRan %d evaluations total\n\n", ei); mexEvalString("drawnow;");

	// 10. Find the clusters which have jumped at each jump point, and hierarchical structure
	//printf("Determining hierarchical structure of clusters...\n");
	maxPosClusts = 200; //not gonna be more than that many clusters, right?
	// NOTE(review): *Nc is never checked against maxPosClusts below — the
	// output arrays overflow if more than 200 clusters split off.
	*Nc = 0;
	*Cn = malloc(maxPosClusts*sizeof(nid_t)); //allocate list of cluster sizes
	*C = malloc(maxPosClusts*sizeof(nid_t *)); //allocate list of pointers to cluster id lists
	*P = malloc(maxPosClusts*sizeof(nid_t)); //allocate list of parent cluster ids
	Ch = malloc(maxPosClusts*sizeof(nid_t *)); //list of pointers to child list for each cluster
	for (p=0; p<maxPosClusts; p++) { Ch[p] = NULL; } //initialize to null
	ChN = malloc(maxPosClusts*sizeof(nid_t)); //number of children for each cluster
	for (p=0; p<maxPosClusts; p++) { ChN[p] = 0; } //initialize to zero
	llci = 0; //index of the temp of the last break from the largest clus
	hier_print = ei/5;
	for (i=1; i<ei; i++) { //for each evaluation,
		//if (i%hier_print==0) { //print progress occasionally
		//	printf(" %.1f percent complete\n", 100.0 * ((float) i) / ((float) ei));
		//}
		// If the gap is small enough, there may be a split here
		if ( Teval[SI[i]]-Teval[SI[i-1]] < 2*minTempStep ) {
			nsplits = 0;
			for (j=0; j<Nmc; j++) { //for each of the i-th largest clusters
				// Find which cluster at the last tempstep was its parent
				plt = -1; //parent id at the last temperature
				for (jj=Nmc-1; jj>=0 && plt<0; jj--) {
					if (isChild(j, i, jj, i-1, MCI, Nmc, SI, Ct, n)) { plt = jj; }
				}
				// Was there a jump?
				// NOTE(review): if no parent is found, plt stays -1 and the
				// read below indexes one element before the row — confirm
				// isChild() always matches some jj.
				tcs = *(MCS+Nmc*SI[i]+j); //this cluster's size at current temp
				if ((*(MCS+Nmc*SI[i-1]+plt)-tcs) > minClusSize && tcs>minClusSize) { //if there was a jump and this cluster is large enough to consider
					// Make a new cluster of clus(j,i)
					nsplits++; //count the number of clusters which split
					newC = malloc(tcs*sizeof(nid_t)); //list of cluster IDs for the new cluster
					(*C)[*Nc] = newC; //make corresponding element of C point to that list
					(*Cn)[*Nc] = tcs; //store the size of the new cluster
					cid2 = 0; //reset counter
					for (cid=0; cid<n; cid++) { //for each point,
						//if this point is in the new cluster
						if ( *(Ct+n*SI[i]+cid) == *(MCI+Nmc*SI[i]+j) ) {
							newC[cid2++] = cid; //store the ids of each point in the cluster
						}
					}
					(*Nc)++; //increment the number of clusters found
					// Find its parent in the list of existing clusters
					p = parentSearch(newC, tcs, *C, *Cn, *P, *Nc-1);
					(*P)[*Nc-1] = p; //set element in P to the parent clus id of new clus
					// Print info
					//printf("At T=%f, cluster id=%d of size %d broke off from %d\n", Teval[SI[i]], *Nc-1, tcs, p);
				}
			}
			if (nsplits == 1) { //can't have just one! Need to have two which split off
				//so delete the most recently added cluster
				free(newC);
				(*Nc)--; //decrement the number of clusters found
				(*C)[*Nc] = NULL; //make corresponding element of C point to that list
				(*Cn)[*Nc] = -1; //store the size of the new cluster
			}
		}
	}
	//printf(" Finished.\n"); mexEvalString("drawnow;");

	// Done, clean up
	ALG_CLEAR(Tb);
	CLEAR_LIST(S);
	free(maxCor);
	free(Teval);
	free(Ct);
	free(MCS);
	free(MCI);
	free(Ch);  // BUGFIX: was leaked
	free(ChN); // BUGFIX: was leaked
	mexPrintf("Done.\n");
}