std::string LFGQueue::DumpQueueInfo() const { uint32 players = 0; uint32 groups = 0; uint32 playersInGroup = 0; for (uint8 i = 0; i < 2; ++i) { LfgGuidList const& queue = i ? newToQueueStore : currentQueueStore; for (LfgGuidList::const_iterator it = queue.begin(); it != queue.end(); ++it) { uint64 guid = *it; if (IS_GROUP(guid)) { groups++; playersInGroup += sLFGMgr->GetPlayerCount(guid); } else players++; } } std::ostringstream o; o << "Queued Players: " << players << " (in group: " << playersInGroup << ") Groups: " << groups << "\n"; return o.str(); }
std::string LfgQueue::DumpQueueInfo() const { uint32 players = 0; uint32 groups = 0; uint32 playersInGroup = 0; for (uint8 i = 0; i < 2; ++i) { LfgGuidList const& queue = i ? m_newToQueue : m_currentQueue; for (LfgGuidList::const_iterator it = queue.begin(); it != queue.end(); ++it) { uint64 guid = *it; if (IS_GROUP(guid)) { groups++; if (Group const* group = sGroupMgr->GetGroupByGUID(GUID_LOPART(guid))) playersInGroup += group->GetMembersCount(); else playersInGroup += 2; // Shouldn't happen but just in case } else players++; } } std::ostringstream o; o << "Queued Players: " << players << "(in group: " << playersInGroup << ") Groups: " << groups << "\n"; return o.str(); }
// // Next_Path_Throws: C // // Evaluate next part of a path. // REBOOL Next_Path_Throws(REBPVS *pvs) { REBPEF dispatcher; REBVAL temp; VAL_INIT_WRITABLE_DEBUG(&temp); // Path must have dispatcher, else return: dispatcher = Path_Dispatch[VAL_TYPE_0(pvs->value)]; if (!dispatcher) return FALSE; // unwind, then check for errors pvs->item++; //Debug_Fmt("Next_Path: %r/%r", pvs->path-1, pvs->path); // object/:field case: if (IS_GET_WORD(pvs->item)) { pvs->selector = GET_MUTABLE_VAR_MAY_FAIL(pvs->item); if (IS_UNSET(pvs->selector)) fail (Error(RE_NO_VALUE, pvs->item)); } // object/(expr) case: else if (IS_GROUP(pvs->item)) { if (DO_VAL_ARRAY_AT_THROWS(&temp, pvs->item)) { *pvs->value = temp; return TRUE; } pvs->selector = &temp; } else // object/word and object/value case: pvs->selector = pvs->item; switch (dispatcher(pvs)) { case PE_OK: break; case PE_SET_IF_END: if (pvs->opt_setval && IS_END(pvs->item + 1)) { *pvs->value = *pvs->opt_setval; pvs->opt_setval = NULL; } break; case PE_NONE: SET_NONE(pvs->store); case PE_USE_STORE: pvs->value = pvs->store; break; default: assert(FALSE); } if (NOT_END(pvs->item + 1)) return Next_Path_Throws(pvs); return FALSE; }
/* Suggested optimization: The loops take a lot of the time.  Collapse them
 * into one, have them set flags for the things that have been found true.
 * Harder to maintain. */
/* Return TRUE as soon as any object in the current selection is a group. */
static gboolean
diagram_selected_any_groups(Diagram *dia)
{
  GList *item;

  for (item = dia->data->selected; item != NULL; item = item->next) {
    if (IS_GROUP((DiaObject *) item->data))
      return TRUE;
  }
  return FALSE;
}
/* Search argv[1..argc-1] for an attribute matching KEY (either a bare
   "key" token or "key=value", case-insensitively, after skipping any
   leading "group" characters).  Each match is pushed onto the global
   tag_attr stack (storing the value text, or a copy of KEY itself for a
   valueless attribute) and removed from argv[], decrementing *ptr_argc.
   If LOWERCASE is set, the stored text is folded to lower case.
   Returns the text of the most recent match, or NULL if none found.

   NOTE(review): matched entries keep any leading group characters
   prepended to the stored value text (special_chars bytes copied from the
   raw token).  Presumably intentional so the caller sees the grouping —
   confirm against callers. */
const char *
predefined_attribute (const char *key, int *ptr_argc, token_data **argv,
                      boolean lowercase)
{
  var_stack *next;
  char *cp, *sp, *lower;
  int i, j, special_chars;
  boolean found = FALSE;

  i = 1;
  while (i<*ptr_argc)
    {
      /* Skip (and count) leading group characters of the raw token. */
      special_chars = 0;
      sp = TOKEN_DATA_TEXT (argv[i]);
      while (IS_GROUP (*sp))
        {
          sp++;
          special_chars++;
        }

      /* Match either "key" exactly or "key=..." by prefix. */
      cp = strchr (sp, '=');
      if ((cp == NULL && strcasecmp (sp, key) == 0)
          || (cp != NULL && strncasecmp (sp, key, strlen (key)) == 0
              && *(sp + strlen (key)) == '='))
        {
          found = TRUE;
          next = (var_stack *) xmalloc (sizeof (var_stack));
          next->prev = tag_attr;
          if (cp)
            {
              /* Store the leading group chars followed by the value text. */
              next->text = (char *) xmalloc (special_chars + strlen (cp+1) + 1);
              if (special_chars)
                strncpy (next->text, TOKEN_DATA_TEXT (argv[i]), special_chars);
              strcpy (next->text+special_chars, cp+1);
            }
          else
            next->text = xstrdup (key);
          tag_attr = next;
          if (lowercase)
            for (lower=tag_attr->text; *lower != '\0'; lower++)
              *lower = tolower (*lower);

          /* remove this attribute from argv[]. */
          for (j=i+1; j<=*ptr_argc; j++)
            argv[j-1] = argv[j];
          (*ptr_argc)--;
          /* NOTE(review): after the removal above, argv[i] now holds the
             next token, yet i is still incremented below — so the element
             shifted into slot i is never examined.  Looks like an off-by-one
             when the same key appears in adjacent slots; confirm whether
             duplicate adjacent attributes are possible here. */
        }
      i++;
    }
  return (found ? tag_attr->text : NULL );
}
/** Turn highlighting off for every object in the list, descending into
 *  group objects recursively.  Helper function for highlight_reset_all. */
static void
highlight_reset_objects(GList *objects, Diagram *dia)
{
  GList *iter = objects;

  while (iter != NULL) {
    DiaObject *obj = (DiaObject *) iter->data;

    highlight_object_off(obj, dia);
    if (IS_GROUP(obj))
      highlight_reset_objects(group_objects(obj), dia);

    iter = g_list_next(iter);
  }
}
/* Obtain an interactor from window W, either by reusing an unused slot or
 * by growing the interactor array, then zero-initialize SIZE bytes of
 * private data for it (if SIZE is nonzero) and mark it used with the
 * default event mask.  Returns the interactor, or 0 on any failure
 * (NULL window, allocation failure, or no slot obtained).
 *
 * NOTE(review): if the private-data allocation fails after an interactor
 * slot was selected, the slot is left untouched (never marked used), so
 * nothing needs to be rolled back — the IS_USED flag is only set on the
 * success path below. */
tdmInteractor
_dxfAllocateInteractor (tdmInteractorWin W, int size)
{
  int i ;
  tdmInteractor I = (tdmInteractor) 0 ;

  ENTRY(("_dxfAllocateInteractor(0x%x, %d)", W, size));

  if (! W) goto error ;

  if (W->numUsed == W->numAllocated)
      /* create more interactors */
      I = _allocateMoreInteractors(W) ;
  else
      /* find an unused interactor */
      for (i = 0 ; i < W->numAllocated ; i++)
          if (! IS_USED(W->Interactors[i]))
            {
              I = W->Interactors[i] ;
              break ;
            }

  if (! I) goto error ;

  if (size)
    {
      /* allocate interactor private data (bzero clears it to all-zero) */
      if (! (PRIVATE(I) = tdmAllocateLocal(size)))
        {
          goto error ;
        }
      else
        {
          bzero ((char *) PRIVATE(I), size) ;
        }
    }

  /* wire the interactor to its window and reset its bookkeeping flags */
  WINDOW(I) = W ;
  AUX(I) = (tdmInteractor) 0 ;
  IS_AUX(I) = 0 ;
  IS_GROUP(I) = 0 ;
  IS_USED(I) = 1 ;
  W->numUsed++ ;

  /*
   *  Default event mask
   */
  I->eventMask = DXEVENT_LEFT | DXEVENT_MIDDLE | DXEVENT_RIGHT;

  EXIT(("I = 0x%x", I));
  return I ;

 error:

  EXIT(("ERROR"));
  return (tdmInteractor) 0 ;
}
/* Ungroup every group object in the current selection, replacing each
 * group with its member objects (selected afterwards).  Records an
 * undoable change per group and sets a transaction point if anything
 * was ungrouped. */
void diagram_ungroup_selected(Diagram *dia)
{
  GList *copy_of_selection;
  GList *iter;
  int ungrouped_something = 0;

  if (g_list_length(dia->data->selected) < 1) {
    message_error("Trying to ungroup with no selected objects.");
    return;
  }

  /* Iterate over a copy: ungrouping mutates the live selection list. */
  copy_of_selection = g_list_copy(dia->data->selected);

  for (iter = copy_of_selection; iter != NULL; iter = g_list_next(iter)) {
    DiaObject *obj = (DiaObject *) iter->data;

    if (IS_GROUP(obj)) {
      Change *change;
      GList *members;
      int obj_index;

      /* Fix selection */
      diagram_unselect_object(dia, obj);

      members = group_objects(obj);
      obj_index = layer_object_get_index(dia->data->active_layer, obj);
      change = undo_ungroup_objects(dia, members, obj, obj_index);
      (change->apply)(change, dia);
      diagram_select_list(dia, members);

      ungrouped_something = 1;
    }
  }
  g_list_free(copy_of_selection);

  if (ungrouped_something) {
    diagram_modified(dia);
    diagram_flush(dia);
    undo_set_transactionpoint(dia->undo);
  }
}
/* given the model and the path, construct the icon
 *
 * GtkTreeCellDataFunc: derives a pixbuf for the row's object (inline
 * GdkPixbuf data, XPM data, a pixmap file, or the generic group icon)
 * and assigns it to the cell renderer.  The local reference is dropped
 * after g_object_set has taken its own. */
static void
_dtv_cell_pixbuf_func (GtkCellLayout   *layout,
                       GtkCellRenderer *cell,
                       GtkTreeModel    *tree_model,
                       GtkTreeIter     *iter,
                       gpointer         data)
{
  DiaObject *object;
  GdkPixbuf *pixbuf = NULL;

  gtk_tree_model_get (tree_model, iter, OBJECT_COLUMN, &object, -1);
  if (object) {
    if (object->type->pixmap != NULL) {
      /* Serialized inline GdkPixbuf data begins with the "GdkP" magic
       * bytes; anything else in this field is assumed to be XPM data. */
      if (strncmp((char *)object->type->pixmap, "GdkP", 4) == 0)
        pixbuf = gdk_pixbuf_new_from_inline(-1, (guint8*)object->type->pixmap, TRUE, NULL);
      else /* must be an XPM */
        pixbuf = gdk_pixbuf_new_from_xpm_data(object->type->pixmap);
    } else if (object->type->pixmap_file != NULL) {
      GError *error = NULL;

      pixbuf = gdk_pixbuf_new_from_file (object->type->pixmap_file, &error);
      if (error) {
        g_warning ("%s", error->message);
        g_error_free (error);
      }
    } else if (IS_GROUP(object)) {
      pixbuf = gdk_pixbuf_new_from_inline(-1, dia_group_icon, TRUE, NULL);
    }
  } else {
#if 0 /* these icons are not that useful */
    Layer *layer;
    gtk_tree_model_get (tree_model, iter, LAYER_COLUMN, &layer, -1);
    if (layer)
      pixbuf = gdk_pixbuf_new_from_inline(-1, dia_layers, TRUE, NULL);
    else /* must be diagram */
      pixbuf = gdk_pixbuf_new_from_inline(-1, dia_diagram_icon, TRUE, NULL);
#endif
  }
  /* hand the pixbuf (possibly NULL) to the renderer, then release ours */
  g_object_set (cell, "pixbuf", pixbuf, NULL);
  if (pixbuf)
    g_object_unref (pixbuf);
}
/*
 * Add a shape to an AbstractGroupShape.
 *
 * Group shapes are delegated to addGroupShape(); plain node shapes are
 * appended to this group's content and the insertion hook is notified.
 */
void AbstractGroupShape::addShape ( AbstractNodeShape *ans )
{
    if ( IS_GROUP(ans) )
    {
        // Route nested groups through the dedicated insertion path.
        addGroupShape ( static_cast<AbstractGroupShape *>(ans) );
        return;
    }

#if DEBUG_ABSTRACT_GROUP_SHAPE_SUBGRP >= 1
    std::cout << "INSERTING node \'" << qPrintable(ans->id) << "\' in group \'"
              << qPrintable(this->id) << "\'\n";
#endif

    content.append ( ans );
    insertedSubNode ( ans );

#if DEBUG_ABSTRACT_GROUP_SHAPE_SUBGRP >= 1
    std::cout << "\n";
#endif
}
// // Do_Breakpoint_Throws: C // // A call to Do_Breakpoint_Throws does delegation to a hook in the host, which // (if registered) will generally start an interactive session for probing the // environment at the break. The `resume` native cooperates by being able to // give back a value (or give back code to run to produce a value) that the // call to breakpoint returns. // // RESUME has another feature, which is to be able to actually unwind and // simulate a return /AT a function *further up the stack*. (This may be // switched to a feature of a "STEP OUT" command at some point.) // REBOOL Do_Breakpoint_Throws( REBVAL *out, REBOOL interrupted, // Ctrl-C (as opposed to a BREAKPOINT) const REBVAL *default_value, REBOOL do_default ) { REBVAL *target = NONE_VALUE; REBVAL temp; VAL_INIT_WRITABLE_DEBUG(&temp); if (!PG_Breakpoint_Quitting_Hook) { // // Host did not register any breakpoint handler, so raise an error // about this as early as possible. // fail (Error(RE_HOST_NO_BREAKPOINT)); } // We call the breakpoint hook in a loop, in order to keep running if any // inadvertent FAILs or THROWs occur during the interactive session. // Only a conscious call of RESUME speaks the protocol to break the loop. // while (TRUE) { struct Reb_State state; REBCTX *error; push_trap: PUSH_TRAP(&error, &state); // The host may return a block of code to execute, but cannot // while evaluating do a THROW or a FAIL that causes an effective // "resumption". Halt is the exception, hence we PUSH_TRAP and // not PUSH_UNHALTABLE_TRAP. QUIT is also an exception, but a // desire to quit is indicated by the return value of the breakpoint // hook (which may or may not decide to request a quit based on the // QUIT command being run). // // The core doesn't want to get involved in presenting UI, so if // an error makes it here and wasn't trapped by the host first that // is a bug in the host. It should have done its own PUSH_TRAP. 
// if (error) { #if !defined(NDEBUG) REBVAL error_value; VAL_INIT_WRITABLE_DEBUG(&error_value); Val_Init_Error(&error_value, error); PROBE_MSG(&error_value, "Error not trapped during breakpoint:"); Panic_Array(CTX_VARLIST(error)); #endif // In release builds, if an error managed to leak out of the // host's breakpoint hook somehow...just re-push the trap state // and try it again. // goto push_trap; } // Call the host's breakpoint hook. // if (PG_Breakpoint_Quitting_Hook(&temp, interrupted)) { // // If a breakpoint hook returns TRUE that means it wants to quit. // The value should be the /WITH value (as in QUIT/WITH) // assert(!THROWN(&temp)); *out = *ROOT_QUIT_NATIVE; CONVERT_NAME_TO_THROWN(out, &temp, FALSE); return TRUE; // TRUE = threw } // If a breakpoint handler returns FALSE, then it should have passed // back a "resume instruction" triggered by a call like: // // resume/do [fail "This is how to fail from a breakpoint"] // // So now that the handler is done, we will allow any code handed back // to do whatever FAIL it likes vs. trapping that here in a loop. // DROP_TRAP_SAME_STACKLEVEL_AS_PUSH(&state); // Decode and process the "resume instruction" { struct Reb_Frame *frame; REBVAL *mode; REBVAL *payload; assert(IS_GROUP(&temp)); assert(VAL_LEN_HEAD(&temp) == RESUME_INST_MAX); mode = VAL_ARRAY_AT_HEAD(&temp, RESUME_INST_MODE); payload = VAL_ARRAY_AT_HEAD(&temp, RESUME_INST_PAYLOAD); target = VAL_ARRAY_AT_HEAD(&temp, RESUME_INST_TARGET); // The first thing we need to do is determine if the target we // want to return to has another breakpoint sandbox blocking // us. If so, what we need to do is actually retransmit the // resume instruction so it can break that wall, vs. transform // it into an EXIT/FROM that would just get intercepted. 
// if (!IS_NONE(target)) { #if !defined(NDEBUG) REBOOL found = FALSE; #endif for (frame = FS_TOP; frame != NULL; frame = frame->prior) { if (frame->mode != CALL_MODE_FUNCTION) continue; if ( frame != FS_TOP && FUNC_CLASS(frame->func) == FUNC_CLASS_NATIVE && ( FUNC_CODE(frame->func) == &N_pause || FUNC_CODE(frame->func) == &N_breakpoint ) ) { // We hit a breakpoint (that wasn't this call to // breakpoint, at the current FS_TOP) before finding // the sought after target. Retransmit the resume // instruction so that level will get it instead. // *out = *ROOT_RESUME_NATIVE; CONVERT_NAME_TO_THROWN(out, &temp, FALSE); return TRUE; // TRUE = thrown } if (IS_FRAME(target)) { if (NOT(frame->flags & DO_FLAG_FRAME_CONTEXT)) continue; if ( VAL_CONTEXT(target) == AS_CONTEXT(frame->data.context) ) { // Found a closure matching the target before we // reached a breakpoint, no need to retransmit. // #if !defined(NDEBUG) found = TRUE; #endif break; } } else { assert(IS_FUNCTION(target)); if (frame->flags & DO_FLAG_FRAME_CONTEXT) continue; if (VAL_FUNC(target) == frame->func) { // // Found a function matching the target before we // reached a breakpoint, no need to retransmit. // #if !defined(NDEBUG) found = TRUE; #endif break; } } } // RESUME should not have been willing to use a target that // is not on the stack. // #if !defined(NDEBUG) assert(found); #endif } if (IS_NONE(mode)) { // // If the resume instruction had no /DO or /WITH of its own, // then it doesn't override whatever the breakpoint provided // as a default. (If neither the breakpoint nor the resume // provided a /DO or a /WITH, result will be UNSET.) // goto return_default; // heeds `target` } assert(IS_LOGIC(mode)); if (VAL_LOGIC(mode)) { if (DO_VAL_ARRAY_AT_THROWS(&temp, payload)) { // // Throwing is not compatible with /AT currently. 
// if (!IS_NONE(target)) fail (Error_No_Catch_For_Throw(&temp)); // Just act as if the BREAKPOINT call itself threw // *out = temp; return TRUE; // TRUE = thrown } // Ordinary evaluation result... } else temp = *payload; } // The resume instruction will be GC'd. // goto return_temp; } DEAD_END; return_default: if (do_default) { if (DO_VAL_ARRAY_AT_THROWS(&temp, default_value)) { // // If the code throws, we're no longer in the sandbox...so we // bubble it up. Note that breakpoint runs this code at its // level... so even if you request a higher target, any throws // will be processed as if they originated at the BREAKPOINT // frame. To do otherwise would require the EXIT/FROM protocol // to add support for DO-ing at the receiving point. // *out = temp; return TRUE; // TRUE = thrown } } else temp = *default_value; // generally UNSET! if no /WITH return_temp: // The easy case is that we just want to return from breakpoint // directly, signaled by the target being NONE!. // if (IS_NONE(target)) { *out = temp; return FALSE; // FALSE = not thrown } // If the target is a function, then we're looking to simulate a return // from something up the stack. This uses the same mechanic as // definitional returns--a throw named by the function or closure frame. // // !!! There is a weak spot in definitional returns for FUNCTION! that // they can only return to the most recent invocation; which is a weak // spot of FUNCTION! in general with stack relative variables. Also, // natives do not currently respond to definitional returns...though // they can do so just as well as FUNCTION! can. // *out = *target; CONVERT_NAME_TO_THROWN(out, &temp, TRUE); return TRUE; // TRUE = thrown }
/**
   Check compatibilities between groups. If group is Matched proposal will be created

   Works recursively: for lists longer than two entries it first verifies
   that the list minus its front element is already compatible, then checks
   size limits, LFG-group count, mutual ignores, combinable roles, and the
   intersection of selected dungeons.  On a full match a proposal is built,
   its members are pulled out of the new/current queues, and it is handed
   to the LFG manager.

   @param[in]     check List of guids to check compatibilities
   @return LfgCompatibility type of compatibility
*/
LfgCompatibility LFGQueue::CheckCompatibility(LfgGuidList check)
{
    std::string strGuids = ConcatenateGuids(check);
    LfgProposal proposal;
    LfgDungeonSet proposalDungeons;
    LfgGroupsMap proposalGroups;
    LfgRolesMap proposalRoles;

    // Check for correct size
    if (check.size() > MAXGROUPSIZE || check.empty())
    {
        sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s): Size wrong - Not compatibles", strGuids.c_str());
        return LFG_INCOMPATIBLES_WRONG_GROUP_SIZE;
    }

    // Check all-but-new compatibility: the sub-list without the newest
    // entry must already be compatible, otherwise fail fast with its result.
    if (check.size() > 2)
    {
        uint64 frontGuid = check.front();
        check.pop_front();

        // Check all-but-new compatibilities (New, A, B, C, D) --> check(A, B, C, D)
        LfgCompatibility child_compatibles = CheckCompatibility(check);
        if (child_compatibles < LFG_COMPATIBLES_WITH_LESS_PLAYERS) // Group not compatible
        {
            sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) child %s not compatibles", strGuids.c_str(), ConcatenateGuids(check).c_str());
            SetCompatibles(strGuids, child_compatibles);
            return child_compatibles;
        }
        check.push_front(frontGuid);
    }

    // Check if more than one LFG group and number of players joining
    uint8 numPlayers = 0;
    uint8 numLfgGroups = 0;
    for (LfgGuidList::const_iterator it = check.begin(); it != check.end() && numLfgGroups < 2 && numPlayers <= MAXGROUPSIZE; ++it)
    {
        uint64 guid = (*it);
        LfgQueueDataContainer::iterator itQueue = QueueDataStore.find(guid);
        if (itQueue == QueueDataStore.end())
        {
            // Inconsistent state: listed as queued but no queue data exists.
            sLog->outError(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: [" UI64FMTD "] is not queued but listed as queued!", guid);
            RemoveFromQueue(guid);
            return LFG_COMPATIBILITY_PENDING;
        }

        // Store group so we don't need to call Mgr to get it later (if it's player group will be 0 otherwise would have joined as group)
        for (LfgRolesMap::const_iterator it2 = itQueue->second.roles.begin(); it2 != itQueue->second.roles.end(); ++it2)
            proposalGroups[it2->first] = IS_GROUP(itQueue->first) ? itQueue->first : 0;

        numPlayers += itQueue->second.roles.size();

        if (sLFGMgr->IsLfgGroup(guid))
        {
            if (!numLfgGroups)
                proposal.group = guid;
            ++numLfgGroups;
        }
    }

    // Group with less than MAXGROUPSIZE members always compatible
    if (check.size() == 1 && numPlayers != MAXGROUPSIZE)
    {
        sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) sigle group. Compatibles", strGuids.c_str());
        LfgQueueDataContainer::iterator itQueue = QueueDataStore.find(check.front());

        LfgCompatibilityData data(LFG_COMPATIBLES_WITH_LESS_PLAYERS);
        data.roles = itQueue->second.roles;
        LFGMgr::CheckGroupRoles(data.roles);

        UpdateBestCompatibleInQueue(itQueue, strGuids, data.roles);
        SetCompatibilityData(strGuids, data);
        return LFG_COMPATIBLES_WITH_LESS_PLAYERS;
    }

    if (numLfgGroups > 1)
    {
        sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) More than one Lfggroup (%u)", strGuids.c_str(), numLfgGroups);
        SetCompatibles(strGuids, LFG_INCOMPATIBLES_MULTIPLE_LFG_GROUPS);
        return LFG_INCOMPATIBLES_MULTIPLE_LFG_GROUPS;
    }

    if (numPlayers > MAXGROUPSIZE)
    {
        sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) Too much players (%u)", strGuids.c_str(), numPlayers);
        SetCompatibles(strGuids, LFG_INCOMPATIBLES_TOO_MUCH_PLAYERS);
        return LFG_INCOMPATIBLES_TOO_MUCH_PLAYERS;
    }

    // If it's single group no need to check for duplicate players, ignores, bad roles or bad dungeons as it's been checked before joining
    if (check.size() > 1)
    {
        // Merge the per-entry role maps, skipping any player that has an
        // ignore relationship with someone already merged.
        for (LfgGuidList::const_iterator it = check.begin(); it != check.end(); ++it)
        {
            const LfgRolesMap &roles = QueueDataStore[(*it)].roles;
            for (LfgRolesMap::const_iterator itRoles = roles.begin(); itRoles != roles.end(); ++itRoles)
            {
                LfgRolesMap::const_iterator itPlayer;
                for (itPlayer = proposalRoles.begin(); itPlayer != proposalRoles.end(); ++itPlayer)
                {
                    if (itRoles->first == itPlayer->first)
                        sLog->outError(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: ERROR! Player multiple times in queue! [" UI64FMTD "]", itRoles->first);
                    else if (sLFGMgr->HasIgnore(itRoles->first, itPlayer->first))
                        break;
                }
                if (itPlayer == proposalRoles.end())
                    proposalRoles[itRoles->first] = itRoles->second;
            }
        }

        // Any shortfall between raw count and merged map means ignores.
        if (uint8 playersize = numPlayers - proposalRoles.size())
        {
            sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) not compatible, %u players are ignoring each other", strGuids.c_str(), playersize);
            SetCompatibles(strGuids, LFG_INCOMPATIBLES_HAS_IGNORES);
            return LFG_INCOMPATIBLES_HAS_IGNORES;
        }

        LfgRolesMap debugRoles = proposalRoles;
        if (!LFGMgr::CheckGroupRoles(proposalRoles))
        {
            std::ostringstream o;
            for (LfgRolesMap::const_iterator it = debugRoles.begin(); it != debugRoles.end(); ++it)
                o << ", " << it->first << ": " << sLFGMgr->GetRolesString(it->second);

            sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) Roles not compatible%s", strGuids.c_str(), o.str().c_str());
            SetCompatibles(strGuids, LFG_INCOMPATIBLES_NO_ROLES);
            return LFG_INCOMPATIBLES_NO_ROLES;
        }

        // Intersect the dungeon selections of every entry in the list.
        LfgGuidList::iterator itguid = check.begin();
        proposalDungeons = QueueDataStore[*itguid].dungeons;
        std::ostringstream o;
        o << ", " << *itguid << ": (" << sLFGMgr->ConcatenateDungeons(proposalDungeons) << ")";
        for (++itguid; itguid != check.end(); ++itguid)
        {
            LfgDungeonSet temporal;
            LfgDungeonSet &dungeons = QueueDataStore[*itguid].dungeons;
            o << ", " << *itguid << ": (" << sLFGMgr->ConcatenateDungeons(dungeons) << ")";
            std::set_intersection(proposalDungeons.begin(), proposalDungeons.end(), dungeons.begin(), dungeons.end(), std::inserter(temporal, temporal.begin()));
            proposalDungeons = temporal;
        }

        if (proposalDungeons.empty())
        {
            sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) No compatible dungeons%s", strGuids.c_str(), o.str().c_str());
            SetCompatibles(strGuids, LFG_INCOMPATIBLES_NO_DUNGEONS);
            return LFG_INCOMPATIBLES_NO_DUNGEONS;
        }
    }
    else
    {
        // Single entry: reuse its stored roles/dungeons directly.
        uint64 gguid = *check.begin();
        const LfgQueueData &queue = QueueDataStore[gguid];
        proposalDungeons = queue.dungeons;
        proposalRoles = queue.roles;
        LFGMgr::CheckGroupRoles(proposalRoles);          // assign new roles
    }

    // Enough players?
    if (numPlayers != MAXGROUPSIZE)
    {
        sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) Compatibles but not enough players(%u)", strGuids.c_str(), numPlayers);
        LfgCompatibilityData data(LFG_COMPATIBLES_WITH_LESS_PLAYERS);
        data.roles = proposalRoles;

        for (LfgGuidList::const_iterator itr = check.begin(); itr != check.end(); ++itr)
            UpdateBestCompatibleInQueue(QueueDataStore.find(*itr), strGuids, data.roles);

        SetCompatibilityData(strGuids, data);
        return LFG_COMPATIBLES_WITH_LESS_PLAYERS;
    }

    proposal.queues = check;
    proposal.isNew = numLfgGroups != 1;

    if (!sLFGMgr->AllQueued(check))
    {
        sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) Group MATCH but can't create proposal!", strGuids.c_str());
        SetCompatibles(strGuids, LFG_COMPATIBLES_BAD_STATES);
        return LFG_COMPATIBLES_BAD_STATES;
    }

    // Create a new proposal
    proposal.cancelTime = time(NULL) + LFG_TIME_PROPOSAL;
    proposal.state = LFG_PROPOSAL_INITIATING;
    proposal.leader = 0;
    proposal.dungeonId = Las::Containers::SelectRandomContainerElement(proposalDungeons);

    bool leader = false;
    for (LfgRolesMap::const_iterator itRoles = proposalRoles.begin(); itRoles != proposalRoles.end(); ++itRoles)
    {
        // Assign new leader: a PLAYER_ROLE_LEADER candidate always wins over
        // a non-candidate; ties are broken randomly via urand(0, 1).
        if (itRoles->second & PLAYER_ROLE_LEADER)
        {
            if (!leader || !proposal.leader || urand(0, 1))
                proposal.leader = itRoles->first;
            leader = true;
        }
        else if (!leader && (!proposal.leader || urand(0, 1)))
            proposal.leader = itRoles->first;

        // Assign player data and roles
        LfgProposalPlayer &data = proposal.players[itRoles->first];
        data.role = itRoles->second;
        data.group = proposalGroups.find(itRoles->first)->second;
        if (!proposal.isNew && data.group && data.group == proposal.group) // Player from existing group, autoaccept
            data.accept = LFG_ANSWER_AGREE;
    }

    // Mark proposal members as not queued (but not remove queue data)
    for (LfgGuidList::const_iterator itQueue = proposal.queues.begin(); itQueue != proposal.queues.end(); ++itQueue)
    {
        uint64 guid = (*itQueue);
        RemoveFromNewQueue(guid);
        RemoveFromCurrentQueue(guid);
    }

    sLFGMgr->AddProposal(proposal);

    sLog->outDebug(LOG_FILTER_LFG, "LFGQueue::CheckCompatibility: (%s) MATCH! Group formed", strGuids.c_str());
    SetCompatibles(strGuids, LFG_COMPATIBLES_MATCH);
    return LFG_COMPATIBLES_MATCH;
}
/*
 * renderer methods
 */
/* Serialize OBJECT into the renderer's XML tree as an <object> node
 * (optionally with its saved properties), then emit a <render> child:
 * groups are expanded child-by-child through this same method so their
 * transforms compose; leaves are delegated to the transform renderer or
 * drawn directly when no transform is active.  MATRIX, if given, is
 * composed onto the tail of the renderer's matrix queue for the duration
 * of this object. */
static void
draw_object(DiaRenderer *self, DiaObject *object, DiaMatrix *matrix)
{
  DrsRenderer *renderer = DRS_RENDERER (self);
  DiaMatrix *m = g_queue_peek_tail (renderer->matrices);
  xmlNodePtr node;

  g_queue_push_tail (renderer->parents, renderer->root);
  renderer->root = node = xmlNewChild(renderer->root, NULL, (const xmlChar *)"object", NULL);
  xmlSetProp(node, (const xmlChar *)"type", (xmlChar *)object->type->name);
  /* if it looks like intdata store it as well */
  /* NOTE(review): casting the default_user_data pointer to int truncates
   * on LLP64/LP64 targets; presumably safe because only small tag values
   * (< 0xFF) are stored there — confirm against object type definitions. */
  if ((int)object->type->default_user_data > 0 && (int)object->type->default_user_data < 0xFF) {
    gchar buffer[30];
    g_snprintf(buffer, sizeof(buffer), "%d", (int)object->type->default_user_data);
    xmlSetProp(node, (const xmlChar *)"intdata", (xmlChar *)buffer);
  }
  if (renderer->save_props) {
    xmlNodePtr props_node;
    props_node = xmlNewChild(node, NULL, (const xmlChar *)"properties", NULL);
    object_save_props (object, props_node, renderer->ctx);
  }

  if (matrix) {
    DiaMatrix *m2 = g_new (DiaMatrix, 1);
    /* compose onto the parent transform if one is already active */
    if (m)
      dia_matrix_multiply (m2, matrix, m);
    else
      *m2 = *matrix;
    g_queue_push_tail (renderer->matrices, m2);
    /* lazy creation of our transformer */
    if (!renderer->transformer)
      renderer->transformer = dia_transform_renderer_new (self);
  }

  /* special handling for group objects:
   *  - for the render branch use DiaTransformRenderer, but not it's draw_object,
   *    to see all the children's draw_object ourself
   *  - for the object branch we rely on this draw_object being called so need
   *    to inline group_draw here
   *  - to maintain the correct transform build our own queue of matrices like
   *    the DiaTransformRenderer would do through it's draw_object
   */
  {
    g_queue_push_tail (renderer->parents, renderer->root);
    renderer->root = node = xmlNewChild(renderer->root, NULL, (const xmlChar *)"render", NULL);

    if (renderer->transformer) {
      /* NOTE: this inner m intentionally shadows the outer one so the
       * just-pushed composite matrix (if any) is the one handed down */
      DiaMatrix *m = g_queue_peek_tail (renderer->matrices);

      if (IS_GROUP (object)) {
        /* reimplementation of group_draw to use this draw_object method */
        GList *list;
        DiaObject *obj;

        list = group_objects (object);
        while (list != NULL) {
          obj = (DiaObject *) list->data;

          DIA_RENDERER_GET_CLASS(self)->draw_object(self, obj, m);
          list = g_list_next(list);
        }
      } else {
        /* just the leaf */
        DIA_RENDERER_GET_CLASS(renderer->transformer)->draw_object(renderer->transformer, object, m);
      }
    } else {
      object->ops->draw(object, DIA_RENDERER (renderer));
    }
    renderer->root = g_queue_pop_tail (renderer->parents);
  }
  renderer->root = g_queue_pop_tail (renderer->parents);
  if (matrix)
    g_queue_pop_tail (renderer->matrices);
  /* one lost demand destruction */
  if (renderer->transformer && g_queue_is_empty (renderer->matrices)) {
    g_object_unref (renderer->transformer);
    renderer->transformer = NULL;
  }
}
// // Do_Path_Throws: C // // Evaluate an ANY_PATH! REBVAL, starting from the index position of that // path value and continuing to the end. // // The evaluator may throw because GROUP! is evaluated, e.g. `foo/(throw 1020)` // // If label_sym is passed in as being non-null, then the caller is implying // readiness to process a path which may be a function with refinements. // These refinements will be left in order on the data stack in the case // that `out` comes back as IS_FUNCTION(). // // If `opt_setval` is given, the path operation will be done as a "SET-PATH!" // if the path evaluation did not throw or error. HOWEVER the set value // is NOT put into `out`. This provides more flexibility on performance in // the evaluator, which may already have the `val` where it wants it, and // so the extra assignment would just be overhead. // // !!! Path evaluation is one of the parts of R3-Alpha that has not been // vetted very heavily by Ren-C, and needs a review and overhaul. // REBOOL Do_Path_Throws( REBVAL *out, REBSYM *label_sym, const REBVAL *path, REBVAL *opt_setval ) { REBPVS pvs; REBDSP dsp_orig = DSP; assert(ANY_PATH(path)); // !!! There is a bug in the dispatch such that if you are running a // set path, it does not always assign the output, because it "thinks you // aren't going to look at it". This presumably originated from before // parens were allowed in paths, and neglects cases like: // // foo/(throw 1020): value // // We always have to check to see if a throw occurred. Until this is // streamlined, we have to at minimum set it to something that is *not* // thrown so that we aren't testing uninitialized memory. A safe trash // will do, which is unset in release builds. // if (opt_setval) SET_TRASH_SAFE(out); // None of the values passed in can live on the data stack, because // they might be relocated during the path evaluation process. 
// assert(!IN_DATA_STACK(out)); assert(!IN_DATA_STACK(path)); assert(!opt_setval || !IN_DATA_STACK(opt_setval)); // Not currently robust for reusing passed in path or value as the output assert(out != path && out != opt_setval); assert(!opt_setval || !THROWN(opt_setval)); // Initialize REBPVS -- see notes in %sys-do.h // pvs.opt_setval = opt_setval; pvs.store = out; pvs.orig = path; pvs.item = VAL_ARRAY_AT(pvs.orig); // may not be starting at head of PATH! // Seed the path evaluation process by looking up the first item (to // get a datatype to dispatch on for the later path items) // if (IS_WORD(pvs.item)) { pvs.value = GET_MUTABLE_VAR_MAY_FAIL(pvs.item); if (IS_UNSET(pvs.value)) fail (Error(RE_NO_VALUE, pvs.item)); } else { // !!! Ideally there would be some way to protect pvs.value during // successive path dispatches to make sure it does not get written. // This is semi-dangerously giving pvs.value a reference into the // input path, which should not be modified! pvs.value = VAL_ARRAY_AT(pvs.orig); } // Start evaluation of path: if (IS_END(pvs.item + 1)) { // If it was a single element path, return the value rather than // try to dispatch it (would cause a crash at time of writing) // // !!! Is this the desired behavior, or should it be an error? } else if (Path_Dispatch[VAL_TYPE_0(pvs.value)]) { REBOOL threw = Next_Path_Throws(&pvs); // !!! See comments about why the initialization of out is necessary. // Without it this assertion can change on some things: // // t: now // t/time: 10:20:03 // // (It thinks pvs.value has its THROWN bit set when it completed // successfully. It was a PE_USE_STORE case where pvs.value was reset to // pvs.store, and pvs.store has its thrown bit set. Valgrind does not // catch any uninitialized variables.) // // There are other cases that do trip valgrind when omitting the // initialization, though not as clearly reproducible. 
// assert(threw == THROWN(pvs.value)); if (threw) return TRUE; // Check for errors: if (NOT_END(pvs.item + 1) && !IS_FUNCTION(pvs.value)) { // Only function refinements should get by this line: fail (Error(RE_INVALID_PATH, pvs.orig, pvs.item)); } } else if (!IS_FUNCTION(pvs.value)) fail (Error(RE_BAD_PATH_TYPE, pvs.orig, Type_Of(pvs.value))); if (opt_setval) { // If SET then we don't return anything assert(IS_END(pvs.item) + 1); return FALSE; } // If storage was not used, then copy final value back to it: if (pvs.value != pvs.store) *pvs.store = *pvs.value; assert(!THROWN(out)); // Return 0 if not function or is :path/word... if (!IS_FUNCTION(pvs.value)) { assert(IS_END(pvs.item) + 1); return FALSE; } if (label_sym) { REBVAL refinement; VAL_INIT_WRITABLE_DEBUG(&refinement); // When a function is hit, path processing stops as soon as the // processed sub-path resolves to a function. The path is still sitting // on the position of the last component of that sub-path. Usually, // this last component in the sub-path is a word naming the function. // if (IS_WORD(pvs.item)) { *label_sym = VAL_WORD_SYM(pvs.item); } else { // In rarer cases, the final component (completing the sub-path to // the function to call) is not a word. Such as when you use a path // to pick by index out of a block of functions: // // functions: reduce [:add :subtract] // functions/1 10 20 // // Or when you have an immediate function value in a path with a // refinement. Tricky to make, but possible: // // do reduce [ // to-path reduce [:append 'only] [a] [b] // ] // // !!! When a function was not invoked through looking up a word // (or a word in a path) to use as a label, there were once three // different alternate labels used. One was SYM__APPLY_, another // was ROOT_NONAME, and another was to be the type of the function // being executed. None are fantastic, we do the type for now. *label_sym = SYM_FROM_KIND(VAL_TYPE(pvs.value)); } // Move on to the refinements (if any) ++pvs.item; // !!! 
Currently, the mainline path evaluation "punts" on refinements. // When it finds a function, it stops the path evaluation and leaves // the position pvs.path before the list of refinements. // // A more elegant solution would be able to process and notice (for // instance) that `:APPEND/ONLY` should yield a function value that // has been specialized with a refinement. Path chaining should thus // be able to effectively do this and give the refined function object // back to the evaluator or other client. // // If a label_sym is passed in, we recognize that a function dispatch // is going to be happening. We do not want to pay to generate the // new series that would be needed to make a temporary function that // will be invoked and immediately GC'd So we gather the refinements // on the data stack. // // This code simulates that path-processing-to-data-stack, but it // should really be something in dispatch iself. In any case, we put // refinements on the data stack...and caller knows refinements are // from dsp_orig to DSP (thanks to accounting, all other operations // should balance!) 
for (; NOT_END(pvs.item); ++pvs.item) { // "the refinements" if (IS_NONE(pvs.item)) continue; if (IS_GROUP(pvs.item)) { // Note it is not legal to use the data stack directly as the // output location for a DO (might be resized) if (DO_VAL_ARRAY_AT_THROWS(&refinement, pvs.item)) { *out = refinement; DS_DROP_TO(dsp_orig); return TRUE; } if (IS_NONE(&refinement)) continue; DS_PUSH(&refinement); } else if (IS_GET_WORD(pvs.item)) { DS_PUSH_TRASH; *DS_TOP = *GET_OPT_VAR_MAY_FAIL(pvs.item); if (IS_NONE(DS_TOP)) { DS_DROP; continue; } } else DS_PUSH(pvs.item); // Whatever we were trying to use as a refinement should now be // on the top of the data stack, and only words are legal ATM // if (!IS_WORD(DS_TOP)) fail (Error(RE_BAD_REFINE, DS_TOP)); // Go ahead and canonize the word symbol so we don't have to // do it each time in order to get a case-insenstive compare // INIT_WORD_SYM(DS_TOP, SYMBOL_TO_CANON(VAL_WORD_SYM(DS_TOP))); } // To make things easier for processing, reverse the refinements on // the data stack (we needed to evaluate them in forward order). // This way we can just pop them as we go, and know if they weren't // all consumed if it doesn't get back to `dsp_orig` by the end. if (dsp_orig != DSP) { REBVAL *bottom = DS_AT(dsp_orig + 1); REBVAL *top = DS_TOP; while (top > bottom) { refinement = *bottom; *bottom = *top; *top = refinement; top--; bottom++; } } } else { // !!! Historically this just ignores a result indicating this is a // function with refinements, e.g. ':append/only'. However that // ignoring seems unwise. It should presumably create a modified // function in that case which acts as if it has the refinement. // // If the caller did not pass in a label pointer we assume they are // likely not ready to process any refinements. // if (NOT_END(pvs.item + 1)) fail (Error(RE_TOO_LONG)); // !!! Better error or add feature } return FALSE; }
// // Compose_Any_Array_Throws: C // // Compose a block from a block of un-evaluated values and GROUP! arrays that // are evaluated. This calls into Do_Core, so if 'into' is provided, then its // series must be protected from garbage collection. // // deep - recurse into sub-blocks // only - parens that return blocks are kept as blocks // // Writes result value at address pointed to by out. // REBOOL Compose_Any_Array_Throws( REBVAL *out, const REBVAL *any_array, REBOOL deep, REBOOL only, REBOOL into ) { REBDSP dsp_orig = DSP; Reb_Enumerator e; PUSH_SAFE_ENUMERATOR(&e, any_array); // evaluating could disrupt any_array while (NOT_END(e.value)) { UPDATE_EXPRESSION_START(&e); // informs the error delivery better if (IS_GROUP(e.value)) { // // We evaluate here, but disable lookahead so it only evaluates // the GROUP! and doesn't trigger errors on what's after it. // REBVAL evaluated; DO_NEXT_REFETCH_MAY_THROW(&evaluated, &e, DO_FLAG_NO_LOOKAHEAD); if (THROWN(&evaluated)) { *out = evaluated; DS_DROP_TO(dsp_orig); DROP_SAFE_ENUMERATOR(&e); return TRUE; } if (IS_BLOCK(&evaluated) && !only) { // // compose [blocks ([a b c]) merge] => [blocks a b c merge] // RELVAL *push = VAL_ARRAY_AT(&evaluated); while (NOT_END(push)) { // // `evaluated` is known to be specific, but its specifier // may be needed to derelativize its children. 
// DS_PUSH_RELVAL(push, VAL_SPECIFIER(&evaluated)); push++; } } else if (!IS_VOID(&evaluated)) { // // compose [(1 + 2) inserts as-is] => [3 inserts as-is] // compose/only [([a b c]) unmerged] => [[a b c] unmerged] // DS_PUSH(&evaluated); } else { // // compose [(print "Voids *vanish*!")] => [] // } } else if (deep) { if (IS_BLOCK(e.value)) { // // compose/deep [does [(1 + 2)] nested] => [does [3] nested] REBVAL specific; COPY_VALUE(&specific, e.value, e.specifier); REBVAL composed; if (Compose_Any_Array_Throws( &composed, &specific, TRUE, only, into )) { *out = composed; DS_DROP_TO(dsp_orig); DROP_SAFE_ENUMERATOR(&e); return TRUE; } DS_PUSH(&composed); } else { if (ANY_ARRAY(e.value)) { // // compose [copy/(orig) (copy)] => [copy/(orig) (copy)] // !!! path and second group are copies, first group isn't // REBARR *copy = Copy_Array_Shallow( VAL_ARRAY(e.value), IS_RELATIVE(e.value) ? e.specifier // use parent specifier if relative... : VAL_SPECIFIER(const_KNOWN(e.value)) // else child's ); DS_PUSH_TRASH; Val_Init_Array_Index( DS_TOP, VAL_TYPE(e.value), copy, VAL_INDEX(e.value) ); // ...manages } else DS_PUSH_RELVAL(e.value, e.specifier); } FETCH_NEXT_ONLY_MAYBE_END(&e); } else { // // compose [[(1 + 2)] (reverse "wollahs")] => [[(1 + 2)] "shallow"] // DS_PUSH_RELVAL(e.value, e.specifier); FETCH_NEXT_ONLY_MAYBE_END(&e); } } if (into) Pop_Stack_Values_Into(out, dsp_orig); else Val_Init_Array(out, VAL_TYPE(any_array), Pop_Stack_Values(dsp_orig)); DROP_SAFE_ENUMERATOR(&e); return FALSE; }
// // Next_Path_Throws: C // // Evaluate next part of a path. // REBOOL Next_Path_Throws(REBPVS *pvs) { REBPEF dispatcher; // Path must have dispatcher, else return: dispatcher = Path_Dispatch[VAL_TYPE(pvs->value)]; if (!dispatcher) return FALSE; // unwind, then check for errors pvs->item++; //Debug_Fmt("Next_Path: %r/%r", pvs->path-1, pvs->path); // Determine the "selector". See notes on pvs->selector_temp for why // a local variable can't be used for the temporary space. // if (IS_GET_WORD(pvs->item)) { // e.g. object/:field pvs->selector = GET_MUTABLE_VAR_MAY_FAIL(pvs->item, pvs->item_specifier); if (IS_VOID(pvs->selector)) fail (Error_No_Value_Core(pvs->item, pvs->item_specifier)); SET_TRASH_IF_DEBUG(&pvs->selector_temp); } // object/(expr) case: else if (IS_GROUP(pvs->item)) { if (Do_At_Throws( &pvs->selector_temp, VAL_ARRAY(pvs->item), VAL_INDEX(pvs->item), IS_RELATIVE(pvs->item) ? pvs->item_specifier // if relative, use parent specifier... : VAL_SPECIFIER(const_KNOWN(pvs->item)) // ...else use child's )) { *pvs->store = pvs->selector_temp; return TRUE; } pvs->selector = &pvs->selector_temp; } else { // object/word and object/value case: // COPY_VALUE(&pvs->selector_temp, pvs->item, pvs->item_specifier); pvs->selector = &pvs->selector_temp; } switch (dispatcher(pvs)) { case PE_OK: break; case PE_SET_IF_END: if (pvs->opt_setval && IS_END(pvs->item + 1)) { *pvs->value = *pvs->opt_setval; pvs->opt_setval = NULL; } break; case PE_NONE: SET_BLANK(pvs->store); case PE_USE_STORE: pvs->value = pvs->store; pvs->value_specifier = SPECIFIED; break; default: assert(FALSE); } if (NOT_END(pvs->item + 1)) return Next_Path_Throws(pvs); return FALSE; }