// ns_get_hive_ability(Player, ability=0)
// With ability > 0: returns whether the player has at least that hive
// ability (the script-side value is 1-based; the stored value is 0-based).
// With ability == 0: returns the raw HIVEABILITY private-data value.
static cell AMX_NATIVE_CALL ns_get_hive_ability(AMX *amx, cell *params)
{
	CreatePlayerPointer(amx, params[1]);

	// Guard against disconnected players / missing private data, consistent
	// with the other player natives in this file (e.g. ns_get_deaths);
	// the original read private data unconditionally.
	if (!player->IsConnected() || !player->HasPrivateData())
	{
		return 0;
	}

	int result = get_private(player->GetEdict(), MAKE_OFFSET(HIVEABILITY));

	return (params[2] > 0) ? (result >= params[2] - 1) : result;
}
/*
 * InitProcGlobal -
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
 *
 *	  We also create all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxConnections or autovacuum_max_workers higher than his kernel will
 *	  support, he'll find out sooner rather than later.
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
 *
 * Note: this is NOT called by individual backends under a postmaster,
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
 * pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(int mppLocalProcessCounter)
{
	PGPROC	   *procs;
	int			i;
	bool		found;

	/* Create the ProcGlobal shared structure */
	ProcGlobal = (PROC_HDR *) ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	/* first-time creation is expected; a pre-existing struct means re-entry */
	Assert(!found);

	/*
	 * Create the PGPROC structures for auxiliary (bgwriter) processes, too.
	 * These do not get linked into the freeProcs list.
	 */
	AuxiliaryProcs = (PGPROC *) ShmemInitStruct("AuxiliaryProcs", NUM_AUXILIARY_PROCS * sizeof(PGPROC), &found);
	Assert(!found);

	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->freeProcs = INVALID_OFFSET;

	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;

	ProcGlobal->mppLocalProcessCounter = mppLocalProcessCounter;

	/*
	 * Pre-create the PGPROC structures and create a semaphore for each.
	 */
	procs = (PGPROC *) ShmemAlloc(MaxBackends * sizeof(PGPROC));
	if (!procs)
		ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory")));
	MemSet(procs, 0, MaxBackends * sizeof(PGPROC));
	/* push each entry onto the freelist head, so the list is built in reverse */
	for (i = 0; i < MaxBackends; i++)
	{
		PGSemaphoreCreate(&(procs[i].sem));
		procs[i].links.next = ProcGlobal->freeProcs;
		ProcGlobal->freeProcs = MAKE_OFFSET(&procs[i]);
	}

	ProcGlobal->procs = procs;
	ProcGlobal->numFreeProcs = MaxBackends;

	/* auxiliary procs get zeroed entries and their own semaphores, no freelist */
	MemSet(AuxiliaryProcs, 0, NUM_AUXILIARY_PROCS * sizeof(PGPROC));
	for (i = 0; i < NUM_AUXILIARY_PROCS; i++)
	{
		AuxiliaryProcs[i].pid = 0;		/* marks auxiliary proc as not in use */
		AuxiliaryProcs[i].postmasterResetRequired = true;
		PGSemaphoreCreate(&(AuxiliaryProcs[i].sem));
	}
}
// ns_get_deaths(Player)
// Returns the player's death count from private data, or 0 when the
// player is not connected or has no private data yet.
static cell AMX_NATIVE_CALL ns_get_deaths(AMX *amx, cell *params)
{
	CreatePlayerPointer(amx, params[1]);

	if (player->IsConnected() && player->HasPrivateData())
	{
		return get_private(player->GetEdict(), MAKE_OFFSET(DEATHS));
	}

	return 0;
}
// ns_get_weap_ammo(WeaponID) static cell AMX_NATIVE_CALL ns_get_weapon_clip(AMX *amx, cell *params) { CreateNonPlayerEdict(amx,params[1]); if (Entity->pvPrivateData == NULL || Entity->free) { return 0; } return get_private(Entity,MAKE_OFFSET(WEAPCLIP)); }
// ns_add_score(Player, amount)
// Adds `amount` to the player's score; returns the updated value, or 0
// when the player is not connected or has no private data.
static cell AMX_NATIVE_CALL ns_add_score(AMX *amx, cell *params)
{
	CreatePlayerPointer(amx, params[1]);

	if (player->IsConnected() && player->HasPrivateData())
	{
		const int delta = static_cast<int>(params[2]);
		return inc_private(player->GetEdict(), MAKE_OFFSET(SCORE), delta);
	}

	return 0;
}
// ns_add_weapon_clip(WeaponID, amount)
// Adds `amount` to the weapon entity's clip (clamped at a minimum of 0);
// returns the updated value, or 0 for freed/uninitialized entities.
static cell AMX_NATIVE_CALL ns_add_weapon_clip(AMX *amx, cell *params)
{
	CreateNonPlayerEdict(amx, params[1]);

	if (Entity->free || Entity->pvPrivateData == NULL)
	{
		return 0;
	}

	const int delta = static_cast<int>(params[2]);
	return inc_private(Entity, MAKE_OFFSET(WEAPCLIP), delta, 0);
}
// ns_add_weap_reserve(Player, WeaponID, amount)
// Adds `amount` to the player's reserve ammo for the given weapon type,
// clamped at a minimum of 0. Returns the updated value, or 0 for unknown
// weapon IDs or for players without private data.
static cell AMX_NATIVE_CALL ns_add_weap_reserve(AMX *amx, cell *params)
{
	CreatePlayerPointer(amx, params[1]);

	if (!player->IsConnected() || !player->HasPrivateData())
	{
		return 0;
	}

	// The original cast params[3] in every case except WEAPON_PISTOL;
	// hoist the (consistent) cast and the edict lookup out of the switch.
	const int amount = static_cast<int>(params[3]);
	edict_t *pEdict = player->GetEdict();

	switch (params[2])
	{
	case WEAPON_PISTOL:
		return inc_private(pEdict, MAKE_OFFSET(AMMO_PISTOL), amount, 0);
	case WEAPON_LMG:
		return inc_private(pEdict, MAKE_OFFSET(AMMO_LMG), amount, 0);
	case WEAPON_SHOTGUN:
		return inc_private(pEdict, MAKE_OFFSET(AMMO_SHOTGUN), amount, 0);
	case WEAPON_HMG:
		return inc_private(pEdict, MAKE_OFFSET(AMMO_HMG), amount, 0);
	case WEAPON_GRENADE_GUN:
		return inc_private(pEdict, MAKE_OFFSET(AMMO_GL), amount, 0);
	case WEAPON_GRENADE:
		return inc_private(pEdict, MAKE_OFFSET(AMMO_HG), amount, 0);
	default:
		return 0;				// unknown weapon id
	}
	// (unreachable trailing `return 0;` removed)
}
// Float:ns_get_weap_dmg(WeaponID) static cell AMX_NATIVE_CALL ns_get_weapon_dmg(AMX *amx, cell *params) { CreateNonPlayerEdict(amx,params[1]); if (Entity->pvPrivateData == NULL || Entity->free) { return 0; } REAL ret=get_private_f(Entity,MAKE_OFFSET(WEAPDMG)); return amx_ftoc2(ret); }
// ns_set_weap_range(WeaponID,Float:range) static cell AMX_NATIVE_CALL ns_set_weapon_range(AMX *amx, cell *params) { CreateNonPlayerEdict(amx,params[1]); if (Entity->pvPrivateData == NULL || Entity->free) { return 0; } set_private_f(Entity,MAKE_OFFSET(WEAPRANGE),amx_ctof2(params[2])); return 1; }
/*
 * dumpQ -- debugging aid: log a SHM_QUEUE's prev chain and next chain
 * (as shared-memory offsets) via elog(DEBUG2), prefixed with caller tag s.
 *
 * Walks backwards from the head until it returns to the start, then
 * forwards likewise.  The self-link test and the iteration caps (40 for
 * prev, 10 for next) bail out of corrupted queues instead of looping
 * forever, appending a "BAD ... QUEUE!!" marker.
 *
 * NOTE(review): elements are strcat'd into a fixed 1024-byte buffer with
 * no bounds check; the iteration caps appear to keep the total within
 * range, but confirm the worst-case element strings cannot overflow buf.
 */
static void
dumpQ(SHM_QUEUE *q, char *s)
{
	char		elem[NAMEDATALEN];
	char		buf[1024];
	SHM_QUEUE  *start = q;
	int			count = 0;

	/* ---- walk the prev links ---- */
	snprintf(buf, sizeof(buf), "q prevs: %lx", MAKE_OFFSET(q));
	q = (SHM_QUEUE *) MAKE_PTR(q->prev);
	while (q != start)
	{
		snprintf(elem, sizeof(elem), "--->%lx", MAKE_OFFSET(q));
		strcat(buf, elem);
		q = (SHM_QUEUE *) MAKE_PTR(q->prev);
		if (q->prev == MAKE_OFFSET(q))
			break;				/* element links to itself: corrupt */
		if (count++ > 40)
		{
			strcat(buf, "BAD PREV QUEUE!!");
			break;
		}
	}
	snprintf(elem, sizeof(elem), "--->%lx", MAKE_OFFSET(q));
	strcat(buf, elem);
	elog(DEBUG2, "%s: %s", s, buf);

	/* ---- walk the next links ---- */
	snprintf(buf, sizeof(buf), "q nexts: %lx", MAKE_OFFSET(q));
	count = 0;
	q = (SHM_QUEUE *) MAKE_PTR(q->next);
	while (q != start)
	{
		snprintf(elem, sizeof(elem), "--->%lx", MAKE_OFFSET(q));
		strcat(buf, elem);
		q = (SHM_QUEUE *) MAKE_PTR(q->next);
		if (q->next == MAKE_OFFSET(q))
			break;				/* element links to itself: corrupt */
		if (count++ > 10)
		{
			strcat(buf, "BAD NEXT QUEUE!!");
			break;
		}
	}
	snprintf(elem, sizeof(elem), "--->%lx", MAKE_OFFSET(q));
	strcat(buf, elem);
	elog(DEBUG2, "%s: %s", s, buf);
}
/*
 * freeProcEntryAndReturnReset -- free proc entry in PGPROC or AuxiliaryProcs array,
 * and return the postmasterResetRequired value.
 *
 * To avoid holding a lock on PGPROC structure, we use compare_and_swap to put
 * PGPROC entry back to the free list.
 *
 * NOTE(review): the comment above mentions compare_and_swap, but this
 * function delegates the freelist push to Prepend(); presumably the CAS
 * happens inside Prepend() -- confirm against its definition.
 */
bool
freeProcEntryAndReturnReset(int pid)
{
	Assert(ProcGlobal != NULL);
	/* default when the pid is not found anywhere: ask for a reset */
	bool		resetRequired = true;

	PGPROC	   *procs = ProcGlobal->procs;

	/* Return PGPROC structure to freelist */
	for (int i = 0; i < MaxBackends; i++)
	{
		PGPROC	   *myProc = &procs[i];

		if (myProc->pid == pid)
		{
			/* capture the entry's flag, then re-arm it for the next user */
			resetRequired = myProc->postmasterResetRequired;
			myProc->postmasterResetRequired = true;
			Prepend(myProc);
			if (gp_debug_pgproc)
			{
				elog(LOG, "moving PGPROC entry to freelist: pid=%d (resetRequired=%d)", pid, resetRequired);
				elog(LOG, "freeing PGPROC entry for pid %d, freeProcs (prev offset, new offset): (%ld, %ld)", pid, myProc->links.next, MAKE_OFFSET(myProc));
			}
			return resetRequired;
		}
	}

	/* not a regular backend -- try the auxiliary-process array */
	bool		found = false;

	resetRequired = freeAuxiliaryProcEntryAndReturnReset(pid, &found);
	if (found)
		return resetRequired;

	if (gp_debug_pgproc)
	{
		elog(LOG, "proc entry not found: pid=%d", pid);
	}

	return resetRequired;
}
/*
 * PrintLockQueue -- debugging aid: print the pids of all processes
 * waiting on the given lock, prefixed by the caller-supplied info tag.
 */
static void
PrintLockQueue(LOCK *lock, const char *info)
{
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
	int			nWaiters = waitQueue->size;
	PGPROC	   *waiter = (PGPROC *) MAKE_PTR(waitQueue->links.next);
	int			pos;

	printf("%s lock %lx queue ", info, MAKE_OFFSET(lock));
	for (pos = 0; pos < nWaiters; pos++)
	{
		printf(" %d", waiter->pid);
		waiter = (PGPROC *) MAKE_PTR(waiter->links.next);
	}
	printf("\n");
	fflush(stdout);
}
// Float:ns_get_exp(Player) static cell AMX_NATIVE_CALL ns_get_exp(AMX *amx, cell *params) { if (!GameMan.IsCombat()) { return 0; } CreatePlayerPointer(amx,params[1]); if (!player->IsConnected()) { return 0; } if (!player->HasPrivateData()) { return 0; } return amx_ftoc2(get_private_f(player->GetEdict(),MAKE_OFFSET(EXP))); }
// ns_add_points(Player,points) static cell AMX_NATIVE_CALL ns_add_points(AMX *amx, cell *params) { if (!GameMan.IsCombat()) { return 0; } CreatePlayerPointer(amx, params[1]); if (!player->IsConnected()) { return 0; } if (!player->HasPrivateData()) { return 0; } return inc_private(player->GetEdict(),MAKE_OFFSET(POINTS),static_cast<int>(params[2]),0,9); }
// Float:ns_add_res(Player,Float:res) static cell AMX_NATIVE_CALL ns_add_res(AMX *amx, cell *params) { if (GameMan.IsCombat()) { return 0; } CreatePlayerPointer(amx,params[1]); if (!player->IsConnected()) { return 0; } if (!player->HasPrivateData()) { return 0; } return amx_ftoc2(inc_private_f(player->GetEdict(),MAKE_OFFSET(RESOURCES),amx_ctof2(params[2]),0.0,100.0)); }
/*
 * SHMQueueInsertBefore -- put elem in queue before the given queue
 *		element.  Inserting "before" the queue head puts the elem
 *		at the tail of the queue.
 */
void
SHMQueueInsertBefore(SHM_QUEUE *queue, SHM_QUEUE *elem)
{
	SHM_QUEUE  *prevPtr;
	SHMEM_OFFSET elemOffset;

	/*
	 * Validate both pointers BEFORE touching queue->prev.  The original
	 * computed MAKE_PTR((queue)->prev) ahead of these assertions, so a
	 * bogus queue pointer would be dereferenced before being checked.
	 */
	Assert(SHM_PTR_VALID(queue));
	Assert(SHM_PTR_VALID(elem));

	prevPtr = (SHM_QUEUE *) MAKE_PTR((queue)->prev);
	elemOffset = MAKE_OFFSET(elem);

#ifdef SHMQUEUE_DEBUG
	dumpQ(queue, "in SHMQueueInsertBefore: begin");
#endif

	/* splice elem between the current tail (prevPtr) and the head */
	(elem)->next = prevPtr->next;
	(elem)->prev = queue->prev;
	(queue)->prev = elemOffset;
	prevPtr->next = elemOffset;

#ifdef SHMQUEUE_DEBUG
	dumpQ(queue, "in SHMQueueInsertBefore: end");
#endif
}
/* * ProcRemove - * used by the postmaster to clean up the global tables. This also frees * up the semaphore used for the lmgr of the process. (We have to do * this is the postmaster instead of doing a IpcSemaphoreKill on exiting * the process because the semaphore set is shared among backends and * we don't want to remove other's semaphores on exit.) */ bool ProcRemove(int pid) { SHMEM_OFFSET location; PROC *proc; location = INVALID_OFFSET; location = ShmemPIDDestroy(pid); if (location == INVALID_OFFSET) return(FALSE); proc = (PROC *) MAKE_PTR(location); SpinAcquire(ProcStructLock); ProcFreeSem(proc->sem.semKey, proc->sem.semNum); proc->links.next = ProcGlobal->freeProcs; ProcGlobal->freeProcs = MAKE_OFFSET(proc); SpinRelease(ProcStructLock); return(TRUE); }
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
	char	   *BufferBlocks;
	bool		foundBufs,
				foundDescs;
	int			i;

	/*
	 * Descriptor-table layout: NBuffers data descriptors, then extra slots
	 * used as the free-list and lookup-list heads.
	 */
	Data_Descriptors = NBuffers;
	Free_List_Descriptor = Data_Descriptors;
	Lookup_List_Descriptor = Data_Descriptors + 1;
	Num_Descriptors = Data_Descriptors + 1;

	/*
	 * It's probably not really necessary to grab the lock --- if there's
	 * anyone else attached to the shmem at this point, we've got
	 * problems.
	 */
	LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);

#ifdef BMTRACE
	CurTraceBuf = (long *) ShmemInitStruct("Buffer trace", (BMT_LIMIT * sizeof(bmtrace)) + sizeof(long), &foundDescs);
	if (!foundDescs)
		MemSet(CurTraceBuf, 0, (BMT_LIMIT * sizeof(bmtrace)) + sizeof(long));
	TraceBuf = (bmtrace *) & (CurTraceBuf[1]);
#endif

	BufferDescriptors = (BufferDesc *) ShmemInitStruct("Buffer Descriptors", Num_Descriptors * sizeof(BufferDesc), &foundDescs);
	BufferBlocks = (char *) ShmemInitStruct("Buffer Blocks", NBuffers * BLCKSZ, &foundBufs);

	if (foundDescs || foundBufs)
	{
		/* both should be present or neither */
		Assert(foundDescs && foundBufs);
	}
	else
	{
		BufferDesc *buf;
		char	   *block;

		buf = BufferDescriptors;
		block = BufferBlocks;

		/*
		 * link the buffers into a circular, doubly-linked list to
		 * initialize free list, and initialize the buffer headers. Still
		 * don't know anything about replacement strategy in this file.
		 */
		for (i = 0; i < Data_Descriptors; block += BLCKSZ, buf++, i++)
		{
			Assert(ShmemIsValid((unsigned long) block));
			/* provisional free-list links; ends are patched up below */
			buf->freeNext = i + 1;
			buf->freePrev = i - 1;

			CLEAR_BUFFERTAG(&(buf->tag));
			buf->buf_id = i;

			/* shared-memory offset of this descriptor's data block */
			buf->data = MAKE_OFFSET(block);
			buf->flags = (BM_DELETED | BM_FREE | BM_VALID);
			buf->refcount = 0;
			buf->io_in_progress_lock = LWLockAssign();
			buf->cntx_lock = LWLockAssign();
			buf->cntxDirty = false;
			buf->wait_backend_id = 0;
		}

		/* close the circular queue */
		BufferDescriptors[0].freePrev = Data_Descriptors - 1;
		BufferDescriptors[Data_Descriptors - 1].freeNext = 0;
	}

	/* Init other shared buffer-management stuff */
	InitBufTable();
	InitFreeList(!foundDescs);

	LWLockRelease(BufMgrLock);
}
/* ------------------------
 * InitProc -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 *
 * Acquires a PROC entry (from the freelist, or freshly allocated from
 * shared memory), assigns this backend a SysV semaphore, and registers
 * the PROC in the shmem PID binding table so the postmaster can find it
 * for cleanup after an untimely exit.
 * ------------------------
 */
void
InitProcess(IPCKey key)
{
	bool		found = false;
	int			pid;
	int			semstat;
	unsigned long location,
				myOffset;

	/* ------------------
	 * Routine called if deadlock timer goes off. See ProcSleep()
	 * ------------------
	 */
#ifndef WIN32
	signal(SIGALRM, HandleDeadLock);
#endif /* WIN32 we'll have to figure out how to handle this later */

	SpinAcquire(ProcStructLock);

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *) ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(WARN, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(WARN, "ProcInit: you already exist");
		return;
	}

	/* try to get a proc from the free list first */
	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		/* pop the head of the freelist */
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{
		/* have to allocate one.  We can't use the normal binding
		 * table mechanism because the proc structure is stored
		 * by PID instead of by a global name (need to look it
		 * up by PID when we cleanup dead processes).
		 */
		MyProc = (PROC *) ShmemAlloc((unsigned) sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
		MyProc->procId = ProcGlobal->numProcs;
		ProcGlobal->numProcs++;
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	memset(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;

	if (IsUnderPostmaster)
	{
		IPCKey		semKey;
		int			semNum;
		int			semId;
		union semun semun;

		ProcGetNewSemKeyAndNum(&semKey, &semNum);

		semId = IpcSemaphoreCreate(semKey, PROC_NSEMS_PER_SET, IPCProtection, IpcSemaphoreDefaultStartValue, 0, &semstat);

		/*
		 * we might be reusing a semaphore that belongs to a dead
		 * backend. So be careful and reinitialize its value here.
		 */
		semun.val = IpcSemaphoreDefaultStartValue;
		semctl(semId, semNum, SETVAL, semun);

		IpcSemaphoreLock(semId, semNum, IpcExclusiveLock);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
		MyProc->sem.semKey = semKey;
	}
	else
	{
		/* standalone backend: no semaphore needed */
		MyProc->sem.semId = -1;
	}

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

	MyProc->pid = 0;
#if 0
	MyProc->pid = MyPid;
#endif

	/* ----------------
	 * Start keeping spin lock stats from here on. Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	memset(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the binding table. The name to
	 * use is determined by the OS-assigned process id. That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	pid = getpid();
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(pid, &location)) || (location != MAKE_OFFSET(MyProc)))
	{
		elog(FATAL, "InitProc: ShmemPID table broken");
	}

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

	/* arrange for cleanup of this PROC when the backend exits */
	on_exitpg(ProcKill, (caddr_t) pid);

	ProcInitialized = TRUE;
}
/*
 * MarkAsPreparing
 *		Reserve the GID for the given transaction.
 *
 * Internally, this creates a gxact struct and puts it into the active array.
 * NOTE: this is also used when reloading a gxact after a crash; so avoid
 * assuming that we can use very much backend context.
 */
GlobalTransaction
MarkAsPreparing(TransactionId xid, const char *gid, TimestampTz prepared_at, Oid owner, Oid databaseid)
{
	GlobalTransaction gxact;
	int			i;

	if (strlen(gid) >= GIDSIZE)
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("transaction identifier \"%s\" is too long", gid)));

	LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);

	/*
	 * First, find and recycle any gxacts that failed during prepare. We do
	 * this partly to ensure we don't mistakenly say their GIDs are still
	 * reserved, and partly so we don't fail on out-of-slots unnecessarily.
	 */
	for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
	{
		gxact = TwoPhaseState->prepXacts[i];
		if (!gxact->valid && !TransactionIdIsActive(gxact->locking_xid))
		{
			/* It's dead Jim ... remove from the active array */
			TwoPhaseState->numPrepXacts--;
			/* swap-with-last removal keeps the array dense */
			TwoPhaseState->prepXacts[i] = TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts];
			/* and put it back in the freelist */
			gxact->proc.links.next = TwoPhaseState->freeGXacts;
			TwoPhaseState->freeGXacts = MAKE_OFFSET(gxact);
			/* Back up index count too, so we don't miss scanning one */
			i--;
		}
	}

	/* Check for conflicting GID */
	for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
	{
		gxact = TwoPhaseState->prepXacts[i];
		if (strcmp(gxact->gid, gid) == 0)
		{
			ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("transaction identifier \"%s\" is already in use", gid)));
		}
	}

	/* Get a free gxact from the freelist */
	if (TwoPhaseState->freeGXacts == INVALID_OFFSET)
		ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("maximum number of prepared transactions reached"), errhint("Increase max_prepared_transactions (currently %d).", max_prepared_xacts)));
	gxact = (GlobalTransaction) MAKE_PTR(TwoPhaseState->freeGXacts);
	TwoPhaseState->freeGXacts = gxact->proc.links.next;

	/* Initialize it */
	MemSet(&gxact->proc, 0, sizeof(PGPROC));
	SHMQueueElemInit(&(gxact->proc.links));
	gxact->proc.waitStatus = STATUS_OK;
	gxact->proc.xid = xid;
	gxact->proc.xmin = InvalidTransactionId;
	gxact->proc.pid = 0;		/* dummy PGPROC: no real backend */
	gxact->proc.databaseId = databaseid;
	gxact->proc.roleId = owner;
	gxact->proc.inVacuum = false;
	gxact->proc.lwWaiting = false;
	gxact->proc.lwExclusive = false;
	gxact->proc.lwWaitLink = NULL;
	gxact->proc.waitLock = NULL;
	gxact->proc.waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(gxact->proc.myProcLocks[i]));
	/* subxid data must be filled later by GXactLoadSubxactData */
	gxact->proc.subxids.overflowed = false;
	gxact->proc.subxids.nxids = 0;

	gxact->prepared_at = prepared_at;
	/* initialize LSN to 0 (start of WAL) */
	gxact->prepare_lsn.xlogid = 0;
	gxact->prepare_lsn.xrecoff = 0;
	gxact->owner = owner;
	gxact->locking_xid = xid;
	/* not valid until prepare completes; see recycling loop above */
	gxact->valid = false;
	strcpy(gxact->gid, gid);

	/* And insert it into the active array */
	Assert(TwoPhaseState->numPrepXacts < max_prepared_xacts);
	TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;

	LWLockRelease(TwoPhaseStateLock);

	return gxact;
}
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
	char	   *BufferBlocks;
	bool		foundBufs,
				foundDescs;
	int			i;

	BufferDescriptors = (BufferDesc *) ShmemInitStruct("Buffer Descriptors", NBuffers * sizeof(BufferDesc), &foundDescs);
	BufferBlocks = (char *) ShmemInitStruct("Buffer Blocks", NBuffers * BLCKSZ, &foundBufs);

	if (foundDescs || foundBufs)
	{
		/* both should be present or neither */
		Assert(foundDescs && foundBufs);
	}
	else
	{
		BufferDesc *buf;
		char	   *block;

		/*
		 * It's probably not really necessary to grab the lock --- if
		 * there's anyone else attached to the shmem at this point, we've
		 * got problems.
		 */
		LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);

		buf = BufferDescriptors;
		block = BufferBlocks;

		/*
		 * Initialize all the buffer headers.
		 */
		for (i = 0; i < NBuffers; block += BLCKSZ, buf++, i++)
		{
			Assert(ShmemIsValid((unsigned long) block));

			/*
			 * The bufNext fields link together all totally-unused buffers.
			 * Subsequent management of this list is done by
			 * StrategyGetBuffer().
			 */
			buf->bufNext = i + 1;

			CLEAR_BUFFERTAG(buf->tag);
			buf->buf_id = i;

			/* shared-memory offset of this descriptor's data block */
			buf->data = MAKE_OFFSET(block);
			buf->flags = 0;
			buf->refcount = 0;
			buf->io_in_progress_lock = LWLockAssign();
			buf->cntx_lock = LWLockAssign();
			buf->cntxDirty = false;
			buf->wait_backend_id = 0;
		}

		/* Correct last entry of linked list */
		BufferDescriptors[NBuffers - 1].bufNext = -1;

		LWLockRelease(BufMgrLock);
	}

	/* Init other shared buffer-management stuff */
	StrategyInitialize(!foundDescs);
}
// ns_get_weapon(idPlayer,weaponid,&weapontype=0) static cell AMX_NATIVE_CALL ns_get_weapon(AMX *amx, cell *params) { // Peachy did it like this: // if weapontype is 0, return the primary weapon index of the player // if weapontype is < 0, return the last inventory weapon index of the player // otherwise, scan the player's inventory and look for a weapon of the given type // such as WEAPON_KNIFE, etc, etc // I added the last parameter, which will byref the weapontype of the weapon found // returns 0 on failure // last param default value added to not conflict with his version CreatePlayerPointer(amx,params[1]); if (!player->IsConnected()) { return 0; } if (!player->HasPrivateData()) { return 0; } if (params[2]<0) // find lastinv weapon { edict_t *Weapon=private_to_edict(get_private_p<void *>(player->GetEdict(),MAKE_OFFSET(LAST_WEAPON))); if (Weapon==NULL) // no weapon { return 0; } if ((params[0] / sizeof(cell))>2) // If this plugin was compiled with peachy's .inc then don't byref { *MF_GetAmxAddr_NEW(amx,params[3])=get_private(Weapon,MAKE_OFFSET(WEAPID)); } return ENTINDEX_NEW(Weapon); } if (params[2]==0) // find current weapon { edict_t *Weapon=private_to_edict(get_private_p<void *>(player->GetEdict(),MAKE_OFFSET(CURRENT_WEAPON))); if (Weapon==NULL) // no weapon { return 0; } if ((params[0] / sizeof(cell))>2) // If this plugin was compiled with peachy's .inc then don't byref { *MF_GetAmxAddr_NEW(amx,params[3])=get_private(Weapon,MAKE_OFFSET(WEAPID)); } return ENTINDEX_NEW(Weapon); } // Finding weapon by ID char **pPlayerItems = reinterpret_cast<char**>(static_cast<char*>(player->GetEdict()->pvPrivateData) + MAKE_OFFSET(PLAYER_ITEMS)); char *pItem; int weapon=params[2]; for (int i = 0; i < 6; i++) { pItem = pPlayerItems[i]; while (pItem) { if (*(int *)(pItem + MAKE_OFFSET(WEAPID)) == weapon) { return ENTINDEX_NEW(private_to_edict(pItem)); } else { pItem = *(char **)(pItem + MAKE_OFFSET(WEAP_NEXT)); } } } return 0; }
/*
 * SHMQueueInit -- initialize an empty shared-memory queue: the head's
 *		prev and next links both point back at the head itself.
 */
void
SHMQueueInit(SHM_QUEUE *queue)
{
	SHMEM_OFFSET selfOffset;

	Assert(SHM_PTR_VALID(queue));
	selfOffset = MAKE_OFFSET(queue);
	queue->prev = selfOffset;
	queue->next = selfOffset;
}
/*
 * InitProcess -- initialize a per-process data structure for this backend
 *
 * Pops a PGPROC from the shared freelist, fills in this backend's
 * identity (pid, session id, MPP serial), resets the inherited
 * semaphore, and arranges for cleanup at backend exit.
 */
void
InitProcess(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (procglobal == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	MyProc = RemoveFirst();

	if (MyProc == NULL)
	{
		ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS), errmsg("sorry, too many clients already")));
	}

	if (gp_debug_pgproc)
	{
		elog(LOG, "allocating PGPROC entry for pid %d, freeProcs (prev offset, new offset): (%ld, %ld)", MyProcPid, MAKE_OFFSET(MyProc), MyProc->links.next);
	}

	set_spins_per_delay(procglobal->spins_per_delay);

	/* atomically claim a process serial number unique within this postmaster */
	int			mppLocalProcessSerial = gp_atomic_add_32(&procglobal->mppLocalProcessCounter, 1);

	lockHolderProcPtr = MyProc;

	/* Set the next pointer to INVALID_OFFSET */
	MyProc->links.next = INVALID_OFFSET;

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->pid = MyProcPid;
	/* databaseId and roleId will be filled in later */
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->inVacuum = false;
	MyProc->postmasterResetRequired = true;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * mppLocalProcessSerial uniquely identifies this backend process among
	 * all those that our parent postmaster process creates over its lifetime.
	 *
	 * Since we use the process serial number to decide if we should
	 * deliver a response from a server under this spin, we need to
	 * assign it under the spin lock.
	 */
	MyProc->mppLocalProcessSerial = mppLocalProcessSerial;

	/*
	 * A nonzero gp_session_id uniquely identifies an MPP client session
	 * over the lifetime of the entry postmaster process. A qDisp passes
	 * its gp_session_id down to all of its qExecs. If this is a qExec,
	 * we have already received the gp_session_id from the qDisp.
	 */
	elog(DEBUG1, "InitProcess(): gp_session_id %d", gp_session_id);
	if (Gp_role == GP_ROLE_DISPATCH && gp_session_id == -1)
		gp_session_id = mppLocalProcessSerial;
	MyProc->mppSessionId = gp_session_id;

	MyProc->mppIsWriter = Gp_is_writer;

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here. (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/* Set wait portal (do not check if resource scheduling is enabled) */
	MyProc->waitPortalId = INVALID_PORTALID;

	MyProc->queryCommandId = -1;

	/*
	 * Arrange to clean up at backend exit.
	 */
	on_shmem_exit(ProcKill, 0);

	/*
	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
	 * the deadlock checker.
	 */
	InitDeadLockChecking();
}
// ns_remove_upgrade(Player, impulse)
// Combat mode only: removes an upgrade (identified by its impulse number)
// from the player's bought and active upgrade lists, which NS stores as
// std::vector<int> instances inside the player's private data.
// Returns: 0 = not found anywhere, 1 = removed from bought only,
//          2 = removed from both, 3 = removed from active only (unexpected).
static cell AMX_NATIVE_CALL ns_remove_upgrade(AMX *amx, cell *params)
{
	CreatePlayerPointer(amx, params[1]);

	if (!GameMan.IsCombat())
	{
		return 0;
	}
	if (!player->IsConnected() || !player->HasPrivateData())
	{
		return 0;
	}

	// Mirror of the std::vector<int> memory layout used by the game:
	//   int *start;      // first element
	//   int *end;        // one past the last element
	//   int *allocated;  // end of the reserved storage
	struct upgradevector
	{
		int *start;
		int *end;
		int *allocated;

		// Element count. Pointer subtraction yields the count directly;
		// the original computed it via reinterpret_cast<unsigned int>,
		// which truncates pointers on 64-bit builds.
		inline int size()
		{
			return static_cast<int>(end - start);
		}
		inline int at(int which)
		{
			return start[which];
		}
		inline void set(int which, int val)
		{
			start[which] = val;
		}
		// Removes the first occurrence of val by shifting the tail down
		// one slot and shrinking the vector. Returns true if removed.
		inline bool remove(int val)
		{
			for (int i = 0; i < this->size(); i++)
			{
				if (this->at(i) == val)
				{
					int last = this->size() - 1;
					while (i < last)
					{
						this->set(i, this->at(i + 1));
						i++;
					}
					this->end--;
					return true;
				}
			}
			return false;
		}
	};

	upgradevector *bought = reinterpret_cast<upgradevector *>(
		reinterpret_cast<char *>(player->GetEdict()->pvPrivateData) + MAKE_OFFSET(UPGRADES_BOUGHT));
	upgradevector *active = reinterpret_cast<upgradevector *>(
		reinterpret_cast<char *>(player->GetEdict()->pvPrivateData) + MAKE_OFFSET(UPGRADES_ACTIVE));

	bool removedBought = bought->remove(params[2]);
	bool removedActive = active->remove(params[2]);

	if (removedBought)
	{
		return removedActive ? 2 : 1;
	}
	if (removedActive)
	{
		// shouldn't happen (active upgrades should also be in the bought
		// list), but report it distinctly just in case
		return 3;
	}
	return 0;
}