Example #1
uint32_t armv7a::bit_count(bits& value)
{
    // GCC's built-in function, which returns the number of 1-bits in the input unsigned integer
    return (uint32_t)__builtin_popcount(value.val);
}
Example #2
bool
is_power_of_two(unsigned long n)
{
    return __builtin_popcountl(n) == 1; /* popcountl matches the unsigned long argument */
}
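For comparison, here is a minimal portable sketch of the same predicate that avoids the GCC builtin; the helper name is hypothetical. A power of two has exactly one set bit, so clearing the lowest set bit must leave zero.

#include <stdbool.h>

static bool is_power_of_two_portable(unsigned long n)
{
    /* n & (n - 1) clears the lowest set bit; only powers of two become 0 */
    return n != 0 && (n & (n - 1)) == 0;
}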
Example #3
		inline SPROUT_CONSTEXPR int
		popcount(unsigned n) {
			return __builtin_popcount(n);
		}
Example #4
File: parser.c Project: alex-mikheev/ucx
int ucs_config_sprintf_bitmask(char *buf, size_t max, void *src, const void *arg)
{
    return snprintf(buf, max, "%u", __builtin_popcount(*(unsigned*)src));
}
Example #5
File: helpers.c Project: imclab/gfe2368
/*
 * popcount
 *
 * — Built-in Function: int __builtin_popcount (unsigned int x)
 * 
 *    Returns the number of 1-bits in x. 
 */ 
unsigned int popcount(unsigned int x) {

    return(__builtin_popcount(x));
}
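For reference, a minimal portable fallback with the same contract as the builtin (a sketch; the function name is hypothetical):

/* Kernighan's loop: each iteration clears the lowest set bit, so the
 * number of iterations equals the number of 1-bits in x. */
unsigned int popcount_fallback(unsigned int x) {
    unsigned int count = 0;
    while (x != 0) {
        x &= x - 1;
        count++;
    }
    return count;
}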
Example #6
/*
 * Parameter time: current time, may be systemTime.
 *                 Using a time on a millisecond grid is convenient, but not required.
 *                 The time could just as well reflect the timer grid in which this routine is
 *                 called; in that case the constants for the wait times must of course be
 *                 expressed in that unit as well.
 * There is a wait period directly after energizing a relay coil. It allows the charge state of the
 * bulk capacitors to be determined more accurately; from that, the number of switching operations
 * that are still possible is derived.
 * Return value: true if new SPI data was generated, i.e. a transfer would be necessary.
 */
bool Relay::DoSwitching(unsigned time, unsigned &RelDriverData)
{
 bool retval = false;
 IdleDetect(time);
 // First, handle the sub-states (currently running pulse, wait time after a pulse, ...)
 // =================================
 if (SubState == RelSubStates::Pulse) // One or more relay coils are currently being energized
 {
#ifdef RELAYUSEDISCRETETIMING
  if ((signed)(time-NextPointInTime) >= 0)
  //if ((time-PulseStartTime) >= RELAYPULSEDURATION)
#else
  if ((signed)(time-NextPointInTime) > 0)
  //if ((time-PulseStartTime) > RELAYPULSEDURATION)
#endif
  {
   DriverData = 0;
   retval = true;
   SubState = RelSubStates::Delay;
   NextPointInTime = time + RELAYPOSTDELAY1;
  }
 }

 if (SubState == RelSubStates::Delay) // Wait time after a drive pulse so the measurement is accurate
 {
#ifdef RELAYUSEDISCRETETIMING
  if ((signed)(time-NextPointInTime) >= 0)
  //if ((time-PulseStartTime) >= RELAYPOSTDELAY)
#else
  if ((signed)(time-NextPointInTime) > 0)
  //if ((time-PulseStartTime) > RELAYPOSTDELAY)
#endif
  {
   pwmEnable(false);

   SubState = RelSubStates::Delay2;
   NextPointInTime = time + RELAYPOSTDELAY2;
   if (OpState == RelOperatingStates::MeasMode)
   {
    unsigned zw1 = EnergyCalcRefVoltage;
    zw1 = zw1*zw1;
    // capture the current bulk voltage
    unsigned zw2 = GetRailVoltage();
    zw2 = zw2*zw2;
    if (zw1 > zw2)
    {
     // Calculate and store the required energy
     SingleSwitchEnergy = zw1 - zw2; // Measurements showed that no safety margin is required
    } else {
     // Problem... just assume a very large number.
     SingleSwitchEnergy = 100000;
    }
   }
  }
 }

 if (SubState == RelSubStates::Delay2) // The remaining wait time until the next pulse
 {
#ifdef RELAYUSEDISCRETETIMING
  if ((signed)(time-NextPointInTime) >= 0)
  //if ((time-PulseStartTime) >= RELAYPOSTDELAY)
#else
  if ((signed)(time-NextPointInTime) > 0)
  //if ((time-PulseStartTime) > RELAYPOSTDELAY)
#endif
  {
   SubState = RelSubStates::Idle;
  }
 }

 // Next, handle the operating states of the relay unit
 // ==========================================================
 if ((OpState == RelOperatingStates::MeasMode) && (SingleSwitchEnergy))
 { // Measurement already done, wait until the minimum reserve for BusVoltageFailureSwitching is reached
  if (OpChgReq & (RELREQSTOP | RELREQBUSVFAIL))
  { // If the start was called off again in the meantime...
   OpState = RelOperatingStates::Disable;
   OpChgReq = 0;
  } else {
   if (CalcAvailRelEnergy() >= __builtin_popcount(BusVFailMask))
   {
    for (unsigned ch=0; ch<CHANNELCNT; ch++)
     PulseRepTmr[ch] = time; // Set the PulseRepTmr timers to "due"
    OpState = RelOperatingStates::Operating;
   }
  }
 }

 int RelEnergyAvail=0;
 bool StartASwitch = false;

 if (OpState == RelOperatingStates::Disable)
 {
  if (OpChgReq & (RELREQSTOP | RELREQBUSVFAIL))
  {
   OpChgReq = 0; // This also clears a possibly pending start request
  }
  if (OpChgReq & RELREQSTART)
  {
   // Store the bulk voltage
   EnergyCalcRefVoltage = GetRailVoltage();
   // If the bulk voltage exceeds a fixed threshold
   if (EnergyCalcRefVoltage > MINURAILINITVOLTAGE)
   {
    // The first relay is energized with its previous switching direction,
    // i.e. it does not actually change state. This only serves to measure
    // how much the bulk voltage drops in the process.
    if (ChRealSwStatus & 1)
    {
     DriverData = RELAYPATTERNON;
    } else {
     DriverData = RELAYPATTERNOFF;
    }
    PulseRepTmr[0] = time + RELAYREPPULSEDELAYLONG;
    OpState = RelOperatingStates::MeasMode;
    NextPointInTime = time + RELAYPULSEDURATION;
    SubState = RelSubStates::Pulse;
    retval = true;
    OpChgReq &= 0;
   }
  }
 }

 if ((OpState == RelOperatingStates::BusVFail) && (SubState == RelSubStates::Idle))
 {
  OpState = RelOperatingStates::Disable;
 }

 if ((OpState == RelOperatingStates::Operating) && (SubState == RelSubStates::Idle))
 {
  if ((OpChgReq & RELREQBUSVFAIL))
  {
   if (BusVFailMask)
    { // There are channels configured for BusVoltageFailureSwitching
    DriverData = 0;
     // This routine is similar to the "normal" switching routine, but unfortunately too different to merge without effort.
    for (unsigned ch=0;ch<CHANNELCNT;ch++)
    {
     if (BusVFailMask & (1 << ch))
     {
       //BusVFailMask &= ~(1 << ch); unnecessary
      if (BusVFailData & (1 << ch))
      {
       ChTargetSwStatus |= (1 << ch);
       ChRealSwStatus |= (1 << ch);
       DriverData |= (RELAYPATTERNON << (2*ch));
      } else {
       ChTargetSwStatus &= ~(1 << ch);
       ChRealSwStatus &= ~(1 << ch);
       DriverData |= (RELAYPATTERNOFF << (2*ch));
      }
     }
    }
    NextPointInTime = time + RELAYPULSEDURATION;
    SubState = RelSubStates::Pulse;
    OpState = RelOperatingStates::BusVFail;
    retval = true;
    } else { // no BusVoltageFailureSwitching, so this case is simple
    OpState = RelOperatingStates::Disable;
   }
   OpChgReq = 0;
  } else {
   if ((OpChgReq & RELREQSTOP))
    { // A stop was requested, so there is no BusVoltageFailureSwitching!
    OpState = RelOperatingStates::Disable;
    OpChgReq = 0;
   } else {
    if (BuffersNonEmpty())
    {
      // For how many relays is the stored energy sufficient?
     RelEnergyAvail = CalcAvailRelEnergy() - __builtin_popcount(BusVFailMask);
     if (RelEnergyAvail > 0)
      StartASwitch = true;
    } else
     if (IdleDetect(time))
      if ((CalcAvailRelEnergy() - __builtin_popcount(BusVFailMask)) > 0)
      {
        // Nothing going on, capacitors are full.
        // Check whether a pulse repetition is due.
        // After a channel has been switched, another pulse is issued later on.
       int oldest_ch = -1;
       int oldest_age = -1;
       int age;
       for (int ch=0; ch < CHANNELCNT; ch++)
       {
        age = (signed)(time-PulseRepTmr[ch]);
        if (age >= 0)
        {
         if (age > oldest_age)
         {
          oldest_ch = ch;
          oldest_age = age;
         }
        }
       }
       if (oldest_ch >= 0)
       {
        PulseRepTmr[oldest_ch] = time + RELAYREPPULSEDELAYLONG;
        int mask = 1 << oldest_ch;
        if (ChRealSwStatus & mask)
        {
         DriverData = RELAYPATTERNON << (2*oldest_ch);
        } else {
         DriverData = RELAYPATTERNOFF << (2*oldest_ch);
        }
        NextPointInTime = time + RELAYPULSEDURATION;
        SubState = RelSubStates::Pulse;
        retval = true;
       }
      }
   }
  }
 }

 // Below, the drive pattern for the relay drivers is generated.
 // The routine pulls as many switching actions out of a task as there is energy available
 // (if desired), or conversely keeps a switching task together and only executes it once
 // enough energy is available (if RELAYKEEPTASKSTOGETHER is defined).

 // The routine can also combine several tasks if the energy suffices (respecting
 // RELAYKEEPTASKSTOGETHER). DoEnqueue() guarantees that a channel never appears more than
 // once in the queue, so this is unproblematic.
 if (StartASwitch)
 {
  int RelEnergyNeeded;
  bool AnotherLoop = false;
  DriverData = 0;
  do
  {
#ifdef RELAYKEEPTASKSTOGETHER
    RelEnergyNeeded = __builtin_popcount(Buffer[BufRdPtr].Mask); // count the set bits in .Mask
#else
   RelEnergyNeeded = 1;
#endif
   if (RelEnergyAvail < RelEnergyNeeded)
   {
    break;
   }
   for (unsigned ch=0;ch<CHANNELCNT;ch++)
   {
    if (Buffer[BufRdPtr].Mask & (1 << ch))
    {
     Buffer[BufRdPtr].Mask &= ~(1 << ch);
     ChForcedSwMsk &= ~(1 << ch);
     PulseRepTmr[ch] = time + RELAYREPPULSEDELAY;
     if (Buffer[BufRdPtr].Bits & (1 << ch))
     {
      ChRealSwStatus |= (1 << ch);
      DriverData |= (RELAYPATTERNON << (2*ch));
     } else {
      ChRealSwStatus &= ~(1 << ch);
      DriverData |= (RELAYPATTERNOFF << (2*ch));
     }
     if ((--RelEnergyAvail == 0) || (Buffer[BufRdPtr].Mask == 0))
     {
      break;
     }
    }
   }
   if (Buffer[BufRdPtr].Mask == 0)
   {
    AnotherLoop = NextBufEntry(BufRdPtr);
   }
  } while (AnotherLoop);
  if (DriverData != 0)
  {
   NextPointInTime = time + RELAYPULSEDURATION;
   SubState = RelSubStates::Pulse;
   retval = true;
  }
 }
 if (retval && (DriverData != 0))
  pwmEnable(true);
 RelDriverData = DriverData;
 return retval;
}
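As an aside, the timing checks above rely on a wrap-around-safe idiom: subtract two unsigned timestamps and cast the difference to signed. A standalone sketch of that idiom (the helper name and values are hypothetical, the cast matches the original code):

#include <assert.h>

/* "Deadline reached" check that survives counter wrap-around: the unsigned
 * subtraction wraps modulo 2^32 and the signed cast turns an elapsed deadline
 * into a non-negative value, like the (signed)(time - NextPointInTime) tests above. */
static int deadline_reached(unsigned now, unsigned deadline) {
    return (signed)(now - deadline) >= 0;
}

static void deadline_examples(void) {
    assert(deadline_reached(10u, 5u));          /* 5 ticks past the deadline */
    assert(!deadline_reached(5u, 10u));         /* 5 ticks before the deadline */
    assert(deadline_reached(5u, 0xFFFFFFF0u));  /* still correct across the wrap */
}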
Example #7
template <>
inline size_t population_count<uint16_t>(uint16_t i)
{
	return __builtin_popcount(i);
}
Example #8
File: Bits.hpp Project: koturn/CppCPLib
static inline int
popcnt(std::uint32_t n)
{
  return __builtin_popcount(n);
}
Example #9
File: buffer.c Project: 0x8000-0000/fx3
struct buffer* buf_alloc(uint16_t capacity)
{
   struct buffer* buf = NULL;

   if (BUF_SMALL_BUF_SIZE >= capacity)
   {
      uint32_t smallBufIndex = bit_alloc(&smallBufferBitmap);
      if (32 > smallBufIndex)
      {
         buf = (struct buffer*) &smallBufferPool[smallBufIndex * BUF_SMALL_BUF_SIZE];

         buf->next     = NULL;
         buf->capacity = BUF_SMALL_BUF_SIZE;
         buf->size     = 0;

         int usage = BUF_SMALL_BUF_COUNT - __builtin_popcount(smallBufferBitmap);
         smallBufferHistogram[usage] ++;
      }
      else
      {
         buf_on_poolExhausted(BUF_SMALL_BUF_SIZE);
      }
   }

   if ((NULL == buf) && (BUF_MEDIUM_BUF_SIZE >= capacity))
   {
      uint32_t mediumBufIndex = bit_alloc(&mediumBufferBitmap);
      if (32 > mediumBufIndex)
      {
         buf = (struct buffer*) &mediumBufferPool[mediumBufIndex * BUF_MEDIUM_BUF_SIZE];

         buf->next     = NULL;
         buf->capacity = BUF_MEDIUM_BUF_SIZE;
         buf->size     = 0;

         int usage = BUF_MEDIUM_BUF_COUNT - __builtin_popcount(mediumBufferBitmap);
         mediumBufferHistogram[usage] ++;
      }
      else
      {
         buf_on_poolExhausted(BUF_MEDIUM_BUF_SIZE);
      }
   }

   if ((NULL == buf) && (BUF_LARGE_BUF_SIZE >= capacity))
   {
      uint32_t largeBufIndex = bit_alloc(&largeBufferBitmap);
      if (32 > largeBufIndex)
      {
         buf = (struct buffer*) &largeBufferPool[largeBufIndex * BUF_LARGE_BUF_SIZE];

         buf->next     = NULL;
         buf->capacity = BUF_LARGE_BUF_SIZE;
         buf->size     = 0;

         int usage = BUF_LARGE_BUF_COUNT - __builtin_popcount(largeBufferBitmap);
         largeBufferHistogram[usage] ++;
      }
      else
      {
         buf_on_poolExhausted(BUF_LARGE_BUF_SIZE);
      }
   }

   return buf;
}
Example #10
File: type.c Project: manasdas17/nvc
type_t type_read(type_rd_ctx_t ctx)
{
   fbuf_t *f = tree_read_file(ctx->tree_ctx);

   uint16_t marker = read_u16(f);
   if (marker == UINT16_C(0xffff))
      return NULL;   // Null marker
   else if (marker == UINT16_C(0xfffe)) {
      // Back reference marker
      unsigned index = read_u32(f);
      assert(index < ctx->n_types);
      return ctx->store[index];
   }

   assert(marker < T_LAST_TYPE_KIND);

   type_t t = type_new((type_kind_t)marker);
   t->ident = ident_read(ctx->ident_ctx);

   // Stash pointer for later back references
   // This must be done early as a child node of this type may
   // reference upwards
   if (ctx->n_types == ctx->store_sz) {
      ctx->store_sz *= 2;
      ctx->store = xrealloc(ctx->store, ctx->store_sz * sizeof(type_t));
   }
   ctx->store[ctx->n_types++] = t;

   const uint32_t has = has_map[t->kind];
   const int nitems = __builtin_popcount(has);
   uint32_t mask = 1;
   for (int n = 0; n < nitems; mask <<= 1) {
      if (has & mask) {
         if (ITEM_TYPE_ARRAY & mask) {
            type_array_t *a = &(t->items[n].type_array);
            type_array_resize(a, read_u16(f), NULL);

            for (unsigned i = 0; i < a->count; i++)
               a->items[i] = type_read(ctx);
         }
         else if (ITEM_TYPE & mask)
            t->items[n].type = type_read(ctx);
         else if (ITEM_TREE & mask)
            t->items[n].tree = tree_read(ctx->tree_ctx);
         else if (ITEM_TREE_ARRAY & mask) {
            tree_array_t *a = &(t->items[n].tree_array);
            tree_array_resize(a, read_u16(f), NULL);

            for (unsigned i = 0; i < a->count; i++)
               a->items[i] = tree_read(ctx->tree_ctx);
         }
         else if (ITEM_RANGE_ARRAY & mask) {
            range_array_t *a = &(t->items[n].range_array);
            range_t dummy = { NULL, NULL, 0 };
            range_array_resize(a, read_u16(f), dummy);

            for (unsigned i = 0; i < a->count; i++) {
               a->items[i].kind  = read_u8(f);
               a->items[i].left  = tree_read(ctx->tree_ctx);
               a->items[i].right = tree_read(ctx->tree_ctx);
            }
         }
         else
            item_without_type(mask);
         n++;
      }
   }

   return t;
}
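The has/nitems/mask loop above is a compact way to visit only the item slots whose bit is set in the has map; popcount tells the loop how many items to expect so it can stop at the last one. A minimal standalone sketch of that pattern (the printing is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Visit the set bits of `has` in ascending order. */
static void visit_items(uint32_t has) {
    const int nitems = __builtin_popcount(has);
    uint32_t mask = 1;
    for (int n = 0; n < nitems; mask <<= 1) {
        if (has & mask) {
            printf("item %d <- bit 0x%x\n", n, (unsigned)mask);
            n++;
        }
    }
}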
Example #11
Datum
gbfp_distance(PG_FUNCTION_ARGS)
{
    GISTENTRY      *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
    // bytea          *query = PG_GETARG_DATA_TYPE_P(1);
    StrategyNumber  strategy = (StrategyNumber) PG_GETARG_UINT16(2);
    bytea          *key = (bytea*)DatumGetPointer(entry->key);

    bytea          *query;
    double          nCommon, nCommonUp, nCommonDown, nQuery, distance;
    double          nKey = 0.0;

    fcinfo->flinfo->fn_extra = SearchBitmapFPCache(
                                                   fcinfo->flinfo->fn_extra,
                                                   fcinfo->flinfo->fn_mcxt,
                                                   PG_GETARG_DATUM(1),
                                                   NULL, NULL,&query);

    if (ISALLTRUE(query))
        elog(ERROR, "Query malformed");

    /*
    * Counts basic numbers, but don't count nKey on inner
    * page (see comments below)
    */
    nQuery = (double)sizebitvec(query);
    if (ISALLTRUE(key))
        {

        if (GIST_LEAF(entry)) nKey = (double)SIGLENBIT(query);

        nCommon = nQuery;
        }
    else
        {
        int i, cnt = 0;
        unsigned char *pk = (unsigned char*)VARDATA(key),
            *pq = (unsigned char*)VARDATA(query);

        if (SIGLEN(key) != SIGLEN(query))
            elog(ERROR, "All fingerprints should be the same length");

#ifndef USE_BUILTIN_POPCOUNT
        for(i=0;i<SIGLEN(key);i++)
            cnt += number_of_ones[ pk[i] & pq[i] ];
#else
        unsigned eidx=SIGLEN(key)/sizeof(unsigned int);
        for(i=0;i<SIGLEN(key)/sizeof(unsigned int);++i){
          cnt += __builtin_popcount(((unsigned int *)pk)[i] & ((unsigned int *)pq)[i]);
        }
        for(i=eidx*sizeof(unsigned);i<SIGLEN(key);++i){
          cnt += number_of_ones[ pk[i] & pq[i] ];
        }
#endif        

        nCommon = (double)cnt;
        if (GIST_LEAF(entry))
            nKey = (double)sizebitvec(key);
        }

    nCommonUp = nCommon;
    nCommonDown = nCommon;

    switch(strategy)
    {
        case RDKitOrderByTanimotoStrategy:
        /*
        * Nsame / (Na + Nb - Nsame)
        */
        if (GIST_LEAF(entry))
        {
            distance = nCommonUp / (nKey + nQuery - nCommonUp);
        }

        else
        {
            distance = nCommonUp / nQuery;
        }

        break;

        case RDKitOrderByDiceStrategy:
        /*
        * 2 * Nsame / (Na + Nb)
        */
        if (GIST_LEAF(entry))
        {
            distance = 2.0 * nCommonUp / (nKey + nQuery);
        }

        else
        {
            distance =  2.0 * nCommonUp / (nCommonDown + nQuery);
        }

        break;

        default:
        elog(ERROR,"Unknown strategy: %d", strategy);
    }

    PG_RETURN_FLOAT8(1.0 - distance);
}
Example #12
File: type.c Project: manasdas17/nvc
type_t type_copy_sweep(type_t t, object_copy_ctx_t *ctx)
{
   if (t == NULL)
      return NULL;

   assert(t->generation == ctx->generation);

   if (t->index == UINT32_MAX)
      return t;

   assert(t->index < ctx->index);

   if (ctx->copied[t->index] != NULL) {
      // Already copied this type
      return (type_t)ctx->copied[t->index];
   }

   type_t copy = type_new(t->kind);
   ctx->copied[t->index] = copy;

   copy->ident = t->ident;

   const uint32_t has = has_map[t->kind];
   const int nitems = __builtin_popcount(has);
   uint32_t mask = 1;
   for (int n = 0; n < nitems; mask <<= 1) {
      if (has & mask) {
         if (ITEM_TYPE_ARRAY & mask) {
            const type_array_t *from = &(t->items[n].type_array);
            type_array_t *to = &(copy->items[n].type_array);

            type_array_resize(to, from->count, NULL);

            for (unsigned i = 0; i < from->count; i++)
               to->items[i] = type_copy_sweep(from->items[i], ctx);
         }
         else if (ITEM_TYPE & mask)
            copy->items[n].type = type_copy_sweep(t->items[n].type, ctx);
         else if (ITEM_TREE & mask)
            copy->items[n].tree = tree_copy_sweep(t->items[n].tree, ctx);
         else if (ITEM_TREE_ARRAY & mask) {
            const tree_array_t *from = &(t->items[n].tree_array);
            tree_array_t *to = &(copy->items[n].tree_array);

            tree_array_resize(to, from->count, NULL);

            for (size_t i = 0; i < from->count; i++)
               to->items[i] = tree_copy_sweep(from->items[i], ctx);
         }
         else if (ITEM_RANGE_ARRAY & mask) {
            const range_array_t *from = &(t->items[n].range_array);
            range_array_t *to = &(copy->items[n].range_array);

            range_t dummy;
            range_array_resize(to, from->count, dummy);

            for (unsigned i = 0; i < from->count; i++) {
               to->items[i].kind = from->items[i].kind;
               to->items[i].left = tree_copy_sweep(from->items[i].left, ctx);
               to->items[i].right = tree_copy_sweep(from->items[i].right, ctx);
            }
         }
         else
            item_without_type(mask);
         n++;
      }
   }

   return copy;
}
Example #13
File: dcom.c Project: ploetzma/occ
// Function Specification
//
// Name: dcom_initialize_roles
//
// Description: Initialize roles so we know if we are master or slave
//
// End Function Specification
void dcom_initialize_roles(void)
{
    G_occ_role = OCC_SLAVE;

    // Locals
    pba_xcfg_t pbax_cfg_reg;

    // Used as a debug tool to correlate time between OCCs & System Time
    // getscom_ffdc(OCB_OTBR, &G_dcomTime.tod, NULL); // Commits errors internally

    G_dcomTime.tod = in64(OCB_OTBR) >> 4;
    G_dcomTime.base = ssx_timebase_get();
    pbax_cfg_reg.value = in64(PBA_XCFG);

    if(pbax_cfg_reg.fields.rcv_groupid < MAX_NUM_NODES &&
       pbax_cfg_reg.fields.rcv_chipid < MAX_NUM_OCC)
    {

        TRAC_IMP("Proc ChipId (%d)  NodeId (%d)",
                 pbax_cfg_reg.fields.rcv_chipid,
                 pbax_cfg_reg.fields.rcv_groupid);

        G_pbax_id.valid     = 1;
        G_pbax_id.node_id   = pbax_cfg_reg.fields.rcv_groupid;
        G_pbax_id.chip_id   = pbax_cfg_reg.fields.rcv_chipid;
        G_pbax_id.module_id = G_pbax_id.chip_id;
        // Always start as OCC Slave
        G_occ_role = OCC_SLAVE;
        rtl_set_run_mask(RTL_FLAG_NOTMSTR);


        // Set the initial presence mask, and count the number of occ's present
        G_sysConfigData.is_occ_present |= (0x01 << G_pbax_id.chip_id);
        G_occ_num_present = __builtin_popcount(G_sysConfigData.is_occ_present);

    }
    else // Invalid chip/node ID(s)
    {
        TRAC_ERR("Proc ChipId (%d) and/or NodeId (%d) too high: request reset",
                 pbax_cfg_reg.fields.rcv_chipid,
                 pbax_cfg_reg.fields.rcv_groupid);
        /* @
         * @errortype
         * @moduleid    DCOM_MID_INIT_ROLES
         * @reasoncode  INVALID_CONFIG_DATA
         * @userdata1   PBAXCFG (upper)
         * @userdata2   PBAXCFG (lower)
         * @userdata4   ERC_CHIP_IDS_INVALID
         * @devdesc     Failure determining OCC role
         */
        errlHndl_t  l_errl = createErrl(
            DCOM_MID_INIT_ROLES,            //ModId
            INVALID_CONFIG_DATA,            //Reasoncode
            ERC_CHIP_IDS_INVALID,           //Extended reasoncode
            ERRL_SEV_UNRECOVERABLE,         //Severity
            NULL,                           //Trace Buf
            DEFAULT_TRACE_SIZE,             //Trace Size
            pbax_cfg_reg.words.high_order,  //Userdata1
            pbax_cfg_reg.words.low_order    //Userdata2
            );

        // Callout firmware
        addCalloutToErrl(l_errl,
                         ERRL_CALLOUT_TYPE_COMPONENT_ID,
                         ERRL_COMPONENT_ID_FIRMWARE,
                         ERRL_CALLOUT_PRIORITY_HIGH);

        //Add processor callout
        addCalloutToErrl(l_errl,
                         ERRL_CALLOUT_TYPE_HUID,
                         G_sysConfigData.proc_huid,
                         ERRL_CALLOUT_PRIORITY_LOW);

        G_pbax_id.valid   = 0;  // Invalid Chip/Node ID
    }

// Initialize DCOM Thread Sem
    ssx_semaphore_create( &G_dcomThreadWakeupSem, // Semaphore
                          1,                      // Initial Count
                          0);                     // No Max Count

}
Example #14
size_t BucketAlloc::nextHighestPowerOfTwo(size_t v) {
    // GCC intrinsics:
    // __builtin_clz returns the number of leading zeros in a number,
    // __builtin_popcount returns the number of ones in a number.
    // Despite the name, this returns ceil(log2(v)) for v > 0, i.e. the exponent of
    // the next power of two >= v (the 32-bit builtins assume v fits in an unsigned int).
    return 31 - __builtin_clz(v) + (__builtin_popcount(v) > 1);
}
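A quick standalone sanity check of that formula (a sketch; it inlines the expression rather than constructing a BucketAlloc):

#include <assert.h>

static void next_highest_power_of_two_examples(void) {
    /* The expression yields the exponent, not the power of two itself. */
    assert((31 - __builtin_clz(1u) + (__builtin_popcount(1u) > 1)) == 0);   /* 2^0 >= 1 */
    assert((31 - __builtin_clz(5u) + (__builtin_popcount(5u) > 1)) == 3);   /* 2^3 >= 5 */
    assert((31 - __builtin_clz(8u) + (__builtin_popcount(8u) > 1)) == 3);   /* 2^3 >= 8 */
    assert((31 - __builtin_clz(9u) + (__builtin_popcount(9u) > 1)) == 4);   /* 2^4 >= 9 */
}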
Example #15
void
app_main_loop_worker_pipeline_lpm_ipv6(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id;
	uint32_t i;

	RTE_LOG(INFO, USER1,
		"Core %u is doing work (pipeline with IPv6 LPM table)\n",
		rte_lcore_id());

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("Unable to configure the pipeline\n");

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings_rx[i],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.burst_size_worker_read,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i]))
			rte_panic("Unable to configure input port for "
				"ring %d\n", i);
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings_tx[i],
			.tx_burst_sz = app.burst_size_worker_write,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i]))
			rte_panic("Unable to configure output port for "
				"ring %d\n", i);
	}

	/* Table configuration */
	{
		struct rte_table_lpm_ipv6_params table_lpm_ipv6_params = {
			.name = "LPM",
			.n_rules = 1 << 24,
			.number_tbl8s = 1 << 21,
			.entry_unique_size =
				sizeof(struct rte_pipeline_table_entry),
			.offset = APP_METADATA_OFFSET(32),
		};

		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_lpm_ipv6_ops,
			.arg_create = &table_lpm_ipv6_params,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id))
			rte_panic("Unable to configure the IPv6 LPM table\n");
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id))
			rte_panic("Unable to connect input port %u to "
				"table %u\n", port_in_id[i],  table_id);

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i & (app.n_ports - 1)]},
		};

		struct rte_table_lpm_ipv6_key key;
		struct rte_pipeline_table_entry *entry_ptr;
		uint32_t ip;
		int key_found, status;

		key.depth = 8 + __builtin_popcount(app.n_ports - 1);

		ip = rte_bswap32(i << (24 -
			__builtin_popcount(app.n_ports - 1)));
		memcpy(key.ip, &ip, sizeof(uint32_t));

		printf("Adding rule to IPv6 LPM table (IPv6 destination = "
			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u => "
			"port out = %u)\n",
			key.ip[0], key.ip[1], key.ip[2], key.ip[3],
			key.ip[4], key.ip[5], key.ip[6], key.ip[7],
			key.ip[8], key.ip[9], key.ip[10], key.ip[11],
			key.ip[12], key.ip[13], key.ip[14], key.ip[15],
			key.depth, i);

		status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
			&key_found, &entry_ptr);
		if (status < 0)
			rte_panic("Unable to add entry to table %u (%d)\n",
				table_id, status);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("Pipeline consistency check failed\n");

	/* Run-time */
#if APP_FLUSH == 0
	for ( ; ; )
		rte_pipeline_run(p);
#else
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
#endif
}
Example #16
inline unsigned int popcount (unsigned int x) { return __builtin_popcount (x); }
Example #17
/*
 * Return value: true if the bus voltage is no longer sufficient to charge the storage rail to
 * the minimum level (with respect to BusVoltageFailSwitching). This already constitutes a
 * brown-out criterion.
 */
bool Relay::BusVoltageFailRailLevel(void)
{
 int zw = max(GetBusVoltage() - ADCRAILVOLTAGELOSS, 0);
 return ((unsigned)(zw*zw) < (SingleSwitchEnergy*__builtin_popcount(BusVFailMask)+ADC12VOLTSQR));
}
Example #18
inline uint_fast8_t popcnt32(uint32_t a)
{
	// Should probably be implemented with an intrinsic, but with sufficient optimization this works cross-platform (it should compile to a single popcnt instruction).
	//return static_cast<uint_fast8_t>(std::bitset<32>(a).count());
	return __builtin_popcount(a);
}
Example #19
template <>
inline size_t population_count<int8_t>(int8_t i)
{
	// cast to uint8_t so a negative value is not sign-extended before counting
	return __builtin_popcount(static_cast<uint8_t>(i));
}
Example #20
inline uint_fast8_t popcnt8(uint_fast8_t a)
{
	//return static_cast<uint_fast8_t>(std::bitset<8>(a).count());
	return __builtin_popcount(a);
}
Example #21
File: darwin-64.c Project: ChaosJohn/gcc
int __popcountsi2 (uSI x) { return __builtin_popcount (x); }
Example #22
	frame |= ((byte << 5) & 0x1fe0);

	if(parity)
		frame |= TPI_PARITY_MASK;
	
	return frame;
}

static int
tpi_frame2byte(uint16_t frame, uint8_t * byte)
{
	/* drop idle and start bit(s) */
	*byte = (frame >> 5) & 0xff;

	int parity = __builtin_popcount(*byte) & 1;
	int parity_rcvd = (frame & TPI_PARITY_MASK) ? 1 : 0;

	return parity != parity_rcvd;
}
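/*
 * Editorial aside: the parity check above boils down to the low bit of the
 * popcount. A tiny standalone illustration (hypothetical helper name):
 */
#include <stdint.h>

/* Parity bit of a byte: 1 if the number of set bits is odd. */
static int byte_parity(uint8_t byte)
{
	return __builtin_popcount(byte) & 1;
}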

static int
avrftdi_tpi_break(PROGRAMMER * pgm)
{
	unsigned char buffer[] = { MPSSE_DO_WRITE | MPSSE_WRITE_NEG | MPSSE_LSB, 1, 0, 0, 0 };
	E(ftdi_write_data(to_pdata(pgm)->ftdic, buffer, sizeof(buffer)) != sizeof(buffer), to_pdata(pgm)->ftdic);

	return 0;
}

static int
Example #23
File: hash.c Project: dtzWill/xkcd-hash
static inline unsigned distance(unsigned x, unsigned y) {
  // XXX: This may be slower than bit twiddling on some archs w/o popcnt.
  // Also, __builtin_popcountll is faster when it's a native instruction,
  // but leaving this as-is for wider compatibility.
  return __builtin_popcount(x ^ y);
}
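The identity used here is that the popcount of an XOR is the Hamming distance between the two words; a one-line standalone check:

#include <assert.h>

static void hamming_distance_example(void) {
    /* 0b1010 and 0b0110 differ in two bit positions */
    assert(__builtin_popcount(0xAu ^ 0x6u) == 2);
}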
Example #24
File: ymm.c Project: tomari/yumimi3
/* divide peid to RRRRPPPP */
static UINT32 peid_of(ymm_mac_st *ms, UINT32 rank, UINT32 pipe) {
	UINT32 pipemask=ms->npipe-1;
	UINT32 sla=__builtin_popcount(pipemask);
	return (rank<<sla)|pipe;
}
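The trick here is that for a power-of-two npipe, __builtin_popcount(npipe - 1) equals log2(npipe), which is the width of the pipe field that the rank is shifted past. A standalone sketch with hypothetical values:

#include <assert.h>

static void peid_layout_example(void) {
    unsigned npipe = 16;                            /* assumed to be a power of two */
    unsigned sla = __builtin_popcount(npipe - 1);   /* 15 = 0b1111 -> 4 pipe bits */
    assert(sla == 4);
    assert(((3u << sla) | 5u) == 0x35u);            /* rank 3, pipe 5 -> RRRRPPPP = 0x35 */
}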
Example #25
File: rankbv.c Project: mpetri/wtmmap
inline uint32_t
rankbv_popcount8(const uint32_t x)
{
    return __builtin_popcount(x&0xff);
}
Example #26
File: ymm.c Project: tomari/yumimi3
/* accessor */
extern UINT32 ymm_read_dword(address_space *as, offs_t a) {
	UINT32 result=(UINT32)-1;
	if(a<LM_BYTES) {
		UINT32 *this_mem=as->mem;
		result=htonl(this_mem[a/4]);
	} else if(RM_P1_BASE<=a && a<(RM_P1_BASE+RM_BYTES)) {
		UINT32 dstpeid=next_id(as->machine_state,as->tag,0);
		UINT16 my_base=(pipe_of_peid(as->machine_state,as->tag)&1)?RM_IN2_BASE:RM_IN1_BASE;
		UINT16 offs=a-RM_P1_BASE+my_base;
		UINT32 *dstmem=mem_of_pe(as->machine_state,dstpeid);
		if(dstpeid<(as->machine_state->npipe*as->machine_state->nrank))
			result=htonl(dstmem[offs/4]);
		else
			result=(UINT32)-1;
	} else if(RM_P2_BASE<=a && a<(RM_P2_BASE+RM_BYTES)) {
		UINT32 dstpeid=next_id(as->machine_state,as->tag,1);
		UINT16 my_base=(pipe_of_peid(as->machine_state,as->tag)&1)?RM_IN2_BASE:RM_IN1_BASE;
		UINT16 offs=a-RM_P2_BASE+my_base;
		UINT32 *dstmem=mem_of_pe(as->machine_state,dstpeid);
		if(dstpeid<(as->machine_state->npipe*as->machine_state->nrank))
			result=htonl(dstmem[offs/4]);
		else
			result=(UINT32)-1;
	} else if(CONF_BASE<=a) {
		UINT32 offs=a-CONF_BASE;
		if(offs<0x10) {
			/* offset   0    1    2    3    4    5    6    7    */
			/*          PIDll -LH  -HL  -HH RIDll -LH  -HL  -HH */
			/* offset   8    9    a    b    c    d    e    f    */
			/*          P#ll  -lh  -hl  -hh R#ll  -lh  -hl  -hh */
			UINT16 id_offs=offs;
			UINT32 val;
			switch(id_offs&0x0c) {
			case 0:
				val=pipe_of_peid(as->machine_state,as->tag);
				break;
			case 0x4:
				val=rank_of_peid(as->machine_state,as->tag);
				break;
			case 0x8:
				val=as->machine_state->npipe;
				break;
			default:
				val=as->machine_state->nrank;
				break;
			}
			result=val;
			/*fprintf(stderr,"P%08X type=%02X val=%08X, R[%04X] = %02X\n",
				as->tag,id_offs&0x0c,val,
				a,result);*/
		} else if(offs==0x10) {
			result=__builtin_popcount((as->machine_state->npipe)-1);
		} else {
			fprintf(stderr,"Out-of-range read on P%08X R[%04X] = %02X\n",
				as->tag,(unsigned)a,result);
		}
	}
	{
		UINT32 r,p;
		r=rank_of_peid(as->machine_state,as->tag);
		p=pipe_of_peid(as->machine_state,as->tag);
		if(a&3) 
			fprintf(stdout,"[UNALIGNEDR] %016lld R: %05d P: %05d [%04X]\n",
				(unsigned long long)as->machine_state->total_cycles,
				r,p,(unsigned)a);
		else if(0) {
			fprintf(stdout,"[TARGET] %016lld R: %05d P: %05d [%08X] => %08X\n",
				(unsigned long long)as->machine_state->total_cycles,
				r,p,(unsigned)a,result);
		}
	}

	return result;
}
Example #27
#include <cstdio>
#include <cstring>
typedef unsigned long long ull;

int K[62];
// Memoized number of popcount steps needed to reach 1.
int get(int i) {
    if(K[i] == -1) K[i] = get(__builtin_popcount(i)) + 1;
    return K[i];
}
ull l[2];
int k;
int memo[2][62][2][62];
// Digit DP over the 61 relevant bits of l[x]: count numbers not exceeding l[x]
// whose popcount-step count equals k.
int solve(int x, int d, bool prev, int num) {
    if(d == 61) return get(num) == k;
    int &m = memo[x][d][prev][num];
    if(m != -1) return m;
    m = 0;
    for(int i = 0; i <= 1; i++) {
        if(prev && i && !(l[x] & (1ull << (60 - d))))
            break;
        m += solve(x, d + 1, prev && i == !!(l[x] & (1ull << (60 - d))), num + i);
    }
    return m;
}
Example #28
File: dynprog.c Project: ks6g10/3yrprj
inline  dint cardinality( dint seta) {
     return __builtin_popcount(seta);
}
Example #29
void
app_main_loop_worker_pipeline_acl(void) {
    struct rte_pipeline_params pipeline_params = {
        .name = "pipeline",
        .socket_id = rte_socket_id(),
    };

    struct rte_pipeline *p;
    uint32_t port_in_id[APP_MAX_PORTS];
    uint32_t port_out_id[APP_MAX_PORTS];
    uint32_t table_id;
    uint32_t i;

    RTE_LOG(INFO, USER1,
            "Core %u is doing work (pipeline with ACL table)\n",
            rte_lcore_id());

    /* Pipeline configuration */
    p = rte_pipeline_create(&pipeline_params);
    if (p == NULL)
        rte_panic("Unable to configure the pipeline\n");

    /* Input port configuration */
    for (i = 0; i < app.n_ports; i++) {
        struct rte_port_ring_reader_params port_ring_params = {
            .ring = app.rings_rx[i],
        };

        struct rte_pipeline_port_in_params port_params = {
            .ops = &rte_port_ring_reader_ops,
            .arg_create = (void *) &port_ring_params,
            .f_action = NULL,
            .arg_ah = NULL,
            .burst_size = app.burst_size_worker_read,
        };

        if (rte_pipeline_port_in_create(p, &port_params,
                                        &port_in_id[i]))
            rte_panic("Unable to configure input port for "
                      "ring %d\n", i);
    }

    /* Output port configuration */
    for (i = 0; i < app.n_ports; i++) {
        struct rte_port_ring_writer_params port_ring_params = {
            .ring = app.rings_tx[i],
            .tx_burst_sz = app.burst_size_worker_write,
        };

        struct rte_pipeline_port_out_params port_params = {
            .ops = &rte_port_ring_writer_ops,
            .arg_create = (void *) &port_ring_params,
            .f_action = NULL,
            .arg_ah = NULL,
        };

        if (rte_pipeline_port_out_create(p, &port_params,
                                         &port_out_id[i]))
            rte_panic("Unable to configure output port for "
                      "ring %d\n", i);
    }

    /* Table configuration */
    {
        struct rte_table_acl_params table_acl_params = {
            .name = "test", /* unique identifier for acl contexts */
            .n_rules = 1 << 5,
            .n_rule_fields = DIM(ipv4_field_formats),
        };

        /* Copy in the rule meta-data defined above into the params */
        memcpy(table_acl_params.field_format, ipv4_field_formats,
               sizeof(ipv4_field_formats));

        struct rte_pipeline_table_params table_params = {
            .ops = &rte_table_acl_ops,
            .arg_create = &table_acl_params,
            .f_action_hit = NULL,
            .f_action_miss = NULL,
            .arg_ah = NULL,
            .action_data_size = 0,
        };

        if (rte_pipeline_table_create(p, &table_params, &table_id))
            rte_panic("Unable to configure the ACL table\n");
    }

    /* Interconnecting ports and tables */
    for (i = 0; i < app.n_ports; i++)
        if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
                table_id))
            rte_panic("Unable to connect input port %u to "
                      "table %u\n", port_in_id[i],  table_id);

    /* Add entries to tables */
    for (i = 0; i < app.n_ports; i++) {
        struct rte_pipeline_table_entry table_entry = {
            .action = RTE_PIPELINE_ACTION_PORT,
            {.port_id = port_out_id[i & (app.n_ports - 1)]},
        };
        struct rte_table_acl_rule_add_params rule_params;
        struct rte_pipeline_table_entry *entry_ptr;
        int key_found, ret;

        memset(&rule_params, 0, sizeof(rule_params));

        /* Set the rule values */
        rule_params.field_value[SRC_FIELD_IPV4].value.u32 = 0;
        rule_params.field_value[SRC_FIELD_IPV4].mask_range.u32 = 0;
        rule_params.field_value[DST_FIELD_IPV4].value.u32 =
            i << (24 - __builtin_popcount(app.n_ports - 1));
        rule_params.field_value[DST_FIELD_IPV4].mask_range.u32 =
            8 + __builtin_popcount(app.n_ports - 1);
        rule_params.field_value[SRCP_FIELD_IPV4].value.u16 = 0;
        rule_params.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
            UINT16_MAX;
        rule_params.field_value[DSTP_FIELD_IPV4].value.u16 = 0;
        rule_params.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
            UINT16_MAX;
        rule_params.field_value[PROTO_FIELD_IPV4].value.u8 = 0;
        rule_params.field_value[PROTO_FIELD_IPV4].mask_range.u8 = 0;

        rule_params.priority = 0;

        uint32_t dst_addr = rule_params.field_value[DST_FIELD_IPV4].
                            value.u32;
        uint32_t dst_mask =
            rule_params.field_value[DST_FIELD_IPV4].mask_range.u32;

        printf("Adding rule to ACL table (IPv4 destination = "
               "%u.%u.%u.%u/%u => port out = %u)\n",
               (dst_addr & 0xFF000000) >> 24,
               (dst_addr & 0x00FF0000) >> 16,
               (dst_addr & 0x0000FF00) >> 8,
               dst_addr & 0x000000FF,
               dst_mask,
               table_entry.port_id);

        /* For ACL, add needs an rte_table_acl_rule_add_params struct */
        ret = rte_pipeline_table_entry_add(p, table_id, &rule_params,
                                           &table_entry, &key_found, &entry_ptr);
        if (ret < 0)
            rte_panic("Unable to add entry to table %u (%d)\n",
                      table_id, ret);
    }

    /* Enable input ports */
    for (i = 0; i < app.n_ports; i++)
        if (rte_pipeline_port_in_enable(p, port_in_id[i]))
            rte_panic("Unable to enable input port %u\n",
                      port_in_id[i]);

    /* Check pipeline consistency */
    if (rte_pipeline_check(p) < 0)
        rte_panic("Pipeline consistency check failed\n");

    /* Run-time */
#if APP_FLUSH == 0
    for ( ; ; )
        rte_pipeline_run(p);
#else
    for (i = 0; ; i++) {
        rte_pipeline_run(p);

        if ((i & APP_FLUSH) == 0)
            rte_pipeline_flush(p);
    }
#endif
}
Example #30
File: column.c Project: ini-bdds/bdqc
/**
  * Handle the case of a vector consisting entirely (or almost entirely) of
  * integers. (If the cardinality of string values in a vector is 1 and that
  * unique string value looks like a representation of missing data, the vector
  * is treated as if it were entirely integral.) This is the trickiest case
  * since it could be any or none of the 3 statistical classes.
  *
  * Heuristics.
  * 1. card({values}) < MAX_CATEGORY_CARDINALITY
  *     The real question is whether or not categorical statistical tests
  *     are applicable to a given vector.
  *     Categorical methods are rarely applied to data with more than
  *     MAX_CATEGORY_CARDINALITY categories. However, as the sample size
  *     grows MAX_CATEGORY_CARDINALITY *can* grow as well.
  * 2. max(abs({values})) < MAX_VALUE
  *		Although integers can serve as category labels, it rarely makes
  *     sense for those integers to have large absolute values.
  *     Zip codes are a confounder of this heuristic.
  * 3. negative integers are almost never involved in categorical data
  *    UNLESS they represent levels symmetric around 0...which further
  *    constrains plausible max(abs({values})).
  *
  * Caveats:
  * 1. The current rationale will miss quantitative variables (of either
  *    integral or floating-point type) that might be usefully treated as
  *    categorical (e.g. a vector of zip codes containing only 3 distinct
  *    values). Or even more extreme: an essentially boolean variable in
  *    which the labels happen to be two large integers.
  *
  * For plausibly categorical cases it might be better to consider the number
  * of duplicate values (using value_bag instead of value_set).
  *
  */
static int _integer_inference( const struct column *c ) {

	int stat_class = STC_UNK; // ...unless overridden below.

	const int N
		= c->type_vote[ FTY_INTEGER ];
	const double K
		= set_count( & c->value_set );
	const int N_MAG
		= __builtin_popcount( c->integer_magnitudes );

	if( c->excess_values ) {

		// It can only be ordinal or quantitative, and...

		if( c->has_negative_integers ) { // ...it's not ordinal.

			stat_class = STC_QUA;

		} else {

			const int MAX_MAG
				= (int)floorf( log10f( c->extrema[1] ) );

			// Following can't positively id ordinal variables,
			// but it can positively rule them out. I'm sampling
			// the interval [1,N] divided into ranges by base-10
			// magnitude. If data are missing in any range, it's
			// not strictly ordinal...or it's just missing some
			// data. TODO: See comments at top.

			if( ( N_MAG == MAX_MAG )
				&& ( (int)round(c->extrema[0]) == 1 )
				&& ( (int)round(c->extrema[1]) == N ) )
				stat_class = STC_ORD;
			else
				stat_class = STC_QUA;
		}

	} else { // |{value_set}| <= MAX_CATEGORY_CARDINALITY

		// There are few enough values to treat it as a
		// categorical variable, but...

		if( c->has_negative_integers ) {

			// ...require all values to be in (-K,+K) where
			// K == MAX_ABSOLUTE_CATEGORICAL_VALUE
			// e.g. { -2, -1, 0, 1, 2 } with -2 indicating
			// "strongly dislike" and +2 "strongly like"

			stat_class 
				= (-(MAX_ABSOLUTE_CATEGORICAL_VALUE/2) <= c->extrema[0])
					&& (c->extrema[1] <= +(MAX_ABSOLUTE_CATEGORICAL_VALUE/2))
				? STC_CAT
				: STC_QUA;

		} else {

			// The column takes on a "small" set of non-negative
			// integer values. Very likely categorical. The
			// relation between the cardinality of the value set
			// and the sample size is primary determinant now...

			stat_class 
				 = (K <= MAX_CATEGORY_CARDINALITY)
				 	&& (c->extrema[1] <= MAX_ABSOLUTE_CATEGORICAL_VALUE)
				 	&& (K < (N/2))
					// 3rd clause is just a sanity check for very small
					// sample sizes.
				 ? STC_CAT
				 : STC_QUA;

			// Zip codes are a confounding case. They *can* be categorical,
			// but in a large sample they're more likely to be nominative--
			// that is, non-statistical.
		}
	}

	return stat_class;
}