Example #1
string gs_system () {
#ifdef OS_MINGW
  // Look for an installed Ghostscript console binary, e.g.
  // C:\Program Files\gs\gs9.xx\bin\gswin64c.exe.
  url gs = url_system ("C:\\") * url_wildcard ("Program Files*")
         * url_system ("gs") * url_wildcard ("gs*")
         * url_system ("bin") * url_wildcard ("gswin*c.exe");
  return materialize (gs);
#else
  return "gs";
#endif
}
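Read as a path expression, the Windows branch globs C:\Program Files*\gs\gs*\bin\gswin*c.exe, and materialize() resolves the wildcards to a concrete file. For comparison, a rough standalone sketch of the same search using C++17 std::filesystem rather than the TeXmacs url API (the directory layout is an assumption read off the wildcards above):

#include <filesystem>
#include <string>
namespace fs = std::filesystem;

// Scan C:\Program Files*\gs\gs*\bin for gswin*c.exe; return "" on no match.
std::string find_gswin() {
  for (auto const& pf : fs::directory_iterator("C:\\")) {
    if (pf.path().filename().string().rfind("Program Files", 0) != 0) continue;
    fs::path gs_root = pf.path() / "gs";
    if (!fs::is_directory(gs_root)) continue;
    for (auto const& ver : fs::directory_iterator(gs_root)) {
      fs::path bin = ver.path() / "bin";
      if (!fs::is_directory(bin)) continue;
      for (auto const& exe : fs::directory_iterator(bin)) {
        std::string name = exe.path().filename().string();
        if (name.rfind("gswin", 0) == 0 && name.size() >= 10 &&
            name.compare(name.size() - 5, 5, "c.exe") == 0)
          return exe.path().string();
      }
    }
  }
  return "";
}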
Example #2
File: font.cpp Project: asir6/Colt
WT_Result WT_Font::skip_operand(WT_Opcode const & opcode, WT_File & file)
{
    switch (opcode.type())
    {
    case WT_Opcode::Single_Byte:
        {
            return materialize(opcode, file);
        } break;
    case WT_Opcode::Extended_ASCII:
        {
            WD_CHECK (opcode.skip_past_matching_paren(file));
        } break;
    case WT_Opcode::Extended_Binary:
    default:
        return WT_Result::Opcode_Not_Valid_For_This_Object;
    } // switch

    return WT_Result::Success;
}
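The Single_Byte case consumes the operand by simply re-reading the whole object via materialize(); the Extended_ASCII case instead scans forward to the parenthesis that closes the operand. A minimal sketch of that kind of scan over an in-memory buffer (a hypothetical helper, not the WHIP toolkit's skip_past_matching_paren):

#include <cstddef>

// Advance *pos past the operand whose opening '(' has already been read.
// Returns false if the buffer ends before the parentheses balance.
bool skip_past_matching_paren(const char* buf, size_t len, size_t* pos) {
  int depth = 1;                       // the opening '(' is already consumed
  while (*pos < len) {
    char c = buf[(*pos)++];
    if (c == '(') ++depth;
    else if (c == ')' && --depth == 0) return true;
  }
  return false;                        // unbalanced input
}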
Example #3
void grpc_slice_intern_shutdown(void) {
  for (size_t i = 0; i < SHARD_COUNT; i++) {
    slice_shard *shard = &g_shards[i];
    gpr_mu_destroy(&shard->mu);
    /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
    if (shard->count != 0) {
      gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata strings were leaked",
              shard->count);
      for (size_t j = 0; j < shard->capacity; j++) {
        for (interned_slice_refcount *s = shard->strs[j]; s;
             s = s->bucket_next) {
          char *text =
              grpc_dump_slice(materialize(s), GPR_DUMP_HEX | GPR_DUMP_ASCII);
          gpr_log(GPR_DEBUG, "LEAKED: %s", text);
          gpr_free(text);
        }
      }
      if (grpc_iomgr_abort_on_leaks()) {
        abort();
      }
    }
    gpr_free(shard->strs);
  }
}
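The leak report relies on grpc_dump_slice to render each slice as hex plus ASCII before logging it. A minimal sketch of that style of dump helper, shown over a raw byte buffer (illustrative only, not the gRPC implementation):

#include <cctype>
#include <cstddef>
#include <string>

// Render bytes as "68 69 'hi'" -- hex pairs followed by the printable
// characters, roughly what GPR_DUMP_HEX | GPR_DUMP_ASCII produces.
std::string dump_bytes(const unsigned char* data, size_t len) {
  static const char hex[] = "0123456789abcdef";
  std::string out;
  for (size_t i = 0; i < len; i++) {
    out += hex[data[i] >> 4];
    out += hex[data[i] & 0xf];
    out += ' ';
  }
  out += '\'';
  for (size_t i = 0; i < len; i++)
    out += std::isprint(data[i]) ? static_cast<char>(data[i]) : '.';
  out += '\'';
  return out;
}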
Example #4
    /**
     *  Private helper: set the WindowChunk's position and decide whether
     *  to materialize the input chunk for the window aggregate.
     */
    void WindowChunk::setPosition(WindowArrayIterator const* iterator, Coordinates const& pos)
    {
        _arrayIterator = iterator;
        _firstPos = pos;
        Dimensions const& dims = _array._desc.getDimensions();

        for (size_t i = 0, n = dims.size(); i < n; i++) {
            _lastPos[i] = _firstPos[i] + dims[i].getChunkInterval() - 1;
            if (_lastPos[i] > dims[i].getEndMax())
            {
                _lastPos[i] = dims[i].getEndMax();
            }
        }
        _materialized = false;
        if (_aggregate.get() == 0)
        {
            return;
        }

        if (_array._desc.getEmptyBitmapAttribute())
        {
            //
            //  At this point, we need to make a 1-bit decision about how we
            // will compute the window(...) result. Do we materialize all of
            // the cells in the inputChunk into a coords -> value map before
            // we compute the per-cell window aggregate, or do we probe the
            // inputChunk's iterator on demand?
            //
            //  The way we figure this out is to (a) compute the total size of
            // the materialization by taking the size of the inputChunk
            // (number of elements) and calculating how big the in-memory map
            // data structure would be. Then (b) we compare this size to a
            // (configurable) threshold, which is a constant (configurable)
            // multiplier of the CONFIG_MEM_ARRAY_THRESHHOLD.
            //
            //  Although using size estimations appears to be a significant
            // improvement over using a simple estimate of the sparsity of the
            // input, there are several problems with the mechanism
            // implemented here.
            //
            //  1. The calculation of the inputChunk.count() can involve a
            //  complete iteration through the inputChunk's values, which
            //  means that we might be computing a sub-query's results
            //  for the operator twice.
            //
            //  Consider: window ( filter ( A, expr ), ... ).
            //
            //  FIXME: Need to support some kind of cheap and reasonably
            //         accurate estimate of the size of an operator's
            //         output chunk, given the size(s) of its input chunk(s).
            //
            //  2.  The real thing we are trying to minimize here is the
            //   expense of all of the probe calls into the inputChunk.
            //   The total number of probe calls is a product of the input
            //   size, the number of cells, and the chunk's sparsity. Probing
            //   (or ideally scanning) a materialized inputChunk is usually
            //   a lot less expensive than probing an unmaterialized
            //   inputChunk.
            //
            //    BUT the constant overhead to materialize the inputChunk is
            //   quite high. So we would probably benefit from a smarter way
            //   to choose between the two algorithms that incorporated the
            //   fixed cost.
            //
            //  3.  As the input chunk is often going to be ordered, the cost of
            //   materializing the inputChunk by using a map<> is higher than
            //   it needs to be. See detailed note in the materialize()
            //   function.
            //
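            //
            //  Worked example (all sizes below are illustrative assumptions,
            // not measured): with inputChunk.count() == 1,000,000 cells,
            // sizeof(_Rb_tree_node_base) == 32, sizeof(scidb::Value) == 16,
            // sizeof(position_t) == 8 and varSize == 0, the estimate is
            // 1,000,000 * 56 bytes, about 53 MiB, so the chunk materializes
            // whenever CONFIG_MATERIALIZED_WINDOW_THRESHOLD >= 54 (MiB).
            //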
            if (_arrayIterator->getMethod() == WindowArray::MATERIALIZE)
            {
                materialize();
            } else if (_arrayIterator->getMethod() != WindowArray::PROBE)
            {
                //
                //  The operator has expressed no preference about the
                // algorithm. So we figure out whether materializing the source
                // involves too much memory.
                ConstChunk const& inputChunk = _arrayIterator->iterator->getChunk();
                size_t varSize = getAttributeDesc().getVarSize();

                if (varSize == 0)
                {
                    //  Variable-size attribute: fall back to the configured
                    //  per-value size estimate.
                    varSize = Config::getInstance()->getOption<int>(CONFIG_STRING_SIZE_ESTIMATION);
                }
                else if (varSize <= 8)
                {
                    //  Values of 8 bytes or less fit inside scidb::Value
                    //  itself, so they add no extra per-cell space.
                    varSize = 0;
                }

                size_t materializedChunkSize = inputChunk.count() *
                                               ( sizeof( _Rb_tree_node_base ) +
                                               sizeof ( scidb::Value ) +
                                               sizeof ( position_t ) +
                                               varSize );

                size_t maxMaterializedChunkSize = (
                    Config::getInstance()->getOption<int>(CONFIG_MATERIALIZED_WINDOW_THRESHOLD)
                    * MiB);   // All config.ini params are in Mebibytes.

                if ( materializedChunkSize <= maxMaterializedChunkSize )
                {
                    materialize();
                } else {
                    LOG4CXX_TRACE ( windowLogger,
                                    "WindowChunk::setPosition(..) - NOT MATERIALIZING \n"
                                    << "\t materializedChunkSize = " << materializedChunkSize
                                    << " as inputChunk.count() = " << inputChunk.count()
                                    << " and varSize = " << varSize
                                    << " and maxMaterializedChunkSize = " << maxMaterializedChunkSize );
                    LOG4CXX_TRACE ( windowLogger, "\t NOT MATERIALIZING " );
                }
            }
        }
    }
Example #5
bool column_major_matrix_table_slice_builder::add(data_view x) {
  return append(materialize(x));
}
Example #6
File: units.cpp Project: asir6/Colt
WT_Result WT_Units::skip_operand(WT_Opcode const & opcode, WT_File & file)
{
    return materialize(opcode, file);
}
Example #7
grpc_slice grpc_slice_intern(grpc_slice slice) {
  GPR_TIMER_BEGIN("grpc_slice_intern", 0);
  if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
    GPR_TIMER_END("grpc_slice_intern", 0);
    return slice;
  }

  uint32_t hash = grpc_slice_hash(slice);
  for (uint32_t i = 0; i <= max_static_metadata_hash_probe; i++) {
    static_metadata_hash_ent ent =
        static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
    if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
        grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) {
      GPR_TIMER_END("grpc_slice_intern", 0);
      return grpc_static_slice_table[ent.idx];
    }
  }

  interned_slice_refcount *s;
  slice_shard *shard = &g_shards[SHARD_IDX(hash)];

  gpr_mu_lock(&shard->mu);

  /* search for an existing string */
  size_t idx = TABLE_IDX(hash, shard->capacity);
  for (s = shard->strs[idx]; s; s = s->bucket_next) {
    if (s->hash == hash && grpc_slice_eq(slice, materialize(s))) {
      if (gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) == 0) {
        /* If we get here, we've added a ref to something that was about to
         * die - drop it immediately.
         * The *only* possible path here (given the shard mutex) should be to
         * drop from one ref back to zero - assert that with a CAS */
        GPR_ASSERT(gpr_atm_rel_cas(&s->refcnt, 1, 0));
        /* and treat this as if we were never here... sshhh */
      } else {
        gpr_mu_unlock(&shard->mu);
        GPR_TIMER_END("grpc_slice_intern", 0);
        return materialize(s);
      }
    }
  }

  /* not found: create a new string */
  /* string data goes after the internal_string header */
  s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
                                            GRPC_SLICE_LENGTH(slice));
  gpr_atm_rel_store(&s->refcnt, 1);
  s->length = GRPC_SLICE_LENGTH(slice);
  s->hash = hash;
  s->base.vtable = &interned_slice_vtable;
  s->base.sub_refcount = &s->sub;
  s->sub.vtable = &interned_slice_sub_vtable;
  s->sub.sub_refcount = &s->sub;
  s->bucket_next = shard->strs[idx];
  shard->strs[idx] = s;
  memcpy(s + 1, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));

  shard->count++;

  if (shard->count > shard->capacity * 2) {
    grow_shard(shard);
  }

  gpr_mu_unlock(&shard->mu);

  GPR_TIMER_END("grpc_slice_intern", 0);
  return materialize(s);
}
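The fetch_add/CAS sequence in the lookup loop is the subtle part: taking a reference can briefly resurrect an entry whose count has already hit zero and whose owner is about to free it. A minimal sketch of that guard in isolation, using std::atomic with illustrative names (not gRPC's types):

#include <atomic>
#include <cstdint>

struct entry {
  std::atomic<uintptr_t> refcnt{1};
};

// Try to take a reference while holding the shard lock.  Returns false if
// the count was already zero: a concurrent destroyer owns the entry, so the
// lookup must be treated as a miss.
bool entry_try_ref(entry* e) {
  if (e->refcnt.fetch_add(1) == 0) {
    // We briefly revived a dying entry; undo the increment.  With the shard
    // lock held, the only possible transition here is 1 -> 0 (this mirrors
    // the GPR_ASSERT'd CAS above).
    uintptr_t expected = 1;
    e->refcnt.compare_exchange_strong(expected, 0);
    return false;
  }
  return true;
}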