Example #1
0
void
qst_set_float (caddr_t * state, state_slot_t * sl, float fv)
{
#ifdef QST_DEBUG
  if (sl->ssl_index < QI_FIRST_FREE)
    GPF_T1 ("Invalid SSL in qst_set");
  else if (sl->ssl_type == SSL_CONSTANT)
    GPF_T1 ("Invalid constant SSL in qst_set");
  else
    {
#endif
  caddr_t *place = IS_SSL_REF_PARAMETER (sl->ssl_type)
    ? (caddr_t *) state[sl->ssl_index]
    : (caddr_t *) & state[sl->ssl_index];
  caddr_t old = *place;
  if (IS_BOX_POINTER (old) && DV_SINGLE_FLOAT == box_tag (old))
    *(float *) old = fv;
  else
    {
      if (old)
	ssl_free_data (sl, *place);
      *place = box_float (fv);
    }
#ifdef QST_DEBUG
    }
#endif
}
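qst_set_float avoids an allocation on the common path: when the slot already holds a float box it overwrites the value in place, and only otherwise frees the old value and allocates a new box. A minimal stand-alone sketch of the same reuse pattern with a hypothetical tagged cell type (cell_t and TAG_FLOAT are illustrative names, not Virtuoso types):

#include <stdlib.h>

/* Hypothetical tagged heap cell; TAG_FLOAT stands in for DV_SINGLE_FLOAT. */
enum cell_tag { TAG_FLOAT = 1, TAG_OTHER = 2 };

typedef struct cell_s
{
  int   c_tag;
  float c_float;
} cell_t;

/* Store fv into *place, reusing the existing cell when it already holds a float
 * so the common path does no allocation. */
static void
cell_set_float (cell_t **place, float fv)
{
  cell_t *old = *place;
  cell_t *nw;
  if (old && TAG_FLOAT == old->c_tag)
    {
      old->c_float = fv;             /* overwrite in place */
      return;
    }
  free (old);                        /* free (NULL) is a no-op */
  *place = NULL;
  nw = (cell_t *) malloc (sizeof (cell_t));
  if (!nw)
    return;                          /* out of memory: leave the slot empty */
  nw->c_tag = TAG_FLOAT;
  nw->c_float = fv;
  *place = nw;
}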
Example #2
0
void
qst_set_numeric_buf (caddr_t * qst, state_slot_t * sl, db_buf_t xx)
{
#ifdef QST_DEBUG
  if (sl->ssl_index < QI_FIRST_FREE)
    GPF_T1 ("Invalid SSL in qst_set");
  else if (sl->ssl_type == SSL_CONSTANT)
    GPF_T1 ("Invalid constant SSL in qst_set");
  else
    {
#endif
  caddr_t old = NULL;
  old = QST_GET (qst, sl);
  if (DV_NUMERIC != DV_TYPE_OF (old))
    old = NULL;
  if (!old)
    {
      old = (caddr_t) numeric_allocate ();
      numeric_from_buf ((numeric_t) old, xx);
      qst_set (qst, sl, old);
    }
  else
    {
      numeric_from_buf ((numeric_t) old, xx);
    }
#ifdef QST_DEBUG
    }
#endif
}
int
mutex_enter (dk_mutex_t *mtx)
{
#ifdef MTX_DEBUG
  du_thread_t * self = thread_current ();
#endif
  int rc;

#ifdef MTX_DEBUG
  assert (mtx->mtx_owner !=  self || !self);
  if (mtx->mtx_entry_check
      && !mtx->mtx_entry_check (mtx, self, mtx->mtx_entry_check_cd))
    GPF_T1 ("Mtx entry check fail");
#endif
  if (mtx->mtx_spins < MTX_MAX_SPINS)
    {
      int ctr;
      for (ctr = 0; ctr < MTX_MAX_SPINS; ctr++)
	{
	  if (TRYLOCK_SUCCESS == pthread_mutex_trylock (&mtx->mtx_mtx))
	    {
#ifdef MTX_METER 
	      if (ctr > 0)
		mtx->mtx_spin_waits++;
#endif 
	      mtx->mtx_spins += (ctr - mtx->mtx_spins) / 8;
	      goto got_it;
	    }
	}
      mtx->mtx_spins = MTX_MAX_SPINS;
    }
  else 
    {
      if (++mtx->mtx_spins > 10 +  MTX_MAX_SPINS)
	mtx->mtx_spins = 0;
    }
  pthread_mutex_lock (&mtx->mtx_mtx);
#ifdef MTX_METER 
  mtx->mtx_waits++;
#endif
 got_it:
#ifdef MTX_METER
  mtx->mtx_enters++;
#endif

#ifdef MTX_DEBUG
  assert (mtx->mtx_owner == NULL);
  mtx->mtx_owner = self;
  mtx->mtx_entry_file = (char *) file;
  mtx->mtx_entry_line = line;
#endif
  return 0;

failed:
  GPF_T1 ("mutex_enter() failed");
  return -1;
}
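mutex_enter above spins with pthread_mutex_trylock for a bounded number of attempts before falling back to a blocking pthread_mutex_lock, and adapts the spin budget to how useful spinning was last time. A minimal sketch of that spin-then-block pattern over plain pthreads; spin_mutex_t and SPIN_LIMIT are illustrative names, not the Virtuoso types:

#include <pthread.h>

#define SPIN_LIMIT 100          /* hypothetical spin budget, analogous to MTX_MAX_SPINS */

typedef struct spin_mutex_s
{
  pthread_mutex_t sm_mtx;
  int             sm_spins;     /* adaptive estimate of how many spins pay off */
} spin_mutex_t;

/* Try the lock up to SPIN_LIMIT times before blocking; move sm_spins toward
 * the number of attempts that succeeded, or disable spinning for a while. */
static void
spin_mutex_enter (spin_mutex_t * sm)
{
  int ctr;
  if (sm->sm_spins < SPIN_LIMIT)
    {
      for (ctr = 0; ctr < SPIN_LIMIT; ctr++)
        {
          if (0 == pthread_mutex_trylock (&sm->sm_mtx))
            {
              sm->sm_spins += (ctr - sm->sm_spins) / 8;   /* nudge the estimate toward ctr */
              return;
            }
        }
      sm->sm_spins = SPIN_LIMIT;   /* spinning did not help, stop spinning for a while */
    }
  else if (++sm->sm_spins > 10 + SPIN_LIMIT)
    sm->sm_spins = 0;              /* periodically try spinning again */
  pthread_mutex_lock (&sm->sm_mtx);
}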
void
semaphore_leave (semaphore_t *sem)
{
  thread_t *thr;
  int rc;

  rc = pthread_mutex_lock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);

#ifdef SEM_DEBUG
    {
      int inx;
      if (304 == ln && sem->sem_entry_count) GPF_T1 ("should have 0 count when signalling clrg_wait");
      for (inx = MAX_SEM_ENT - 1; inx > 0; inx--)
	{
	  sem->sem_last_left_line[inx] = sem->sem_last_left_line[inx - 1];
	  sem->sem_last_left_file[inx] = sem->sem_last_left_file[inx - 1];
	}
      sem->sem_last_left_line[0] = ln;
      sem->sem_last_left_file[0] = file;
    }
#endif
  if (sem->sem_entry_count)
    sem->sem_entry_count++;
  else
    {
#ifndef SEM_NO_ORDER
      thr = thread_queue_from (&sem->sem_waiting);
      if (thr)
	{
	  _thread_num_wait--;
	  assert (thr->thr_status == WAITSEM);
	  thr->thr_status = RUNNING;
	  pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
	}
      else
	sem->sem_entry_count++;
#else
      if (sem->sem_waiting.thq_count)
	{
	  _thread_num_wait--;
	  sem->sem_any_signalled = 1;
	  pthread_cond_signal ((pthread_cond_t *) sem->sem_cv);
	}
      else
	sem->sem_entry_count++;
#endif
    }

  rc = pthread_mutex_unlock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);
  return;

failed:
  GPF_T1 ("semaphore_leave() failed");
}
Example #5
0
buffer_desc_t *
itc_delta_this_buffer (it_cursor_t * itc, buffer_desc_t * buf, int stay_in_map)
{
  /* The caller has no access but intends to change the parent link. */
  it_map_t * itm;
  dp_addr_t remap_to;
#ifdef PAGE_TRACE
  dp_addr_t old_dp = buf->bd_physical_page;
#endif
#ifdef _NOT
  FAILCK (itc);
#endif
  ASSERT_IN_MAP (itc->itc_tree, buf->bd_page);
  itm = IT_DP_MAP (itc->itc_tree, buf->bd_page);
#ifdef MTX_DEBUG
  if (buf->bd_is_dirty && !gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap))
    GPF_T1 ("dirty but not remapped in checking delta");
#endif
  if (gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap))
    {
      buf->bd_is_dirty = 1;
      return (buf);
    }
  if (it_can_reuse_logical (itc->itc_tree, buf->bd_page))
    remap_to = buf->bd_page;
  else
    remap_to = em_new_dp (itc->itc_tree->it_extent_map, EXT_REMAP, 0, &itc->itc_n_pages_on_hold);

  if (!remap_to)
    {
      if (LT_CLOSING == itc->itc_ltrx->lt_status)
	{
	  log_error ("Out if disk during commit.  The transaction is in effect and will be replayed from the log at restart.  Exiting due to no disk space, thus cannot maintain separation of checkpoint and commit space and transactional semantic."
		     "This happens due to running out of safety margin, which is not expected to happen.  If this takes place without in fact being out of disk on the database or consistently in a given situation, the condition may be reported to support.   This is a planned exit and not a database corruption.  A core will be made for possible support.");
	  GPF_T1 ("Deliberately made core for possible support");
	}
      if (itc->itc_n_pages_on_hold)
	GPF_T1 ("The database is out of disk during an insert.  The insert has exceeded its space safety margin.  This does not normally happen.  This is a planned exit and not a corruption. Make more disk space available.  If this occurs continuously or without in fact running out of space, this may be reported to support.");
      if (DELTA_STAY_INSIDE == stay_in_map)
	GPF_T1 ("out of disk on reloc_right_leaves.");
      log_error ("Out of disk space for database");
      itc->itc_ltrx->lt_error = LTE_NO_DISK;
      itc_bust_this_trx (itc, &buf, ITC_BUST_THROW);
    }

  buf->bd_physical_page = remap_to;
  sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap,
	   DP_ADDR2VOID (remap_to));
  buf->bd_is_dirty = 1;
  DBG_PT_DELTA_CLEAN (buf, old_dp);
  return buf;
}
int
semaphore_enter (semaphore_t * sem)
{
  thread_t *thr = current_thread;
  int rc;

  rc = pthread_mutex_lock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);

  if (sem->sem_entry_count)
    sem->sem_entry_count--;
  else
    {
#ifndef SEM_NO_ORDER
      thread_queue_to (&sem->sem_waiting, thr);
      _thread_num_wait++;
      thr->thr_status = WAITSEM;
      do
	{
	  rc = pthread_cond_wait ((pthread_cond_t *) thr->thr_cv, (pthread_mutex_t*) sem->sem_handle);
	  CKRET (rc);
	} while (thr->thr_status == WAITSEM);
#else      
      thread_queue_to (&sem->sem_waiting, thr);
      _thread_num_wait++;
      thr->thr_status = WAITSEM;
      do 
	{
	  rc = pthread_cond_wait ((pthread_cond_t *) sem->sem_cv, (pthread_mutex_t*) sem->sem_handle);
	  CKRET (rc);
	}
      while (sem->sem_n_signalled == sem->sem_last_signalled); 
      sem->sem_n_signalled --; /* this one is signalled */
      sem->sem_last_signalled = sem->sem_n_signalled;
      thr->thr_status = RUNNING;
      thread_queue_remove (&sem->sem_waiting, thr);
      if (sem->sem_n_signalled < 0) GPF_T1 ("The semaphore counter went wrong");
#endif
    }

  pthread_mutex_unlock ((pthread_mutex_t*) sem->sem_handle);

  return 0;

failed:
  GPF_T1 ("semaphore_enter() failed");
  return -1;
}
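semaphore_enter and semaphore_leave together form a counting semaphore built from a pthread mutex plus condition-variable waits. A minimal sketch of the same idea with one shared condition variable and none of the wait-queue bookkeeping; csem_t and its functions are illustrative names, not the original API:

#include <pthread.h>

typedef struct csem_s
{
  pthread_mutex_t cs_mtx;
  pthread_cond_t  cs_cv;
  int             cs_count;   /* number of free entries */
} csem_t;

static void
csem_enter (csem_t * cs)
{
  pthread_mutex_lock (&cs->cs_mtx);
  while (0 == cs->cs_count)                 /* wait until somebody leaves */
    pthread_cond_wait (&cs->cs_cv, &cs->cs_mtx);
  cs->cs_count--;
  pthread_mutex_unlock (&cs->cs_mtx);
}

static void
csem_leave (csem_t * cs)
{
  pthread_mutex_lock (&cs->cs_mtx);
  if (0 == cs->cs_count++)                  /* wake one waiter when going from 0 to 1 */
    pthread_cond_signal (&cs->cs_cv);
  pthread_mutex_unlock (&cs->cs_mtx);
}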
Example #7
0
dp_addr_t
ext_get_dp (extent_t * ext, dp_addr_t near)
{
  int word, bit;
  if (EXT_FULL & ext->ext_flags)
    return 0;
  if (near)
    {
      if (!DP_IN_EXTENT (near, ext))
	GPF_T1 ("near outside of extent");
      word = (near - ext->ext_dp) / BITS_IN_LONG;
      if (ext->ext_pages[word] != 0xffffffff)
	{
	  bit = word_free_bit (ext->ext_pages[word]);
	  goto bit_found;
	}
    }
  for (word = 0; word < EXTENT_SZ / BITS_IN_LONG; word++)
    {
      if (ext->ext_pages[word] != 0xffffffff)
	{
	  bit = word_free_bit (ext->ext_pages[word]);
	  goto bit_found;
	}
    }
  ext->ext_flags |= EXT_FULL;
  return 0;
 bit_found:
  ext->ext_pages[word] |= 1 << bit;
  return ext->ext_dp + (word * BITS_IN_LONG) + bit;
}
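ext_get_dp relies on word_free_bit to pick a clear bit out of a 32-bit allocation word. A minimal stand-alone sketch of such a scan (the real helper may be implemented differently, e.g. with bit tricks):

#include <stdint.h>

/* Return the index of the lowest 0 bit in w, or -1 if all 32 bits are set. */
static int
lowest_free_bit (uint32_t w)
{
  int bit;
  if (w == 0xffffffff)
    return -1;
  for (bit = 0; bit < 32; bit++)
    if (0 == (w & (1u << bit)))
      return bit;
  return -1;
}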
caddr_t
t_box_utf8_as_wide_char (ccaddr_t _utf8, caddr_t _wide_dest, size_t utf8_len, size_t max_wide_len, dtp_t dtp)
{
  unsigned char *utf8 = (unsigned char *) _utf8;
  unsigned char *utf8work;
  size_t wide_len;
  virt_mbstate_t state;
  caddr_t dest;

  utf8work = utf8;
  memset (&state, 0, sizeof (virt_mbstate_t));
  wide_len = virt_mbsnrtowcs (NULL, &utf8work, utf8_len, 0, &state);
  if (((long) wide_len) < 0)
    return _wide_dest ? ((caddr_t) wide_len) : NULL;
  if (max_wide_len && max_wide_len < wide_len)
    wide_len = max_wide_len;
  if (_wide_dest)
    dest = _wide_dest;
  else
    dest = t_alloc_box ((int) (wide_len  + 1) * sizeof (wchar_t), dtp);

  utf8work = utf8;
  memset (&state, 0, sizeof (virt_mbstate_t));
  if (wide_len != virt_mbsnrtowcs ((wchar_t *) dest, &utf8work, utf8_len, wide_len, &state))
    GPF_T1("non consistent multi-byte to wide char translation of a buffer");

  ((wchar_t *)dest)[wide_len] = L'\0';
  if (_wide_dest)
    return ((caddr_t)wide_len);
  else
    return dest;
}
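t_box_utf8_as_wide_char uses the usual two-pass conversion: a first pass with a NULL destination to measure the wide length, then an allocation of wide_len + 1 wide characters and a second pass to fill and NUL-terminate the result. A minimal sketch of the same pattern with the standard mbsrtowcs and malloc instead of the Virtuoso box allocator; it assumes the current locale uses UTF-8:

#include <stdlib.h>
#include <string.h>
#include <wchar.h>

/* Convert a NUL-terminated multi-byte string to a freshly malloc'ed
 * wide string, or return NULL on invalid input.  Caller frees. */
static wchar_t *
mbs_to_wide_copy (const char *src)
{
  mbstate_t state;
  const char *work = src;
  size_t wide_len;
  wchar_t *dest;

  memset (&state, 0, sizeof (state));
  wide_len = mbsrtowcs (NULL, &work, 0, &state);     /* first pass: measure */
  if ((size_t) -1 == wide_len)
    return NULL;

  dest = (wchar_t *) malloc ((wide_len + 1) * sizeof (wchar_t));
  if (!dest)
    return NULL;

  work = src;
  memset (&state, 0, sizeof (state));
  if (wide_len != mbsrtowcs (dest, &work, wide_len + 1, &state))  /* second pass: convert */
    {
      free (dest);
      return NULL;
    }
  dest[wide_len] = L'\0';
  return dest;
}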
Example #9
0
int
main_the_rest (void)
{
  while (1)
    {
      main_thread_ready = 1;
      semaphore_enter (background_sem);
      if (db_shutdown)
	{
	  sf_shutdown (NULL, NULL);
	}
      else
	{
	  if (main_continuation_reason == MAIN_CONTINUE_ON_SCHEDULER &&
	      cfg_scheduler_period > 0)
	    {
	      sched_do_round ();
	    }
	  else if (cfg_autocheckpoint)
	    {
	      sf_make_auto_cp ();	/* Use one and the same old log file. */
	    }
	  else
	    /* Should not happen! */
	    {
	      GPF_T1 ("Initial thread continued, "
		  "although autocheckpointing is not used.");
	    }
	  main_continuation_reason = MAIN_CONTINUE_ON_CHECKPOINT;
	}
    }
  return 0;
}
Example #10
0
int
cmp_dv_box (caddr_t dv, caddr_t box)
{
  NUMERIC_VAR (dn1);
  NUMERIC_VAR (dn2);
  dtp_t dtp1 = dv_ext_to_num ((db_buf_t) dv, (caddr_t) & dn1);
  dtp_t dtp2;
  dtp_t res_dtp;

  NUM_TO_MEM (dn2, dtp2, box);

  if (dtp1 == DV_DB_NULL || dtp2 == DV_DB_NULL)
    GPF_T1 ("not supposed to be null in comparison");

  if (n_coerce ((caddr_t) & dn1, (caddr_t) & dn2, dtp1, dtp2, &res_dtp))
    {
      switch (res_dtp)
	{
	case DV_LONG_INT:
	  return (NUM_COMPARE (*(boxint *) &dn1, *(boxint *) &dn2));
	case DV_SINGLE_FLOAT:
	  return cmp_double (*(float *) &dn1, *(float *) &dn2, FLT_EPSILON);
	case DV_DOUBLE_FLOAT:
	  return cmp_double (*(double *) &dn1, *(double *) &dn2, DBL_EPSILON);
	case DV_NUMERIC:
	  return (numeric_compare_dvc ((numeric_t) &dn1, (numeric_t) &dn2));
	}
    }
  else
    sqlr_new_error ("22003", "SR082", "Non numeric comparison");
  return 0;
}
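cmp_dv_box delegates float and double comparison to cmp_double with FLT_EPSILON or DBL_EPSILON as the tolerance. A minimal sketch of the relative-epsilon three-way comparison such a helper performs, returning -1/0/1 rather than the DVC_* codes (the real cmp_double may differ):

#include <math.h>

/* Three-way compare with a relative tolerance eps (e.g. DBL_EPSILON). */
static int
cmp_double_eps (double d1, double d2, double eps)
{
  double diff = d1 - d2;
  double scale = fabs (d1) > fabs (d2) ? fabs (d1) : fabs (d2);
  if (fabs (diff) <= eps * scale)
    return 0;                  /* equal within tolerance */
  return diff < 0 ? -1 : 1;
}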
Example #11
0
void
buf_cancel_write (buffer_desc_t * buf)
{
  /* Remove from the write queue, mostly as a result of a dirty buffer going empty.
   * Note that the buf may simultaneously be cancelled on one thread and removed from the write queue by the writer thread:
   * if the buffer is occupied when its write turn comes, the write is skipped and the buffer is removed from the queue.
   * Thus the bd_iq of an occupied buffer can be asynchronously reset by another thread. */
  io_queue_t * iq = buf->bd_iq;
  if (buf->bd_tree)
    {
      ASSERT_OUTSIDE_MAP (buf->bd_tree, buf->bd_page);
    }

  /* Note that this can block waiting for IQ which is owned by another
  thread in iq_schedule. The thread in iq_schedule can block on this
  same page map when leaving the buffer after queue insertion, hence
  deadlocking.  Hence this function's caller is required to own the
  buffer but not to be in its tree's map when calling. */

  if (!buf->bd_is_write)
    GPF_T1 ("write cancel when nobody inside buffer");
  if (iq)
    {
      IN_IOQ (iq);
      if (buf->bd_iq == iq)
	{
	  mti_writes_queued--;
	  rdbg_printf (("Write cancel L=%d P=%d \n", buf->bd_page, buf->bd_physical_page));
	  L2_DELETE (iq->iq_first, iq->iq_last, buf, bd_iq_);
	  TC (tc_write_cancel);
	  buf->bd_iq = NULL;
	}
      LEAVE_IOQ (iq);
    }
}
Example #12
0
void
dbs_extent_free (dbe_storage_t * dbs, dp_addr_t ext_dp, int must_be_in_em)
{
  extent_map_t * em;
  extent_t * ext;
  int word, bit;
  uint32 * arr, page_no;
  ASSERT_IN_DBS (dbs);
  dbs_locate_ext_bit (dbs, ext_dp, &arr, &page_no, &word, &bit);
  if (0 == (arr[word] & 1 << bit))
    GPF_T1 ("double free in ext set");
  page_set_update_checksum (arr, word, bit);
  arr[word] &= ~(1 << bit);
  em = DBS_DP_TO_EM (dbs, ext_dp);
  if (em)
    {
      ASSERT_IN_MTX (em->em_mtx);
      ext = EM_DP_TO_EXT (em, ext_dp);
      if (ext)
	{
	  remhash (DP_ADDR2VOID (ext_dp), em->em_dp_to_ext);
	  switch (EXT_TYPE (ext))
	    {
	    case EXT_INDEX:
	      em->em_n_pages -= EXTENT_SZ;
	      em->em_n_free_pages -= EXTENT_SZ;
	      break;
	    case EXT_REMAP:
	      em->em_n_remap_pages -= EXTENT_SZ;
	      em->em_n_free_remap_pages -= EXTENT_SZ;
	      break;
	    case EXT_BLOB:
	      em->em_n_blob_pages -= EXTENT_SZ;
	      em->em_n_free_blob_pages -= EXTENT_SZ;
	      break;
	    }
	  ext->ext_flags = EXT_FREE;
	}
      if (ext == em->em_last_remap_ext)
	em->em_last_remap_ext = NULL;
      if (ext == em->em_last_blob_ext)
	em->em_last_blob_ext = NULL;
      remhash (DP_ADDR2VOID (ext_dp), dbs->dbs_dp_to_extent_map);
    }
  else if (must_be_in_em)
    GPF_T1 ("cannot free ext that is not part of any em");
}
Example #13
0
void
iq_read_merge (struct aiocb ** list, int n, char * temp)
{
  int inx, inx2, bytes;
  int fd = list[0]->aio_fildes;
  OFF_T first_offset = list[0]->aio_offset;
  OFF_T last_planned = first_offset, seek;
  for (inx = 1; inx < n; inx++)
    {
      if (list[inx]->aio_fildes != fd
	  || list[inx]->aio_lio_opcode != LIO_READ)
	break;
      if (list[inx]->aio_offset - last_planned > 2 * PAGE_SZ
	  || list[inx]->aio_offset - first_offset > (MAX_MERGE - 1) * PAGE_SZ)
	break;
      list[inx]->__error_code = -1;
      last_planned = list[inx]->aio_offset;
    }
  seek = LSEEK (fd, first_offset, SEEK_SET);
  if (seek != first_offset)
    GPF_T1 ("bad return from lseek");
  if (first_offset == last_planned)
    {
      bytes = read (fd, list[0]->aio_buf, PAGE_SZ);
      if (bytes != PAGE_SZ)
	GPF_T1 ("bad no of bytes from read");
      list[0]->__return_value = bytes;
      list[0]->__error_code = 0;
      return;
    }
  list[0]->__error_code = -1;
  tc_merge_reads++;
  tc_merge_read_pages += ((last_planned - first_offset) + PAGE_SZ) / PAGE_SZ;
  bytes = read (fd, temp,  (last_planned - first_offset) + PAGE_SZ);
  if (bytes != (last_planned - first_offset) + PAGE_SZ)
    GPF_T1 ("bad no of bytes returned for merged read");
  for (inx2 = 0; inx2 <= MIN (inx, n - 1); inx2++)
    {
      if (-1 == list[inx2]->__error_code)
	{
	  memcpy (list[inx2]->aio_buf, temp + (list[inx2]->aio_offset - first_offset), PAGE_SZ);
	  list[inx2]->__return_value = PAGE_SZ;
	  list[inx2]->__error_code = 0;
	}
    }
}
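iq_read_merge folds neighbouring reads on the same file descriptor into one large read when the gap to the previous read is at most two pages and the whole merged span stays under MAX_MERGE pages. A minimal sketch of just that merge test; the PAGE_SZ and MAX_MERGE values here are illustrative constants, not the real configuration:

#include <sys/types.h>

#define PAGE_SZ   8192        /* hypothetical page size */
#define MAX_MERGE 8           /* hypothetical max pages in one merged read */

/* Can a read at offset 'next' be folded into a merged read that started at
 * 'first' and currently ends at 'last'?  Mirrors the conditions above. */
static int
can_merge_read (off_t first, off_t last, off_t next)
{
  if (next - last > 2 * PAGE_SZ)                  /* hole between pages too large */
    return 0;
  if (next - first > (MAX_MERGE - 1) * PAGE_SZ)   /* merged span would be too long */
    return 0;
  return 1;
}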
Example #14
0
void
qst_set_long (caddr_t * state, state_slot_t * sl, boxint lv)
{
#ifdef QST_DEBUG
  if (sl->ssl_index < QI_FIRST_FREE)
    GPF_T1 ("Invalid SSL in qst_set");
  else if (sl->ssl_type == SSL_CONSTANT)
    GPF_T1 ("Invalid constant SSL in qst_set");
  else
    {
#endif
  caddr_t *place = IS_SSL_REF_PARAMETER (sl->ssl_type)
    ? (caddr_t *) state[sl->ssl_index]
    : (caddr_t *) & state[sl->ssl_index];
  caddr_t old = *place;
  if (IS_BOX_POINTER (old))
    {
      if (DV_LONG_INT == box_tag (old))
	{
	  *(boxint *) old = lv;
	}
      else
	{
	  ssl_free_data (sl, *place);
	  *place = box_num (lv);
	}
    }
  else
    {
      if (IS_BOXINT_POINTER (lv))
	*place = box_num (lv);
      else
	*(ptrlong *) place = lv;
    }
#ifdef QST_DEBUG
    }
#endif
}
Example #15
0
void
dbs_locate_ext_bit (dbe_storage_t* dbs, dp_addr_t near_dp,
    uint32 **array, dp_addr_t *page_no, int *inx, int *bit)
{
  dp_addr_t near_page;
  dp_addr_t n;
  buffer_desc_t* free_set = dbs->dbs_extent_set;
  if (near_dp % EXTENT_SZ)
    GPF_T1 ("when locating extent bit, must have a dp that is at extent boundary");
  near_dp /= EXTENT_SZ;
  near_page = near_dp / BITS_ON_PAGE;

  *page_no = near_page;
  for (n = 0; n < near_page; n++)
    {
      if (!free_set->bd_next)
	GPF_T1 ("extent set too short");
      free_set = free_set->bd_next;
    }
  page_set_check (free_set->bd_buffer + DP_DATA);
  *array = (dp_addr_t *) (free_set->bd_buffer + DP_DATA);
  *inx = (int) ((near_dp % BITS_ON_PAGE) / BITS_IN_LONG);
  *bit = (int) ((near_dp % BITS_ON_PAGE) % BITS_IN_LONG);
}
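The address arithmetic in dbs_locate_ext_bit splits an extent number into the bitmap page that holds its bit, the 32-bit word inside that page, and the bit inside the word. A minimal sketch of the same split with illustrative BITS_ON_PAGE and BITS_IN_LONG values:

#define BITS_IN_LONG 32            /* hypothetical: bits per allocation word */
#define BITS_ON_PAGE (8192 * 8)    /* hypothetical: bits held by one bitmap page */

/* Split a bit number into bitmap page, word inside the page and bit inside the word. */
static void
locate_bit (unsigned long bit_no, unsigned long *page, int *word, int *bit)
{
  *page = bit_no / BITS_ON_PAGE;
  *word = (int) ((bit_no % BITS_ON_PAGE) / BITS_IN_LONG);
  *bit  = (int) ((bit_no % BITS_ON_PAGE) % BITS_IN_LONG);
}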
Example #16
0
void
it_free_page (index_tree_t * it, buffer_desc_t * buf)
{
  short l;
  it_map_t * itm;
  dp_addr_t remap;
  ASSERT_IN_MAP (buf->bd_tree, buf->bd_page);
  itm = IT_DP_MAP (buf->bd_tree, buf->bd_page);
  remap = (dp_addr_t) (ptrlong) gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap);
  if (!buf->bd_is_write)
    GPF_T1 ("isp_free_page without write access to buffer.");
  dp_may_compact (buf->bd_storage, buf->bd_page); /* no need to keep deld buffers in checked for compact list */
  l=SHORT_REF (buf->bd_buffer + DP_FLAGS);
  if (!(l == DPF_BLOB || l == DPF_BLOB_DIR)
      && !remap)
    GPF_T1 ("Freeing a page that is not remapped");
  if (DPF_INDEX == l)
    it->it_n_index_est--;
  else
    it->it_n_blob_est--;
  if (buf->bd_page != buf->bd_physical_page && (DPF_BLOB_DIR == l || DPF_BLOB == l))
    GPF_T1 ("blob is not supposed to be remapped");
  DBG_PT_PRINTF (("    Delete %ld remap %ld FL=%d buf=%p\n", buf->bd_page, buf->bd_physical_page, l, buf));
  if (buf->bd_iq)
    {
      mutex_leave (&itm->itm_mtx);
      buf_cancel_write (buf);
      mutex_enter (&itm->itm_mtx);
    }

  if (!remap)
    {
      /* a blob in checkpoint space can be deleted without a remap existing in commit space. */
      if (DPF_BLOB != l && DPF_BLOB_DIR != l )
	GPF_T1 ("not supposed to delete a buffer in a different space unless it's a blob");
      if (buf->bd_is_dirty)
	GPF_T1 ("blob in checkpoint space can't be dirty - has no remap, in commit, hence is in checkpoint");
      sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap, (void*) (ptrlong) DP_DELETED);
      remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_dp_to_buf);
      page_leave_as_deleted (buf);
      return;
    }
  if (IS_NEW_BUFFER (buf))
    /* if this was CREATED AND DELETED without intervening checkpoint the delete
     * does not carry outside the commit space. */
    remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap);
  else
    sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap, (void *) (ptrlong) DP_DELETED);
  if (!remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_dp_to_buf))
    GPF_T1 ("it_free_page does not hit the buffer in tree cache");

  it_free_remap (it, buf->bd_page, buf->bd_physical_page, l);
  page_leave_as_deleted (buf);
}
Example #17
0
static caddr_t
bif_mts_all_info(caddr_t* qst, caddr_t * err_ret, state_slot_t ** args)
{
  query_instance_t* qi = (query_instance_t*)qst;
  client_connection_t* cli = qi->qi_client;
  dbg_printf(("******** cli %x tr %x st %d nthr %d tr2 %x\n",
	 cli,
	 cli->cli_trx,
	 cli->cli_trx->lt_status,
	 cli->cli_trx->lt_threads,
	 qi->qi_trx));
#if 0
  if (!cli->cli_tp_data)
    GPF_T1("should in distributed transaction\n");
#endif
  return NEW_DB_NULL;
}
Example #18
0
int
mutex_enter (dk_mutex_t *mtx)
{
#ifndef MTX_DEBUG
  return semaphore_enter (mtx->mtx_handle);
#else
  semaphore_t *sem = (semaphore_t *) mtx->mtx_handle;
#ifdef MALLOC_DEBUG
  if (_current_fiber == NULL)
    {
      assert (mtx == _dbgmal_mtx);
      return semaphore_enter (sem);
    }
#endif
  assert (_current_fiber != NULL);
  if (sem->sem_entry_count)
    {
      assert (sem->sem_entry_count == 1);
      assert (mtx->mtx_owner == NULL);
      sem->sem_entry_count--;
    }
  else
    {
      assert (mtx->mtx_owner != _current_fiber);
      thread_queue_to (&sem->sem_waiting, _current_fiber);
      _fiber_status (_current_fiber, WAITSEM);
      _fiber_schedule_next ();
      assert (sem->sem_entry_count == 0);
    }
  assert (mtx->mtx_owner == NULL);
  if (mtx->mtx_entry_check
      && !mtx->mtx_entry_check (mtx, THREAD_CURRENT_THREAD, mtx->mtx_entry_check_cd))
    GPF_T1 ("Mtx entry check fail");

  mtx->mtx_owner = _current_fiber;
  mtx->mtx_entry_file = (char *) file;
  mtx->mtx_entry_line = line;

  return 0;
#endif
}
int
thread_release_dead_threads (int leave_count)
{
  thread_t *thr;
  int rc;
  long thread_killed = 0;
  thread_queue_t term;

  Q_LOCK ();
  if (_deadq.thq_count <= leave_count)
    {
      Q_UNLOCK ();
      return 0;
    }
  thread_queue_init (&term);
  while (_deadq.thq_count > leave_count)
    {
      thr = thread_queue_from (&_deadq);
      if (!thr)
	break;
      _thread_num_dead--;
      thread_queue_to (&term, thr);
    }
  Q_UNLOCK ();

  while (NULL != (thr = thread_queue_from (&term)))
    {
      thr->thr_status = TERMINATE;
      rc = pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
      CKRET (rc);
      thread_killed++;
    }
#if 0
  if (thread_killed)
    log_info ("%ld OS threads released", thread_killed);
#endif
  return thread_killed;
failed:
  GPF_T1("Thread restart failed");
  return 0;
}
Example #20
0
/* INFO: toplevel udt_clr_instantiate_class */
int
create_instance (caddr_t *type_vec, int n_args, long _mode, caddr_t asm_name, caddr_t type,
    void * udt)
{
  MonoArray *v_args, *i_array = NULL, *o_array = NULL;
  MonoObject *mono_list;
  int len, ret = 0;
  MonoDomain *domain = virtuoso_domain;

  get_mono_thread ();

  v_args = MAKE_PARAM_ARRAY (domain, 5);

  if (param_to_mono_array (type_vec, n_args, &i_array, &o_array))
    sqlr_new_error ("22023", "MN010", "Can't convert parameters");

  SET_INT_ARG (domain, v_args, 0, _mode);
  SET_STRING_ARG (domain, v_args, 1, asm_name);
  SET_STRING_ARG (domain, v_args, 2, type);

  mono_array_set (v_args, gpointer, 3, i_array);
  mono_array_set (v_args, gpointer, 4, o_array);

  mono_list = call_mono (VIRTCLR_NAME, "VInvoke:create_ins_asm", v_args, domain);
  len = mono_array_length ((MonoArray*)mono_list);
  if (len == 2)
    {
      caddr_t aret = sa_to_dk ((MonoArray *) mono_list, 0, 1, udt);
      ret = unbox (aret);
      dk_free_box (aret);
    }
  else
    GPF_T1 ("create_instance");

  return ret;
}
Example #21
0
caddr_t
dotnet_get_instance_name (int instance)
{
  MonoArray *v_args;
  MonoObject *mono_ret;
  caddr_t ret = NULL;
  MonoDomain *domain = virtuoso_domain;
  char *utf8;

  get_mono_thread ();
  v_args = MAKE_PARAM_ARRAY (domain, 1);

  SET_INT_ARG (domain, v_args, 0, instance);

  mono_ret = call_mono (VIRTCLR_NAME, "VInvoke:get_instance_name", v_args, domain);

  if (!mono_ret || !mono_object_isinst (mono_ret, mono_get_string_class ()))
    GPF_T1 ("not a string in dotnet_get_instance_name");
  utf8 = mono_string_to_utf8 ((MonoString *)mono_ret);
  ret = box_dv_short_string (utf8);
  g_free (utf8);

  return ret;
}
Example #22
0
dk_set_t
bh_string_list_w (/* this was before 3.0: index_space_t * isp,*/ lock_trx_t * lt, blob_handle_t * bh, long get_chars, int omit, long blob_type)
{
  /* take current page at current place and make string of
     n bytes from the place and write to client */
  caddr_t page_string;
  dk_set_t string_list = NULL;
  dp_addr_t start = bh->bh_current_page;
  buffer_desc_t *buf = NULL;
  long from_char = bh->bh_position;
  long chars_filled = 0, chars_on_page;
  virt_mbstate_t state;
  wchar_t wpage[PAGE_SZ];
#if 0 /* this was */
  it_cursor_t *tmp_itc = itc_create (isp, lt);
#else
  it_cursor_t *tmp_itc = itc_create (NULL, lt);
  itc_from_it (tmp_itc, bh->bh_it);
#endif

  while (start)
    {
      long char_len, byte_len, next;
      unsigned char *mbc;
      uint32 timestamp;
      int type;

      memset (&state, 0, sizeof (state));
      if (!page_wait_blob_access (tmp_itc, start, &buf, PA_READ, bh, 1))
	break;

      type = SHORT_REF (buf->bd_buffer + DP_FLAGS);
      timestamp = LONG_REF (buf->bd_buffer + DP_BLOB_TS);

      if ((DPF_BLOB != type) &&
	  (DPF_BLOB_DIR != type))
	{
	  page_leave_outside_map (buf);
	  dbg_printf (("wrong blob type\n"));
	  return 0;
	}

      if ((bh->bh_timestamp != BH_ANY) && (timestamp != bh->bh_timestamp))
	{
	  page_leave_outside_map (buf);
	  return BH_DIRTYREAD;
	}

      byte_len = LONG_REF (buf->bd_buffer + DP_BLOB_LEN);
      mbc = buf->bd_buffer + DP_DATA;
      char_len = (long) virt_mbsnrtowcs (wpage, &mbc, byte_len, PAGE_DATA_SZ, &state);
      if (char_len < 0)
	GPF_T1 ("bad UTF8 data in wide blob page");
      chars_on_page = MIN (char_len - from_char, get_chars);
      if (chars_on_page)
	{
	  /* dbg_printf (("Read blob page %ld, %ld bytes.\n", start,
		bytes_on_page)); */
	  if (!omit)
	    {
	      if (DK_MEM_RESERVE)
		{
		  SET_DK_MEM_RESERVE_STATE (lt);
		  itc_bust_this_trx (tmp_itc, &buf, ITC_BUST_THROW);
		}
	      page_string = dk_alloc_box ((chars_on_page + 1) * sizeof(wchar_t), DV_WIDE);
	      memcpy (page_string, wpage + from_char,
		  chars_on_page * sizeof (wchar_t));
	      ((wchar_t *)page_string)[chars_on_page] = 0;
	      dk_set_push (&string_list, page_string);
	    }
	  chars_filled += chars_on_page;
	  get_chars -= chars_on_page;
	  from_char += chars_on_page;
	}
      next = LONG_REF (buf->bd_buffer + DP_OVERFLOW);
      page_leave_outside_map (buf);
      if (0 == get_chars)
	{
	  bh->bh_position = from_char;
	  break;
	}
      bh->bh_current_page = next;
      bh->bh_position = 0;
      from_char = 0;
      start = next;
    }
  itc_free (tmp_itc);
  return (dk_set_nreverse (string_list));
}
Example #23
0
int
dbs_file_extend (dbe_storage_t * dbs, extent_t ** new_ext_ret, int is_in_sys_em)
{
  extent_map_t * em;
  extent_t * new_ext = NULL;
  int n, n_allocated = 0;
  int32 em_n_free;
  dp_addr_t ext_first = dbs->dbs_n_pages;
  ASSERT_IN_DBS (dbs);
  if (dbf_no_disk)
    return 0;
  if (dbs->dbs_disks)
    {
      n = dbs_seg_extend (dbs, EXTENT_SZ);
      if (n != EXTENT_SZ)
	return 0;
    }
  else
    {
      mutex_enter (dbs->dbs_file_mtx);
      n = fd_extend (dbs, dbs->dbs_fd, EXTENT_SZ);
      mutex_leave (dbs->dbs_file_mtx);
      if (EXTENT_SZ != n)
	return 0;
      dbs->dbs_file_length += PAGE_SZ * EXTENT_SZ;
      dbs->dbs_n_pages+= EXTENT_SZ;
      dbs->dbs_n_free_pages+= EXTENT_SZ;
    }
  wi_storage_offsets ();
  em = dbs->dbs_extent_map;
  if (!em)
    {
      return n;
    }
  if (!is_in_sys_em)
    mutex_enter (em->em_mtx);

  if (dbs_check_extent_free_pages)
    {
      em_n_free = em_free_count (em, EXT_INDEX);
      if (em->em_n_free_pages != em_n_free)
	{
	  log_error ("The %s free pages incorrect %d != %d actually free", em->em_name, em->em_n_free_pages, em_n_free);
	  em->em_n_free_pages = em_n_free;
	}
    }

  if (em->em_n_free_pages < 16)
    {
      /* extending and the system extent has little space.  Make this ext a system index ext.  If allocating some other ext, retry and take the next ext for that.  */
      int fill;
      buffer_desc_t * last;
      last = page_set_last (em->em_buf);
      fill = LONG_REF (last->bd_buffer + DP_BLOB_LEN);
      if (fill + sizeof (extent_t) > PAGE_DATA_SZ)
	{
	  dp_addr_t em_dp = ext_first + n_allocated;
	  n_allocated++;
	  last = page_set_extend (dbs, &em->em_buf, em_dp, DPF_EXTENT_MAP);
	  LONG_SET (last->bd_buffer + DP_BLOB_LEN, sizeof (extent_t));
	  new_ext = (extent_t *) (last->bd_buffer + DP_DATA);
	}
      else
	{
	  new_ext = (extent_t*) (last->bd_buffer + fill + DP_DATA);
	  LONG_SET (last->bd_buffer + DP_BLOB_LEN, fill + sizeof (extent_t));
	}
      em->em_n_pages += EXTENT_SZ;
      em->em_n_free_pages += EXTENT_SZ;
      new_ext->ext_flags = EXT_INDEX;
      new_ext->ext_dp = ext_first;
      if (gethash (DP_ADDR2VOID (new_ext->ext_dp), dbs->dbs_dp_to_extent_map))
	GPF_T1 ("ext for new dp range already exists in dbs");
      sethash (DP_ADDR2VOID (new_ext->ext_dp), em->em_dp_to_ext, (void*)new_ext);
      sethash (DP_ADDR2VOID (new_ext->ext_dp), dbs->dbs_dp_to_extent_map, (void*)em);
      new_ext->ext_prev = EXT_EXTENDS_NONE;
      if (n_allocated)
	{
	  new_ext->ext_pages[0] = 1;
	  em->em_n_free_pages--;
	}
    }
  /* there is a guarantee of at least 16 pages in the dbs sys extent map */
  if (dbs->dbs_n_pages > dbs->dbs_n_pages_in_sets)
    {
      /* add a page of global free set and backup set */
      buffer_desc_t * last = page_set_extend (dbs, &dbs->dbs_free_set, 0, DPF_FREE_SET);
      page_set_checksum_init (last->bd_buffer + DP_DATA);
      if (n_allocated)
	dbs_page_allocated (dbs, ext_first);
      last->bd_page = last->bd_physical_page = em_try_get_dp (em, EXT_INDEX, DP_ANY);
      if (!last->bd_page) GPF_T1 ("0 dp for page set page");
      EM_DEC_FREE (em, EXT_INDEX);

      last = page_set_extend (dbs, &dbs->dbs_incbackup_set, 0, DPF_INCBACKUP_SET);
      page_set_checksum_init (last->bd_buffer + DP_DATA);
      last->bd_page = last->bd_physical_page = em_try_get_dp (em, EXT_INDEX, DP_ANY);
      if (!last->bd_page) GPF_T1 ("0 dp for page set page");
      EM_DEC_FREE (em, EXT_INDEX);
      dbs->dbs_n_pages_in_sets += BITS_ON_PAGE;
    }
  if (dbs->dbs_n_pages > dbs->dbs_n_pages_in_extent_set)
    {
      buffer_desc_t * last = page_set_extend (dbs, &dbs->dbs_extent_set, 0, DPF_EXTENT_SET);
      last->bd_page = last->bd_physical_page = em_try_get_dp (em, EXT_INDEX, DP_ANY);
      if (!last->bd_page) GPF_T1 ("0 dp for extents alloc page");
      EM_DEC_FREE (em, EXT_INDEX);
      LONG_SET (last->bd_buffer + DP_DATA, 1); /* the newly made ext is the 1st of this page of the ext set, so set the bm 1st bit to 1 */
      page_set_checksum_init (last->bd_buffer + DP_DATA);
      dbs->dbs_n_pages_in_extent_set += EXTENT_SZ * BITS_ON_PAGE;
    }
  if (new_ext)
    {
      dbs_extent_allocated (dbs, ext_first);
    }
  *new_ext_ret = new_ext;
  if (!is_in_sys_em)
    mutex_leave (em->em_mtx);
  return n;
}
int
mutex_enter (dk_mutex_t *mtx)
{
#ifdef MTX_DEBUG
  du_thread_t * self = thread_current ();
#endif
  int rc;

#ifdef MTX_DEBUG
  assert (mtx->mtx_owner !=  self || !self);
  if (mtx->mtx_entry_check
      && !mtx->mtx_entry_check (mtx, self, mtx->mtx_entry_check_cd))
    GPF_T1 ("Mtx entry check fail");
#endif
#ifdef MTX_METER
#if HAVE_SPINLOCK
  if (MUTEX_TYPE_SPIN == mtx->mtx_type)
    rc = pthread_spin_trylock (&mtx->l.spinl);
  else 
#endif
    rc = pthread_mutex_trylock ((pthread_mutex_t*) &mtx->mtx_mtx);
  if (TRYLOCK_SUCCESS != rc)
    {
      long long wait_ts = rdtsc ();
      static int unnamed_waits;
#if HAVE_SPINLOCK
      if (MUTEX_TYPE_SPIN == mtx->mtx_type)
	rc = pthread_spin_lock (&mtx->l.spinl);
      else
#endif
	rc = pthread_mutex_lock ((pthread_mutex_t*) &mtx->mtx_mtx);
      mtx->mtx_wait_clocks += rdtsc () - wait_ts;
      mtx->mtx_waits++;
      if (!mtx->mtx_name)
	unnamed_waits++; /*for dbg breakpoint */
      mtx->mtx_enters++;
    }
  else
    mtx->mtx_enters++;
#else
#if HAVE_SPINLOCK
  if (MUTEX_TYPE_SPIN == mtx->mtx_type)
    rc = pthread_spin_lock (&mtx->l.spinl);
  else
#endif
    rc = pthread_mutex_lock ((pthread_mutex_t*) &mtx->mtx_mtx);
#endif
  CKRET (rc);
#ifdef MTX_DEBUG
  assert (mtx->mtx_owner == NULL);
  mtx->mtx_owner = self;
  mtx->mtx_entry_file = (char *) file;
  mtx->mtx_entry_line = line;
#endif
  return 0;

failed:
  GPF_T1 ("mutex_enter() failed");
  return -1;
}
Example #25
0
int
DBGP_NAME (page_wait_access) (DBGP_PARAMS it_cursor_t * itc, dp_addr_t dp,  buffer_desc_t * buf_from,
    buffer_desc_t ** buf_ret, int mode, int max_change)
{
  buffer_desc_t decoy;
  buffer_desc_t *buf;
  dp_addr_t phys_dp;
  itc->itc_to_reset = RWG_NO_WAIT;
  itc->itc_max_transit_change = max_change;
  itc->itc_must_kill_trx = 0;
  if (!dp)
    GPF_T1 ("Zero DP in page_fault_map_sem");

  if (buf_from)
    {
      ITC_IN_TRANSIT (itc, dp, buf_from->bd_page);
    }
  else
    ASSERT_IN_MAP (itc->itc_tree, dp);

  buf = IT_DP_TO_BUF (itc->itc_tree, dp);
  if (!buf)
    {
      ra_req_t * ra = NULL;
      IT_DP_REMAP (itc->itc_tree, dp, phys_dp);
#ifdef MTX_DEBUG
      em_check_dp (itc->itc_tree->it_extent_map, phys_dp);
      if (phys_dp != dp)
	em_check_dp (itc->itc_tree->it_extent_map, dp);
#endif
      if ((DP_DELETED == phys_dp || dbs_is_free_page (itc->itc_tree->it_storage, phys_dp))
	  && !strchr (wi_inst.wi_open_mode, 'a'))
	{
	  log_error ("Reference to page with free remap dp = %ld, remap = %ld",
		     (long) phys_dp, (long) dp);
	  if (0 && DBS_PAGE_IN_RANGE (itc->itc_tree->it_storage, phys_dp))
	    dbs_page_allocated (itc->itc_tree->it_storage, phys_dp);
	  else
	    {
	      *buf_ret = PF_OF_DELETED;
	      itc->itc_must_kill_trx = 1;
	      itc->itc_to_reset = RWG_WAIT_ANY;
	      ITC_LEAVE_MAPS (itc);
	      return RWG_WAIT_ANY;
	    }
	}
      memset (&decoy, 0, sizeof (buffer_desc_t));
      decoy.bd_being_read = 1;
      if (PA_READ == mode)
	decoy.bd_readers = 1;
      else
	BD_SET_IS_WRITE (&decoy, 1);
      sethash (DP_ADDR2VOID (dp), &IT_DP_MAP (itc->itc_tree, dp)->itm_dp_to_buf, (void*)&decoy);
      ITC_LEAVE_MAPS (itc);
      buf = bp_get_buffer (NULL, BP_BUF_REQUIRED);
      is_read_pending++;
      buf->bd_being_read = 1;
      buf->bd_page = dp;
      buf->bd_storage = itc->itc_tree->it_storage;
      buf->bd_physical_page = phys_dp;
      BD_SET_IS_WRITE (buf, 0);
      buf->bd_write_waiting = NULL;
      if (buf_from && !itc->itc_landed)
	ra = itc_read_aside (itc, buf_from, dp);
      itc->itc_n_reads++;
      ITC_MARK_READ (itc);
      buf->bd_tree = itc->itc_tree;
      buf_disk_read (buf);
      is_read_pending--;
      if (ra)
	itc_read_ahead_blob (itc, ra, RAB_SPECULATIVE);

      if (buf_from)
	{
	  ITC_IN_TRANSIT (itc, dp, buf_from->bd_page)
	    }
	  else
Example #26
0
void
iq_schedule (buffer_desc_t ** bufs, int n)
{
  int inx;
  int is_reads = 0;
  buf_sort (bufs, n, (sort_key_func_t) bd_phys_page_key);
  for (inx = 0; inx < n; inx++)
    {
      if (bufs[inx]->bd_iq)
	GPF_T1 ("buffer added to iq already has a bd_iq");
      bufs[inx]->bd_iq = db_io_queue (bufs[inx]->bd_storage, bufs[inx]->bd_physical_page);
    }
  DO_SET (io_queue_t *, iq, &mti_io_queues)
    {
      int n_added = 0;
      buffer_desc_t * ipoint;
      int was_empty;
      IN_IOQ (iq);
      inx = 0;
      ipoint  = iq->iq_first;
      was_empty = (iq->iq_first == NULL);

      while (inx < n)
	{
	  buffer_desc_t * buf = bufs[inx];
	  if (!buf || buf->bd_iq != iq)
	    {
	      inx++;
	      continue;
	    }
	  is_reads = buf->bd_being_read;
	  if (buf->bd_iq_next || buf->bd_iq_prev)
	    GPF_T1 ("can't schedule same buffer twice");
	  bufs[inx] = NULL;
	next_ipoint:
	  if (!ipoint)
	    {
	      L2_PUSH_LAST (iq->iq_first, iq->iq_last, buf, bd_iq_);
	      n_added++;
	      inx++;
	    }
	  else if (BUF_SORT_DP (ipoint) < BUF_SORT_DP (buf))
	    {
	      ipoint = ipoint->bd_iq_next;
	      goto next_ipoint;
	    }
	  else if (BUF_SORT_DP (ipoint) == BUF_SORT_DP (buf))
	    GPF_T1 ("the same buffer can't be scheduled twice for io");
	  else
	    {
	      L2_INSERT (iq->iq_first, iq->iq_last, ipoint, buf, bd_iq_);
	      n_added++;
	      inx++;
	    }
	  if (!buf->bd_being_read)
	    {
	      page_leave_outside_map (buf);
	    }
	}
      LEAVE_IOQ (iq);
      if (n_added && !is_reads)
        {
	  dbg_printf (("IQ %s %d %s added, %s.\n", IQ_NAME (iq),
		      n_added, is_reads ? "reads" : "writes",
		      was_empty ? "starting" : "running"));
	}
      if (n_added && was_empty)
	semaphore_leave (iq->iq_sem);

    }
  END_DO_SET ();
  if (n)
    {
      if (is_reads)
	mti_reads_queued += n;
      else
	mti_writes_queued += n;
    }
}
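iq_schedule keeps each IO queue ordered by physical page so that the IO thread sweeps the disk in one direction. A minimal sketch of the underlying sorted insert on a plain doubly linked list; dnode_t and its fields are illustrative names, not the L2_* macros used above:

typedef struct dnode_s
{
  unsigned long   dn_key;      /* e.g. physical page number */
  struct dnode_s *dn_prev;
  struct dnode_s *dn_next;
} dnode_t;

/* Insert nd into the list (*first, *last), keeping ascending dn_key order. */
static void
sorted_insert (dnode_t **first, dnode_t **last, dnode_t *nd)
{
  dnode_t *ipoint = *first;
  while (ipoint && ipoint->dn_key < nd->dn_key)
    ipoint = ipoint->dn_next;          /* find the first node with a key >= ours */
  nd->dn_next = ipoint;
  nd->dn_prev = ipoint ? ipoint->dn_prev : *last;
  if (nd->dn_prev)
    nd->dn_prev->dn_next = nd;
  else
    *first = nd;                       /* inserted at the head */
  if (ipoint)
    ipoint->dn_prev = nd;
  else
    *last = nd;                        /* inserted at the tail */
}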
Example #27
0
int
compare_utf8_with_collation (caddr_t dv1, long n1,
    caddr_t dv2, long n2, collation_t *collation)
{
  long inx1, inx2;

  wchar_t wtmp1, wtmp2;
  virt_mbstate_t state1, state2;
  int rc1, rc2;

  memset (&state1, 0, sizeof (virt_mbstate_t));
  memset (&state2, 0, sizeof (virt_mbstate_t));

  inx1 = inx2 = 0;
  if (collation)
    while(1)
      {
	if (inx1 == n1)
	  {
	    while (inx2 < n2)
	      { /* skip all ignorable rest chars */
		rc2 = (int) virt_mbrtowc (&wtmp2, (unsigned char *) (dv2 + inx2), n2 - inx2, &state2);
		if (rc2 <= 0)
		  GPF_T1 ("inconsistent wide char data");
		if (!((wchar_t *)collation->co_table)[wtmp2])
		  {
		    inx2+=rc2;
		    continue;
		  }
		else
		  break;
	      }

	    if (inx2 == n2)
	      return DVC_MATCH;
	    else
	      return DVC_LESS;
	  }
	if (inx2 == n2)
	  return DVC_GREATER;

	rc1 = (int) virt_mbrtowc (&wtmp1, (unsigned char *) (dv1 + inx1), n1 - inx1, &state1);
	if (rc1 <= 0)
	  GPF_T1 ("inconsistent wide char data");
	rc2 = (int) virt_mbrtowc (&wtmp2, (unsigned char *) (dv2 + inx2), n2 - inx2, &state2);
	if (rc2 <= 0)
	  GPF_T1 ("inconsistent wide char data");

	if (!((wchar_t *)collation->co_table)[wtmp1])
	  {
	    inx1+=rc1;
	    continue;
	  }
	if (!((wchar_t *)collation->co_table)[wtmp2])
	  {
	    inx2+=rc2;
	    continue;
	  }
	if (((wchar_t *)collation->co_table)[wtmp1] <
	    ((wchar_t *)collation->co_table)[wtmp2])
	  return DVC_LESS;
	if (((wchar_t *)collation->co_table)[wtmp1] >
	    ((wchar_t *)collation->co_table)[wtmp2])
	  return DVC_GREATER;
	inx1 += rc1;
	inx2 += rc2;
      }
  else
    while(1)
      {
	if (inx1 == n1)
	  {
	    if (inx2 == n2)
	      return DVC_MATCH;
	    else
	      return DVC_LESS;
	  }
	if (inx2 == n2)
	  return DVC_GREATER;

	rc1 = (int) virt_mbrtowc (&wtmp1, (unsigned char *) (dv1 + inx1), n1 - inx1, &state1);
	if (rc1 <= 0)
	  GPF_T1 ("inconsistent wide char data");
	rc2 = (int) virt_mbrtowc (&wtmp2, (unsigned char *) (dv2 + inx2), n2 - inx2, &state2);
	if (rc2 <= 0)
	  GPF_T1 ("inconsistent wide char data");
	if (wtmp1 < wtmp2)
	  return DVC_LESS;
	if (wtmp1 > wtmp2)
	  return DVC_GREATER;
	inx1 += rc1;
	inx2 += rc2;
      }
}
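Without a collation, the loop above is a plain codepoint-by-codepoint comparison of two UTF-8 buffers driven by a restartable decoder. A minimal sketch of that walk using the standard mbrtowc (assuming a UTF-8 locale) and returning -1/0/1 instead of DVC_LESS/DVC_MATCH/DVC_GREATER:

#include <string.h>
#include <wchar.h>

/* Compare two UTF-8 buffers codepoint by codepoint; assumes a UTF-8 locale. */
static int
utf8_cmp (const char *s1, size_t n1, const char *s2, size_t n2)
{
  mbstate_t st1, st2;
  size_t i1 = 0, i2 = 0;
  memset (&st1, 0, sizeof (st1));
  memset (&st2, 0, sizeof (st2));
  for (;;)
    {
      wchar_t w1, w2;
      size_t rc1, rc2;
      if (i1 == n1)
	return i2 == n2 ? 0 : -1;            /* s1 ended first, or both ended */
      if (i2 == n2)
	return 1;                            /* s2 ended first */
      rc1 = mbrtowc (&w1, s1 + i1, n1 - i1, &st1);
      rc2 = mbrtowc (&w2, s2 + i2, n2 - i2, &st2);
      if (rc1 == (size_t) -1 || rc1 == (size_t) -2
	  || rc2 == (size_t) -1 || rc2 == (size_t) -2)
	return 0;                            /* undecodable input: give up; the original calls GPF_T1 */
      if (0 == rc1)                          /* an embedded NUL decodes as one byte */
	rc1 = 1;
      if (0 == rc2)
	rc2 = 1;
      if (w1 != w2)
	return w1 < w2 ? -1 : 1;
      i1 += rc1;
      i2 += rc2;
    }
}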
Example #28
0
int
compare_wide_to_utf8_with_collation (wchar_t *wide_data, long wide_wcharcount, utf8char *utf8_data, long utf8_bytes,
    collation_t *collation)
{
  long winx, ninx;

  wchar_t wtmp;
  virt_mbstate_t state;
  int rc;

  memset (&state, 0, sizeof (virt_mbstate_t));

  ninx = winx = 0;
  if (collation)
    while(1)
      {
	if (ninx == utf8_bytes)
	  {
	    if (winx == wide_wcharcount)
	      return DVC_MATCH;
	    else
	      return DVC_LESS;
	  }
	if (winx == wide_wcharcount)
	  return DVC_GREATER;

	rc = (int) virt_mbrtowc (&wtmp, utf8_data + ninx, utf8_bytes - ninx, &state);
	if (rc <= 0)
	  GPF_T1 ("inconsistent wide char data");
	if (((wchar_t *)collation->co_table)[wtmp] <
	    ((wchar_t *)collation->co_table)[wide_data[winx]])
	  return DVC_LESS;
	if (((wchar_t *)collation->co_table)[wtmp] >
	    ((wchar_t *)collation->co_table)[wide_data[winx]])
	  return DVC_GREATER;
	winx++;
	ninx += rc;
      }
  else
    while(1)
      {
	if (ninx == utf8_bytes)
	  {
	    if (winx == wide_wcharcount)
	      return DVC_MATCH;
	    else
	      return DVC_LESS;
	  }
	if (winx == wide_wcharcount)
	  return DVC_GREATER;

	rc = (int) virt_mbrtowc (&wtmp, utf8_data + ninx, utf8_bytes - ninx, &state);
	if (rc <= 0)
	  GPF_T1 ("inconsistent wide char data");
	if (wtmp < wide_data[winx])
	  return DVC_LESS;
	if (wtmp > wide_data[winx])
	  return DVC_GREATER;
	winx++;
	ninx += rc;
      }
}
Example #29
0
dk_session_t *
bh_string_output_w (/* this was before 3.0: index_space_t * isp, */ lock_trx_t * lt, blob_handle_t * bh, int omit)
{
  /* take current page at current place and make string of
     n bytes from the place and write to client */
  dk_session_t *string_output = NULL;
  dp_addr_t start = bh->bh_current_page;
  buffer_desc_t *buf;
  long from_char = bh->bh_position;
  long chars_filled = 0, chars_on_page;
  virt_mbstate_t state;
  wchar_t wpage[PAGE_SZ];
#if 0 /* this was */
  it_cursor_t *tmp_itc = itc_create (isp, lt);
#else
  it_cursor_t *tmp_itc = itc_create (NULL, lt);
  itc_from_it (tmp_itc, bh->bh_it);
#endif

  while (start)
    {
      long char_len, byte_len, next;
      unsigned char *mbc;
      if (NULL == string_output)
	string_output = strses_allocate();
      memset (&state, 0, sizeof (state));
      ITC_IN_KNOWN_MAP (tmp_itc, start);
      page_wait_access (tmp_itc, start, NULL, &buf, PA_READ, RWG_WAIT_ANY);
      if (!buf || PF_OF_DELETED == buf)
	{
	  log_info ("Attempt to read deleted blob dp = %d start = %d.",
		    start, bh->bh_page);
	  break;
	}
      byte_len = LONG_REF (buf->bd_buffer + DP_BLOB_LEN);
      mbc = buf->bd_buffer + DP_DATA;
      char_len = (long) virt_mbsnrtowcs (wpage, &mbc, byte_len, PAGE_DATA_SZ, &state);
      if (char_len < 0)
	GPF_T1 ("bad UTF8 data in wide blob page");
      chars_on_page = char_len - from_char;
      if (chars_on_page)
	{
	  /* dbg_printf (("Read blob page %ld, %ld bytes.\n", start,
		bytes_on_page)); */
	  if (!omit)
	      session_buffered_write (string_output, (char *) (wpage + from_char), chars_on_page * sizeof (wchar_t));

	  chars_filled += chars_on_page;
	  from_char += chars_on_page;
	}
      next = LONG_REF (buf->bd_buffer + DP_OVERFLOW);
      page_leave_outside_map (buf);
      if (start == bh->bh_page)
	{
	  dp_addr_t t = LONG_REF (buf->bd_buffer + DP_BLOB_DIR);
	  if (bh->bh_dir_page && t != bh->bh_dir_page)
	    log_info ("Mismatch in directory page ID %d(%x) vs %d(%x).",
		t, t, bh->bh_dir_page, bh->bh_dir_page);
	  bh->bh_dir_page = t;
	}
      bh->bh_current_page = next;
      bh->bh_position = 0;
      from_char = 0;
      start = next;
    }
  itc_free (tmp_itc);
  return (string_output);
}
Example #30
0
/* slow solution, should be rewritten later */
caddr_t
strstr_utf8_with_collation (caddr_t dv1, long n1,
    caddr_t dv2, long n2, caddr_t *next, collation_t *collation)
{
  int n1inx = 0, n2inx = 0, n1inx_beg = 0;
  int utf8_1len = box_length (dv1) - 1;
  int utf8_2len = box_length (dv2) - 1;
  virt_mbstate_t state1, state2;
  wchar_t wtmp1, wtmp2;
  memset (&state1, 0, sizeof (virt_mbstate_t));
  memset (&state2, 0, sizeof (virt_mbstate_t));

  if (collation)
    {
      while (1)
	{
	  int rc1, rc2;
	  if (!n1inx_beg)
	    n1inx_beg = n1inx;
	again:
	  if (n1inx == utf8_1len && n2inx != utf8_2len)
	    return 0;
	  if (n2inx == utf8_2len)
	    {
	      if (next)
		next[0] = dv1+n1inx;

	      while(1)
		{
		  /* ignore all remaining ignorable signs */
		  rc1 = (int) virt_mbrtowc (&wtmp1, (unsigned char *) dv1+n1inx_beg,
		      utf8_1len-n1inx_beg, &state1);
		  if (rc1 < 0)
		    GPF_T1 ("inconsistent wide char data");
		  if (!((wchar_t *)collation->co_table)[wtmp1])
		    { /* ignore symbol, unicode normalization algorithm */
		      n1inx_beg+=rc1;
		    }
		  else
		    return dv1+n1inx_beg;
		}
	    }
	  rc2 = (int) virt_mbrtowc (&wtmp2, (unsigned char *) dv2+n2inx,
	      utf8_2len-n2inx, &state2);
	  if (rc2 < 0)
	    GPF_T1 ("inconsistent wide char data");
	  if (!((wchar_t *)collation->co_table)[wtmp2])
	    { /* ignore symbol, unicode normalization algorithm */
	      n2inx+=rc2;
	      goto again;
	    }
	  rc1 = (int) virt_mbrtowc (&wtmp1, (unsigned char *) dv1+n1inx,
	      utf8_1len-n1inx, &state1);
	  if (rc1 < 0)
	    GPF_T1 ("inconsistent wide char data");
	  if (!((wchar_t *)collation->co_table)[wtmp1])
	    { /* ignore symbol, unicode normalization algorithm */
	      n1inx+=rc1;
	      goto again;
	    }

	  if (((wchar_t *)collation->co_table)[wtmp1] != ((wchar_t *)collation->co_table)[wtmp2])
	    {
	      n1inx+=rc1;
	      n2inx=0;
	      n1inx_beg=n1inx;
	      memset (&state2, 0, sizeof (virt_mbstate_t));
	      continue;
	    }
	  n1inx+=rc1;
	  n2inx+=rc2;
	}
    }
  else
    {
      while (1)
	{
	  int rc1, rc2;
	  if (!n1inx_beg)
	    n1inx_beg = n1inx;
	  if (n1inx == utf8_1len && n2inx != utf8_2len)
	    return 0;
	  if (n2inx == utf8_2len)
	    {
	      if (next)
		next[0] = dv1+n1inx;
	      return dv1+n1inx_beg;
	    }
	  rc1 = (int) virt_mbrtowc (&wtmp1, (unsigned char *) dv1+n1inx,
	      utf8_1len-n1inx, &state1);
	  rc2 = (int) virt_mbrtowc (&wtmp2, (unsigned char *) dv2+n2inx,
	      utf8_2len-n2inx, &state2);
	  if (rc1 < 0  || rc2 < 0)
	    GPF_T1 ("inconsistent wide char data");
	  if (wtmp1 != wtmp2)
	    {
	      n1inx+=rc1;
	      n2inx=0;
	      n1inx_beg=n1inx;
	      memset (&state2, 0, sizeof (virt_mbstate_t));
	      continue;
	    }
	  n1inx+=rc1;
	  n2inx+=rc2;
	}
    }

  return 0;
}