dbe_key_t *
setp_temp_key (setp_node_t * setp, long *row_len_ptr, int quietcast)
{
  int inx = 0;
  NEW_VARZ (dbe_key_t, key);
  key->key_n_significant = dk_set_length (setp->setp_keys);
  key->key_id = KI_TEMP;
  key->key_is_primary = 0;
  key->key_super_id = KI_TEMP;
  DO_SET (state_slot_t *, ssl, &setp->setp_keys)
  {
    if (row_len_ptr)
      *row_len_ptr += sqt_row_data_length (&ssl->ssl_sqt);
    key_col_from_ssl (key, ssl, quietcast, NULL);
  }
  END_DO_SET ();
  DO_SET (state_slot_t *, ssl, &setp->setp_dependent)
  {
    gb_op_t *gb_op = dk_set_nth (setp->setp_gb_ops, inx);
    if (row_len_ptr)
      *row_len_ptr += sqt_row_data_length (&ssl->ssl_sqt);
    key_col_from_ssl (key, ssl, quietcast, gb_op);
    inx++;
  }
  END_DO_SET ();
  dbe_key_layout (key, NULL);
  dk_set_push (&setp->src_gen.src_query->qr_temp_keys, (void *) key);
  return key;
}
int
ct_col_ref (sql_comp_t * sc, comp_table_t * ct, ST * ref,
    dbe_column_t ** col_ret, col_ref_rec_t ** crr_ret, int err_if_not)
{
  char *col_name = ref->_.col_ref.name;
  if (!ct->ct_derived)
    {
      dbe_column_t *dbe_col = tb_name_to_column (ct->ct_table, col_name);
#ifdef BIF_XML
      if (!dbe_col && ct_is_entity (sc, ct))
	dbe_col = lt_xml_col (NULL, col_name);
#endif
      if (dbe_col)
	{
	  *col_ret = dbe_col;
	  *crr_ret = NULL;
	  DO_SET (col_ref_rec_t *, crr, &ct->ct_out_crrs)
	  {
	    if (0 == CASEMODESTRCMP (crr->crr_col_ref->_.col_ref.name, ref->_.col_ref.name))
	      {
		*crr_ret = crr;
		break;
	      }
	  }
	  END_DO_SET ();
	  return 1;
	}
      else
	{
	  if (err_if_not)
/* Removes and returns the first element for which f returns nonzero,
   putting all other elements back into the basket in their original order. */
void *
basket_remove_if (basket_t * bsk, basket_check_t f, void *cd)
{
  int found = 0;
  void *remd = NULL;
  dk_set_t tmp = NULL;
  void *elt;
#ifdef MTX_DEBUG
  if (bsk->bsk_req_mtx)
    ASSERT_IN_MTX (bsk->bsk_req_mtx);
#endif
  while ((elt = basket_get (bsk)))
    {
      if (!found && f (elt, cd))
	{
	  remd = elt;
	  found = 1;
	}
      else
	dk_set_push (&tmp, elt);
    }
  /* dk_set_push built tmp in reverse; take the reversed head so the re-add keeps the original order */
  tmp = dk_set_nreverse (tmp);
  DO_SET (void *, x, &tmp)
  {
    basket_add (bsk, x);
  }
  END_DO_SET ();
  dk_set_free (tmp);
  return remd;
}
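/* A minimal usage sketch for basket_remove_if, not part of the original
   source.  It assumes basket_check_t is a predicate of the form
   int (*) (void * elt, void * cd) that returns nonzero for the element to
   remove, as implied by the f (elt, cd) call above.  The sample_req_t
   struct and the sample_basket_drop_req wrapper are hypothetical names
   used only for illustration. */
typedef struct sample_req_s
{
  int req_id;
} sample_req_t;

static int
sample_req_id_eq (void *elt, void *cd)
{
  /* nonzero means: this is the element basket_remove_if should take out */
  return ((sample_req_t *) elt)->req_id == *(int *) cd;
}

sample_req_t *
sample_basket_drop_req (basket_t * bsk, int id)
{
  /* the caller is expected to hold bsk->bsk_req_mtx if one is set,
     since basket_remove_if asserts it under MTX_DEBUG */
  return (sample_req_t *) basket_remove_if (bsk, sample_req_id_eq, (void *) &id);
}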
col_ref_rec_t *
ct_col_crr (comp_table_t * ct, ST * ref)
{
  DO_SET (col_ref_rec_t *, crr, &ct->ct_out_crrs)
  {
    if (0 == CASEMODESTRCMP (ref->_.col_ref.name, crr->crr_col_ref->_.col_ref.name))
      return crr;
  }
  END_DO_SET ();
  return NULL;
}
void
mutex_stat ()
{
#ifdef MTX_METER
  DO_SET (dk_mutex_t *, mtx, &all_mtxs)
  {
    printf ("%s %lx E: %ld W %ld \n",
	mtx->mtx_name ? mtx->mtx_name : "<?>",
	(unsigned long) mtx, mtx->mtx_enters, mtx->mtx_waits);
  }
  END_DO_SET ();
#else
  printf ("Mutex stats not enabled.\n");
#endif
}
void
mutex_stat ()
{
#ifdef MTX_METER
  DO_SET (dk_mutex_t *, mtx, &all_mtxs)
  {
#ifdef APP_SPIN
    printf ("%s %p E: %ld W %ld spinw: %ld spin: %d\n",
	mtx->mtx_name ? mtx->mtx_name : "<?>", mtx,
	mtx->mtx_enters, mtx->mtx_waits, mtx->mtx_spin_waits, mtx->mtx_spins);
#else
    printf ("%s %p E: %ld W %ld wclk %ld \n",
	mtx->mtx_name ? mtx->mtx_name : "<?>", mtx,
	mtx->mtx_enters, mtx->mtx_waits, mtx->mtx_wait_clocks);
#endif
  }
  END_DO_SET ();
#else
  printf ("Mutex stats not enabled.\n");
#endif
}
void
set_xj_pk (xv_join_elt_t * xj)	/* get primary key */
{
  if (xj->xj_table)
    {
      dbe_table_t *tb = sch_name_to_table (wi_inst.wi_schema, xj->xj_table);
      if (!tb)
	{
	  sqlr_error ("S0002", "No table '%.300s' in create xml", xj->xj_table);
	}
      if (!xj->xj_pk)
	{
	  int fill = 0;
	  dbe_key_t *pk = tb->tb_primary_key;
	  xj->xj_pk = (caddr_t *) dk_alloc_box_zero (pk->key_n_significant * sizeof (caddr_t), DV_ARRAY_OF_POINTER);
	  DO_SET (dbe_column_t *, col, &pk->key_parts)
	  {
	    xj->xj_pk[fill++] = box_dv_short_string (col->col_name);
	    if (fill >= pk->key_n_significant)
	      break;
	  }
	  END_DO_SET ();
	}
void
setp_distinct_hash (sql_comp_t * sc, setp_node_t * setp, long n_rows)
{
  int quietcast = sc->sc_cc->cc_query->qr_no_cast_error;
  /* This was:
     int quietcast = DFE_DT == sc->sc_so->so_dfe->dfe_type
	 ? NULL != sqlo_opt_value (sc->sc_so->so_dfe->_.sub.ot->ot_opts, OPT_SPARQL) : 0; */
  int inx;
  int n_keys = dk_set_length (setp->setp_keys);
  int n_deps = dk_set_length (setp->setp_dependent);
  NEW_VARZ (hash_area_t, ha);
  DO_SET (state_slot_t *, ssl, &setp->setp_keys)
  {
    if (!quietcast && IS_BLOB_DTP (ssl->ssl_sqt.sqt_dtp))
      sqlc_new_error (sc->sc_cc, "42000", "SQ186",
	  "Long data types not allowed for distinct, order, "
	  "group or join condition columns (%s)", ssl->ssl_name);
    else if (DV_OBJECT == ssl->ssl_sqt.sqt_dtp)
      sqlc_new_error (sc->sc_cc, "42000", "SQ187",
	  "user defined data types not allowed for distinct, order, "
	  "group or join condition columns (%s)", ssl->ssl_name);
  }
  END_DO_SET ();
  ha->ha_row_size = 0;
  ha->ha_key = setp_temp_key (setp, &ha->ha_row_size, quietcast);
  setp->setp_ha = setp->setp_reserve_ha = ha;
  ha->ha_tree = ssl_new_tree (sc->sc_cc, "DISTINCT HASH");
  ha->ha_ref_itc = ssl_new_itc (sc->sc_cc);
  ha->ha_insert_itc = ssl_new_itc (sc->sc_cc);
#ifdef NEW_HASH
  ha->ha_bp_ref_itc = ssl_new_itc (sc->sc_cc);
#endif
  ha->ha_n_keys = n_keys;
  ha->ha_n_deps = n_deps;
  if (n_rows < 0)
    n_rows = 100000;		/* count probably overflowed - use a large amount */
  else if (n_rows < 1000)
    n_rows = 1000;		/* no less than 1000: if it overflows the memcache it must be at least this much */
  else if (n_rows > 1000000)
    n_rows = 1000000;		/* cap on hash size */
  ha->ha_row_count = n_rows;
  ha->ha_key_cols = (dbe_col_loc_t *) dk_alloc_box_zero ((n_deps + n_keys + 1) * sizeof (dbe_col_loc_t), DV_CUSTOM);
  for (inx = 0; inx < n_keys + n_deps; inx++)
    {
      dbe_col_loc_t *cl = key_find_cl (ha->ha_key, inx + 1);
      ha->ha_key_cols[inx] = cl[0];
      if ((inx >= n_keys) && (cl->cl_fixed_len <= 0))
	ha->ha_memcache_only = 1;
    }
  ha->ha_slots = (state_slot_t **) list_to_array (dk_set_conc (dk_set_copy (setp->setp_keys), dk_set_copy (setp->setp_dependent)));
#if 1
  if (ha->ha_memcache_only && setp->setp_gb_ops && setp->setp_gb_ops->data)
    {
      inx = n_keys;
      DO_SET (gb_op_t *, op, &(setp->setp_gb_ops))
      {
	state_slot_t *ssl = ha->ha_slots[inx];
	switch (op->go_op)
	  {
	  case AMMSC_COUNT:
	  case AMMSC_COUNTSUM:
	  case AMMSC_SUM:
	  case AMMSC_AVG:
	  case AMMSC_MIN:
	  case AMMSC_MAX:
	    {
	      /* check that the dependent part is a numeric type */
	      if (IS_NUM_DTP (ssl->ssl_dtp))
		ha->ha_memcache_only = 0;
	      else
		{
		  ha->ha_memcache_only = 1;
		  goto check_done;
		}
	      break;
	    }
	  default:
	    break;
	  }
	inx++;
      }
      END_DO_SET ();
    check_done:;
    }
void
iq_schedule (buffer_desc_t ** bufs, int n)
{
  int inx;
  int is_reads = 0;
  buf_sort (bufs, n, (sort_key_func_t) bd_phys_page_key);
  for (inx = 0; inx < n; inx++)
    {
      if (bufs[inx]->bd_iq)
	GPF_T1 ("buffer added to iq already has a bd_iq");
      bufs[inx]->bd_iq = db_io_queue (bufs[inx]->bd_storage, bufs[inx]->bd_physical_page);
    }
  DO_SET (io_queue_t *, iq, &mti_io_queues)
  {
    int n_added = 0;
    buffer_desc_t *ipoint;
    int was_empty;
    IN_IOQ (iq);
    inx = 0;
    ipoint = iq->iq_first;
    was_empty = (iq->iq_first == NULL);
    while (inx < n)
      {
	buffer_desc_t *buf = bufs[inx];
	if (!buf || buf->bd_iq != iq)
	  {
	    inx++;
	    continue;
	  }
	is_reads = buf->bd_being_read;
	if (buf->bd_iq_next || buf->bd_iq_prev)
	  GPF_T1 ("can't schedule same buffer twice");
	bufs[inx] = NULL;
      next_ipoint:
	if (!ipoint)
	  {
	    L2_PUSH_LAST (iq->iq_first, iq->iq_last, buf, bd_iq_);
	    n_added++;
	    inx++;
	  }
	else if (BUF_SORT_DP (ipoint) < BUF_SORT_DP (buf))
	  {
	    ipoint = ipoint->bd_iq_next;
	    goto next_ipoint;
	  }
	else if (BUF_SORT_DP (ipoint) == BUF_SORT_DP (buf))
	  GPF_T1 ("the same buffer can't be scheduled twice for io");
	else
	  {
	    L2_INSERT (iq->iq_first, iq->iq_last, ipoint, buf, bd_iq_);
	    n_added++;
	    inx++;
	  }
	if (!buf->bd_being_read)
	  {
	    page_leave_outside_map (buf);
	  }
      }
    LEAVE_IOQ (iq);
    if (n_added && !is_reads)
      {
	dbg_printf (("IQ %s %d %s added, %s.\n", IQ_NAME (iq), n_added,
	    is_reads ? "reads" : "writes", was_empty ? "starting" : "running"));
      }
    if (n_added && was_empty)
      semaphore_leave (iq->iq_sem);
  }
  END_DO_SET ();
  if (n)
    {
      if (is_reads)
	mti_reads_queued += n;
      else
	mti_writes_queued += n;
    }
}