/*
** Name: gca_sm - GCA listen-thread state machine.
**
** Description:
**	Drives one listen session (index 'sid' into gca_lcb_tab) through
**	the table-driven state machine lsn_states[].  Each entry names an
**	action; LSA_LABEL/LSA_GOTO/LSA_GOSUB/LSA_RETURN provide flow
**	control via the labels[] index table and a small per-LCB
**	subroutine stack.  Actions that post asynchronous GCA requests
**	(listen, respond, disassociate) return to the caller; GCA calls
**	back into this routine when the request completes.
**
** Inputs:
**	sid		Session (LCB) index.
**
** Outputs:
**	None.
**
** Returns:
**	VOID
*/
static void
gca_sm( i4 sid )
{
    GCA_LCB	*lcb = &gca_lcb_tab[ sid ];
    bool	branch = FALSE;

  top:

    /* Trace each substantive action (labels/gotos are pure flow control). */
    if ( JDBC_global.trace_level >= 5  &&
	 lsn_states[ lcb->state ].action != LSA_LABEL  &&
	 lsn_states[ lcb->state ].action != LSA_GOTO )
	TRdisplay( "%4d JDBC %s status 0x%x %s\n", sid,
		   lsn_sa_names[ lsn_states[ lcb->state ].action ],
		   lcb->statusp ? *lcb->statusp : OK,
		   branch ? " (branch)" : "" );

    branch = FALSE;

    /* Execute the current action; lcb->state advances to the next entry. */
    switch( lsn_states[ lcb->state++ ].action )
    {
    case LSA_INIT :		/* Initialize, branch if listening */
	branch = listening;
	lcb->sp = 0;
	lcb->flags = 0;

	/*
	** Build the label->state index table on first use
	** (label 1 is still unset only before initialization).
	*/
	if ( ! labels[ 1 ] )
	{
	    i4 i;

	    for( i = 0; i < ARR_SIZE( lsn_states ); i++ )
		if ( lsn_states[ i ].action == LSA_LABEL )
		    labels[ lsn_states[ i ].label ] = i + 1;
	}
	break;

    case LSA_GOTO :		/* Branch unconditionally */
	branch = TRUE;
	break;

    case LSA_GOSUB :		/* Call subroutine */
	/* Push the (already advanced) return state, then branch. */
	lcb->ss[ lcb->sp++ ] = lcb->state;
	branch = TRUE;
	break;

    case LSA_RETURN :		/* Return from subroutine */
	lcb->state = lcb->ss[ --lcb->sp ];
	break;

    case LSA_EXIT :		/* Terminate thread */
	lcb->state = 0;		/* Initialize state */

	/*
	** Exit if we're shutting down or there is an
	** active listen.  Otherwise, the current
	** thread continues as the new listen thread.
	*/
	if ( lcb->flags & LCB_SHUTDOWN  ||  listening )
	    return;
	break;

    case LSA_IF_RESUME :	/* Branch if INCOMPLETE */
	branch = (*lcb->statusp == E_GCFFFE_INCOMPLETE);
	break;

    case LSA_IF_TIMEOUT :	/* Branch if TIMEOUT */
	branch = (*lcb->statusp == E_GC0020_TIME_OUT);
	break;

    case LSA_IF_ERROR :		/* Branch if status not OK */
	branch = (*lcb->statusp != OK);
	break;

    case LSA_IF_SHUT :		/* Branch if SHUTDOWN requested */
	branch = (lcb->flags & LCB_SHUTDOWN) ? TRUE : FALSE;
	break;

    case LSA_SET_SHUT:		/* Prepare to shutdown server */
	GCshut();
	lcb->stat = E_GC0040_CS_OK;
	lcb->statusp = &lcb->stat;
	break;

    case LSA_CLEAR_ERR :	/* Set status to OK */
	lcb->stat = OK;
	lcb->statusp = &lcb->stat;
	break;

    case LSA_LOG :		/* Log an error */
	/* FAIL and NO_PEER are routine conditions, not worth logging. */
	if ( *lcb->statusp != OK  &&  *lcb->statusp != FAIL  &&
	     *lcb->statusp != E_GC0032_NO_PEER )
	    gcu_erlog( 0, JDBC_global.language,
		       *lcb->statusp, NULL, 0, NULL );
	break;

    case LSA_CHECK :		/* Background checks */
	jdbc_idle_check();
	jdbc_pool_check();
	break;

    case LSA_LISTEN:		/* Post a listen */
    {
	i4	timeout = -1;
	SYSTIME	now;

	MEfill( sizeof(lcb->parms), 0, (PTR) &lcb->parms );
	lcb->statusp = &lcb->parms.gca_ls_parms.gca_status;
	listening = TRUE;
	TMnow( &now );

	/*
	** Time-limit the listen to the nearer of the client and
	** pool idle-check deadlines, so background checks run even
	** when no client connects.
	*/
	if ( JDBC_global.client_idle_limit )
	{
	    i4 secs;

	    if ( JDBC_global.client_check.TM_secs <= 0 )
		secs = JDBC_global.client_idle_limit;
	    else  if ( TMcmp( &now, &JDBC_global.client_check ) < 0 )
		secs = JDBC_global.client_check.TM_secs - now.TM_secs;
	    else
		secs = JDBC_global.client_idle_limit / 2;

	    if ( timeout <= 0  ||  secs < timeout )  timeout = secs;
	}

	if ( JDBC_global.pool_idle_limit )
	{
	    i4 secs;

	    if ( JDBC_global.pool_check.TM_secs <= 0 )
		secs = JDBC_global.pool_idle_limit;
	    else  if ( TMcmp( &now, &JDBC_global.pool_check ) < 0 )
		secs = JDBC_global.pool_check.TM_secs - now.TM_secs;
	    else
		secs = JDBC_global.pool_idle_limit / 2;

	    if ( timeout <= 0  ||  secs < timeout )  timeout = secs;
	}

	/*
	** If there is a timeout, leave some
	** lee-way to ensure we actually pass
	** the target check time.  Also, convert
	** seconds to milli-seconds.
	*/
	if ( timeout >= 0 )  timeout = (timeout + 10) * 1000;

	IIGCa_cb_call( &gca_cb, GCA_LISTEN, &lcb->parms,
		       GCA_ASYNC_FLAG, (PTR)sid, timeout, &lcb->stat );

	/* On a posting error, fall out of the switch to continue the
	** state machine; otherwise wait for the async completion. */
	if ( lcb->stat != OK )  break;
    }
    return;

    case LSA_LS_RESUME :	/* Resume a listen */
	IIGCa_cb_call( &gca_cb, GCA_LISTEN, &lcb->parms,
		       GCA_RESUME, (PTR)sid, -1, &lcb->stat );

	if ( lcb->stat != OK )  break;
	return;

    case LSA_REPOST:		/* Repost a listen */
    {
	i4 id;

	listening = FALSE;

	/* Hand the listen role to the first idle (state 0) session. */
	for( id = 0; id < LCB_MAX; id++ )
	    if ( ! gca_lcb_tab[ id ].state )
	    {
		gca_sm( id );
		break;
	    }
    }
    break;

    case LSA_LS_DONE :		/* Listen request has completed */
	lcb->assoc_id = lcb->parms.gca_ls_parms.gca_assoc_id;
	break;

    case LSA_NEGOTIATE :	/* Validate client */
	lcb->protocol = min( lcb->parms.gca_ls_parms.gca_partner_protocol,
			     JDBC_GCA_PROTO_LVL );

	/*
	** Check for shutdown/quiesce request.
	** Walk the aux-data list; each element carries its own length.
	*/
	while( lcb->parms.gca_ls_parms.gca_l_aux_data > 0 )
	{
	    GCA_AUX_DATA	aux_hdr;
	    char		*aux_data;
	    i4			aux_len;

	    MEcopy( lcb->parms.gca_ls_parms.gca_aux_data,
		    sizeof( aux_hdr ), (PTR)&aux_hdr );
	    aux_data = (char *)lcb->parms.gca_ls_parms.gca_aux_data +
		       sizeof( aux_hdr );
	    aux_len = aux_hdr.len_aux_data - sizeof( aux_hdr );

	    switch( aux_hdr.type_aux_data )
	    {
	    case GCA_ID_QUIESCE :
	    case GCA_ID_SHUTDOWN :
		lcb->flags |= LCB_SHUTDOWN;
		break;
	    }

	    /* Advance past this aux-data element. */
	    lcb->parms.gca_ls_parms.gca_aux_data = (PTR)(aux_data + aux_len);
	    lcb->parms.gca_ls_parms.gca_l_aux_data -= aux_hdr.len_aux_data;
	}
	break;

    case LSA_REJECT :		/* Reject a client request */
	lcb->stat = E_JD010B_NO_CLIENTS;
	lcb->statusp = &lcb->stat;
	break;

    case LSA_RQRESP :		/* Respond to client request */
	MEfill( sizeof(lcb->parms), 0, (PTR) &lcb->parms);
	lcb->parms.gca_rr_parms.gca_assoc_id = lcb->assoc_id;
	lcb->parms.gca_rr_parms.gca_request_status = *lcb->statusp;
	lcb->parms.gca_rr_parms.gca_local_protocol = lcb->protocol;
	lcb->statusp = &lcb->parms.gca_rr_parms.gca_status;

	IIGCa_cb_call( &gca_cb, GCA_RQRESP, &lcb->parms,
		       GCA_ASYNC_FLAG, (PTR)sid, -1, &lcb->stat );

	if ( lcb->stat != OK )  break;
	return;

    case LSA_DISASSOC :		/* Disconnect association */
	MEfill( sizeof(lcb->parms), 0, (PTR) &lcb->parms );
	lcb->parms.gca_da_parms.gca_association_id = lcb->assoc_id;
	lcb->statusp = &lcb->parms.gca_da_parms.gca_status;

	IIGCa_cb_call( &gca_cb, GCA_DISASSOC, &lcb->parms,
		       GCA_ASYNC_FLAG, (PTR)sid, -1, &lcb->stat );

	if ( lcb->stat != OK )  break;
	return;

    case LSA_DA_RESUME :	/* Resume disassociate */
	IIGCa_cb_call( &gca_cb, GCA_DISASSOC, &lcb->parms,
		       GCA_RESUME, (PTR)sid, -1, &lcb->stat );

	if ( lcb->stat != OK )  break;
	return;
    }

    /*
    ** Taken branch: jump to the target label.  lcb->state - 1 is the
    ** entry just executed; its label field names the destination.
    */
    if ( branch )
	lcb->state = labels[ lsn_states[ lcb->state - 1 ].label ];

    goto top;
}
/*{ ** Name: DIrename - Renames a file. ** ** Description: ** The DIrename will change the name of a file. ** The file MUST be closed. The file can be renamed ** but the path cannot be changed. A fully qualified ** filename must be provided for old and new names. ** This includes the type qualifier extension. ** ** Inputs: ** di_io_unused UNUSED DI_IO pointer (always set to 0 by caller) ** path Pointer to the path name. ** pathlength Length of path name. ** oldfilename Pointer to old file name. ** oldlength Length of old file name. ** newfilename Pointer to new file name. ** newlength Length of new file name. ** Outputs: ** err_code Pointer to a variable used ** to return operating system ** errors. ** Returns: ** OK ** DI_BADRNAME Any i/o error during rename. ** DI_BADPARAM Parameter(s) in error. ** DI_DIRNOTFOUND Path not found. ** Exceptions: ** none ** ** Side Effects: ** none ** ** History: ** 26-mar-87 (mmm) ** Created new for 6.0. ** 06-feb-89 (mikem) ** Clear the CL_ERR_DESC. ** 15-apr-1992 (bryanp) ** Remove DI_IO argument and no longer support renaming open files. ** 30-nov-1992 (rmuth) ** - Prototype. ** - DIlru error checking ** 17-sep-1994 (nanpr01) ** - Needs to check for interrupted system calls specially for ** SIGUSR2. Curren implementation of 1 more retry is optimistic. ** In lot of UNIX systems, link, unlink, rename cannot be ** interrupted(HP-UX).But Solaris returns EINTR. Bug # 57938. ** 10-oct-1994 (nanpr01) ** - Wrong number of parameter in DIlru_flush. Bug # 64169 ** 20-Feb-1998 (jenjo02) ** DIlru_flush() prototype changed, it now computes the number of ** FDs to close instead of being passed an arbitrary number. ** Cleaned up handling of errno, which will be invalid after calling ** DIlru_flush(). ** 15-Apr-2004 (fanch01) ** Force closing of LRU file descriptors when a rename error is ** is encountered. Only occurs on a rename failure and the only ** file that is closed is the file associated with the error. 
** Relieves problems on filesystems which don't accomodate renaming ** open files. "Interesting" semaphore usage is consistent with other ** DI usage. ** 21-Apr-2004 (schka24) ** retry declaration got misplaced somehow, fix so it compiles. ** 26-Jul-2005 (schka24) ** Don't flush fd's on any random rename failure. Do a better job ** of re-verifying the fd and di-io after locking the fd when we're ** searching for a file-open conflict. ** 30-Sep-2005 (jenjo02) ** htb_fd_list_mutex, fd_mutex are now CS_SYNCH objects. */ STATUS DIrename( DI_IO *di_io_unused, char *path, u_i4 pathlength, char *oldfilename, u_i4 oldlength, char *newfilename, u_i4 newlength, CL_ERR_DESC *err_code) { char oldfile[DI_FULL_PATH_MAX]; char newfile[DI_FULL_PATH_MAX]; STATUS ret_val, intern_status; CL_ERR_DESC local_err; /* unix variables */ int os_ret; /* retry variables */ i4 retry = 0, failflag = 0; /* default returns */ ret_val = OK; if ((pathlength > DI_PATH_MAX) || (pathlength == 0) || (oldlength > DI_FILENAME_MAX) || (oldlength == 0) || (newlength > DI_FILENAME_MAX) || (newlength == 0)) return (DI_BADPARAM); /* get null terminated path and filename for old file */ MEcopy((PTR) path, pathlength, (PTR) oldfile); oldfile[pathlength] = '/'; MEcopy((PTR) oldfilename, oldlength, (PTR) &oldfile[pathlength + 1]); oldfile[pathlength + oldlength + 1] = '\0'; /* get null terminated path and filename for new file */ MEcopy((PTR) path, pathlength, (PTR) newfile); newfile[pathlength] = '/'; MEcopy((PTR) newfilename, newlength, (PTR) &newfile[pathlength + 1]); newfile[pathlength + newlength + 1] = '\0'; do { if (retry > 0 && failflag++ == 0) TRdisplay("%@ DIrename: retry on %t/%t\n", pathlength, path, oldlength, oldfilename); retry = 0; CL_CLEAR_ERR( err_code ); #ifdef xCL_035_RENAME_EXISTS /* Now rename the file. */ while ((os_ret = rename(oldfile, newfile)) == -1) { SETCLERR(err_code, 0, ER_rename); if (err_code->errnum != EINTR) break; } #else /* xCL_035_RENAME_EXISTS */ /* Now rename the file. 
*/ while ((os_ret = link(oldfile, newfile)) == -1) { SETCLERR(err_code, 0, ER_rename); if (err_code->errnum != EINTR) break; } if (os_ret != -1) { while ((os_ret = unlink(oldfile)) == -1) { if (err_code->errnum != EINTR) break; } } #endif /* xCL_035_RENAME_EXISTS */ /* if the rename failed, see if we're holding the file open */ if (os_ret == -1 && htb_initialized) { QUEUE *p, *q, *next; CS_synch_lock(&htb->htb_fd_list_mutex); q = &htb->htb_fd_list; for (p = q->q_prev; p != q; p = next) { DI_FILE_DESC *di_file = (DI_FILE_DESC *) p; DI_IO *di_io = (DI_IO *) di_file->fd_uniq.uniq_di_file; next = p->q_prev; if (di_io != NULL && di_file->fd_state == FD_IN_USE && di_io->io_type == DI_IO_ASCII_ID && pathlength == di_io->io_l_pathname && oldlength == di_io->io_l_filename) { CS_synch_unlock(&htb->htb_fd_list_mutex); CS_synch_lock(&di_file->fd_mutex); /* Make sure it's still the right ** DI_IO and compare the filename */ if ((DI_IO *) di_file->fd_uniq.uniq_di_file == di_io && di_file->fd_state == FD_IN_USE && di_file->fd_unix_fd != -1 && !(di_io->io_open_flags & DI_O_NOT_LRU_MASK) && di_io->io_type == DI_IO_ASCII_ID && pathlength == di_io->io_l_pathname && MEcmp((PTR) di_io->io_pathname, path, pathlength) == 0 && oldlength == di_io->io_l_filename && MEcmp((PTR) di_io->io_filename, oldfilename, oldlength) == 0) { /* have a match, print out stats */ /* try to close it */ CS_synch_unlock(&di_file->fd_mutex); DIlru_close(di_io, &local_err); retry++; } else CS_synch_unlock(&di_file->fd_mutex); CS_synch_lock(&htb->htb_fd_list_mutex); } } CS_synch_unlock(&htb->htb_fd_list_mutex); } } while (retry); if (os_ret == -1) { if ((err_code->errnum == ENOTDIR) || (err_code->errnum == EACCES)) { ret_val = DI_DIRNOTFOUND; } else { ret_val = DI_BADRNAME; } } else CL_CLEAR_ERR( err_code ); return(ret_val); }
/*
** Name: psq_store_text - store query text in a contiguous block of QSF memory
**
** Description:
**	Copy contents of a query text chain (prepended, if necessary, with
**	RANGE statements) into a contiguous block of QSF memory.  Caller may
**	specify that the text be stored in DB_TEXT_STRING format by setting
**	return_db_text_string to TRUE; otherwise the function will return a
**	i4 (length) followed by query text.
**
** Input:
**	rngtab			if non-NULL, range statements will be
**				generated for all entries of the range table
**				that are active (pss_used && pss_rgno >= 0);
**				should be non-NULL only for QUEL queries
**	header			Pointer to chain header
**	mstream			Pointer to opened memory stream
**	return_db_text_string	if TRUE, function will store text in
**				DB_TEXT_STRING format; otherwise it will store
**				it a a i4 (length) followed by text
**
** Output:
**	result			query text in specified format
**	err_blk			Filled in if an error happens
**
** Side efects:
**	allocates memory
**
** Returns:
**	E_DB_{OK,ERROR}
**
** History:
**	09-jan-93 (andre)
**	    written
**	29-jul-2001 (toumi01)
**	    problem found doing i64_aix port:
**	    (u_char *)'\n' should be (uchar)'\n'
**	    (u_char *)'\0' should be (uchar)'\0'
**	26-Oct-2009 (coomi01) b122714
**	    Move psq_store_text() declarator to pshparse.h and make it
**	    public here.
**	24-Jun-2010 (kschendel) b123775
**	    Correct a call to trim-whitespace.
*/
DB_STATUS
psq_store_text(
	PSS_SESBLK	*sess_cb,
	PSS_USRRANGE	*rngtab,
	PTR		header,
	PSF_MSTREAM	*mstream,
	PTR		*result,
	bool		return_db_text_string,
	DB_ERROR	*err_blk)
{
    DB_STATUS		status;
    i4			i;
    PSQ_THEAD		*hp = (PSQ_THEAD *) header;
    /* Running total of bytes to allocate; starts at the chain's text size. */
    i4			size = hp->psq_tsize;
    PSQ_TEXT		*tp;
    PSS_RNGTAB		*rngvar;
    u_char		*out;	/* write cursor into the allocated block */

    if (rngtab)
    {
	/*
	** allocate enough space for range statements.  each range statement
	** looks like range of 'rngname' is 'tabname'\n.
	** Thus, max space is 14+2*DB_MAX_NAME.
	**
	** NOTE: this size computation must stay in lockstep with the
	** emission loop below — 14 covers "range of " (9) + " is " (4)
	** + '\n' (1); the two psf_trmwhite() calls yield the exact
	** trimmed name lengths copied later.
	*/
	for (i = 0, rngvar = rngtab->pss_rngtab; i < PST_NUMVARS; i++, rngvar++)
	{
	    /* Only look at range vars that are being used */
	    if (rngvar->pss_used && rngvar->pss_rgno >= 0)
	    {
		size += ( 14	    /* "range of  is \n" */
		    + psf_trmwhite(DB_TAB_MAXNAME, rngvar->pss_rgname)
		    + psf_trmwhite(sizeof(DB_TAB_NAME),
			    (char *) &rngvar->pss_tabname));
	    }
	}
    }

    if (return_db_text_string)
    {
	DB_TEXT_STRING	    *str;

	/* DB_TEXT_STRING already contains one u_char of text storage,
	** hence the "- sizeof(u_char)" adjustment. */
	status = psf_malloc(sess_cb, mstream,
	    size + sizeof(*str) - sizeof(u_char), result, err_blk);
	if (status != E_DB_OK)
	    return (status);

	str = (DB_TEXT_STRING *) *result;

	/*
	** store the total length of query text
	*/
	str->db_t_count = size;
	out = str->db_t_text;
    }
    else
    {
	/* Allocate a piece large enough for all the text + a i4 (count) */
	status = psf_malloc(sess_cb, mstream, size + sizeof(i4), result,
	    err_blk);
	if (status != E_DB_OK)
	    return (status);

	out = (u_char *) *result;

	/* Copy the length into the buffer */
	MEcopy((char *) &size, sizeof(size), (char *) out);
	out += sizeof(size);
    }

    /* Copy the pieces into the buffer; first put the range statements */
    if (rngtab)
    {
	for (i = 0, rngvar = rngtab->pss_rngtab; i < PST_NUMVARS; i++, rngvar++)
	{
	    /* Only look at range vars that are being used */
	    if (rngvar->pss_used && rngvar->pss_rgno >= 0)
	    {
		i4	plen;

		STncpy( (char *)out, "range of ", 9);
		out += 9;

		/* add in range name (trimmed of trailing blanks) */
		plen = psf_trmwhite(DB_TAB_MAXNAME, rngvar->pss_rgname);
		STncpy( (char *)out, rngvar->pss_rgname, plen);
		out += plen;

		STncpy( (char *)out, " is ", 4);
		out += 4;

		/* add in (trimmed) table name */
		plen = psf_trmwhite(DB_TAB_MAXNAME,
		    rngvar->pss_tabname.db_tab_name);
		STncpy( (char *)out, (char *)&rngvar->pss_tabname, plen);
		out += plen;

		*out = (u_char)'\n';
		out++;
		/*
		** NOTE: 'out' is deliberately NOT advanced past this
		** terminator — the next range statement or the query
		** text below overwrites it, so it does not count
		** against 'size'.
		*/
		*out = (u_char)'\0';
	    }
	}
    }

    /* Append the query text chain itself. */
    for (tp = hp->psq_first; tp != (PSQ_TEXT *) NULL; tp = tp->psq_next)
    {
	MEcopy((char *) tp->psq_tval, tp->psq_psize, (char *) out);
	out += tp->psq_psize;
    }

    return(E_DB_OK);
}
DB_STATUS qen_fsmjoin( QEN_NODE *node, QEF_RCB *qef_rcb, QEE_DSH *dsh, i4 function ) { QEF_CB *qef_cb = dsh->dsh_qefcb; DMR_CB *dmrcb; QEN_NODE *out_node = node->node_qen.qen_sjoin.sjn_out; QEN_NODE *in_node = node->node_qen.qen_sjoin.sjn_inner; QEE_XADDRS *node_xaddrs = dsh->dsh_xaddrs[node->qen_num]; QEN_STATUS *qen_status = node_xaddrs->qex_status; ADE_EXCB *ade_excb; ADE_EXCB *jqual_excb = node_xaddrs->qex_jqual; QEN_HOLD *qen_hold; QEN_HOLD *ijFlagsHold = (QEN_HOLD *)NULL; QEN_SHD *qen_shd; QEN_SHD *ijFlagsShd; DB_STATUS status = E_DB_OK; bool reset = FALSE; bool out_reset = FALSE; bool in_reset = FALSE; bool ojoin = (node->node_qen.qen_sjoin.sjn_oj != NULL); bool ljoin = FALSE; bool rjoin = FALSE; bool innerTupleJoined; bool rematerializeInnerTuple = TRUE; /* During full joins, the last driving tuple may left ** join. This 0s all special eqcs from the ** re-scannable stream. The current re-scannable ** tuple will right join. To recover the state of ** its special eqcs, simply re-materialize the inner ** tuple. That's what this variable is for. */ i4 new_to_old; i4 join_result; i4 val1; i4 val2; TIMERSTAT timerstat; bool potential_card_violation = FALSE; #ifdef xDEBUG (VOID) qe2_chk_qp(dsh); #endif if (function != 0) { if (function & FUNC_RESET) { reset = in_reset = out_reset = TRUE; } /* Do open processing, if required. Only if this is the root node ** of the query tree do we continue executing the function. */ if ((function & TOP_OPEN || function & MID_OPEN) && !(qen_status->node_status_flags & QEN1_NODE_OPEN)) { status = (*out_node->qen_func)(out_node, qef_rcb, dsh, MID_OPEN); status = (*in_node->qen_func)(in_node, qef_rcb, dsh, MID_OPEN); qen_status->node_status_flags |= QEN1_NODE_OPEN; if (function & MID_OPEN) return(E_DB_OK); function &= ~TOP_OPEN; } /* Do close processing, if required. 
*/ if (function & FUNC_CLOSE) { if (!(qen_status->node_status_flags & QEN8_NODE_CLOSED)) { /* Ideally we would clean up all of our own shd crap here ** instead of making qee do it... */ status = (*out_node->qen_func)(out_node, qef_rcb, dsh, FUNC_CLOSE); status = (*in_node->qen_func)(in_node, qef_rcb, dsh, FUNC_CLOSE); qen_status->node_status_flags = (qen_status->node_status_flags & ~QEN1_NODE_OPEN) | QEN8_NODE_CLOSED; } return(E_DB_OK); } /* End of partition group call just gets passed down. */ if (function & FUNC_EOGROUP) { status = (*out_node->qen_func)(out_node, qef_rcb, dsh, FUNC_EOGROUP); status = (*in_node->qen_func)(in_node, qef_rcb, dsh, FUNC_EOGROUP); return(E_DB_OK); } } /* if function */ /* If the trace point qe90 is turned on then gather cpu and dio stats */ if (dsh->dsh_qp_stats) { qen_bcost_begin(dsh, &timerstat, qen_status); } /* Check for cancel, context switch if not MT */ CScancelCheck(dsh->dsh_sid); if (QEF_CHECK_FOR_INTERRUPT(qef_cb, dsh) == E_DB_ERROR) return (E_DB_ERROR); dsh->dsh_error.err_code = E_QE0000_OK; qen_hold = dsh->dsh_hold[node->node_qen.qen_sjoin.sjn_hfile]; qen_shd = dsh->dsh_shd[dsh->dsh_qp_ptr->qp_sort_cnt + node->node_qen.qen_sjoin.sjn_hfile]; if( ojoin && node->node_qen.qen_sjoin.sjn_oj->oj_ijFlagsFile >= 0 ) { ijFlagsHold = dsh->dsh_hold[node->node_qen.qen_sjoin.sjn_oj->oj_ijFlagsFile]; ijFlagsShd = dsh->dsh_shd[dsh->dsh_qp_ptr->qp_sort_cnt + node->node_qen.qen_sjoin.sjn_oj->oj_ijFlagsFile]; } if ( ojoin ) switch(node->node_qen.qen_sjoin.sjn_oj->oj_jntype) { case DB_LEFT_JOIN: ljoin = TRUE; break; case DB_RIGHT_JOIN: rjoin = TRUE; break; case DB_FULL_JOIN: ljoin = TRUE; rjoin = TRUE; break; default: break; } /* If the node is to be reset, dump the hold file and reset the ** inner/outer nodes */ loop_reset: if (reset) { if (qen_status->node_status != QEN0_INITIAL && in_node->qen_type != QE_SORT) { /* reset in memory or dump dmf hold if it has been created */ status = qen_u9_dump_hold(qen_hold, dsh, qen_shd); if(status) goto 
errexit; qen_hold->hold_medium = HMED_IN_MEMORY; /* set back to mem */ } qen_hold->hold_buffer_status = HFILE6_BUF_EMPTY; if ( qen_status->node_status != QEN0_INITIAL && ijFlagsHold ) { /* dump tid hold file if it has been created */ status = qen_u9_dump_hold( ijFlagsHold, dsh, ijFlagsShd ); if(status) goto errexit; ijFlagsHold->hold_medium = HMED_IN_MEMORY; /* set back to mem */ } qen_status->node_status = QEN0_INITIAL; /* reset = reintialize */ qen_status->node_u.node_join.node_inner_status = QEN0_INITIAL; qen_status->node_u.node_join.node_outer_status = QEN0_INITIAL; qen_status->node_u.node_join.node_outer_count = 0; qen_status->node_access = ( QEN_READ_NEXT_OUTER | QEN_READ_NEXT_INNER | QEN_OUTER_HAS_JOINED ); } if (qen_status->node_status == QEN0_INITIAL) { qen_status->node_u.node_join.node_outer_status = QEN0_INITIAL; /* set num entries in mem_hold in case we build one */ /* this may not be a hard number in future */ /* qen_shd->shd_tup_cnt = 20; */ /* by setting it to -1, the required memory will be configured to */ /* suit the condition. 
if it is < 20, it will use the dmf hold mem*/ /* ramra01 19-oct-94 */ qen_shd->shd_tup_cnt = -1; if( ijFlagsHold ) { ijFlagsHold->hold_status = HFILE0_NOFILE; /* default */ ijFlagsHold->hold_status2 = 0; /* default */ ijFlagsHold->hold_medium = HMED_IN_MEMORY; /* default */ /* in case we build a hold file ** tell qen_u1_append to calculate its size in memory ** or go to DMF hold */ ijFlagsShd->shd_tup_cnt = -1; } if(rjoin) { /* consistency check */ if( !ijFlagsHold ) { /* rjoin and no hold file for inner join flags */ dsh->dsh_error.err_code = E_QE0002_INTERNAL_ERROR; status = E_DB_ERROR; goto errexit; } } qen_status->node_access = ( QEN_READ_NEXT_OUTER | QEN_READ_NEXT_INNER | QEN_OUTER_HAS_JOINED ); } for (;;) /* The loop */ { status = E_DB_OK; /********************************************************* ** ** LOGIC TO READ FROM THE OUTER TUPLE STREAM ** ** ** *********************************************************/ if( qen_status->node_access & QEN_READ_NEXT_OUTER ) { /* ** If the previous outer tuple did not inner join with ** any inner tuples, then it's an outer join. Return ** it along with nulls for the right side if it passes ** the WHERE clause. */ if ( ljoin && !( qen_status->node_access & QEN_OUTER_HAS_JOINED ) ) { /* ** Set the "outer has joined" flag so that if we emit ** a left join, we won't come back into this conditional ** the next time through this fmsjoin node. 
*/ qen_status->node_access |= QEN_OUTER_HAS_JOINED; /* now execute oj_lnull */ status = qen_execute_cx(dsh, node_xaddrs->qex_lnull); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ /* Execute jqual restriction, if any */ if ( jqual_excb == NULL) break; /* emit a left join */ else { status = qen_execute_cx(dsh, jqual_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (jqual_excb->excb_value == ADE_TRUE) break; /* emit a left join */ } } /* endif previous outer did not join */ qen_status->node_access &= ~( QEN_READ_NEXT_OUTER | QEN_OUTER_HAS_JOINED ); /* get a new outer */ newouter: if ( qen_status->node_u.node_join.node_outer_status == QEN8_OUTER_EOF ) { status = E_DB_WARN; dsh->dsh_error.err_code = E_QE0015_NO_MORE_ROWS; } else /* this is where we actually read the outer stream! */ { status = (*out_node->qen_func)(out_node, qef_rcb, dsh, (out_reset) ? FUNC_RESET : NO_FUNC); if (status == E_DB_OK) qen_status->node_u.node_join.node_outer_count++; } out_reset = FALSE; /* a little error handling. check for end of outer stream */ if (status != E_DB_OK) { if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS || dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION && node->qen_flags & QEN_PART_SEPARATE) { /* If no outer rows were read and we're doing partition ** grouping, skip the next inner partition to re-sync. 
*/ if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION && qen_status->node_u.node_join.node_outer_count <= 0) if (!rjoin) { if (node->qen_flags & QEN_PART_SEPARATE) { status = (*in_node->qen_func)(in_node, qef_rcb, dsh, FUNC_EOGROUP); if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) qen_status->node_status_flags |= QEN4_IPART_END; /* if just EOPartition, flag it */ } goto errexit; } qen_status->node_access |= QEN_OUTER_HAS_JOINED; qen_status->node_u.node_join.node_outer_status = QEN8_OUTER_EOF; if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) qen_status->node_status_flags |= QEN2_OPART_END; /* if just EOPartition, flag it */ /* ** If ( the inner stream is exhausted and there's nothing ** to rescan ) or we're not right joining, ** then there are no more tuples to return. This should ** be the only way to end this fsmjoin node. */ if ( INNER_STREAM_EXHAUSTED || rjoin == FALSE ) { qen_status->node_status = QEN4_NO_MORE_ROWS; break; /* done */ } else /* we must check some more inner tuples */ { dsh->dsh_error.err_code = 0; /*reset*/ if(qen_status->node_status == QEN0_INITIAL) /* empty */ { qen_status->node_status = QEN1_EXECUTED; qen_hold->hold_status = HFILE0_NOFILE; qen_hold->hold_status2 = 0; qen_hold->hold_medium = HMED_IN_MEMORY; if(in_node->qen_type == QE_SORT) { status = qen_u32_dosort(in_node, qef_rcb, dsh, qen_status, qen_hold, qen_shd, (in_reset) ? FUNC_RESET : NO_FUNC); if(status) goto errexit; } in_reset = FALSE; } /* endif first time through */ } /* endif no more inner tuples to check */ } else if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) { /* No more rows in partitioning group - read from ** next group. 
*/ out_reset = TRUE; goto newouter; } else /* return error from reading outer stream */ { break; } } /* end of error handling for outer stream EOF */ if(qen_status->node_status == QEN0_INITIAL) { qen_status->node_status = QEN1_EXECUTED; /* init done */ qen_hold->hold_status = HFILE0_NOFILE; /* default */ qen_hold->hold_status2 = 0; /* default */ qen_hold->hold_medium = HMED_IN_MEMORY; /* default */ if(in_node->qen_type == QE_SORT) { status = qen_u32_dosort(in_node, qef_rcb, dsh, qen_status, qen_hold, qen_shd, (in_reset) ? FUNC_RESET : NO_FUNC); if(status) goto errexit; in_reset = FALSE; } /* now materialize the first join key */ status = qen_execute_cx(dsh, node_xaddrs->qex_okmat); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ } /* If not the first time */ else { if ( qen_status->node_u.node_join.node_outer_status == QEN8_OUTER_EOF ) { new_to_old = NEW_GT_OLD; } else /* outer not at EOF */ { /* compare the old outer key to the new one. */ new_to_old = ADE_1EQ2; if ((ade_excb = node_xaddrs->qex_kcompare) != NULL) { status = qen_execute_cx(dsh, ade_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ new_to_old = ade_excb->excb_cmp; } /* Materialize the new outer key if the old and the ** new outer keys are not equal */ if (new_to_old != ADE_1EQ2) { status = qen_execute_cx(dsh, node_xaddrs->qex_okmat); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ } else if ((node->qen_flags & QEN_CARD_MASK) == QEN_CARD_01L) { /* Right outer - note cardinality */ potential_card_violation = (new_to_old == ADE_1EQ2); } } /* endif outer not at EOF */ /* ** If there are inner tuples to rescan, decide whether ** to thumb through them again or dump them. 
*/ if ( qen_status->node_access & QEN_RESCAN_MARKED ) { if ( new_to_old == ADE_1EQ2 ) { status = repositionInnerStream( node, dsh ); if(status != E_DB_OK) break; /* to error */ continue; } else /* key has changed */ { if ( rjoin ) { status = repositionInnerStream( node, dsh ); if(status != E_DB_OK) break; /* to error */ qen_status->node_access |= QEN_LOOKING_FOR_RIGHT_JOINS; continue; /* to get a new inner */ } else /* don't have to return right joins */ { status = clearHoldFiles( node, dsh ); if(status != E_DB_OK) break; /* to error */ } } /* endif comparison of new and old keys */ } /* endif there are inner tuples to rescan */ } /* end first or subsequent times */ } /* end if read_outer */ /********************************************************* ** ** LOGIC TO READ FROM THE INNER TUPLE STREAM ** ** *********************************************************/ if( qen_status->node_access & QEN_READ_NEXT_INNER ) { qen_status->node_access &= ~QEN_READ_NEXT_INNER; if ( !INNER_STREAM_EXHAUSTED ) { /* ** If we're rescanning the hold files and will eventually ** have to look for right joins, read from the hold file ** of inner join flags. */ if ( rjoin && ( ijFlagsHold->hold_status2 & HFILE_REPOSITIONED ) ) { if (qen_u40_readInnerJoinFlag( ijFlagsHold, dsh, ijFlagsShd, &innerTupleJoined ) != E_DB_OK) { /* ** If innerJoinFlags is exhausted and we were ** looking for right joins, then we've found ** all the right joins for this key. Dump the ** hold files. */ if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS) { /* Hold file ends, mark this. Continue reading. 
*/ ijFlagsHold->hold_buffer_status = HFILE6_BUF_EMPTY; ijFlagsHold->hold_status = HFILE2_AT_EOF; if ( qen_status->node_access & QEN_LOOKING_FOR_RIGHT_JOINS ) { qen_status->node_access &= ~( QEN_LOOKING_FOR_RIGHT_JOINS | QEN_READ_NEXT_OUTER ); qen_status->node_access |= QEN_READ_NEXT_INNER; status = clearHoldFiles( node, dsh ); if(status != E_DB_OK) break; /* to error */ continue; /* get next inner */ } } else /* other errors are fatal */ { break; } } /* endif innerJoinFlags read wasn't OK */ } /* endif rjoin and rescanning hold files */ /* Read from hold file if it is positioned */ if (qen_hold->hold_status == HFILE3_POSITIONED) { if (qen_u4_read_positioned(qen_hold, dsh, qen_shd) != E_DB_OK) { if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS) { /* Hold file ends, must read from inner node */ qen_hold->hold_buffer_status = HFILE6_BUF_EMPTY; qen_hold->hold_status = HFILE2_AT_EOF; dsh->dsh_error.err_code = 0; qen_status->node_access |= QEN_READ_NEXT_INNER; if (node->qen_flags & QEN_PART_SEPARATE && !(qen_hold->hold_status2 & HFILE_LAST_PARTITION)) { qen_status->node_status_flags |= QEN4_IPART_END; dsh->dsh_error.err_code = E_QE00A5_END_OF_PARTITION; } continue; /* to read a new inner */ } else /* other, presumably fatal error */ { break; } } /* end if hold end */ if(in_node->qen_type == QE_SORT) /* if hold from sort */ { /* Materialize the inner tuple from sort's row buffer into my row buffer. 
*/ status = qen_execute_cx(dsh, node_xaddrs->qex_itmat); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ rematerializeInnerTuple = FALSE; } /* end if hold from sort */ qen_hold->hold_buffer_status = HFILE7_FROM_HOLD; } /* end if positioned */ /* if not EOF on stream */ else if (qen_status->node_u.node_join.node_inner_status != QEN11_INNER_ENDS) { if(qen_hold->unget_status) /* if occupied */ { /* put unget in row buffer */ MEcopy((PTR)qen_hold->unget_buffer, qen_shd->shd_width, (PTR)qen_shd->shd_row); qen_hold->unget_status = 0; /* set no unget */ qen_hold->hold_buffer_status = HFILE8_FROM_INNER; } else /* get new from stream */ { newinner: status = (*in_node->qen_func)(in_node, qef_rcb, dsh, (in_reset) ? FUNC_RESET : NO_FUNC); in_reset = FALSE; if (status != E_DB_OK) { if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS || dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION && (node->qen_flags & QEN_PART_SEPARATE)) { qen_hold->hold_buffer_status = HFILE6_BUF_EMPTY; if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) qen_status->node_status_flags |= QEN4_IPART_END; /* if just EOPartition, flag it */ /* mark EOF on stream */ qen_status->node_u.node_join.node_inner_status = QEN11_INNER_ENDS; if(qen_hold->hold_status == HFILE2_AT_EOF || qen_hold->hold_status == HFILE0_NOFILE || qen_hold->hold_status == HFILE1_EMPTY ) { qen_status->node_u.node_join.node_hold_stream = QEN5_HOLD_STREAM_EOF; } } else if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) { /* No more rows in partitioning group - read ** from next group. */ in_reset = TRUE; goto newinner; } else { break; /* other, fatal error */ } } else /* inner tuple successfully read */ { /* Materialize the inner tuple into row buffer. 
*/ status = qen_execute_cx(dsh, node_xaddrs->qex_itmat); if (status != E_DB_OK) goto errexit; qen_hold->hold_buffer_status = HFILE8_FROM_INNER; rematerializeInnerTuple = FALSE; } } /* end if unget occupied */ } /* end of read from hold/inner */ } /* endif inner stream not exhausted */ } /* end if read_inner */ /*************************************************************** ** ** LOOK FOR RIGHT JOINS ** ** ***************************************************************/ if ( qen_status->node_access & QEN_LOOKING_FOR_RIGHT_JOINS ) { qen_status->node_access &= ~QEN_READ_NEXT_OUTER; qen_status->node_access |= QEN_READ_NEXT_INNER; if ( innerTupleJoined == FALSE ) { status = qen_execute_cx(dsh, node_xaddrs->qex_rnull); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (jqual_excb == NULL) break; /* return right join */ else { status = qen_execute_cx(dsh, jqual_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (jqual_excb->excb_value == ADE_TRUE) break; /* to return right join */ } } /* endif inner tuple joined with some outer */ continue; /* evaluate next inner tuple for right joins */ } /* endif looking for right joins */ /*************************************************************** ** ** COMPARE THE INNER AND OUTER JOIN KEYS ** ** ***************************************************************/ if ( INNER_STREAM_EXHAUSTED || qen_hold->hold_buffer_status == HFILE6_BUF_EMPTY ) { join_result = OUTER_LT_INNER; } else if(qen_status->node_u.node_join.node_outer_status == QEN8_OUTER_EOF) { join_result = OUTER_GT_INNER; } else /* we have an inner and outer. join them on the join key. 
*/ { join_result = ADE_1EQ2; if ((ade_excb = node_xaddrs->qex_joinkey) != NULL) { status = qen_execute_cx(dsh, ade_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ join_result = ade_excb->excb_cmp; } if (join_result == ADE_BOTHNULL) { join_result = OUTER_GT_INNER; } else if (join_result == ADE_1ISNULL) { join_result = OUTER_GT_INNER; } else if (join_result == ADE_2ISNULL) { join_result = OUTER_LT_INNER; } } /* endif we have inner and outer */ /*************************************************************** ** ** OUTER AND INNER KEYS NOW JOINED. PERFORM OTHER ** QUALIFICATIONS NOW. EMIT JOINS WHERE APPROPRIATE. ** ***************************************************************/ if (join_result == OUTER_LT_INNER) { qen_status->node_access |= QEN_READ_NEXT_OUTER; qen_status->node_access &= ~QEN_READ_NEXT_INNER; continue; /* get next outer */ } if ( join_result == OUTER_GT_INNER ) { qen_status->node_access &= ~QEN_READ_NEXT_OUTER; qen_status->node_access |= QEN_READ_NEXT_INNER; if ( rjoin ) { /* rematerialize inner tuple if the ultimate outer tuple ** just left joined. rematerialization will reset the ** special equivalence classes from the inner stream. 
*/ if ( rematerializeInnerTuple == TRUE ) { status = qen_execute_cx(dsh, node_xaddrs->qex_itmat); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ rematerializeInnerTuple = FALSE; } /* execute oj_rnull */ status = qen_execute_cx(dsh, node_xaddrs->qex_rnull); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (jqual_excb == NULL) break; /* return right join */ else { status = qen_execute_cx(dsh, jqual_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (jqual_excb->excb_value == ADE_TRUE) break; /* to return right join */ } } continue; /* get next inner */ } /* endif outer greater than inner */ /* We come to this point when joinkey returns OUTER_EQ_INNER */ if ( join_result != OUTER_EQ_INNER ) { /* consistency check */ dsh->dsh_error.err_code = E_QE0002_INTERNAL_ERROR; status = E_DB_ERROR; goto errexit; } /* end consistency check */ if (qen_hold->hold_buffer_status == HFILE8_FROM_INNER) { /* append to hold */ status = qen_u1_append(qen_hold, qen_shd, dsh); if(status) break; /* to return error */ } /* If this is the first inner that joins with the current ** outer, save the hold file TID so we can reposition it later. */ if ( !( qen_status->node_access & QEN_RESCAN_MARKED ) ) { if ( qen_u5_save_position(qen_hold, qen_shd) ) goto errexit; qen_status->node_access |= QEN_RESCAN_MARKED; } else if ((node->qen_flags & QEN_CARD_MASK) == QEN_CARD_01R && (qen_status->node_access & QEN_OUTER_HAS_JOINED) != 0) { /* Left outer - note cardinality */ potential_card_violation = TRUE; } qen_status->node_access &= ~QEN_READ_NEXT_OUTER; qen_status->node_access |= QEN_READ_NEXT_INNER; /* execute OQUAL */ ade_excb = node_xaddrs->qex_onqual; status = qen_execute_cx(dsh, ade_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (ade_excb == NULL || ade_excb->excb_value == ADE_TRUE) { /* not OJ, or OQUAL succeeds. Remember that a join occurred. 
*/ qen_status->node_access |= QEN_OUTER_HAS_JOINED; if ( rjoin ) { if ( status = qen_u41_storeInnerJoinFlag( ijFlagsHold, ijFlagsShd, dsh, innerTupleJoined, ( i4 ) TRUE ) ) goto errexit; /* error */ } /* set the special eqcs to "inner join" state */ status = qen_execute_cx(dsh, node_xaddrs->qex_eqmat); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ if (jqual_excb != NULL) { status = qen_execute_cx(dsh, jqual_excb); if (status != E_DB_OK) goto errexit; /* if ade error, return error */ } if( jqual_excb == NULL || jqual_excb->excb_value == ADE_TRUE) { /* JQUAL succeeds */ if(node->node_qen.qen_sjoin.sjn_kuniq) /* if kuniq */ { /* make next entry read new outer bit not new inner */ qen_status->node_access |= QEN_READ_NEXT_OUTER; qen_status->node_access &= ~QEN_READ_NEXT_INNER; } /* endif key unique */ if (potential_card_violation) { /* We only want to act on seltype violation after ** qualification and that is now. */ qen_status->node_status = QEN7_FAILED; dsh->dsh_error.err_code = E_QE004C_NOT_ZEROONE_ROWS; status = E_DB_ERROR; goto errexit; } break; /* emit inner join */ } } else /* OQUAL failed */ { if ( rjoin ) { if ( status = qen_u41_storeInnerJoinFlag( ijFlagsHold, ijFlagsShd, dsh, innerTupleJoined, ( i4 ) FALSE ) ) goto errexit; /* error */ } } /* end check of OQUAL status */ /* OQUAL or JQUAL failed. Get next inner. */ continue; } /* end of get loop */ /******************************************************************** ** ** CLEANUP. MATERIALIZE FUNCTION ATTRIBUTES. ERROR EXIT WHEN ** APPROPRIATE. 
** ********************************************************************/ if (status == E_DB_OK) { status = qen_execute_cx(dsh, node_xaddrs->qex_fatts); if (status != E_DB_OK) goto errexit; /* Increment the count of rows that this node has returned */ qen_status->node_rcount++; /* print tracing information DO NOT xDEBUG THIS */ if (node->qen_prow != NULL && (ult_check_macro(&qef_cb->qef_trace, 100+node->qen_num, &val1, &val2) || ult_check_macro(&qef_cb->qef_trace, 99, &val1, &val2) ) ) { if (status == E_DB_OK) { status = qen_print_row(node, qef_rcb, dsh); if (status != E_DB_OK) { goto errexit; } } } #ifdef xDEBUG (VOID) qe2_chk_qp(dsh); #endif } else { if(in_node->qen_type == QE_SORT) /* if sort child */ /* release the memory now if in memory */ { qen_u31_release_mem(qen_hold, dsh, dsh->dsh_shd[in_node->node_qen.qen_sort.sort_shd] ); } else if(qen_hold) /* if hold file */ { /* release our hold file, if in memory */ qen_u31_release_mem(qen_hold, dsh, qen_shd ); } } errexit: if ((dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS || dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) && (qen_status->node_status_flags & (QEN2_OPART_END | QEN4_IPART_END))) { /* Restart using next partitioning group. */ out_reset = in_reset = reset = TRUE; qen_status->node_status_flags &= ~(QEN2_OPART_END | QEN4_IPART_END); goto loop_reset; } if (dsh->dsh_qp_stats) { qen_ecost_end(dsh, &timerstat, qen_status); } return (status); }
/*{ ** Name: opn_jmaps - set various join operator tree maps ** ** Description: {@comment_line@}... ** ** Inputs: ** subquery ptr to subquery being analyzed ** nodep ptr to current operator node which ** will have maps initialized ** ojmap NULL if no outer joins exist ** - map of outer joins which are ** evaluated in parent nodes. ** ** Outputs: ** nodep->opn_eqm set of equivalence class available ** from subtree ** nodep->opn_rlmap bitmap of relations in the subtree ** nodep->opn_rlasg order in which the relations in ** opn_rlmap are assigned to the leaves ** Returns: ** TRUE if there is a valid placement of subselect nodes with ** all required equivalence classes available for execution of the ** boolean factor used for the subselect ** Exceptions: ** none ** ** Side Effects: ** none ** ** History: ** 11-jun-86 (seputis) ** initial creation from setjmaps ** 29-mar-90 (seputis) ** fix byte alignment problems ** 2-apr-91 (seputis) ** only partially copied relation assignment causing run_all diffs ** fix for b35461, this would cause the OPF cache of query plans ** to not be effectively used, and could cause performance problems ** on non-VMS systems. This fix should be used whenever a poor ** query plan bug is reported which cannot be reproduced on VMS. ** 15-feb-94 (ed) ** - bug 59598 - correct mechanism in which boolean factors are ** evaluated at several join nodes ** 11-apr-94 (ed) ** - bug 59937 - E_OP0489 consistency check due to inner join ojid ** not being visible in boolean factor placement maps ** 31-jul-97 (inkdo01) ** Fix to force SVAR func atts ref'ed in ON clauses to materialize ** just before needed (to incorporate proper null semantics). ** 9-jul-99 (hayke02 for inkdo01) ** Set opn_eqm bits in lower level nodes (e.g. leaf nodes) when ** func attrs are found in higher level OJ nodes. This change ** fixes bug 92749. 
** 20-Mar-02 (wanfr01) ** Bug 106678, INGSRV 1633 ** Confirm function attribute as an ojid before associating it ** with an outer join node. ** 16-sep-03 (hayke02) ** Check for OPN_OJINNERIDX in nodep->opn_jmask to indicate that ** placement of a coverqual inner index outer join needs to be ** checked. ** 28-apr-04 (hayke02) ** Modify previous change so that we return FALSE and reject the QEP ** if the index outer join node (node->LEFT) has a non-zero opn_nchild ** for node->LEFT->RIGHT. This will allow only QEPs where the OJ ** inner is the index only and not the index joined to another ** relation. This change fixes problem INGSRV 2808, bug 112211. ** 05-Oct-2004 (huazh01) ** Remove the above fix. The fix for 111627 handles b106678 ** as well. This fixes b113160, INGSRV2984. ** 14-mar-05 (hayke02) ** Modify the fix for problem INGSRV 2808, bug 112211 so that we now ** check for more than 1 var in the opl_ivmap. This now allows the ** correct rejection of plans that have had their opn_child's set up ** with left joins 'reversed' into right joins. This change fixes ** problems INGSRV 3049 and 3094, bugs 113457 and 113990. ** 23-sep-05 (hayke02) ** Check for a WHERE clause (OPL_NOOOUTER opz_ojid) OJSVAR func att, ** and make sure that all OJs that this func att is inner to are ** executed before the join involving this func att. This change fixes ** bug 114912. ** 30-jan-07 (hayke02) ** Disable the fix for bug 114912 for cart prod (OPL_BOJCARTPROD) OJs. ** This change fixes bug 117513. ** 27-Oct-2009 (kiria01) SIR 121883 ** Scalar sub-selects - protect subp->opv_eqcrequired from bad de-ref. [@history_line@]... 
*/
bool
opn_jmaps(
	OPS_SUBQUERY	*subquery,
	OPN_JTREE	*nodep,
	OPL_BMOJ	*ojmap)
{
    OPL_OUTER	*outerp;	/* outer join descriptor for nodep's ojid.
				** NOTE(review): only assigned inside the
				** (opl_lv > 0 && opn_ojid != OPL_NOOUTER)
				** block below, yet dereferenced in the
				** OPZ_OJSVAR test in the func-attr loop;
				** presumably those conditions always hold
				** together when opz_ijmap is set -- confirm */

    if (nodep->opn_nleaves == 1)
    {	/* leaf node - initialize this node's maps directly from the single
	** range variable it represents */
	OPV_IVARS	varno;		/* joinop range variable number
					** of leaf */

	varno = nodep->opn_prb[0];	/* by definition of leaf - only
					** one variable in partition */
	nodep->opn_rlasg[0] = varno;	/* trivial ordering for leaf */
	MEfill( sizeof(nodep->opn_rlmap), (u_char)0, (PTR)&nodep->opn_rlmap );
	if (subquery->ops_oj.opl_lv > 0)
	{   /* outer joins exist in this subquery - clear the OJ maps and
	    ** record the outer joins this leaf participates in as an inner */
	    MEfill( sizeof(nodep->opn_ojinnermap), (u_char)0,
		(PTR)&nodep->opn_ojinnermap);
	    MEfill( sizeof(nodep->opn_ojevalmap), (u_char)0,
		(PTR)&nodep->opn_ojevalmap);
	    opl_sjij(subquery, varno, &nodep->opn_ojinnermap, ojmap);
	}
	BTset((i4)varno, (char *)&nodep->opn_rlmap); /* only one bit set for leaf*/
	MEcopy ((PTR)&subquery->ops_vars.opv_base->opv_rt[varno]->opv_maps.opo_eqcmap,
	    sizeof(nodep->opn_eqm),
	    (PTR)&nodep->opn_eqm );	/* copy map of
					** equivalence classes
					** associated with varno of this
					** leaf */
	return(TRUE);
    }
    else
    {	/* non-leaf node - recurse on both children, then combine their maps */
	OPN_JTREE	*leftchildp;	/* ptr to left child node */
	OPN_JTREE	*rightchildp;	/* ptr to right child node */

	leftchildp = nodep->opn_child[OPN_LEFT];
	rightchildp = nodep->opn_child[OPN_RIGHT];
	if (ojmap)
	{
	    MEfill(sizeof(nodep->opn_ojevalmap), (u_char)0,
		(PTR)&nodep->opn_ojevalmap);
	    if( (nodep->opn_ojid >= 0)
		&&
		!BTtest((i4)nodep->opn_ojid, (char *)ojmap))
	    {	/* setup the outer join map which contains all outer joins
		** which are completely evaluated within this subtree */
		BTset((i4)nodep->opn_ojid, (char *)&nodep->opn_ojevalmap);
		BTset((i4)nodep->opn_ojid, (char *)ojmap);
	    }
	}
	if (!opn_jmaps (subquery, leftchildp, ojmap)) /* get info on left child */
	    return(FALSE);
	if (!opn_jmaps (subquery, rightchildp, ojmap)) /* get info on right child */
	    return(FALSE);
	MEcopy ((PTR)&leftchildp->opn_rlmap, sizeof(nodep->opn_rlmap),
	    (PTR)&nodep->opn_rlmap );	/* get var bitmap from left
					** child */
	BTor(	(i4)BITS_IN(nodep->opn_rlmap),
	    (char *)&rightchildp->opn_rlmap,
	    (char *)&nodep->opn_rlmap );    /* "OR" rightchildp->opn_rlmap
					    ** into nodep->opn_rlmap */
	if (ojmap)
	{   /* setup ojinnermap which contains all outer joins which are
	    ** partially or totally evaluated within this subtree, but
	    ** not at this node unless it is in the subtree */
	    MEcopy((PTR)&leftchildp->opn_ojinnermap,
		sizeof(leftchildp->opn_ojinnermap),
		(PTR)&nodep->opn_ojinnermap);
	    BTor((i4)BITS_IN(rightchildp->opn_ojinnermap),
		(char *)&rightchildp->opn_ojinnermap,
		(char *)&nodep->opn_ojinnermap);
	    if (leftchildp->opn_ojid >= 0)
		BTset((i4)leftchildp->opn_ojid, (char *)&nodep->opn_ojinnermap);
	    if (rightchildp->opn_ojid >= 0)
		BTset((i4)rightchildp->opn_ojid, (char *)&nodep->opn_ojinnermap);
	    if (subquery->ops_mask & OPS_IJCHECK)
		opl_ijcheck(subquery, &leftchildp->opn_ojinnermap,
		    &rightchildp->opn_ojinnermap, &nodep->opn_ojinnermap,
		    &leftchildp->opn_ojevalmap, &rightchildp->opn_ojevalmap,
		    &nodep->opn_rlmap);
	    /* map of outerjoins which are entirely evaluated within
	    ** this subtree */
	    BTor((i4)BITS_IN(leftchildp->opn_ojevalmap),
		(char *)&leftchildp->opn_ojevalmap,
		(char *)&nodep->opn_ojevalmap);
	    BTor((i4)BITS_IN(rightchildp->opn_ojevalmap),
		(char *)&rightchildp->opn_ojevalmap,
		(char *)&nodep->opn_ojevalmap);
	    /* coverqual inner-index OJ placement check (see 16-sep-03 and
	    ** 14-mar-05 history entries): reject plans whose left child's
	    ** ojid does not match, or matches but covers more than one var */
	    if ((nodep->opn_jmask & OPN_OJINNERIDX)
		&&
		((nodep->opn_ojid != nodep->opn_child[OPN_LEFT]->opn_ojid)
		||
		((nodep->opn_ojid == nodep->opn_child[OPN_LEFT]->opn_ojid)
		&&
		(nodep->opn_ojid >= 0)
		&&
		(BTcount((char *)subquery->ops_oj.opl_base->opl_ojt
		    [nodep->opn_ojid]->opl_ivmap,
		    subquery->ops_vars.opv_rv) > 1))))
		return(FALSE);
	}
	MEcopy ((PTR)leftchildp->opn_rlasg,
	    leftchildp->opn_nleaves * sizeof(nodep->opn_rlasg[0]),
	    (PTR)nodep->opn_rlasg);	/* get relations from left
					** child */
	MEcopy ((PTR)rightchildp->opn_rlasg,
	    rightchildp->opn_nleaves * sizeof(nodep->opn_rlasg[0]),
	    (PTR)(nodep->opn_rlasg + leftchildp->opn_nleaves)); /* get
					** relations from right child
					** and place them beside the
					** ones from the left child */
	MEcopy ((PTR)&leftchildp->opn_eqm, sizeof(nodep->opn_eqm),
	    (PTR)&nodep->opn_eqm );	/* copy equivalence class map
					** from left child to this node */
	BTor(	(i4)BITS_IN(nodep->opn_eqm),
	    (char *)&rightchildp->opn_eqm,
	    (char *)&nodep->opn_eqm );	/* "OR" right child equivalence
					** map into this to produce
					** map for this node */
	if (subquery->ops_joinop.opj_virtual)
	{   /* there is a subselect in this query so check for availability
	    ** of equivalence classes used for evaluation of boolean factors
	    ** containing the subselect */
	    OPV_SUBSELECT	*subp;	/* ptr to subselect descriptor
					** for range variable */
	    if ( (rightchildp->opn_nleaves == 1)
		&&
		( subp = subquery->ops_vars.opv_base->
		    opv_rt[rightchildp->opn_prb[0]]->opv_subselect )
		)
	    {	/* right child is a subselect so test for correct leaf
		** placement */
		if (!subp->opv_eqcrequired
		    ||
		    !BTsubset( (char *)subp->opv_eqcrequired,
			(char *)&nodep->opn_eqm,
			(i4)BITS_IN(OPE_BMEQCLS) )
		    )
		    return (FALSE);	/* cannot evaluate all boolean
					** factors with SEJOIN nodes
					** in this configuration */
		else
		{   /* check if all correlated variables are available in the
		    ** outer - FIXME create another pointer field which
		    ** contains this information so this loop is avoided */
		    for (;subp; subp = subp->opv_nsubselect)
		    {
			if (!BTsubset( (char *)&subp->opv_eqcmp,
				(char *)&leftchildp->opn_eqm,
				(i4)BITS_IN(OPE_BMEQCLS) )
			    )
			    return (FALSE);	/* not all correlated
					** equivalence classes
					** are available from the outer
					** so return */
		    }
		}
	    }
	    /* symmetric check when the LEFT child is the subselect leaf;
	    ** correlated eqcs must then come from the right child */
	    if ( (leftchildp->opn_nleaves == 1)
		&&
		(subp = subquery->ops_vars.opv_base->
		    opv_rt[leftchildp->opn_prb[0]]->opv_subselect)
		)
	    {
		if (!subp->opv_eqcrequired
		    ||
		    !BTsubset( (char *)subp->opv_eqcrequired,
			(char *)&nodep->opn_eqm,
			(i4)BITS_IN(OPE_BMEQCLS) )
		    )
		    return (FALSE);	/* cannot evaluate all boolean
					** factors with SEJOIN nodes
					** in this configuration */
		else
		{   /* check if all correlated variables are available in the
		    ** outer - FIXME create another pointer field which
		    ** contains this information so this loop is avoided */
		    for (;subp; subp = subp->opv_nsubselect)
		    {
			if (!BTsubset( (char *)&subp->opv_eqcmp,
				(char *)&rightchildp->opn_eqm,
				(i4)BITS_IN(OPE_BMEQCLS) )
			    )
			    return (FALSE);	/* not all correlated
					** equivalence classes
					** are available from the outer
					** so return */
		    }
		}
	    }
	}
	if ((subquery->ops_oj.opl_lv > 0)
	    &&
	    (nodep->opn_ojid != OPL_NOOUTER))
	{
	    OPV_IVARS	maxvar;
	    /* this is an outer join function attribute which should
	    ** only appear at the point that the outer join is
	    ** actually performed */
	    maxvar = subquery->ops_vars.opv_rv;
	    outerp = subquery->ops_oj.opl_base->opl_ojt[nodep->opn_ojid];
	    if ((outerp->opl_type == OPL_LEFTJOIN)
		||
		(outerp->opl_type == OPL_FULLJOIN))
	    {	/* add the special OJ equivalence class of every inner
		** variable of this OJ that lies in this subtree */
		OPV_BMVARS	tempvmap;
		OPV_IVARS	innervar;
		MEcopy((PTR)outerp->opl_maxojmap, sizeof(tempvmap),
		    (PTR)&tempvmap);
		BTand((i4)BITS_IN(tempvmap), (char *)&nodep->opn_rlmap,
		    (char *)&tempvmap);
		for (innervar = -1;
		    (innervar = BTnext((i4)innervar, (char *)&tempvmap,
			(i4)maxvar))>=0;)
		{
		    OPE_IEQCLS	ojeqcls;
		    ojeqcls = subquery->ops_vars.opv_base->opv_rt
			[innervar]->opv_ojeqc;
		    if (ojeqcls != OPE_NOEQCLS)
			BTset((i4)ojeqcls, (char *)&nodep->opn_eqm);
		}
	    }
	}
	{   /* determine multi-variable functions that can first be calculated
	    ** at this node */
	    OPZ_IFATTS	fattr;		/* current function attribute
					** being analyzed */
	    OPZ_IFATTS	maxfattr;	/* maximum number of function
					** attributes defined */
	    OPZ_FT	*fbase;		/* ptr to base of array of ptrs
					** to function attribute
					** elements */
	    OPZ_AT	*abase;		/* ptr to base of array of ptrs
					** to joinop attribute elements */

	    maxfattr = subquery->ops_funcs.opz_fv; /* number of function
					** attributes defined */
	    fbase = subquery->ops_funcs.opz_fbase; /* ptr to base of array
					** of function attributes */
	    abase = subquery->ops_attrs.opz_base; /* ptr to base of array
					** of ptrs to joinop attribute
					** elements */
	    for (fattr = 0; fattr < maxfattr ; fattr++)
	    {
		OPZ_FATTS	*fattrp;    /* ptr to current function
					    ** attribute being analyzed */
		OPE_IEQCLS	eqcls;	    /* equivalence class of the
					    ** multi-variable function
					    ** attribute */
		fattrp = fbase->opz_fatts[fattr];
		eqcls = abase->opz_attnums[fattrp->opz_attno]->opz_equcls; /*
					    ** equivalence class associated
					    ** with the multi-variable
					    ** function attribute */
		/* check for WHERE clause (OPL_NOOUTER opz_ojid) OJSVAR func att
		** and then make sure that all OJs that this func att is inner
		** to are executed before the join involving this func att */
		if (fattrp->opz_type == OPZ_SVAR
		    && (fattrp->opz_mask & OPZ_OJSVAR)
		    && nodep->opn_ojid != OPL_NOOUTER
		    && fattrp->opz_ojid == OPL_NOOUTER
		    && (fattrp->opz_ijmap
		    && BTtest((i4)nodep->opn_ojid, (char *)fattrp->opz_ijmap)
		    && !(outerp->opl_mask & OPL_BOJCARTPROD)))
		{
		    OPZ_IATTS		attno;
		    bool		allinrlmap = TRUE;
		    OPE_EQCLIST		*eqclsp;
		    eqclsp = subquery->ops_eclass.ope_base->ope_eqclist[eqcls];
		    /* reject the plan if every attribute of the func att's
		    ** eqc comes from variables inside this subtree */
		    for (attno = -1; (attno = BTnext((i4)attno,
			(PTR)&eqclsp->ope_attrmap,
			(i4)subquery->ops_attrs.opz_av)) != -1; )
		    {
			if (!BTtest((i4)abase->opz_attnums[attno]->opz_varnm,
			    (char *)&nodep->opn_rlmap))
			{
			    allinrlmap = FALSE;
			    break;
			}
		    }
		    if (allinrlmap)
			return(FALSE);
		}
		if ((fattrp->opz_type != OPZ_MVAR)
		    && !(fattrp->opz_type == OPZ_SVAR
			&& fattrp->opz_mask & OPZ_OJSVAR
			&& fattrp->opz_ojid == nodep->opn_ojid)
		    || (fattrp->opz_mask & OPZ_OJFA))
		    continue;		/* only multi-variable
					** functions are assigned here
					** (and SVARs in ON clauses
					** of OJs eval'd at this node)
					** since others where assigned
					** earlier, ... also outer join
					** special eqc were assigned
					** prior to this loop */
		if (fattrp->opz_type == OPZ_SVAR)
		{   /* Must be ON clause ref'ed SVAR. Set bit in whichever
		    ** child node covers the SVAR eqcmap. */
		    if (BTsubset((char *)&fattrp->opz_eqcm,
			    (char *)&nodep->opn_child[1]->opn_eqm,
			    (i4)BITS_IN(nodep->opn_eqm)))
			BTset((i4)eqcls,
			    (char *)&nodep->opn_child[1]->opn_eqm);
		    else if (BTsubset((char *)&fattrp->opz_eqcm,
			    (char *)&nodep->opn_child[0]->opn_eqm,
			    (i4)BITS_IN(nodep->opn_eqm)))
			BTset((i4)eqcls,
			    (char *)&nodep->opn_child[0]->opn_eqm);
					/* set fattr eqcls bit in
					** proper child opn_eqm */
		    continue;
		}
		if (BTtest( (i4)eqcls, (char *)&nodep->opn_eqm))
		    continue;		/* if function attribute has
					** been added then
					** continue */
		if (BTsubset((char *)&fattrp->opz_eqcm,
			(char *)&nodep->opn_eqm,
			(i4)BITS_IN(nodep->opn_eqm))
		    )
		    BTset((i4)eqcls, (char *)&nodep->opn_eqm); /* set bit if all
					** the required equivalence
					** classes are available
					** for the function attribute */
	    }
	}
    }
    return(TRUE);
}
/*
** Name: crsr_cols - serialize one row's column values to the client
**
** Description:
**	Walks the columns [cur_col, cur_col + col_cnt) of the given row and
**	appends each value to the outgoing message via the gcd_put_*()
**	routines.  NULL values are sent as a single 0 indicator byte;
**	non-NULL values get a 1 indicator byte (for BLOB types, only on the
**	first segment) followed by a type-specific encoding.  Money, decimal,
**	date/time and interval types are converted to varchar text via
**	gcd_api_format() before being sent.  BLOB segments are buffered in
**	scb->seg_buff and split across messages as needed.
**
** Inputs:
**	ccb		connection control block (used for id/tracing and
**			session abort).
**	scb		statement control block; column descriptors, data
**			values and BLOB segment state live here.
**	rcb_ptr		request/reply control block used for output; may be
**			replaced when a message is ended and a new one begun.
**	row		row index into the column data array.
**	more_segments	TRUE if further BLOB segments follow for the
**			current column.
**
** Outputs:
**	None (data is written into the message buffer; scb->column and
**	scb->seg_* state are updated).  On allocation failure or an
**	unsupported datatype the session is aborted and the function
**	returns early.
*/

static void
crsr_cols( GCD_CCB *ccb, GCD_SCB *scb, GCD_RCB **rcb_ptr,
	   u_i2 row, bool more_segments )
{
    IIAPI_DESCRIPTOR	*desc = (IIAPI_DESCRIPTOR *)scb->column.desc;
    IIAPI_DATAVALUE	*data =
	&((IIAPI_DATAVALUE *)scb->column.data)[ row * scb->column.max_cols ];
    u_i2		end = scb->column.cur_col + scb->column.col_cnt;
    u_i2		col, len;

    for( col = scb->column.cur_col; col < end; col++ )
    {
	if ( desc[ col ].ds_nullable  &&  data[ col ].dv_null )
	{
	    gcd_put_i1( rcb_ptr, 0 );	/* No data - NULL value */
	    continue;
	}

	/*
	** Write the data indicator byte, for BLOBs this is
	** only done on the first segment (more_segments is
	** saved below once the segment has been processed,
	** so the saved value is FALSE on the first segment).
	*/
	if ( (desc[ col ].ds_dataType != IIAPI_LVCH_TYPE  &&
	      desc[ col ].ds_dataType != IIAPI_LNVCH_TYPE  &&
	      desc[ col ].ds_dataType != IIAPI_LBYTE_TYPE)  ||
	     ! scb->column.more_segments )
	    gcd_put_i1( rcb_ptr, 1 );

	switch( desc[ col ].ds_dataType )
	{
	case IIAPI_INT_TYPE :
	    /* Fixed-size integers, dispatched on byte length */
	    switch( desc[ col ].ds_length )
	    {
	    case 1 : gcd_put_i1p(rcb_ptr, (u_i1 *)data[col].dv_value); break;
	    case 2 : gcd_put_i2p(rcb_ptr, (u_i1 *)data[col].dv_value); break;
	    case 4 : gcd_put_i4p(rcb_ptr, (u_i1 *)data[col].dv_value); break;
	    case 8 : gcd_put_i8p(rcb_ptr, (u_i1 *)data[col].dv_value); break;
	    }
	    break;

	case IIAPI_FLT_TYPE :
	    /* 4 or 8 byte floats */
	    switch( desc[ col ].ds_length )
	    {
	    case 4 : gcd_put_f4p(rcb_ptr, (u_i1 *)data[col].dv_value); break;
	    case 8 : gcd_put_f8p(rcb_ptr, (u_i1 *)data[col].dv_value); break;
	    }
	    break;

	case IIAPI_MNY_TYPE :
	{
	    IIAPI_DESCRIPTOR	idesc, ddesc;
	    IIAPI_DATAVALUE	idata, ddata;
	    STATUS		status;
	    char		dbuff[ 130 ];	/* varchar(128) */
	    char		dec[ 8 ];

	    /*
	    ** It would be nice to convert directly to
	    ** varchar, but money formatting is nasty.
	    ** So we first convert to decimal, then to
	    ** varchar.
	    */
	    idesc.ds_dataType = IIAPI_DEC_TYPE;
	    idesc.ds_nullable = FALSE;
	    idesc.ds_length = sizeof( dec );
	    idesc.ds_precision = 15;
	    idesc.ds_scale = 2;
	    idata.dv_null = FALSE;
	    idata.dv_length = sizeof( dec );
	    idata.dv_value = (PTR)&dec;

	    ddesc.ds_dataType = IIAPI_VCH_TYPE;
	    ddesc.ds_nullable = FALSE;
	    ddesc.ds_length = sizeof( dbuff );
	    ddesc.ds_precision = 0;
	    ddesc.ds_scale = 0;
	    ddata.dv_null = FALSE;
	    ddata.dv_length = sizeof( dbuff );
	    ddata.dv_value = dbuff;

	    if ( (status = gcd_api_format( ccb, &desc[ col ], &data[ col ],
					   &idesc, &idata )) != OK  ||
		 (status = gcd_api_format( ccb, &idesc, &idata,
					   &ddesc, &ddata )) != OK )
	    {
		/*
		** Conversion error.  Send a zero-length
		** string as error indication and log error.
		*/
		gcd_put_i2( rcb_ptr, 0 );
		gcu_erlog( 0, GCD_global.language, status, NULL, 0, NULL );
	    }
	    else
	    {
		/* dbuff holds a varchar: leading 2-byte length, then text */
		MEcopy( (PTR)dbuff, 2, (PTR)&len );
		gcd_put_i2( rcb_ptr, len );
		gcd_put_bytes( rcb_ptr, len, (u_i1 *)&dbuff[2] );
	    }
	}
	break;

	case IIAPI_BOOL_TYPE :
	    gcd_put_i1p( rcb_ptr, (u_i1 *)data[ col ].dv_value );
	    break;

	case IIAPI_DEC_TYPE :	/* These types are all sent in text format */
	case IIAPI_DTE_TYPE :
	case IIAPI_DATE_TYPE :
	case IIAPI_TIME_TYPE :
	case IIAPI_TMWO_TYPE :
	case IIAPI_TMTZ_TYPE :
	case IIAPI_TS_TYPE :
	case IIAPI_TSWO_TYPE :
	case IIAPI_TSTZ_TYPE :
	case IIAPI_INTYM_TYPE :
	case IIAPI_INTDS_TYPE :
	{
	    IIAPI_DESCRIPTOR	ddesc;
	    IIAPI_DATAVALUE	ddata;
	    STATUS		status;
	    char		dbuff[ 130 ];	/* varchar(128) */

	    ddesc.ds_dataType = IIAPI_VCH_TYPE;
	    ddesc.ds_nullable = FALSE;
	    ddesc.ds_length = sizeof( dbuff );
	    ddesc.ds_precision = 0;
	    ddesc.ds_scale = 0;
	    ddata.dv_null = FALSE;
	    ddata.dv_length = sizeof( dbuff );
	    ddata.dv_value = dbuff;

	    status = gcd_api_format( ccb, &desc[ col ], &data[ col ],
				     &ddesc, &ddata );

	    if ( status != OK )
	    {
		/*
		** Conversion error.  Send a zero-length
		** string as error indication and log error.
		*/
		gcd_put_i2( rcb_ptr, 0 );
		gcu_erlog( 0, GCD_global.language, status, NULL, 0, NULL );
	    }
	    else
	    {
		/* dbuff holds a varchar: leading 2-byte length, then text */
		MEcopy( (PTR)dbuff, 2, (PTR)&len );
		gcd_put_i2( rcb_ptr, len );
		gcd_put_bytes( rcb_ptr, len, (u_i1 *)&dbuff[2] );
	    }
	}
	break;

	case IIAPI_CHA_TYPE :
	case IIAPI_CHR_TYPE :
	case IIAPI_BYTE_TYPE :
	    /* Fixed-length: send the declared length verbatim */
	    gcd_put_bytes( rcb_ptr, desc[ col ].ds_length,
			   (u_i1 *)data[ col ].dv_value );
	    break;

	case IIAPI_NCHA_TYPE :
	    /* Fixed-length UCS2: length is in code units, not bytes */
	    gcd_put_ucs2( rcb_ptr, desc[ col ].ds_length / sizeof( UCS2 ),
			  (u_i1 *)data[ col ].dv_value );
	    break;

	case IIAPI_TXT_TYPE :
	case IIAPI_LTXT_TYPE :
	case IIAPI_VCH_TYPE :
	case IIAPI_VBYTE_TYPE :
	    /* Variable-length: 2-byte length prefix precedes the data */
	    MEcopy( data[ col ].dv_value, 2, (PTR)&len );
	    gcd_put_i2( rcb_ptr, len );
	    gcd_put_bytes( rcb_ptr, len, (u_i1 *)data[ col ].dv_value + 2 );
	    break;

	case IIAPI_NVCH_TYPE :
	    /* Variable-length UCS2: 2-byte length prefix (in code units) */
	    MEcopy( data[ col ].dv_value, 2, (PTR)&len );
	    gcd_put_i2( rcb_ptr, len );
	    gcd_put_ucs2( rcb_ptr, len, (u_i1 *)data[ col ].dv_value + 2 );
	    break;

	case IIAPI_LCLOC_TYPE :
	case IIAPI_LNLOC_TYPE :
	case IIAPI_LBLOC_TYPE :
	    /* LOB locators are sent as 4-byte values */
	    gcd_put_i4p( rcb_ptr, (u_i1 *)data[ col ].dv_value );
	    break;

	case IIAPI_LVCH_TYPE :
	case IIAPI_LNVCH_TYPE :
	case IIAPI_LBYTE_TYPE :
	{
	    u_i1	*ptr;
	    u_i2	seg_len, chrs, char_size = sizeof( char );
	    bool	ucs2 = FALSE;

	    if ( desc[ col ].ds_dataType == IIAPI_LNVCH_TYPE )
	    {
		ucs2 = TRUE;
		char_size = sizeof( UCS2 );
	    }

	    if ( data[ col ].dv_length < 2 )
		seg_len = 0;	/* no segment data present */
	    else
	    {
		MEcopy( data[ col ].dv_value, 2, (PTR)&seg_len );
		ptr = (u_i1 *)data[ col ].dv_value + 2;
		seg_len *= char_size;	/* convert array len to byte len */
	    }

	    /*
	    ** Output data as long as there is sufficient
	    ** data to fill the current message.  Any data
	    ** remaining is saved until additional data is
	    ** received or the end of the BLOB is reached.
	    **
	    ** We actually make sure that room for the
	    ** segment (length indicator and data) and an
	    ** end-of-segments indicator is left in the
	    ** message buffer.  This way the end-of-BLOB
	    ** processing below does not need to worry
	    ** about splitting the message.
	    **
	    ** The test against the save buffer size is
	    ** redundant since the buffer should be at
	    ** least as big as a message buffer, but we
	    ** make the test just to be safe.
	    */
	    while( (seg_len + scb->seg_len + 4) > RCB_AVAIL(*rcb_ptr)  ||
		   (seg_len + scb->seg_len) > scb->seg_max )
	    {
		/*
		** Can a valid data segment be placed in the buffer?
		*/
		if ( RCB_AVAIL(*rcb_ptr) >= (2 + char_size)  &&
		     (seg_len + scb->seg_len) >= char_size )
		{
		    /* segment count is in characters, not bytes */
		    len = min( seg_len + scb->seg_len,
			       RCB_AVAIL( *rcb_ptr ) - 2 );
		    chrs = len / char_size;
		    len = chrs * char_size;
		    gcd_put_i2( rcb_ptr, chrs );

		    if ( GCD_global.gcd_trace_level >= 5 )
			TRdisplay( "%4d    GCD send segment: %d (%d,%d)\n",
				   ccb->id, len, scb->seg_len, seg_len );

		    /*
		    ** First, send saved data.
		    */
		    if ( scb->seg_len >= char_size )
		    {
			len = min( scb->seg_len, RCB_AVAIL( *rcb_ptr ) );
			chrs = len / char_size;
			len = chrs * char_size;

			if ( ! ucs2 )
			    gcd_put_bytes( rcb_ptr, len, scb->seg_buff );
			else
			    gcd_put_ucs2( rcb_ptr, chrs, scb->seg_buff );

			scb->seg_len -= len;

			/* shift any unsent saved data to the buffer start */
			if ( scb->seg_len )
			    MEcopy( (PTR)(scb->seg_buff + len),
				    scb->seg_len, (PTR)scb->seg_buff );
		    }

		    /*
		    ** Now send data from current segment.
		    */
		    if ( seg_len >= char_size  &&
			 RCB_AVAIL( *rcb_ptr ) >= char_size )
		    {
			len = min( seg_len, RCB_AVAIL( *rcb_ptr ) );
			chrs = len / char_size;
			len = chrs * char_size;

			if ( ! ucs2 )
			    gcd_put_bytes( rcb_ptr, len, ptr );
			else
			    gcd_put_ucs2( rcb_ptr, chrs, ptr );

			ptr += len;
			seg_len -= len;
		    }
		}

		/* flush the filled message and start a new DATA message */
		gcd_msg_end( rcb_ptr, FALSE );
		gcd_msg_begin( ccb, rcb_ptr, MSG_DATA, 0 );
	    }

	    /*
	    ** Save any data left in the current segment.
	    ** The preceding loop makes sure there is room
	    ** in the buffer for the remainder of the
	    ** current segment.
	    */
	    if ( seg_len )
	    {
		if ( ! scb->seg_buff )
		{
		    /* lazily allocate the segment save buffer */
		    scb->seg_buff = (u_i1 *)MEreqmem( 0, scb->seg_max,
						      FALSE, NULL );

		    if ( ! scb->seg_buff )
		    {
			if ( GCD_global.gcd_trace_level >= 1 )
			    TRdisplay( "%4d    GCD alloc fail seg: %d\n",
				       ccb->id, scb->seg_max );
			gcu_erlog( 0, GCD_global.language,
				   E_GC4808_NO_MEMORY, NULL, 0, NULL );
			gcd_sess_abort( ccb, E_GC4808_NO_MEMORY );
			return;
		    }
		}

		if ( GCD_global.gcd_trace_level >= 5 )
		    TRdisplay( "%4d    GCD save segment: %d (total %d) \n",
			       ccb->id, seg_len, scb->seg_len + seg_len );

		MEcopy( (PTR)ptr, seg_len,
			(PTR)(scb->seg_buff + scb->seg_len) );
		scb->seg_len += seg_len;
	    }

	    /*
	    ** When the BLOB is complete, write the last
	    ** segment and end-of-segments indicator.
	    */
	    if ( ! (scb->column.more_segments = more_segments) )
	    {
		/*
		** The processing loop above makes sure there
		** is room for the last segment.
		*/
		if ( scb->seg_len )
		{
		    if ( GCD_global.gcd_trace_level >= 5 )
			TRdisplay( "%4d    GCD send segment: %d \n",
				   ccb->id, scb->seg_len );

		    chrs = scb->seg_len / char_size;
		    len = chrs * char_size;
		    scb->seg_len = 0;
		    gcd_put_i2( rcb_ptr, chrs );

		    if ( ! ucs2 )
			gcd_put_bytes( rcb_ptr, len, scb->seg_buff );
		    else
			gcd_put_ucs2( rcb_ptr, chrs, scb->seg_buff );
		}

		if ( GCD_global.gcd_trace_level >= 5 )
		    TRdisplay( "%4d    GCD send end-of-segments\n", ccb->id );

		gcd_put_i2( rcb_ptr, 0 );	/* zero-length = end marker */
	    }
	}
	break;

	default :
	    /* Should not happen since checked in send_desc() */
	    if ( GCD_global.gcd_trace_level >= 1 )
		TRdisplay( "%4d    GCD invalid datatype: %d\n",
			   ccb->id, desc[ col ].ds_dataType );
	    gcd_sess_abort( ccb, E_GC4812_UNSUPP_SQL_TYPE );
	    return;
	}
    }

    return;
}
/*{ ** Name: psy_dpermit - Define a permit. ** ** INTERNAL PSF call format: status = psy_dpermit(&psy_cb, sess_cb); ** ** EXTERNAL call format: status = psy_call(PSY_DPERMIT, &psy_cb, sess_cb); ** ** Description: ** Given all of the parameters necessary to CREATE/DEFINE a permit on a ** table or view, this function will store the permission in the system ** catalogs. This will include storing the query tree in the tree table, ** storing the text of the query in the iiqrytext table (really done by ** QEF), storing a row in the protect table, and issuing an "alter table" ** operation to DMF to indicate that there are permissions on the given ** table. ** ** Inputs: ** psy_cb ** .psy_qrytext Id of query text as stored in QSF. ** .psy_cols[] Array of columns on which to grant ** permission ** .psy_numcols Number of columns listed above; 0 means ** give permission on all columns ** .psy_intree QSF id of query tree representing the ** where clause in the permit ** .psy_opctl Bit map of defined operations ** .psy_opmap Bit map of permitted operations ** .psy_user Name of user who will get permission ** .psy_terminal Terminal at which permission is given ** (blank if none specified) ** .psy_timbgn Time of day at which the permission ** begins (minutes since 00:00) ** .psy_timend Time of day at which the permission ends ** (minutes since 00:00) ** .psy_daybgn Day of week at which the permission ** begins (0 = Sunday) ** .psy_dayend Day of week at which the permission ends ** (0 = Sunday) ** .psy_grant ** PSY_CPERM CREATE/DEFINE PERMIT ** .psy_tblq head of table queue ** .psy_colq head of column queue ** .psy_usrq head of user queue ** .psy_qlen length of first iiqrytext ** .psy_flags useful info ** PSY_EXCLUDE_COLUMNS user specified a list of columns to ** which privilege should not apply ** sess_cb Pointer to session control block ** (Can be NULL) ** ** Outputs: ** psy_cb ** .psy_txtid Id of query text as stored in the ** iiqrytext system relation. 
** .psy_error Filled in if error happens ** Returns: ** E_DB_OK Function completed normally. ** E_DB_WARN Function completed with warning(s); ** E_DB_ERROR Function failed; non-catastrophic error ** E_DB_FATAL Function failed; catastrophic error ** Exceptions: ** none ** ** Side Effects: ** Stores text of query in iiqrytext relation, query tree in tree ** relation, row in protect relation identifying the permit. Does ** an alter table DMF operation to indicate that there are permissions ** on the table. ** ** History: ** 02-oct-85 (jeff) ** written ** 03-sep-86 (seputis) ** changed some psy_cb. to psy_cb-> ** added .db_att_id reference ** changed rdr_cb. rdr_cb-> ** 02-dec-86 (daved) ** bug fixing. check for permit on tables owned by user and not ** view. ** 29-apr-87 (stec) ** Implemented changes for GRANT statement. ** 10-may-88 (stec) ** Make changes for db procs. ** 03-oct-88 (andre) ** Modified call to pst_rgent to pass 0 as a query mode since it is ** clearly not PSQ_DESTROY ** 06-feb-89 (ralph) ** Added support for 300 attributes: ** Use DB_COL_BITS in place of DB_MAX_COLS ** Loop over domset array using DB_COL_WORDS ** 06-mar-89 (ralph) ** GRANT Enhancements, Phase 1: ** Initialize new DB_PROTECTION fields, dbp_seq and dbp_gtype ** 03-apr-89 (ralph) ** GRANT Enhancements, Phase 2: ** Use DBGR_USER when initializing dbp_gtype ** 08-may-89 (ralph) ** Initialize reserved field to blanks (was \0) ** 04-jun-89 (ralph) ** Initialize dbp_fill1 to zero ** Fix unix portability problems ** 02-nov-89 (neil) ** Alerters: Allowed privileges for events. ** 1-mar-90 (andre) ** If processing a GRANT on tables, check if ** ALL-TO-ALL or RETRIEVE-TO-ALL has already been granted, and if so, ** mark psy_mask appropriately. ** If user tried to CREATE ALL/RETRIEVE-TO-ALL, and one already exists, ** skip to the exit. ** 12-mar-90 (andre) ** set rdr_2types_mask to 0. 
** 22-may-90 (teg) ** init rdr_instr to RDF_NO_INSTR ** 08-aug-90 (ralph) ** Initialize new fields in iiprotect tuple ** 14-dec-90 (ralph) ** Disallow use of GRANT by non-DBA if xORANGE ** 11-jan-90 (ralph) ** Allow user "$ingres" to use GRANT if xORANGE. ** This was done for CREATEDB (UPGRADEFE). ** 20-feb-91 (andre) ** For CREATE/DEFINE PERMIT, grantee type was stored in ** psy_cb->psy_gtype. ** 24-jun-91 (andre) ** IIPROTECT tuples for table permits will contain exactly one ** privilege. IIQRYTEXT template built for table-wide privileges ** contains a placeholder for a privilege name which will be filled in ** with each of the table-wide privileges being granted, one at a time. ** PSY_CB.psy_opmap will be set to correspond with privilege name ** stored in the IIQRYTEXT permit. ** 16-jul-91 (andre) ** responsibility for splitting permit tuples will passed on to ** qeu_cprot(). If a permit specified only one privilege, we will ** substitute the appropriate privilege name here and will not ask ** qeu_cprot() to split tuples. ** 06-aug-91 (andre) ** before proceeding to CREATE a permit on a view owned by the current ** user, we will call psy_tbl_grant_check() to ensure that this user ** may create a permit on his view. If the object is not owned by the ** current user, we will not try to verify that the user may ** CREATE/DEFINE a permit since (until the relevant FE changes are ** made) we intend to continue allowing any user with CATUPD to ** CREATE/DEFINE permits on catalogs and the dba will be allowed to ** CREATE/DEFINE permits on extended catalogs ** 11-nov-91 (rblumer) ** merged from 6.4: 26-feb-91 (andre) ** PST_QTREE was changed to store the range table as an array of ** pointers to PST_RNGENTRY structure. ** 14-feb-92 (andre) ** we will no longer have to fill in privilege name for permits ** specifying one privilege - it will be handled in respective ** grammars. 
**	15-jun-92 (barbara)
**	    For Sybil, change interface to pst_rgent(), Star returns from
**	    psy_dpermit before permits get stored.
**	07-jul-92 (andre)
**	    DB_PROTECTION tuple will contain an indicator of how the permit was
**	    created, i.e. whether it was created using SQL or QUEL and if the
**	    former, then whether it was created using GRANT statement.  Having
**	    this information will facilitate merging similar and identical
**	    permit tuples.
**	14-jul-92 (andre)
**	    semantics of GRANT ALL [PRIVILEGES] is different from that of
**	    CREATE PERMIT ALL in that the former (as dictated by SQL92) means
**	    "grant all privileges which the current auth id possesses WGO"
**	    whereas the latter (as is presently interpreted) means "grant all
**	    privileges that can be defined on the object" which in case of
**	    tables and views means SELECT, INSERT, DELETE, UPDATE.
**	    psy_tbl_grant_check() (function responsible for determining whether
**	    a user may grant specified privilege on a specified table or view)
**	    will have to be notified whether we are processing GRANT ALL.  Its
**	    behaviour will change as follows:
**	    - if processing GRANT ALL and psy_tbl_grant_check() determines
**	      that the user does not possess some (but not all) of the
**	      privileges passed to it by the caller it will not treat it as an
**	      error, but will instead inform the caller of privileges that the
**	      user does not possess,
**	    - if processing GRANT ALL and psy_tbl_grant_check() determines
**	      that the user does not possess any of the privileges passed to
**	      it by the caller it will treat it as an error
**	    - if processing a statement other than GRANT ALL and
**	      psy_tbl_grant_check() determines that the user does not possess
**	      some of the privileges passed to it by the caller it will treat
**	      it as an error
**	16-jul-92 (andre)
**	    if a permit being created depends on some privileges, build a
**	    structure describing these privileges and store its address in
**	    rdf_cb->rdr_indep.
** 18-jul-92 (andre) ** we will no longer be telling QEF to turn off DMT_ALL_PROT or ** DMT_RETRIEVE_PRO when a user creates ALL/RETRIEVE TO ALL permit. ** QEF will figure out on its own whether PUBLIC now has RETRIEVE or ** ALL on a table/view ** 20-jul-92 (andre) ** if user specified a list of columns to which privilege(s) should ** not apply, set dbp_domset correctly ** 03-aug-92 (barbara) ** Invalidate base table infoblk from RDF cache for CREATE PERMIT ** and CREATE SEC_ALARM. ** 16-sep-92 (andre) ** privilege maps are build using bitwise ops, so care should be ** exercised when accessing it using BT*() functions ** 17-jun-93 (andre) ** changed interface of psy_secaudit() to accept PSS_SESBLK ** 5-jul-93 (robf) ** changed interface of psy_secaudit() to accept security label ** 7-jan-94 (swm) ** Bug #58635 ** Added PTR cast for qsf_owner which has changed type to PTR. ** 06-mar-96 (nanpr01) ** Move the QSF request block initialization up. because if ** pst_rgnent returns a failure status code, subsequent QSF ** calls get bad control block error. */ DB_STATUS psy_dpermit( PSY_CB *psy_cb, PSS_SESBLK *sess_cb) { RDF_CB rdf_cb; register RDR_RB *rdf_rb = &rdf_cb.rdf_rb; QSF_RCB qsf_rb; DB_STATUS status; DB_STATUS stat; DB_PROTECTION ptuple; register DB_PROTECTION *protup = &ptuple; i4 *domset = ptuple.dbp_domset; register i4 i, j; i4 err_code; PSS_RNGTAB *rngvar; PSS_USRRANGE *rngtab; PST_PROCEDURE *pnode; PST_QTREE *qtree; DB_ERROR *err_blk = &psy_cb->psy_error; i4 textlen; i4 tree_lock = 0; i4 text_lock = 0; DB_TAB_ID tabids[PST_NUMVARS]; PSQ_INDEP_OBJECTS indep_objs; PSQ_OBJPRIV obj_priv; /* space for independent DELETE */ PSQ_COLPRIV col_privs[2]; /* ** space for independent INSERT and ** UPDATE */ PST_VRMAP varmap; PSY_TBL *psy_tbl; DB_TIME_ID timeid; /* ** For CREATE/DEFINE PERMIT execute code below. 
*/ /* initialize the QSF control block */ qsf_rb.qsf_type = QSFRB_CB; qsf_rb.qsf_ascii_id = QSFRB_ASCII_ID; qsf_rb.qsf_length = sizeof(qsf_rb); qsf_rb.qsf_owner = (PTR)DB_PSF_ID; qsf_rb.qsf_sid = sess_cb->pss_sessid; rngtab = &sess_cb->pss_auxrng; /* table info is stored in the only entry in the table queue */ psy_tbl = (PSY_TBL *) psy_cb->psy_tblq.q_next; status = pst_rgent(sess_cb, rngtab, -1, "", PST_SHWID, (DB_TAB_NAME *) NULL, (DB_TAB_OWN *) NULL, &psy_tbl->psy_tabid, TRUE, &rngvar, (i4) 0, err_blk); if (DB_FAILURE_MACRO(status)) goto exit; /* In STAR, we do not actually store permits */ if (sess_cb->pss_distrib & DB_3_DDB_SESS) { qsf_rb.qsf_lk_state = QSO_EXLOCK; goto exit; } /* Fill in the RDF request block */ pst_rdfcb_init(&rdf_cb, sess_cb); /* The table which is receiving the permit */ STRUCT_ASSIGN_MACRO(psy_tbl->psy_tabid, rdf_rb->rdr_tabid); /* Tell RDF we're doing a permit definition */ rdf_rb->rdr_update_op = RDR_APPEND; rdf_rb->rdr_types_mask = RDR_PROTECT; rdf_rb->rdr_qrytuple = (PTR) protup; /* initialize independent object structure */ indep_objs.psq_objs = (PSQ_OBJ *) NULL; indep_objs.psq_objprivs = (PSQ_OBJPRIV *) NULL; indep_objs.psq_colprivs = (PSQ_COLPRIV *) NULL; indep_objs.psq_grantee = &sess_cb->pss_user; rdf_rb->rdr_indep = (PTR) &indep_objs; /* ** populate the IIPROTECT tuple */ /* Zero out the template */ (VOID)MEfill(sizeof(ptuple), (u_char) 0, (PTR) protup); /* store grantee type */ protup->dbp_gtype = psy_cb->psy_gtype; /* Init reserved block */ (VOID)MEfill(sizeof(protup->dbp_reserve), (u_char) ' ', (PTR) protup->dbp_reserve); /* Init obj name */ STRUCT_ASSIGN_MACRO(psy_tbl->psy_tabnm, protup->dbp_obname); /*@FIX_ME@ Where does this come from? 
*/ protup->dbp_obstat = ' '; /* store the object type indicator */ if (psy_tbl->psy_mask & PSY_OBJ_IS_TABLE) { protup->dbp_obtype = DBOB_TABLE; } else if (psy_tbl->psy_mask & PSY_OBJ_IS_VIEW) { protup->dbp_obtype = DBOB_VIEW; } else { protup->dbp_obtype = DBOB_INDEX; } STRUCT_ASSIGN_MACRO(psy_tbl->psy_owner, protup->dbp_obown); STRUCT_ASSIGN_MACRO(sess_cb->pss_user, protup->dbp_grantor); TMnow((SYSTIME *)&timeid); protup->dbp_timestamp.db_tim_high_time = timeid.db_tim_high_time; protup->dbp_timestamp.db_tim_low_time = timeid.db_tim_low_time; /* The table on which we're giving permission */ STRUCT_ASSIGN_MACRO(psy_tbl->psy_tabid, protup->dbp_tabid); /* Beginning and ending times of day */ protup->dbp_pdbgn = psy_cb->psy_timbgn; protup->dbp_pdend = psy_cb->psy_timend; /* Beginning and ending days of week */ protup->dbp_pwbgn = psy_cb->psy_daybgn; protup->dbp_pwend = psy_cb->psy_dayend; if (psy_cb->psy_numcols != 0 && ~psy_cb->psy_flags & PSY_EXCLUDE_COLUMNS) { /* user specified a list of columns to which privilege(s) will apply */ /* Bit map of permitted columns */ psy_fill_attmap(domset, ((i4) 0)); for (i = 0; i < psy_cb->psy_numcols; i++) { BTset((i4)psy_cb->psy_cols[i].db_att_id, (char *) domset); } } else { /* ** user specified table-wide privilege(s) or a list of columns L s.t. 
** privilege(s) will apply to the entire table except for columns in L */ psy_fill_attmap(domset, ~((i4) 0)); if (psy_cb->psy_flags & PSY_EXCLUDE_COLUMNS) { /* ** exclude specified columns from the list of columns to which ** privilege(s) will apply */ for (i = 0; i < psy_cb->psy_numcols; i++) { BTclear((i4) psy_cb->psy_cols[i].db_att_id, (char *) domset); } } } if (rngvar->pss_tabdesc->tbl_status_mask & DMT_VIEW) { /* ** if view is owned by the current user, psy_tbl_grant_check() will ** determine if the permit can, indeed, be created; as long as we are ** preserving the kludge that allows users with CATUPD create permits on ** catalogs and DBAs to create permits on extended catalogs, we shall ** not call psy_tbl_grant_check() on view not owned by the current user, ** since it is likely to result in psy_tbl_grant_check() complaining ** about inadequate permissions */ if (!MEcmp((PTR) &rngvar->pss_ownname, (PTR) &sess_cb->pss_user, sizeof(sess_cb->pss_user))) { i4 tbl_wide_privs; PSY_COL_PRIVS col_specific_privs, *csp, indep_col_specific_privs; DB_TAB_ID indep_id; i4 indep_tbl_wide_privs; bool insuf_privs, quel_view; i4 val1, val2; u_i4 u; /* ** build maps of table-wide and column-specific privileges for ** psy_tbl_grant_check() ** if a column list was specified with CREATE PERMIT and ** privileges specified in the statement include a set of ** privileges S s.t. for all P in S, P can only be specified as ** table-wide with GRANT statement (currently this includes ** SELECT, INSERT, DELETE), we will make ** psy_tbl_grant_check() think that privileges in S are ** table-wide. ** This will work correctly since if the view was defined over ** some objects owned by other user(s), for every P in S we ** would need table-wide privilege WGO on the underlying object. 
** ** For the purposes of providing more descriptive output for ** trace point ps131, if column-list was specified, we will pass ** the map of attributes even if column-specific UPDATE was not ** specified */ if (psy_cb->psy_numcols != 0 && (psy_cb->psy_opmap & DB_REPLACE || ult_check_macro(&sess_cb->pss_trace, PSS_TBL_VIEW_GRANT_TRACE, &val1, &val2) ) ) { i4 *ip; csp = &col_specific_privs; /* ** column-specific UPDATE privilege will not be translated into ** a table-wide privilege since GRANT allows for specification ** of column-specific UPDATE privilege */ csp->psy_col_privs = psy_cb->psy_opmap & DB_REPLACE; tbl_wide_privs = psy_cb->psy_opmap & ~DB_REPLACE; /* ** if creating a permit on a set of columns and UPDATE is not ** one of the privileges named in the statement, store the ** attribute map in the first element of the attribute map list */ ip = (csp->psy_col_privs) ? csp->psy_attmap[PSY_UPDATE_ATTRMAP].map : csp->psy_attmap->map; /* copy the attribute map */ for (u = 0; u < DB_COL_WORDS; u++, ip++) { *ip = domset[u]; } } else { tbl_wide_privs = psy_cb->psy_opmap; csp = (PSY_COL_PRIVS *) NULL; } status = psy_tbl_grant_check(sess_cb, (i4) PSQ_PROT, &rngvar->pss_tabid, &tbl_wide_privs, csp, &indep_id, &indep_tbl_wide_privs, &indep_col_specific_privs, psy_cb->psy_flags, &insuf_privs, &quel_view, &psy_cb->psy_error); if (DB_FAILURE_MACRO(status)) { goto exit; } if (insuf_privs) { /* must audit failure to create a permit */ if ( Psf_srvblk->psf_capabilities & PSF_C_C2SECURE ) { DB_ERROR e_error; /* Must audit CREATE PERMIT failure. */ status = psy_secaudit(FALSE, sess_cb, (char *)&rngvar->pss_tabdesc->tbl_name, &rngvar->pss_tabdesc->tbl_owner, sizeof(DB_TAB_NAME), SXF_E_TABLE, I_SX2016_PROT_TAB_CREATE, SXF_A_FAIL | SXF_A_CREATE, &e_error); status = (status > E_DB_ERROR) ? 
status : E_DB_ERROR; } goto exit; } else if (quel_view) { goto exit; } /* ** If user is trying to grant one or more of ** INSERT/DELETE/UPDATE on his/her view whose underlying table ** or view is owned by another user, psy_tbl_grant_check() will ** return id of the underlying object along with map of ** privileges. We will convert maps of independent privileges ** into elements of independent privilege list and pass them ** along to QEF */ if ( indep_id.db_tab_base != (i4) 0 && ( indep_id.db_tab_base != rngvar->pss_tabid.db_tab_base || indep_id.db_tab_index != rngvar->pss_tabid.db_tab_index ) ) { if (indep_tbl_wide_privs & DB_DELETE) { /* ** the only expected independent table-wide privilege ** is DELETE */ obj_priv.psq_next = (PSQ_OBJPRIV *) NULL; obj_priv.psq_objtype = PSQ_OBJTYPE_IS_TABLE; obj_priv.psq_privmap = (i4) DB_DELETE; obj_priv.psq_objid.db_tab_base = indep_id.db_tab_base; obj_priv.psq_objid.db_tab_index = indep_id.db_tab_index; indep_objs.psq_objprivs = &obj_priv; } if (indep_col_specific_privs.psy_col_privs) { i4 i; u_i4 u; PSQ_COLPRIV *csp; i4 *att_map, *p; i4 priv_map = 0; /* ** privilege map is built using bitwise operators, but ** here using BTnext() makes code much more palatable, ** so convert a privilege map */ if (indep_col_specific_privs.psy_col_privs & DB_APPEND) BTset(DB_APPP, (char *) &priv_map); if (indep_col_specific_privs.psy_col_privs & DB_REPLACE) BTset(DB_REPP, (char *) &priv_map); for (i = -1, csp = col_privs; (i = BTnext(i, (char *) &priv_map, BITS_IN(priv_map))) != -1; csp++ ) { csp->psq_next = indep_objs.psq_colprivs; indep_objs.psq_colprivs = csp; csp->psq_objtype = PSQ_OBJTYPE_IS_TABLE; csp->psq_tabid.db_tab_base = indep_id.db_tab_base; csp->psq_tabid.db_tab_index = indep_id.db_tab_index; switch (i) { case DB_APPP: /* INSERT privilege */ { csp->psq_privmap = (i4) DB_APPEND; att_map = indep_col_specific_privs. 
psy_attmap[PSY_INSERT_ATTRMAP].map; break; } case DB_REPP: { csp->psq_privmap = (i4) DB_REPLACE; att_map = indep_col_specific_privs. psy_attmap[PSY_UPDATE_ATTRMAP].map; break; } } for (p = csp->psq_attrmap, u = 0; u < DB_COL_WORDS; u++) { *p++ = *att_map++; } } } } } else { /* ** either this is a catalog and the user has CATUPD or ** this is an extended catalog and the user is the DBA; ** since we may be allowing a user to create a permit by ** circumventing the permit system, we only need to ascertain that ** this is an SQL view */ i4 issql = 0; status = psy_sqlview(rngvar, sess_cb, err_blk, &issql); if (status) { goto exit; } if (!issql) { /* can only have permits on SQL views */ psf_error(3598L, 0L, PSF_USERERR, &err_code, err_blk, 1, psf_trmwhite(sizeof(rngvar->pss_tabname), (char *) &rngvar->pss_tabname), &rngvar->pss_tabname); status = E_DB_ERROR; goto exit; } } } /* Name of user getting permission */ STRUCT_ASSIGN_MACRO(psy_cb->psy_user, protup->dbp_owner); /* Terminal at which permission given */ STRUCT_ASSIGN_MACRO(psy_cb->psy_terminal, protup->dbp_term); /* Give RDF pointer to query tree, if any */ if (!psy_cb->psy_istree) { rdf_rb->rdr_qry_root_node = (PTR) NULL; } else { PST_VRMAP varset; i4 j; STRUCT_ASSIGN_MACRO(psy_cb->psy_intree, qsf_rb.qsf_obj_id); qsf_rb.qsf_lk_state = QSO_EXLOCK; status = qsf_call(QSO_LOCK, &qsf_rb); if (DB_FAILURE_MACRO(status)) { (VOID) psf_error(E_PS0D19_QSF_INFO, qsf_rb.qsf_error.err_code, PSF_INTERR, &err_code, err_blk, 0); goto exit; } tree_lock = qsf_rb.qsf_lk_id; pnode = (PST_PROCEDURE *) qsf_rb.qsf_root; qtree = (PST_QTREE *) pnode->pst_stmts->pst_specific.pst_tree; rdf_rb->rdr_qry_root_node = (PTR) pnode; /* check for no views in the qualification. 
*/ (VOID)psy_varset(qtree->pst_qtree, &varset); j = BTnext(-1, (char *) &varset, BITS_IN(varset)); for ( ; j >= 0; j = BTnext(j, (char *) &varset, BITS_IN(varset))) { status = pst_rgent(sess_cb, rngtab, -1, "", PST_SHWID, (DB_TAB_NAME *) NULL, (DB_TAB_OWN *) NULL, &qtree->pst_rangetab[j]->pst_rngvar, TRUE, &rngvar, (i4) 0, err_blk); if (status) goto exit; if (rngvar->pss_tabdesc->tbl_status_mask & DMT_VIEW) { psf_error(3597L, 0L, PSF_USERERR, &err_code, err_blk, 1, psf_trmwhite(sizeof(rngvar->pss_tabname), (char *) &rngvar->pss_tabname), &rngvar->pss_tabname); status = E_DB_ERROR; goto exit; } } } /* Give RDF a pointer to the query text to be stored in iiqrytext */ STRUCT_ASSIGN_MACRO(psy_cb->psy_qrytext, qsf_rb.qsf_obj_id); qsf_rb.qsf_lk_state = QSO_EXLOCK; status = qsf_call(QSO_LOCK, &qsf_rb); if (DB_FAILURE_MACRO(status)) { (VOID) psf_error(E_PS0D19_QSF_INFO, qsf_rb.qsf_error.err_code, PSF_INTERR, &err_code, err_blk, 0); goto exit; } text_lock = qsf_rb.qsf_lk_id; MEcopy((char *) qsf_rb.qsf_root, sizeof(i4), (char *) &textlen); rdf_rb->rdr_l_querytext = textlen; rdf_rb->rdr_querytext = ((char *) qsf_rb.qsf_root) + sizeof(i4); rdf_rb->rdr_status = (sess_cb->pss_lang == DB_SQL) ? 
DB_SQL : 0; /* determine if the permit specifies exactly one privilege */ if (BTcount((char *) &psy_cb->psy_opmap, BITS_IN(psy_cb->psy_opmap)) > 1) { /* ** if permit specified more than one privilege, notify QEF that it will ** have to split the permit into multiple IIPROTECT tuples */ rdf_rb->rdr_instr |= RDF_SPLIT_PERM; } else if (psy_cb->psy_opmap & DB_RETRIEVE) { /* ** if qeu_cprot() will not be splitting the permit into multiple tuples ** and RETRIEVE is the privilege mentioned in it, set the two bits ** associated with DB_RETRIEVE */ psy_cb->psy_opmap |= DB_TEST | DB_AGGREGATE; psy_cb->psy_opctl |= DB_TEST | DB_AGGREGATE; } /* Null out the DMU control block pointer, just in case */ rdf_rb->rdr_dmu_cb = (PTR) NULL; /* produce list of dependent tables */ rdf_rb->rdr_cnt_base_id = 0; if (psy_cb->psy_istree && qtree->pst_qtree) { j = 0; (VOID)psy_varset(qtree->pst_qtree, &varmap); for (i = -1; (i = BTnext(i, (char*) &varmap, PST_NUMVARS)) > -1;) { /* if this is the table that is getting the permit, ignore */ if (qtree->pst_rangetab[i]->pst_rngvar.db_tab_base != psy_tbl->psy_tabid.db_tab_base || qtree->pst_rangetab[i]->pst_rngvar.db_tab_index != psy_tbl->psy_tabid.db_tab_index ) { rdf_rb->rdr_cnt_base_id++; STRUCT_ASSIGN_MACRO(qtree->pst_rangetab[i]->pst_rngvar, tabids[j++]); } } rdf_rb->rdr_base_id = tabids; } protup->dbp_popctl = psy_cb->psy_opctl; protup->dbp_popset = psy_cb->psy_opmap; /* ** store an indication of whether this permit is being created using SQL or ** QUEL */ protup->dbp_flags = (sess_cb->pss_lang == DB_SQL) ? 
DBP_SQL_PERM : (i2) 0; protup->dbp_flags |= DBP_65_PLUS_PERM; /* Now let RDF do all the work of the permit definition */ status = rdf_call(RDF_UPDATE, (PTR) &rdf_cb); if (DB_FAILURE_MACRO(status)) { if (rdf_cb.rdf_error.err_code == E_RD0002_UNKNOWN_TBL) { (VOID) psf_error(E_PS0903_TAB_NOTFOUND, 0L, PSF_USERERR, &err_code, err_blk, 1, psf_trmwhite(sizeof(psy_tbl->psy_tabnm), (char *) &psy_tbl->psy_tabnm), &psy_tbl->psy_tabnm); } else { (VOID) psf_rdf_error(RDF_UPDATE, &rdf_cb.rdf_error, &psy_cb->psy_error); } goto exit; } /* ** Invalidate base object's infoblk from RDF cache. */ pst_rdfcb_init(&rdf_cb, sess_cb); STRUCT_ASSIGN_MACRO(psy_cb->psy_tables[0], rdf_rb->rdr_tabid); status = rdf_call(RDF_INVALIDATE, (PTR) &rdf_cb); if (DB_FAILURE_MACRO(status)) { (VOID) psf_rdf_error(RDF_INVALIDATE, &rdf_cb.rdf_error, &psy_cb->psy_error); } exit: qsf_rb.qsf_lk_state = QSO_EXLOCK; if (psy_cb->psy_istree) { /* Destroy query tree */ STRUCT_ASSIGN_MACRO(psy_cb->psy_intree, qsf_rb.qsf_obj_id); if ((qsf_rb.qsf_lk_id = tree_lock) == 0) { stat = qsf_call(QSO_LOCK, &qsf_rb); if (DB_FAILURE_MACRO(stat)) { (VOID) psf_error(E_PS0D18_QSF_LOCK, qsf_rb.qsf_error.err_code, PSF_INTERR, &err_code, &psy_cb->psy_error, 0); if (!status || stat == E_DB_FATAL) status = stat; } tree_lock = qsf_rb.qsf_lk_id; } stat = qsf_call(QSO_DESTROY, &qsf_rb); if (DB_FAILURE_MACRO(stat)) { (VOID) psf_error(E_PS0D1A_QSF_DESTROY, qsf_rb.qsf_error.err_code, PSF_INTERR, &err_code, &psy_cb->psy_error, 0); if (!status || stat == E_DB_FATAL) status = stat; } tree_lock = 0; } /* Destroy query text */ STRUCT_ASSIGN_MACRO(psy_cb->psy_qrytext, qsf_rb.qsf_obj_id); if ((qsf_rb.qsf_lk_id = text_lock) == 0) { stat = qsf_call(QSO_LOCK, &qsf_rb); if (DB_FAILURE_MACRO(stat)) { (VOID) psf_error(E_PS0D18_QSF_LOCK, qsf_rb.qsf_error.err_code, PSF_INTERR, &err_code, &psy_cb->psy_error, 0); if (!status || stat == E_DB_FATAL) status = stat; } text_lock = qsf_rb.qsf_lk_id; } stat = qsf_call(QSO_DESTROY, &qsf_rb); if 
(DB_FAILURE_MACRO(stat)) { (VOID) psf_error(E_PS0D1A_QSF_DESTROY, qsf_rb.qsf_error.err_code, PSF_INTERR, &err_code, &psy_cb->psy_error, 0); if (!status || stat == E_DB_FATAL) status = stat; } return (status); }
DB_STATUS dm1ch_compress( DMP_ROWACCESS *rac, char *rec, i4 rec_size, char *crec, i4 *crec_size) { u_char *pIn; u_char *pOut; i4 iInSize; i4 iOutSize; BIT_FILE BitFile; i2 Character; u_i2 StringCode; u_i2 index; i4 rc; DICTCONTROL DictCtrl; DICTIONARY Dict[TABLE_SIZE]; i4 i; i2 att_ln, str_ln; i2 type; i2 abs_type; i4 dt_bits; DB_ATTS *att; DB_ATTS **attpp; ADF_CB adf_scb; MEfill(sizeof(ADF_CB),0,(PTR)&adf_scb); adf_scb.adf_maxstring = DB_MAXSTRING; /* preen the input record of garbage */ pIn = (u_char *)rec; attpp = rac->att_ptrs; i = rac->att_count; while (--i >= 0) { att = *attpp++; att_ln = att->length; type = att->type; if (att->ver_dropped > att->ver_added) continue; /* ** Obtain info about the data type. ** It's used to check for coupons then later for variable len */ abs_type = (type < 0) ? -type: type; adi_dtinfo(&adf_scb, abs_type, &dt_bits); if ((type < 0) && !(dt_bits & AD_PERIPHERAL)) { /* nullable */ if (pIn[--att_ln] & ADF_NVL_BIT) { /* it is null so zap the value */ MEfill(att_ln, 0x00, pIn); pIn += att->length; continue; } type = (-type); } /* ** either not a nullable datatype or ** nullable but not null right now */ switch (type) { /* ** currently only interested in variable length ** attributes ; it is these which potentially have ** garbage past their end - all others are either complete ** or blank padded already */ case DB_VCH_TYPE: case DB_TXT_TYPE: case DB_VBYTE_TYPE: /* get length and adjust */ I2ASSIGN_MACRO(((DB_TEXT_STRING *)pIn)->db_t_count, str_ln); str_ln += DB_CNTSIZE; if (str_ln < att_ln) MEfill((att_ln - str_ln), 0x00, (pIn + str_ln)); break; case DB_NVCHR_TYPE: /* get length and adjust */ I2ASSIGN_MACRO(((DB_TEXT_STRING *)pIn)->db_t_count, str_ln); str_ln = str_ln * sizeof(UCS2) + DB_CNTSIZE; if (str_ln < att_ln) MEfill((att_ln - str_ln), 0x00, (pIn + str_ln)); break; default: break; } /* point to next attribute */ pIn += att->length; } /* now compress the preened record */ pIn = (u_char*)rec; pOut = (u_char*)crec; iInSize = 
rec_size; /* write compression indicator */ *pOut++ = (char)TRUE; dm1ch_InitDictionary(&Dict[0], &DictCtrl); BitFile.pDataPtr = pOut; BitFile.cbDataLen = (u_i2)(rec_size); BitFile.cResidual = 0; BitFile.Residual = 0L; StringCode = pIn[0]; pIn++; iInSize--; while (iInSize > 0) { Character = pIn[0]; pIn++; iInSize--; index = dm1ch_FindChildNode(&Dict[0], StringCode, Character); if (Dict[index].CodeValue != UNUSED) { StringCode = Dict[index].CodeValue; } else { Dict[index].CodeValue = DictCtrl.NextCode++; Dict[index].ParentCode = StringCode; Dict[index].Character = (char) Character; rc = dm1ch_OutputBits(&BitFile, (u_i4) StringCode, DictCtrl.CurrentCodeBits); if (rc == -1) break; /* Compression didn't compress */ StringCode = Character; if (DictCtrl.NextCode > (u_i2) MAX_CODE) { rc = dm1ch_OutputBits(&BitFile, (u_i4) FLUSH_CODE, DictCtrl.CurrentCodeBits); if (rc == -1) break; /* Compression didn't compress */ dm1ch_InitDictionary(&Dict[0], &DictCtrl); } /* Dictionary has filled up */ else if (DictCtrl.NextCode > DictCtrl.NextBumpCode) { rc = dm1ch_OutputBits(&BitFile, (u_i4) BUMP_CODE, DictCtrl.CurrentCodeBits); if (rc == -1) break; /* Compression didn't compress */ DictCtrl.CurrentCodeBits++; DictCtrl.NextBumpCode <<= 1; DictCtrl.NextBumpCode |= 1; } /* Increase the number of code bits */ } } if (rc == 0) rc = dm1ch_OutputBits(&BitFile, (u_i4) StringCode, DictCtrl.CurrentCodeBits); if (rc == 0) rc = dm1ch_OutputBits(&BitFile, (u_i4) END_OF_STREAM, DictCtrl.CurrentCodeBits); if (rc == 0) rc = dm1ch_CloseBitFile(&BitFile); if (rc == 0) { iOutSize = rec_size - BitFile.cbDataLen + 1; } else /* compression failed to reduce the size, so use original */ { /* write compression indicator */ pOut = (u_char*)crec; *pOut++ = (char)FALSE; /* write original data */ MEcopy(rec, rec_size, pOut); iOutSize = rec_size + 1; } *crec_size = iOutSize; /* Return the size of compressed data */ return(E_DB_OK); } /* dm1ch_compress */
/*{ ** Name: opa_checkopt - check for possibility of optimization ** ** Description: ** This routine will create variable map of what the outer aggregate ** would appear like if all possible substitutions of the inner aggregate ** bylist attributes were made. The map of the all the inner aggregate ** bylist elements used in the substitution is also created. Thus, if ** no substitutions are made then this map would be empty. ** ** Inputs: ** global global state variable ** root root of query tree which will ** be analyzed for possible substitutions ** - this root is a subtree of the outer ** aggregate. ** bylist base of bylist which will be ** used for substitutions ** ** Outputs: ** usedmap ptr to map of all variables found ** in the "hit list" i.e. map of variables ** which will be replaced if the ** substitution were actually made ** newmap ptr to varmap of root, filled in if ** the substitutions were actually made ** Returns: ** varmap of root if the substitutions were actually made ** Exceptions: ** none ** ** Side Effects: ** none ** ** History: ** 15-apr-86 (seputis) ** initial creation ** 4-dec-02 (inkdo01) ** Return variable changed to a call-by-ref parm to support ** range table expansion. [@history_line@]... */ static VOID opa_checkopt( OPS_STATE *global, PST_QNODE *bylist, PST_QNODE *root, OPV_GBMVARS *usedmap, OPV_GBMVARS *newmap) { PST_QNODE *node; /* ptr to current by list element being ** substituted */ MEfill(sizeof(*newmap), 0, (char *)newmap); /* init the bit map */ if ( root ) { for(node = bylist; /* get first element of by list */ node && node->pst_sym.pst_type != PST_TREE; /* at the end of ** the bylist ? */ node = node->pst_left) /* get next bydom attribute */ { if ( opv_ctrees( global, root, node->pst_right ) ) { opv_mapvar( root, usedmap); /* update map of global range ** variables used in subtree */ return; /* substitution is made so no vars will ** be contributed to the map of the tree ** after substitution i.e. 
return 0 */ } } /* try the subtrees if none of the bylist elements matched */ { OPV_GBMVARS tempmap; /* used to create bit map for var node*/ if (root->pst_sym.pst_type == PST_VAR) { OPV_GRV *gvarp; /* global range var associated ** with this node */ BTset( (i4)root->pst_sym.pst_value.pst_s_var.pst_vno, (char *)newmap); gvarp = global->ops_rangetab.opv_base-> opv_grv[root->pst_sym.pst_value.pst_s_var.pst_vno]; if (gvarp->opv_gsubselect && gvarp->opv_gsubselect->opv_subquery) { MEcopy((char *)&gvarp->opv_gsubselect->opv_subquery-> ops_correlated, sizeof(tempmap), (char *)&tempmap); BTor(OPV_MAXVAR, (char *)&gvarp->opv_gsubselect->opv_subquery-> ops_fcorelated, (char *)&tempmap); if (BTcount((char *)&tempmap, OPV_MAXVAR)) { /* if a correlated subquery is referenced then the ** correlated variables cannot be substituted ** - FIXME need to find all correlated vars, and see if ** enough attributes exist to supply correlated values ** this can be done by running opa_subselect in OPAFINAL.C ** prior to this optimization ** ... 
also need to make sure the correlated subquery ** is not used in another context as determined by ** opa_compat ** - for now do not substitute correlated variables */ BTor( (i4)BITS_IN(OPV_GBMVARS), (char *) &gvarp->opv_gsubselect->opv_subquery->ops_correlated, (char *)newmap); BTor( (i4)BITS_IN(OPV_GBMVARS), (char *) &gvarp->opv_gsubselect->opv_subquery->ops_fcorelated, (char *)newmap); } } return; /* return map with bit set for this var ** since a substition will not be made, so ** the var will appear in the optimized tree ** if it is created */ } else { opa_checkopt( global, bylist, root->pst_left, usedmap, newmap); MEcopy((char *)newmap, sizeof(tempmap), (char *)&tempmap); BTand(OPV_MAXVAR, (char *)usedmap, (char *)&tempmap); if (BTcount((char *)&tempmap, OPV_MAXVAR)) /* if the maps have an intersection then abort the search ** since there will not be a commit made */ return; /* traverse the right side of the tree */ opa_checkopt( global, bylist, root->pst_right,usedmap, &tempmap); BTor(OPV_MAXVAR, (char *)&tempmap, (char *)newmap); return; } } } MEfill(sizeof(*newmap), 0, (char *)newmap); /* return empty map */ return; }
/*
** Name: adu_like_all - common worker for the LIKE family of operators.
**
** Description:
**	Evaluates a LIKE-style pattern match of src_dv against pat_dv.
**	The pattern may arrive pre-compiled (DB_PAT_TYPE operand carrying
**	an AD_PATDATA vector) or as a raw string which is compiled here.
**	Source and/or pattern are coerced to NVCHR (Unicode) when either
**	side requires Unicode handling; the compiled pattern is then
**	executed against the source through the pattern data-access layer.
**
**	When ADU_pat_legacy > 0 the old adu_like/adu_ulike implementations
**	are also run and their results compared and TRdisplay'd (and, for
**	ADU_pat_legacy > 1, used in preference) - a transitional debugging
**	aid for the new pattern engine.
**
** Inputs:
**	adf_scb		ADF session control block.
**	src_dv		Source data value to test.
**	pat_dv		Pattern data value (string form or DB_PAT_TYPE).
**	esc_dv		Optional escape-character data value; may be NULL.
**	pat_flags	AD_PAT_* flags (low 16 bits) and AD_PAT2_* flags
**			(high 16 bits).  Ignored when the pattern is
**			pre-compiled - the AD_PATDATA carries its own.
**
** Outputs:
**	rcmp		Comparison result from the execute routine.  A
**			forced-fail pattern sets it to 1 (non-match);
**			otherwise the encoding comes from the executor.
**
** Returns:
**	DB_STATUS
*/
DB_STATUS
adu_like_all(
ADF_CB		*adf_scb,
DB_DATA_VALUE	*src_dv,
DB_DATA_VALUE	*pat_dv,
DB_DATA_VALUE	*esc_dv,
u_i4		pat_flags,
i4		*rcmp)
{
    DB_STATUS		db_stat = E_DB_OK;
    AD_PAT_SEA_CTX	_sea_ctx;
    AD_PAT_SEA_CTX	*sea_ctx = &_sea_ctx;
    AD_PATDATA		_patdata;
    AD_PATDATA		*patdata;
    AD_PAT_DA_CTX	da_ctx;
    DB_DATA_VALUE	dv_tmp1;	/* coerced copy of the source */
    DB_DATA_VALUE	dv_tmp2;	/* coerced copy of the pattern */
    DB_DATA_VALUE	*s1 = src_dv, *p1 = pat_dv; /* originals, for legacy path */
    i4			rcmp1 = 0;	/* legacy comparison result */
    i4			long_seen = 0;	/* a long/LOB operand was seen */
    DB_STATUS		db_stat1 = E_DB_OK; /* legacy comparison status */
    char		tmp[2000];	/* stack buffer shared by dv_tmp1/dv_tmp2 */
    i2			saved_uninorm_flag = adf_scb->adf_uninorm_flag;
    /* Dispatch table indexed by 'form' below */
    static struct {
	ADU_PATCOMP_FUNC	*compile;
	ADU_PATEXEC_FUNC	*execute;
    } rtns[] = {
	{adu_patcomp_like,	adu_pat_execute},
	{adu_patcomp_like,	adu_pat_execute_col},
	{adu_patcomp_like_uni,	adu_pat_execute_uni},
    };
    enum rtns_idx { LIKE, LIKE_COLLATION, LIKE_UNICODE } form = LIKE;

    if (pat_dv->db_datatype == DB_PAT_TYPE)
    {
	/*
	** Pre-compiled pattern.  Use it in place if it is i2-aligned,
	** otherwise copy it to an aligned area (stack if it fits,
	** heap otherwise).
	*/
	patdata = (AD_PATDATA*)pat_dv->db_data;
	if (ME_ALIGN_MACRO(patdata, sizeof(i2)) != (PTR)patdata)
	{
	    if ((i2)sizeof(_patdata) >= pat_dv->db_length)
		patdata = &_patdata;
	    else
	    {
		patdata = (AD_PATDATA*)MEreqmem(0, pat_dv->db_length,
						FALSE, &db_stat);
		if (!patdata || db_stat)
		    return db_stat;
	    }
	    MEcopy(pat_dv->db_data, pat_dv->db_length, patdata);
	}
	/* The compiled pattern carries its own flags - caller's ignored */
	pat_flags = patdata->patdata.flags | (patdata->patdata.flags2 << 16);

	/* Pre-compiled pattern */
	{
	    i4 i;
	    /* ->patdata is passed in preset with a valid [PATDATA_LENGTH] -
	    ** save it now as we will clear the lot */
	    MEfill(sizeof(*sea_ctx), 0, (PTR)sea_ctx);
	    sea_ctx->patdata = patdata;
	    /* The MEfill above zeroed the remaining fields: */
	    /*sea_ctx->buffer = NULL;*/
	    /*sea_ctx->bufend = NULL;*/
	    /*sea_ctx->buftrueend = NULL;*/
	    /*sea_ctx->seg_offset = 0;*/
	    /*sea_ctx->buflen = 0;*/
	    /*sea_ctx->at_bof = FALSE;*/
	    /*sea_ctx->at_eof = FALSE;*/
	    /*sea_ctx->trace = FALSE;*/
	    /*sea_ctx->force_fail = FALSE;*/
	    /*sea_ctx->cmplx_lim_exc = FALSE;*/
	    /*sea_ctx->stalled = NULL;*/
	    /*sea_ctx->pending = NULL;*/
	    /*sea_ctx->free = NULL;*/
	    /*sea_ctx->setbuf = NULL;*/
#if PAT_DBG_TRACE>0
	    /*sea_ctx->nid = 0;*/
	    /*sea_ctx->infile = NULL;*/
	    /*sea_ctx->outfile = NULL;*/
#endif
	    /*sea_ctx->nctxs_extra = 0;*/
	    sea_ctx->nctxs = DEF_THD;
	    /* Thread the built-in context array onto the free list */
	    for (i = DEF_THD-1; i >= 0; i--)
	    {
		sea_ctx->ctxs[i].next = sea_ctx->free;
		sea_ctx->free = &sea_ctx->ctxs[i];
	    }
	}
	db_stat = adu_patcomp_set_pats(sea_ctx);

	/* If prior patcomp flagged force_fail - obey it */
	if (patdata->patdata.flags2 & AD_PAT2_FORCE_FAIL)
	    sea_ctx->force_fail = TRUE;
    }
    else
    {
	patdata = &_patdata;
	/* Tell compiler size we are prepared for */
	patdata->patdata.length = sizeof(_patdata)/sizeof(_patdata.vec[0]);
	/* NOTE(review): unlike the pre-compiled branch, sea_ctx is not
	** MEfill'd here - presumably the compile routine initializes it;
	** confirm against adu_patcomp_like*. */
	sea_ctx->patdata = patdata;

	/*
	** To allow for default processing we have the user specified
	** case flags to deal with. If AD_PAT_WITH_CASE or AD_PAT_WO_CASE
	** is set then we use that setting to override any collation
	** case request. If neither are set we obey the collation.
	*/
	if (pat_flags & AD_PAT_WITH_CASE)
	{
	    pat_flags &= ~AD_PAT_WO_CASE;
	}
	else if (!(pat_flags & AD_PAT_WO_CASE) &&
		(src_dv->db_collID == DB_UNICODE_CASEINSENSITIVE_COLL ||
		 pat_dv->db_collID == DB_UNICODE_CASEINSENSITIVE_COLL))
	{
	    pat_flags |= AD_PAT_WO_CASE;
	}
	/*
	** From this point on, the AD_PAT_WITH_CASE flag is ignored as
	** its state has been folded into the AD_PAT_WO_CASE flag.
	*/
    }

    /* Global legacy-mode overrides for case sensitivity */
    if (ADU_pat_legacy == -1)
	pat_flags &= ~AD_PAT_WO_CASE;
    else if (ADU_pat_legacy == -2)
	pat_flags |= AD_PAT_WO_CASE;

    /* NULL db_data marks "no coercion buffer allocated" for cleanup below */
    dv_tmp1.db_data = NULL;
    dv_tmp2.db_data = NULL;

    /* Classify the source operand and coerce it if necessary */
    switch (abs(src_dv->db_datatype))
    {
    case DB_LNVCHR_TYPE:
    case DB_LNLOC_TYPE:
	long_seen = 1;
	/*FALLTHROUGH*/
    case DB_NVCHR_TYPE:
    case DB_NCHR_TYPE:
    case DB_UTF8_TYPE:
    case DB_NQTXT_TYPE:
	form = LIKE_UNICODE;
	/* All handled directly by DA */
	break;

    case DB_LVCH_TYPE:
    case DB_LCLOC_TYPE:
	if (adf_scb->adf_utf8_flag & AD_UTF8_ENABLED)
	    form = LIKE_UNICODE;
	/*FALLTHROUGH*/
    case DB_LBYTE_TYPE:
    case DB_LBLOC_TYPE:
	long_seen = 1;
	/*FALLTHROUGH*/
    case DB_BYTE_TYPE:
    case DB_VBYTE_TYPE:
	/* All handled directly by DA */
	break;

    case DB_CHR_TYPE:
	pat_flags |= AD_PAT_BIGNORE;	/* Ignore blanks */
	/*FALLTHROUGH*/
    case DB_CHA_TYPE:
    case DB_VCH_TYPE:
    case DB_TXT_TYPE:
    case DB_LTXT_TYPE:
	if (adf_scb->adf_utf8_flag & AD_UTF8_ENABLED)
	{
	    /* UTF8 install: convert the source to NVCHR (NFC normalized) */
	    form = LIKE_UNICODE;
	    dv_tmp1.db_datatype = DB_NVCHR_TYPE;
	    dv_tmp1.db_length = src_dv->db_length * 4 + DB_CNTSIZE;
	    dv_tmp1.db_prec = 0;
	    dv_tmp1.db_collID = -1;
	    if (dv_tmp1.db_length < (i2)sizeof(tmp))
		dv_tmp1.db_data = tmp;
	    else
	    {
		dv_tmp1.db_data = (char *) MEreqmem (0, dv_tmp1.db_length,
							TRUE, &db_stat);
		if (db_stat)
		    return db_stat;
	    }
	    adf_scb->adf_uninorm_flag = AD_UNINORM_NFC;
	    db_stat = adu_nvchr_fromutf8(adf_scb, src_dv, &dv_tmp1);
	    adf_scb->adf_uninorm_flag = saved_uninorm_flag;
	    src_dv = &dv_tmp1;
	}
	break;

    default:
	return (adu_error(adf_scb, E_AD9999_INTERNAL_ERROR, 0));
    }

    /*
    ** See how the pattern looks and coerce appropriately
    */
    switch (abs(pat_dv->db_datatype))
    {
    case DB_LNVCHR_TYPE:
    case DB_LNLOC_TYPE:
	long_seen = 1;
	/*FALLTHROUGH*/
    case DB_NVCHR_TYPE:
    case DB_NCHR_TYPE:
    case DB_UTF8_TYPE:
    case DB_NQTXT_TYPE:
	if (form != LIKE_UNICODE)
	{
	    /* Unicode pattern, non-Unicode source: lift source to NVCHR */
	    DB_DATA_VALUE dv_tmp3;
	    form = LIKE_UNICODE;
	    dv_tmp1.db_datatype = DB_NVCHR_TYPE;
	    dv_tmp1.db_length = src_dv->db_length * 3 + DB_CNTSIZE;
	    dv_tmp1.db_prec = 0;
	    dv_tmp1.db_collID = -1;
	    dv_tmp3.db_datatype = DB_NVCHR_TYPE;
	    dv_tmp3.db_length = src_dv->db_length * 3 + DB_CNTSIZE;
	    dv_tmp3.db_prec = 0;
	    dv_tmp3.db_collID = -1;
	    if (dv_tmp1.db_length < (i2)sizeof(tmp))
		dv_tmp1.db_data = tmp;
	    else
	    {
		dv_tmp1.db_data = (char *) MEreqmem (0, dv_tmp1.db_length,
							TRUE, &db_stat);
		if (db_stat)
		    return db_stat;
	    }
	    /* NOTE(review): if this allocation fails, a heap-allocated
	    ** dv_tmp1.db_data above is not freed before returning. */
	    dv_tmp3.db_data = (char *) MEreqmem (0, dv_tmp3.db_length,
							TRUE, &db_stat);
	    if (db_stat)
		return db_stat;
	    if (db_stat = adu_nvchr_coerce(adf_scb, src_dv, &dv_tmp3))
	    {
		if (dv_tmp1.db_data && dv_tmp1.db_data != tmp)
		    MEfree((char *)dv_tmp1.db_data);
		return db_stat;
	    }
	    /* Normalize (NFC) the coerced source into dv_tmp1 */
	    adf_scb->adf_uninorm_flag = AD_UNINORM_NFC;
	    db_stat = adu_unorm(adf_scb, &dv_tmp3, &dv_tmp1);
	    adf_scb->adf_uninorm_flag = saved_uninorm_flag;
	    MEfree((char *)dv_tmp3.db_data);
	    if (db_stat)
	    {
		if (dv_tmp1.db_data && dv_tmp1.db_data != tmp)
		    MEfree((char *)dv_tmp1.db_data);
		return db_stat;
	    }
	    src_dv = &dv_tmp1;
	}
	break;

    case DB_CHR_TYPE:
	pat_flags |= AD_PAT_BIGNORE;	/* Ignore blanks */
	/*FALLTHROUGH*/
    case DB_LVCH_TYPE:
    case DB_LCLOC_TYPE:
    case DB_LBYTE_TYPE:
    case DB_LBLOC_TYPE:
    case DB_BYTE_TYPE:
    case DB_VBYTE_TYPE:
    case DB_CHA_TYPE:
    case DB_VCH_TYPE:
    case DB_TXT_TYPE:
    case DB_LTXT_TYPE:
	if (form == LIKE_UNICODE)
	{
	    /* Unicode source, non-Unicode pattern: lift pattern to NVCHR */
	    DB_DATA_VALUE dv_tmp3;
	    dv_tmp2.db_datatype = DB_NVCHR_TYPE;
	    dv_tmp2.db_length = pat_dv->db_length * 4 + DB_CNTSIZE;
	    dv_tmp2.db_prec = 0;
	    dv_tmp2.db_collID = -1;
	    dv_tmp3.db_datatype = DB_NVCHR_TYPE;
	    dv_tmp3.db_length = pat_dv->db_length * 3 + DB_CNTSIZE;
	    dv_tmp3.db_prec = 0;
	    dv_tmp3.db_collID = -1;
	    /* tmp may already be in use by dv_tmp1 - check before sharing */
	    if (dv_tmp2.db_length < (i2)sizeof(tmp) && dv_tmp1.db_data != tmp)
		dv_tmp2.db_data = tmp;
	    else
	    {
		dv_tmp2.db_data = (char *) MEreqmem (0, dv_tmp2.db_length,
							TRUE, &db_stat);
		if (db_stat)
		    return db_stat;
	    }
	    if (adf_scb->adf_utf8_flag & AD_UTF8_ENABLED)
	    {
		adf_scb->adf_uninorm_flag = AD_UNINORM_NFC;
		db_stat = adu_nvchr_fromutf8(adf_scb, pat_dv, &dv_tmp2);
		adf_scb->adf_uninorm_flag = saved_uninorm_flag;
	    }
	    else
	    {
		/* NOTE(review): on failure here dv_tmp1/dv_tmp2 heap
		** buffers are not freed before returning. */
		dv_tmp3.db_data = (char *) MEreqmem (0, dv_tmp3.db_length,
							TRUE, &db_stat);
		if (db_stat)
		    return db_stat;
		if (db_stat = adu_nvchr_coerce(adf_scb, pat_dv, &dv_tmp3))
		{
		    if (dv_tmp2.db_data && dv_tmp2.db_data != tmp)
			MEfree((char *)dv_tmp2.db_data);
		    return db_stat;
		}
		adf_scb->adf_uninorm_flag = AD_UNINORM_NFC;
		db_stat = adu_unorm(adf_scb, &dv_tmp3, &dv_tmp2);
		adf_scb->adf_uninorm_flag = saved_uninorm_flag;
		MEfree((char *)dv_tmp3.db_data);
		if (db_stat)
		{
		    if (dv_tmp2.db_data && dv_tmp2.db_data != tmp)
			MEfree((char *)dv_tmp2.db_data);
		    return db_stat;
		}
	    }
	    pat_dv = &dv_tmp2;
	}
	break;

    case DB_PAT_TYPE:
	/* Pre-compiled: form is dictated by the stored flags */
	if (patdata->patdata.flags2 & AD_PAT2_UNICODE)
	    form = LIKE_UNICODE;
	else if (patdata->patdata.flags2 & AD_PAT2_COLLATE)
	    form = LIKE_COLLATION;
	break;

    default:
	return (adu_error(adf_scb, E_AD9999_INTERNAL_ERROR, 0));
    }

    if (abs(pat_dv->db_datatype) != DB_PAT_TYPE)
    {
	if (form == LIKE && adf_scb->adf_collation)
	{
	    form = LIKE_COLLATION;
	    pat_flags |= (AD_PAT2_COLLATE<<16);
	}
	else if (form == LIKE_UNICODE)
	    pat_flags |= (AD_PAT2_UNICODE<<16);

	/*
	** Legacy comparison mode: run the old implementation too
	** (only for plain LIKE on non-long operands).
	*/
	if (ADU_pat_legacy > 0 && !long_seen &&
		(pat_flags & AD_PAT_FORM_MASK) == AD_PAT_FORM_LIKE)
	{
	    if (form == LIKE_UNICODE)
		db_stat1 = adu_ulike(adf_scb, s1, p1,
				(UCS2*)(esc_dv?esc_dv->db_data:0), &rcmp1);
	    else
		db_stat1 = adu_like(adf_scb, s1, p1,
				(u_char*)(esc_dv?esc_dv->db_data:0), &rcmp1);
	    if (ADU_pat_legacy == 3)
	    {
		/* Look no further */
		*rcmp = rcmp1;
		if (dv_tmp1.db_data && dv_tmp1.db_data != tmp)
		    MEfree((char *)dv_tmp1.db_data);
		if (dv_tmp2.db_data && dv_tmp2.db_data != tmp)
		    MEfree((char *)dv_tmp2.db_data);
		return db_stat1;
	    }
	}

	/* Compile the input pattern */
	db_stat = (rtns[form].compile)(adf_scb, pat_dv, esc_dv,
					pat_flags, sea_ctx);
    }

    /* sea_ctx always points at _sea_ctx here; the check is defensive */
    if (!db_stat && sea_ctx)
    {
	if (sea_ctx->force_fail)
	    *rcmp = 1;
	else
	{
	    /* Init the data access */
	    if (!(db_stat = adu_patda_init(adf_scb, src_dv, sea_ctx, &da_ctx)))
	    {
		/* Do the search */
		db_stat = (rtns[form].execute)(sea_ctx, &da_ctx, rcmp);
	    }
	    /* Cleanup the data access */
	    (VOID)adu_patda_term(&da_ctx);
	    if (!db_stat && sea_ctx->cmplx_lim_exc)
		db_stat = adu_error(adf_scb, E_AD1026_PAT_TOO_CPLX, 0);
	}
    }

    /* Release everything we may have allocated above */
    adu_patcomp_free(sea_ctx);
    if (dv_tmp1.db_data && dv_tmp1.db_data != tmp)
	MEfree((char *)dv_tmp1.db_data);
    if (dv_tmp2.db_data && dv_tmp2.db_data != tmp)
	MEfree((char *)dv_tmp2.db_data);
    if (patdata != &_patdata && (PTR)patdata != pat_dv->db_data)
	MEfree((PTR)patdata);

    /* Legacy comparison diagnostics - report (and maybe adopt) old result */
    if (ADU_pat_legacy > 0 && !long_seen &&
	(pat_flags & AD_PAT_FORM_MASK) == AD_PAT_FORM_LIKE)
    {
	if (db_stat1 && db_stat)
	{
	    /* Old unsupported? - just report */
	    TRdisplay("%s old fail - %d %d osts=%d nsts=%d\n",
		pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE",
		s1->db_datatype, p1->db_datatype, db_stat1, db_stat);
	}
	else if (db_stat1 && !db_stat)
	{
	    /* NEW SOLUTION - */
	    TRdisplay("%s new support - %d %d osts=%d\n",
		pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE",
		s1->db_datatype, p1->db_datatype, db_stat1);
	}
	else if (!db_stat1 && db_stat)
	{
	    i4 sl, pl;
	    char *s, *p;
	    /*NEW PROB - report & fixup */
	    adu_lenaddr(adf_scb, s1, &sl, &s);
	    adu_lenaddr(adf_scb, p1, &pl, &p);
	    TRdisplay("%s new problem - %d %d nsts=%d ores=%d nres=%d '%.#s' '%.#s'\n",
		pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE",
		s1->db_datatype, p1->db_datatype, db_stat,
		rcmp1, *rcmp, sl,s,pl,p);
	    if (ADU_pat_legacy > 1)
	    {
		*rcmp = rcmp1;
		db_stat = db_stat1;
	    }
	}
	else if (*rcmp != rcmp1)
	{
	    i4 sl, pl;
	    char *s, *p;
	    /*NEW PROB - report & fixup */
	    adu_lenaddr(adf_scb, s1, &sl, &s);
	    adu_lenaddr(adf_scb, p1, &pl, &p);
	    TRdisplay("%s bad? %d %d ores=%d nres=%d '%.#s' '%.#s'\n",
		pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE",
		s1->db_datatype, p1->db_datatype,
		rcmp1, *rcmp, sl,s,pl,p);
	    if (ADU_pat_legacy > 1)
	    {
		*rcmp = rcmp1;
		db_stat = db_stat1;
	    }
	}
    }
    return db_stat;
}
DB_STATUS adu_like_comp_uni( ADF_CB *adf_scb, DB_DATA_VALUE *pat_dv, DB_DATA_VALUE *ret_dv, DB_DATA_VALUE *esc_dv, u_i4 pat_flags) { DB_STATUS db_stat = E_DB_OK; AD_PAT_SEA_CTX _sea_ctx; AD_PAT_SEA_CTX *sea_ctx = &_sea_ctx; AD_PATDATA *patdata = (AD_PATDATA*)ret_dv->db_data; AD_PATDATA _patdata; if (ME_ALIGN_MACRO(patdata, sizeof(i2)) != (PTR)patdata) { if ((i2)sizeof(_patdata) >= ret_dv->db_length) patdata = &_patdata; else { patdata = (AD_PATDATA*)MEreqmem(0, ret_dv->db_length, FALSE, &db_stat); if (!patdata || db_stat) return db_stat; } } patdata->patdata.length = ret_dv->db_length/sizeof(patdata->vec[0]); sea_ctx->patdata = patdata; /* ** To allow for default processing we have the user specified ** case flags to deal with. If AD_PAT_WITH_CASE or AD_PAT_WO_CASE ** is set then we use that setting to override any collation ** case request. If neither are set we obey the collation. */ if (pat_flags & AD_PAT_WITH_CASE) { pat_flags &= ~AD_PAT_WO_CASE; } else if (!(pat_flags & AD_PAT_WO_CASE) && pat_dv->db_collID == DB_UNICODE_CASEINSENSITIVE_COLL) { pat_flags |= AD_PAT_WO_CASE; } /* ** From this point on, the AD_PAT_WITH_CASE flag is ignored as ** its state has been folded into the AD_PAT_WO_CASE flag. */ if (ADU_pat_legacy == -1) pat_flags &= ~AD_PAT_WO_CASE; else if (ADU_pat_legacy == -2) pat_flags |= AD_PAT_WO_CASE; pat_flags |= (AD_PAT2_UNICODE<<16); /* Compile the input pattern into output parameter */ db_stat = adu_patcomp_like_uni(adf_scb, pat_dv, esc_dv, pat_flags, sea_ctx); /* Map the forced fail so the executor sees it */ if (sea_ctx->force_fail) patdata->patdata.flags2 |= AD_PAT2_FORCE_FAIL; adu_patcomp_free(sea_ctx); if (patdata != (AD_PATDATA*)ret_dv->db_data) { MEcopy((PTR)patdata, patdata->patdata.length*sizeof(i2), ret_dv->db_data); if (patdata != &_patdata) MEfree((PTR)patdata); } return db_stat; }
/*{ ** Name: LGadd - Add Database. ** ** Description: ** Add database to logging system for a process. ** ** This routine adds a database to the logging system. This service ** is used to inform the logging system that records recorded in the log ** file should be associated with this database. A database can be ** marked as journaled by setting the LG_JOURNAL flag. The fact that a ** database is journaled is used by the logging system to recognize the ** need to copy log records from the log file to a journal file. ** ** NOTE on adding databases that are being recovered: ** When a database requires REDO recovery, the LDB for that database ** is marked LDB_RECOVER. This routine will return LG_DB_INCONSISTENT ** (signifying that the database is inconsistent) if anyone tries ** to add the db while it is being recovered. This is not a very ** on-line solution. ** ** A better solution is to make sure that servers that want to open ** a database that is currently being recovered are forced to wait ** until the db is fully recovered, then they should be able to ** proceed. ** ** Inputs: ** lg_id Log identifier. ** flag Zero or ** LG_JOURNAL: if a journaled DB. ** LG_NOTDB: not a DB; administrative ** LG_PRETEND_CONSISTENT: used by verifydb ** LG_FCT: fast commit ** LG_READONLY: a readonly database ** buffer Database information buffer. ** l_buffer Length of buffer. ** ** Outputs: ** db_id Database identifier. Unique ** identifier associated with this ** instantiation of the logging/locking ** server. After logging/locking ** restarted, a database can have ** a different id. ** sys_err Reason for error return status. ** Returns: ** OK Success. ** LG_BADPARAM Bad parameters to call. ** LG_DB_INCONSISTENT Inconsistent database. ** LG_EXCEED_LIMIT Out of LDB's. ** LG_SHUTTING_DOWN Shutdown has occured (or pending). ** Exceptions: ** none ** ** Side Effects: ** none ** ** History: ** Summer, 1992 (bryanp) ** Working on the new portable logging and locking system. 
** 18-jan-1993 (rogerk) ** Removed LG_WILLING_COMMIT flag - now only LG_ADDONLY is used ** during recovery processing. Add ldb_j_last_la, ldb_d_last_la ** fields. ** 15-mar-1993 (rogerk) ** Reduced Logging - Phase IV: ** Removed LG_ADDONLY flag. Recovery processing now adds db with ** a normal LGadd call and alters it via LG_A_DBCONTEXT to ** reestablish its context. ** 26-apr-1993 (bryanp) ** 6.5 Cluster Support: ** Add ldb_sback_lsn field to the LDB. ** Make sure that lpd_type is set so that LPDs can be deallocated ** properly upon error. ** 26-jul-1993 (bryanp) ** When adding a database which is associated with a remote log file, ** do not signal a local opening of the database. This occurs ** when the CSP process on one node is recovering the work ** performed by another node; in this case we do NOT wish to ** signal to the RCP that a local open is being performed, since ** in fact no local access is implied by adding this database. ** When adding the notdb again, increment the ldb_lpd_count even if ** the ldb_buffer info doesn't match. The notdb is always the notdb ** 26-jul-1993 (rogerk) ** Changed journal and dump window tracking in the logging system. ** Use new journal and dump log address fields. ** 12-oct-1993 (tad) ** Bug #56449 ** Changed %x to %p for pointer values. ** 30-Jan-1996 (jenjo02) ** Reorganized LG_add() such that if NOTDB is wanted, ** the search of the ldb queue is bypassed; after all, ** we know it's buried in the LGD and easy to find. ** 11-Sep-1996 (jenjo02) ** Fix a bug in LG_add() search of lgd_ldb_q which was looping ** if more that 2 LDBs were extant. ** 13-jun-1997 (wonst02) ** Added LG_READONLY and LDB_READONLY for readonly databases. ** 12-nov-1998 (kitch01) ** Bug 90140. If the database is currently pending a close then ** mark the open as in CLOSE_WAIT. 
**	    This will ensure that the close
**	    is processed before this open and prevent locking errors on
**	    the journals
**	7-oct-2004 (thaju02)
**	    Use SIZE_TYPE to allow memory pools > 2Gig.
**	21-Jun-2006 (hanal04) Bug 116272
**	    Take the lgd_mutex before the ldb_mutex in order to ensure
**	    the acquisition order is consistent with LG_archive_complete()
**	    and LG_event(). Flag LG_signal_event() that we already have the
**	    lgd_mutex.
**	01-Nov-2006 (jonj)
**	    Use consistent ldb_q_mutex, ldb_mutex ordering throughout the code.
**	    Don't put LDB on queue until it's completely initialized.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: Initialize new ldb_active_lxbq.
**	09-aug-2010 (maspa05) b123189, b123960
**	    Pass flag to indicate a readonly database LDB_RODB, so that it
**	    gets picked up by LGshow
*/
STATUS
LGadd(
LG_LGID		    external_lg_id,
i4		    flag,
char		    *buffer,
i4		    l_buffer,
LG_DBID		    *external_db_id,
CL_ERR_DESC	    *sys_err)
{
    register LGD    *lgd = (LGD *)LGK_base.lgk_lgd_ptr;
    register LPB    *lpb;
    register LDB    *ldb;
    register LFB    *lfb;
    register LPD    *lpd;
    LDB		    *next_ldb;
    LPD		    *next_lpd;
    SIZE_TYPE	    end_offset;
    SIZE_TYPE	    ldb_offset;
    SIZE_TYPE	    *lpbb_table;
    SIZE_TYPE	    *ldbb_table;
    i4		    err_code;
    bool	    initialize_ldb = FALSE;	/* new (or first-use NOTDB) LDB */
    STATUS	    status;
    LG_I4ID_TO_ID   lg_id;
    LG_ID	    *db_id = (LG_ID*)external_db_id;
    LFB		    *cur_db_lfb;
    i4		    SignalEvent = 0;	/* event to raise after mutexes drop */

    /*
    ** If the logging system is already in "shutdown" mode, then no new
    ** LGadd calls are permitted
    */
    LG_WHERE("LGadd")

    CL_CLEAR_ERR(sys_err);

    if ((lgd->lgd_status & (LGD_START_SHUTDOWN | LGD_IMM_SHUTDOWN)) != 0)
	return (LG_SHUTTING_DOWN);

    /* The info buffer must fit in the LDB's fixed-size copy of it */
    if (l_buffer == 0 || l_buffer > sizeof(ldb->ldb_buffer))
    {
	uleFormat(NULL, E_DMA411_LGADD_BAD_LEN, (CL_ERR_DESC *)NULL, ULE_LOG,
		    NULL, NULL, 0, NULL, &err_code, 2,
		    0, l_buffer, 0, sizeof(ldb->ldb_buffer));
	return (LG_BADPARAM);
    }

    /* Check the lg_id. */
    lg_id.id_i4id = external_lg_id;
    if (lg_id.id_lgid.id_id == 0 ||
	(i4)lg_id.id_lgid.id_id > lgd->lgd_lpbb_count)
    {
	uleFormat(NULL, E_DMA40F_LGADD_BAD_ID, (CL_ERR_DESC *)NULL, ULE_LOG,
		    NULL, NULL, 0, NULL, &err_code, 2,
		    0, lg_id.id_lgid.id_id, 0, lgd->lgd_lpbb_count);
	return (LG_BADPARAM);
    }

    lpbb_table = (SIZE_TYPE *)LGK_PTR_FROM_OFFSET(lgd->lgd_lpbb_table);
    lpb = (LPB *)LGK_PTR_FROM_OFFSET(lpbb_table[lg_id.id_lgid.id_id]);

    if (status = LG_mutex(SEM_EXCL, &lpb->lpb_mutex))
	return(status);

    /* Verify the process block is live and matches the caller's handle */
    if (lpb->lpb_type != LPB_TYPE ||
	lpb->lpb_id.id_instance != lg_id.id_lgid.id_instance)
    {
	(VOID)LG_unmutex(&lpb->lpb_mutex);
	uleFormat(NULL, E_DMA410_LGADD_BAD_PROC, (CL_ERR_DESC *)NULL, ULE_LOG,
		    NULL, NULL, 0, NULL, &err_code, 3,
		    0, lpb->lpb_type,
		    0, lpb->lpb_id.id_instance,
		    0, lg_id.id_lgid.id_instance);
	return (LG_BADPARAM);
    }

    /*
    ** Allocate an LPD, causing lpd_type to be set to LPD_TYPE.
    */
    if ((lpd = (LPD *)LG_allocate_cb(LPD_TYPE)) == 0)
    {
	(VOID)LG_unmutex(&lpb->lpb_mutex);
	return (LG_EXCEED_LIMIT);
    }

    /*
    ** CLEANUP: error returns after this point must free the lpd before
    ** returning!
    */
    lfb = (LFB *)LGK_PTR_FROM_OFFSET(lpb->lpb_lfb_offset);

    /*
    ** If this isn't a real user database, but is instead the "NOTDB"
    ** database which is used by system processes such as the DMFRCP and
    ** DMFACP daemons, then it has a special reserved LDB slot and does not
    ** get located by its database information buffer, therefore we
    ** can skip locking and scanning the ldb queue.
    */
    end_offset = LGK_OFFSET_FROM_PTR(&lgd->lgd_ldb_next);

    /*
    ** When both the lgd_ldb_q and ldb must be mutexed, always take
    ** the lgd_ldb_q_mutex, then ldb_mutex.
    */

    /* Lock and hold the ldb queue mutex */
    if (status = LG_mutex(SEM_EXCL, &lgd->lgd_ldb_q_mutex))
	return(status);

    if (flag & LG_NOTDB)
    {
	/* NOTDB lives in reserved slot 1 of the LDB table */
	ldbb_table = (SIZE_TYPE *)LGK_PTR_FROM_OFFSET(lgd->lgd_ldbb_table);
	ldb = (LDB *)LGK_PTR_FROM_OFFSET(ldbb_table[1]);
	/* NOTE(review): failure here returns while still holding
	** lpb_mutex and lgd_ldb_q_mutex - mutex-get is treated as
	** can't-fail throughout this path. */
	if (status = LG_mutex(SEM_EXCL, &ldb->ldb_mutex))
	    return(status);

	/*
	** IF the notdb has already been initialized, then we have some
	** caller who is adding the notdb with a different buffer, thus
	** we didn't match when we searched the database list for a
	** matching ldb_buffer field. Since we really don't care about the
	** ldb_buffer for the notdb (the notdb is the notdb, after all),
	** we'll treat this case as though the ldb buffer fields matched.
	*/
	if (ldb->ldb_type == LDB_TYPE)
	{
	    /* Count new reference to LDB. */
	    ldb->ldb_lpd_count++;
	}
	else
	{
	    /*
	    ** first use of NOTDB; initialize it.
	    */
	    lgd->lgd_ldb_inuse++;
	    initialize_ldb = TRUE;
	}
    }
    else
    {
	/*
	** Scan database list to see if this database is already known. Each
	** database is identified by a "database information buffer", which DMF
	** passes in. This buffer contains items such as the database name,
	** owner name, etc. If the database information buffer passed to LGadd
	** exactly matches the database information buffer of an existing LDB,
	** then this database is already known (has already been added by
	** another logging system process).
	*/
	for (ldb_offset = lgd->lgd_ldb_next; ldb_offset != end_offset;)
	{
	    ldb = (LDB *)LGK_PTR_FROM_OFFSET(ldb_offset);

	    if (ldb->ldb_l_buffer != l_buffer ||
		MEcmp(ldb->ldb_buffer, buffer, l_buffer))
	    {
		ldb_offset = ldb->ldb_next;
		continue;
	    }

	    if ( CXcluster_enabled() )
	    {
		/*
		** Node recovery must use distinct ldb context per node
		** log file
		*/
		cur_db_lfb = (LFB *)LGK_PTR_FROM_OFFSET(ldb->ldb_lfb_offset);

		if ((lfb->lfb_l_nodename || cur_db_lfb->lfb_l_nodename) &&
		    (lfb->lfb_l_nodename != cur_db_lfb->lfb_l_nodename ||
		     MEcmp(lfb->lfb_nodename, cur_db_lfb->lfb_nodename,
				lfb->lfb_l_nodename)))
		{
		    ldb_offset = ldb->ldb_next;
#ifdef xDEBUG
		    TRdisplay("%@ RCP-P1: Recovering %~t, ignore ldb for %~t %x\n",
			lfb->lfb_l_nodename, lfb->lfb_nodename,
			cur_db_lfb->lfb_l_nodename, cur_db_lfb->lfb_nodename,
			flag & LG_CSP_RECOVER);
#endif
		    continue;
		}
	    }

	    if (status = LG_mutex(SEM_EXCL, &ldb->ldb_mutex))
		return(status);
	    /*
	    ** Check again after semaphore wait.
	    ** If LDB is no longer a match (it was in the
	    ** process of being eradicated while we waited for
	    ** the ldb_mutex), and start the search again from
	    ** the top of the queue.
	    */
	    if (ldb->ldb_type != LDB_TYPE ||
		ldb->ldb_l_buffer != l_buffer ||
		MEcmp(ldb->ldb_buffer, buffer, l_buffer))
	    {
		(VOID)LG_unmutex(&ldb->ldb_mutex);
		ldb_offset = lgd->lgd_ldb_next;
		continue;
	    }
	    /* Found a live match; ldb_mutex is held on exit from loop */
	    break;
	}

	if (ldb_offset != end_offset)
	{
	    /*
	    ** LDB exists. If the database is already known to be
	    ** inconsistent, then no new adds of the database are permitted,
	    ** unless the caller acknowledges that it "knows" that the
	    ** database is inconsistent by passing the "pretend consistent"
	    ** flag (used by verifydb).
	    */
	    if (ldb->ldb_status & LDB_INVALID)
	    {
		if ( (flag & LG_PRETEND_CONSISTENT) == 0 )
		{
		    (VOID)LG_unmutex(&ldb->ldb_mutex);
		    (VOID)LG_unmutex(&lgd->lgd_ldb_q_mutex);
		    LG_deallocate_cb(LPD_TYPE, (PTR)lpd);
		    (VOID)LG_unmutex(&lpb->lpb_mutex);
		    return (LG_DB_INCONSISTENT);
		}
	    }

	    /*
	    ** If the database reference count is zero, then the database
	    ** must be opened by the RCP before the server can use it.
	    ** Mark the status opendb_pending - this will suspend any thread
	    ** making an LGwrite call on this database (note that the first
	    ** thing a server does after opening a database is to write an
	    ** OPENDB log record) until the RCP has finished opening it.
	    **
	    ** If the database reference count is not zero, but the database
	    ** is undergoing REDO recovery, then we cannot allow new servers
	    ** to access the database until recovery is complete. Set the
	    ** database status to opendb_pending and opn_wait.
	    **
	    ** NOTE that if we begin to support READ-ONLY databases and
	    ** servers are able to open databases without writing an OPENDB
	    ** record, then we must come up with a new method of suspending
	    ** database openers until recovery is complete.
	    */
	    /* Bug 90140. If the database is currently pending a close then
	    ** mark the open as in CLOSE_WAIT. This will ensure that the close
	    ** is processed before this open and prevent locking errors on
	    ** the journals
	    */
	    if (ldb->ldb_lpd_count == 0)
	    {
		if ((ldb->ldb_status & LDB_PURGE) == 0)
		{
		    if ((ldb->ldb_status & LDB_OPENDB_PEND) == 0)
		    {
			ldb->ldb_status |= LDB_OPENDB_PEND;
			if (ldb->ldb_status & LDB_CLOSEDB_PEND)
			    ldb->ldb_status |= LDB_CLOSE_WAIT;
			if (flag & LG_PRETEND_CONSISTENT)
			    ldb->ldb_status |= LDB_PRETEND_CONSISTENT;
			if (flag & LG_READONLY)
			    ldb->ldb_status |= LDB_READONLY;
			if (flag & LG_RODB)
			    ldb->ldb_status |= LDB_RODB;
			SignalEvent = LGD_OPENDB;
		    }
		}
		else
		    ldb->ldb_status &= ~(LDB_PURGE);
	    }
	    else if (ldb->ldb_status & LDB_RECOVER)
	    {
		/*
		** The database is open, but is being recovered.
		** Set the opendb_pending and opn_wait flags - this will
		** prevent any new transactions from proceeding on this
		** database until recovery is complete. Marking this
		** database as OPENDB_PEND will not cause the database to
		** be processed in count_opens because of the opn_wait flag.
		*/
		ldb->ldb_status |= (LDB_OPENDB_PEND | LDB_OPN_WAIT);
	    }

	    /* Count new reference to LDB. */
	    ldb->ldb_lpd_count++;
	}
	else
	{
	    /*
	    ** This database is NOT known.
	    **
	    ** If the caller has passed special flags indicating that they
	    ** require that the newly-added database must have a particular
	    ** DB_ID assigned to it, then ensure that the new LDB gets the
	    ** right ID.
	    **
	    ** Otherwise, just pick the next LDB off the free list.
	    */

	    /*
	    ** Allocate a new LDB
	    ** returning with the ldb_mutex held
	    ** and lgd_ldb_inuse incremented.
	    */
	    if ((ldb = (LDB *)LG_allocate_cb(LDB_TYPE)) == 0)
	    {
		LG_deallocate_cb(LPD_TYPE, (PTR)lpd);
		(VOID)LG_unmutex(&lgd->lgd_ldb_q_mutex);
		(VOID)LG_unmutex(&lpb->lpb_mutex);
		return (LG_EXCEED_LIMIT);
	    }

	    initialize_ldb = TRUE;
	}
    }

#ifdef xDEBUG
    /*
    ** For a while, we were having problems with corruption of the LFB/LDB
    ** large block queues, and this debugging code helped to track those
    ** problems down.
    */
    if (ldb->ldb_id.id_id == 0)
    {
	TRdisplay("%@ LGadd: args were:(%d,%d).%x.%p.%x.%p\n",
		    lg_id.id_lgid.id_id, lg_id.id_lgid.id_instance,
		    flag, buffer, l_buffer, db_id);
	LG_debug_wacky_ldb_found(lgd, ldb);
	return (LG_BADPARAM);
    }
#endif

    /*
    ** NOTE: Be careful about adding error returns after this point,
    ** because any such error return must first free up BOTH the LPD AND
    ** the LDB, if an LDB was actually allocated.
    */

    /*
    ** Initialize the LDB, if one was allocated
    ** or if first use of NOTDB LDB.
    */
    if (initialize_ldb)
    {
	MEcopy((PTR)buffer, l_buffer, (PTR)ldb->ldb_buffer);
	ldb->ldb_l_buffer = l_buffer;
	ldb->ldb_type = LDB_TYPE;
	ldb->ldb_status = LDB_ACTIVE;
	ldb->ldb_stat.read = 0;
	ldb->ldb_stat.write = 0;
	ldb->ldb_stat.begin = 0;
	ldb->ldb_stat.wait = 0;
	ldb->ldb_stat.force = 0;
	ldb->ldb_stat.end = 0;
	ldb->ldb_lxbo_count = 0;
	ldb->ldb_lxb_count = 0;
	ldb->ldb_lpd_count = 1;
	ldb->ldb_lfb_offset = lpb->lpb_lfb_offset;
	/* Clear journal and dump windows and backup addresses */
	ldb->ldb_j_first_la.la_sequence = 0;
	ldb->ldb_j_first_la.la_block = 0;
	ldb->ldb_j_first_la.la_offset = 0;
	ldb->ldb_j_last_la.la_sequence = 0;
	ldb->ldb_j_last_la.la_block = 0;
	ldb->ldb_j_last_la.la_offset = 0;
	ldb->ldb_d_first_la.la_sequence = 0;
	ldb->ldb_d_first_la.la_block = 0;
	ldb->ldb_d_first_la.la_offset = 0;
	ldb->ldb_d_last_la.la_sequence = 0;
	ldb->ldb_d_last_la.la_block = 0;
	ldb->ldb_d_last_la.la_offset = 0;
	ldb->ldb_sbackup.la_sequence = 0;
	ldb->ldb_sbackup.la_block = 0;
	ldb->ldb_sbackup.la_offset = 0;
	ldb->ldb_sback_lsn.lsn_high = 0;
	ldb->ldb_sback_lsn.lsn_low = 0;
	ldb->ldb_eback_lsn.lsn_high = 0;
	ldb->ldb_eback_lsn.lsn_low = 0;

	/*
	** Assume no simulated MVCC journal writes.
	**
	** This may be changed by LGalter(LG_A_JFIB)
	*/
	MEfill(sizeof(ldb->ldb_jfib), 0, &ldb->ldb_jfib);

	/*
	** Set last_commit, last_lsn, and first_la to
	** the current values from the header.
	*/
	ldb->ldb_last_commit = lfb->lfb_header.lgh_last_lsn;
	ldb->ldb_last_lsn = lfb->lfb_header.lgh_last_lsn;
	ldb->ldb_first_la = lfb->lfb_header.lgh_end;

	/*
	** Initialize active transaction queue to empty.
	*/
	ldb->ldb_active_lxbq.lxbq_next = ldb->ldb_active_lxbq.lxbq_prev =
		LGK_OFFSET_FROM_PTR(&ldb->ldb_active_lxbq.lxbq_next);

	ldb->ldb_lgid_low = 0;
	ldb->ldb_lgid_high = 0;

	/*
	** Extract the external Database Id from the info buffer to
	** put in an accessable place of the ldb.
	*/
	I4ASSIGN_MACRO(ldb->ldb_buffer[DB_DB_MAXNAME+DB_OWN_MAXNAME],
			ldb->ldb_database_id);

	if (flag & LG_NOTDB)
	{
	    ldb->ldb_status |= LDB_NOTDB;
	}
	else
	{
	    if (flag & LG_JOURNAL)
		ldb->ldb_status |= LDB_JOURNAL;
	    if (flag & LG_PRETEND_CONSISTENT)
		ldb->ldb_status |= LDB_PRETEND_CONSISTENT;
	    if (flag & LG_READONLY)
		ldb->ldb_status |= LDB_READONLY;
	    if (flag & LG_RODB)
		ldb->ldb_status |= LDB_RODB;
	}

	if ((ldb->ldb_status & LDB_NOTDB) == 0)
	{
	    if ((lfb->lfb_status & LFB_USE_DIIO) == 0)
	    {
		/*
		** signal to the RCP that local use of this database is
		** beginning. The database remains in pending-open state
		** until the RCP acknowledges the open.
		*/
		ldb->ldb_status |= LDB_OPENDB_PEND;
		SignalEvent = LGD_OPENDB;
	    }
	}
    }

    /*
    ** The LPD (Logging system Process-Database connection block) contains
    ** pointers to its associated database and process blocks, and contains
    ** a list of all transactions which this process has begun within this
    ** database:
    */
    lpd->lpd_ldb = LGK_OFFSET_FROM_PTR(ldb);
    lpd->lpd_lpb = LGK_OFFSET_FROM_PTR(lpb);

    lpd->lpd_lxbq.lxbq_next = lpd->lpd_lxbq.lxbq_prev =
	LGK_OFFSET_FROM_PTR(&lpd->lpd_lxbq.lxbq_next);
    lpd->lpd_lxb_count = 0;

    /* Change various counters. */
    lpb->lpb_lpd_count++;
    lgd->lgd_stat.add++;

    /* Queue LPD to the LPB. */
    lpd->lpd_next = lpb->lpb_lpd_next;
    lpd->lpd_prev = LGK_OFFSET_FROM_PTR(&lpb->lpb_lpd_next);
    next_lpd = (LPD *)LGK_PTR_FROM_OFFSET(lpb->lpb_lpd_next);
    next_lpd->lpd_prev = lpb->lpb_lpd_next = LGK_OFFSET_FROM_PTR(lpd);

    /*
    ** If the adding process uses fast commit, then mark the database
    ** as open with FC protocols. Should a crash occur, all updates to
    ** this db since the last Consistency Point will need to be redone.
    */
    if ((flag & LG_FCT) && (lpb->lpb_status & LPB_FCT))
	ldb->ldb_status |= LDB_FAST_COMMIT;

    /* If opener wants MVCC, ensure that it is on, found or not */
    if ( flag & LG_MVCC )
	ldb->ldb_status |= LDB_MVCC;

    /* Return identifier. */
    *db_id = lpd->lpd_id;

    if ( initialize_ldb )
    {
	/* Lastly, insert LDB on the active queue.
	*/
	ldb->ldb_next = lgd->lgd_ldb_next;
	ldb->ldb_prev = end_offset;
	next_ldb = (LDB *)LGK_PTR_FROM_OFFSET(lgd->lgd_ldb_next);
	next_ldb->ldb_prev = lgd->lgd_ldb_next = LGK_OFFSET_FROM_PTR(ldb);
    }

    /*
    ** Unwind the mutexes
    */
    (VOID)LG_unmutex(&ldb->ldb_mutex);
    (VOID)LG_unmutex(&lgd->lgd_ldb_q_mutex);
    (VOID)LG_unmutex(&lpb->lpb_mutex);

    /* If any events to signal, do so */
    if ( SignalEvent )
	LG_signal_event(SignalEvent, 0, FALSE);

    return (OK);
}
/*{ ** Name: psq_bgn_session - Begin a parser session. ** ** INTERNAL PSF call format: status = psq_bgn_session(&psq_cb, &sess_cb); ** ** EXTERNAL call format: status = psq_call(PSQ_BGN_SESSION, &psq_cb, &sess_cb); ** ** Description: ** The psq_bgn_session function begins a parser session. It should be ** called each time a new user connects to a server. There may be ** many parser sessions per database server. There should be one parser ** session for each invocation of the database system that is connected ** to the server. When starting a parser session, one has to tell it ** what query language to use, and other session parameters. ** ** Inputs: ** psq_cb ** .psq_qlang The query language to use. ** .psq_decimal ** .psf_decspec TRUE indicates that the decimal marker ** has been specified. FALSE means use the ** default (a "."). ** .psf_decimal The character to use as a decimal marker ** (if specified). ** .psq_distrib Indicator for whether distributed ** statements and constructs should be ** accepted. ** .psq_sessid Session id ** .psq_server address of server control block ** .psq_adf_cb Pointer to session's ADF_CB ** .psq_dbid Database id for this session. ** .psq_user User name of ** .psq_dba User name of dba ** .psq_group Group id of session ** .psq_aplid Application id of session ** .psq_flag bitmask containing the following flags: ** .psq_catupd TRUE means catalogs updateable ** .psq_warnings Set to TRUE if user wishes to see ** warnings on unsupported commands ** .psq_idxstruct Structure for creating new indexes ** (e.g. DB_ISAM_STORE) ** .psq_udbid Unique database id for this session. 
** .psq_ustat User status flags from SCS_ICS ** .psq_dbxlate Case translation semantics for the db ** sess_cb Pointer to session control block ** (Can be NULL) ** ** Outputs: ** psq_cb ** .psq_error Error information ** .err_code What error occurred ** E_PS0000_OK Success ** E_PS0001_INTERNAL_ERROR Internal PSF problem ** E_PS0201_BAD_QLANG Bad query language specifier ** E_PS0203_NO_DECIMAL No decimal marker specified ** E_PS0204_BAD_DISTRIB Bad distributed ** specification ** E_PS0205_SRV_NOT_INIT Server not initialized ** E_PS0206_TOO_MANY_SESS Too many sessions at one ** time ** Returns: ** E_DB_OK Function completed normally. ** E_DB_WARN Function completed with warning(s) ** E_DB_ERROR Function failed; non-catastrophic error ** E_DB_SEVERE Session is to be aborted ** E_DB_FATAL Function failed; catastrophic error ** Exceptions: ** none ** ** Side Effects: ** Causes memory to be allocated. ** Increments the session count in the server control block. ** ** History: ** 01-oct-85 (jeff) ** written ** 28-jul-86 (jeff) ** Added initialization of pss_catupd and pss_idxstruct ** 26-aug-86 (seputis) ** Removed definition of yaccstream ** 13-apr-87 (puree) ** Initialize prototype list for dynamic SQL. ** 24-apr-87 (stec) ** init pss_project. ** 11-may-87 (stec) ** store psq_udbid to pss_dbid. ** 04-sep-87 (stec) ** Added critical region code where needed. ** 02-oct-87 (stec) ** Added pss_journaling initialization. ** 13-jun-88 (stec) ** Added initialization of pss_ruset for DB procs. ** 08-mar-89 (andre) ** Copy dba_drop_all from PSQ_CB to PSS_SESBLK. ** 15-mar-89 (ralph) ** GRANT Enhancements, Phase 1: ** Copy psq_aplid to pss_aplid; ** Copy psq_group to pss_group. ** 16-mar-89 (neil) ** Initialized rule field. ** 27-jul-89 (jrb) ** Copy numeric literals flag into session cb. ** 27-oct-89 (ralph) ** Copy user status flags to session control block. ** 11-oct-89 (ralph) ** Initialize pss_rgset and pss_raset. 
** 28-dec-89 (andre) ** Copy fips_mode from PSQ_CB to PSS_SESBLK. ** 13-feb-90 (andre) ** set scf_stype to SCU_EXCLUSIVE before calling scu_swait. ** 12-sep-90 (sandyh) ** Added support for session memory value calculated from psf ** memory startup parameter. ** 15-nov-90 (andre) ** check the return status after calling SCF to acquire or to release a ** semaphore. ** If an error occurred when trying to acquire the semaphore, return ** E_DB_SEVERE to abort the session. ** If an error occurred when trying to release the semaphore, return ** E_DB_FATAL to bring down the server. ** 17-may-91 (andre) ** store DBA name into sess_cb->pss_dbaname and NULL-terminate. ** 08-nov-91 (rblumer) ** merged from 6.4: 25-jul-91 (andre) ** if (psq_cb->psq_flag & PSQ_STRIP_NL_IN_STRCONST), set bit ** PSS_STRIP_NL_IN_STRCONST in sess_cb->pss_ses_flag. this will ** indicate that we are connected to an older FE, so the scanners ** will continue to strip NLs inside quoted strings; ** this is required to fix bug 38098 ** 14-jan-92 (barbara) ** Included ddb.h for Star. Updated to check for distributed ** specification. ** 26-feb-92 (andre) ** if PSQ_REPAIR_SYSCAT is set in psq_cb->psq_flag, set ** PSS_REPAIR_SYSCAT in sess_cb->pss_ses_flags ** 30-mar-1992 (bryanp) ** Fill in pss_sess_owner with a session-unique owner name for use ** by temporary tables which are owned by this session. ** 02-jun-92 (andre) ** initialize pss_dependencies_stream to NULL to avloid use of illegal ** address throughout the parser. ** 24-nov-92 (ralph) ** CREATE SCHEMA: ** Initialize pss_prvgoval ** 22-dec-92 (rblumer) ** initialize pointer for statement-level rule list. 
** 14-jan-93 (andre) ** remember whether we are running UPGRADEDB - this will enable us to ** decide whether IIDEVICES can be dropped - which is needed by ** UPGRADEDB ** 15-mar-93 (ralph) ** DELIM_IDENT: initialize pss_dbxlate to zero ** 08-apr-93 (andre) ** names of rule list headers in sess_cb have changed (and their ** number has doubled) ** 26-mar-93 (ralph) ** DELIM_IDENT: Must initialize pss_dbxlate from psq_cb.psq_dbxlate ** and pss_cat_owner from psq_cat_owner. ** 10-aug-93 (andre) ** fixed cause of a compiler warning ** 08-sep-93 (swm) ** Changed sizeof(DB_SESSID) to sizeof(CS_SID) to reflect recent CL ** interface revision. ** 20-sep-93 (rogerk) ** Changed default table create semantics to be WITH JOURNALING. ** Initialized the pss_ses_flag setting to include PSS_JOURNALING ** which mimics the user requesting "set journaling" to indicate that ** tables created should be journaled. ** 08-oct-93 (rblumer) ** increased values allowed in pss_trace vector, using PSS_TVALS. ** 18-oct-93 (rogerk) ** Added support for journal default override. Check psf server ** control block flag for PSF_NO_JNL_DEFAULT override before setting ** the session parse flag to assume journaling on table creates. ** 15-nov-93 (andre) ** add code to initialize a newly added sess_cb->pss_flattening_flags ** 01-nov-93 (anitap) ** if PSQ_INGRES_PRIV is set in psq_cb->psq_flag, set ** PSS_INGRES_PRIV in sess_cb->pss_ses_flags. ** 17-dec-93 (rblumer) ** "FIPS mode" no longer exists. It was replaced some time ago by ** several feature-specific flags (e.g. flatten_nosingleton and ** direct_cursor_mode). So I removed all FIPS_MODE flags. ** 02-jan-94 (andre) ** if starting a local session, call DMF to determine whether the ** database to which we are connected is being journaled and record ** that information by setting (or not setting) PSS_JOURNALED_DB bit ** in pss_ses_flags ** 7-jan-94 (swm) ** Bug #58635 ** Added PTR cast for pss_owner which has changed type to PTR. 
** 17-mar-94 (robf) ** Add support for PSQ_SELECT_ALL flag ** 13-Feb-1995 (canor01) ** initialize the pss_audit field in the session control block ** 09-Oct-1998 (jenjo02) ** Removed SCF semaphore functions, inlining the CS calls instead. ** 23-mar-1999 (thaju02) ** Modified '$Sess' to use #define DB_SESS_TEMP_OWNER. (B94067) ** 01-Dec-2000 (hanal04) Bug 100680 INGSRV 1123 ** If PSQ_RULE_UPD_PREFETCH is set turn on PSS_RULE_UPD_PREFETCH ** in the session control block to signify that we should use ** the prefetch stategy required to ensure consitent behaviour in ** updating rules fired by updates. ** 10-Jan-2001 (jenjo02) ** Remove callback to SCF to get session id and ADF_CB; ** *ADF_CB now supplied by scsinit in PSQ_CB. ** 30-Jan-2004 (schka24) ** Get rid of a type-cast warning on adf cb. ** 3-Feb-2005 (schka24) ** Num-literals renamed to parser-compat, fix here. ** 15-june-06 (dougi) ** Add support for "before" triggers. ** 30-aug-06 (thaju02) ** If PSQ_RULE_DEL_PREFETCH is set turn on PSS_RULE_DEL_PREFETCH ** in the session control block, for prefetch strategy to ** be applied for deletes. (B116355) ** 26-Oct-2009 (kiria01) SIR 121883 ** Scalar sub-query support: Added copy of ** psq_flag.PSQ_NOCHK_SINGLETON_CARD to session flag ** for defaulting SET CARDINALITY_CHECK ** November 2009 (stephenb) ** Batch execution; initilization of new fields. ** 29-apr-2010 (stephenb) ** Init batch_copy_optim. ** 04-may-2010 (miket) SIR 122403 ** Init new sess_cb->pss_stmt_flags2. ** 19-May-2010 (kiria01) b123766 ** Get cardinality check default from server block not psq_cb ** 21-Jul-2010 (kschendel) SIR 124104 ** Initialize default compression from facility cb. ** 14-Oct-2010 (kschendel) SIR 124544 ** Initialize default result structure from facility cb. ** 19-Nov-2010 (kiria01) SIR 124690 ** Add support for setting installation wide collation defaults. 
*/
DB_STATUS
psq_bgn_session(
	register PSQ_CB		*psq_cb,
	register PSS_SESBLK	*sess_cb)
{
    i4			err_code;
    i4			i;
    DB_STATUS		status = E_DB_OK;
    STATUS		sem_status;
    i4			sem_errno;
    bool		leave_loop = TRUE;
    ULM_RCB		ulm_rcb;

    /*
    ** No error to begin with.
    */
    psq_cb->psq_error.err_code = E_PS0000_OK;

    /*
    ** Do as much validity checking as possible before allocating any memory.
    ** That way, there won't be any cleaning up to do for the majority of
    ** errors.
    */

    /*
    ** Check for server initialized.  This code could be placed within a
    ** critical region, but this is not necessary, since this is a flag
    ** test.
    */
    if (!Psf_srvblk->psf_srvinit)
    {
	(VOID) psf_error(E_PS0205_SRV_NOT_INIT, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /*
    ** Check for valid language spec.
    */
    if (psq_cb->psq_qlang != DB_QUEL && psq_cb->psq_qlang != DB_SQL)
    {
	(VOID) psf_error(E_PS0201_BAD_QLANG, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /*
    ** Check whether language is allowed in this server.  This will be useful
    ** when we have configurable servers, where some query languages can be
    ** used and some can't.  Flag test only, so no critical region needed.
    */
    if ((psq_cb->psq_qlang & Psf_srvblk->psf_lang_allowed) == 0)
    {
	(VOID) psf_error(E_PS0202_QLANG_NOT_ALLOWED, 0L, PSF_CALLERR,
	    &err_code, &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /*
    ** Make sure that the decimal character is actually specified.
    */
    if (!psq_cb->psq_decimal.db_decspec)
    {
	(VOID) psf_error(E_PS0203_NO_DECIMAL, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /* Check distributed specification
    **
    ** a=local_server, b=distrib_server, c=distrib_session
    **
    **		 a,b
    **
    **	     00  01  11  10
    **	    -----------------
    **	  c |   |   |   |   |
    **	  0 | 1 | 1 | 0 | 0 |
    **	    |   |   |   |   |
    **	    ----------------- ==> ERROR
    **	    |   |   |   |   |
    **	  1 | 1 | 0 | 0 | 1 |
    **	    |   |   |   |   |
    **	    -----------------
    */
    if (   !(psq_cb->psq_distrib & (DB_1_LOCAL_SVR | DB_3_DDB_SESS))
	|| ((~psq_cb->psq_distrib & DB_2_DISTRIB_SVR)
	    && (psq_cb->psq_distrib & DB_3_DDB_SESS)) )
    {
	psf_error(E_PS0204_BAD_DISTRIB, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error,0);
	return (E_DB_ERROR);
    }

    /*
    ** Check for too many sessions in server at one time.
    ** This code must be executed as a critical region.
    ** NOTE(review): the error raised here is E_PS0208_TOO_MANY_SESS; the
    ** function header lists E_PS0206 — confirm which code callers expect.
    */
    do					/* something to break out of */
    {
	/* get the semaphore */
	if (sem_status = CSp_semaphore(1, &Psf_srvblk->psf_sem)) /* exclusive */
	{
	    status = E_DB_SEVERE;	/* abort the session */
	    sem_errno = E_PS020A_BGNSES_GETSEM_FAILURE;
	    break;
	}

	if (Psf_srvblk->psf_nmsess >= Psf_srvblk->psf_mxsess)
	{
	    (VOID) psf_error(E_PS0208_TOO_MANY_SESS, 0L, PSF_CALLERR,
		&err_code, &psq_cb->psq_error, 0);
	    status = E_DB_ERROR;
	    break;
	}

	/* Increment the session count */
	Psf_srvblk->psf_nmsess++;
	sess_cb->pss_psessid = ++Psf_srvblk->psf_sess_num;

	/* leave_loop has already been set to TRUE */
    } while (!leave_loop);

    /* if semaphore has been successfully acquired, try to release it */
    if (sem_status == OK)
    {
	if (sem_status = CSv_semaphore(&Psf_srvblk->psf_sem))
	{
	    status = E_DB_FATAL;	/* bring down the server */
	    sem_errno = E_PS020B_BGNSES_RELSEM_FAILURE;
	}
    }

    /*
    ** if an error was encountered while trying to get or to release a
    ** semaphore, report it here
    */
    if (sem_status != OK)
    {
	(VOID) psf_error(sem_errno, sem_status, PSF_INTERR, &err_code,
	    &psq_cb->psq_error, 0);
    }

    if (DB_FAILURE_MACRO(status))
    {
	return(status);
    }

    /*
    ** Initialize the case translation semantics stuff
    */
    sess_cb->pss_dbxlate = psq_cb->psq_dbxlate;
    sess_cb->pss_cat_owner = psq_cb->psq_cat_owner;

    /*
    ** Copy the user name and dba name to the session control block.
    */
    STRUCT_ASSIGN_MACRO(psq_cb->psq_user.db_tab_own, sess_cb->pss_user);
    STRUCT_ASSIGN_MACRO(psq_cb->psq_dba, sess_cb->pss_dba);
    STRUCT_ASSIGN_MACRO(psq_cb->psq_group, sess_cb->pss_group);
    STRUCT_ASSIGN_MACRO(psq_cb->psq_aplid, sess_cb->pss_aplid);

    /* copy DBA name into sess_cb->pss_dbaname and NULL-terminate */
    {
	u_i2	dba_name_len;

	dba_name_len = (u_i2) psf_trmwhite((u_i4) sizeof(sess_cb->pss_dba),
	    (char *) &sess_cb->pss_dba);
	MEcopy((PTR) &sess_cb->pss_dba, dba_name_len,
	    (PTR) sess_cb->pss_dbaname);
	sess_cb->pss_dbaname[dba_name_len] = EOS;
    }

    /*
    ** Build a DB_OWN_NAME which contains a session-unique owner name.  This
    ** owner name will be used for temporary tables which are owned by this
    ** session.
    */
    {
	char	temp_sess_id[10];

	STmove(DB_SESS_TEMP_OWNER, ' ', sizeof(sess_cb->pss_sess_owner),
	    (char *)&sess_cb->pss_sess_owner);
	/*
	** We can't convert directly into the sess_owner field because CVlx
	** null-terminates the result, and we don't want the trailing null
	*/
	CVlx(sess_cb->pss_psessid, temp_sess_id);
	/* 8 hex digits of the session id overwrite bytes 5..12 of the name */
	MEcopy(temp_sess_id, 8, &sess_cb->pss_sess_owner.db_own_name[5]);
    }

    /*
    ** Start with per-user quota of memory.  Note that user may have overridden
    ** the default value at server startup in which case we will use calculated
    ** amount (pool/sessions); otherwise, default amount will be used.
    */
    sess_cb->pss_memleft = (Psf_srvblk->psf_sess_mem) ? Psf_srvblk->psf_sess_mem
	: PSF_SESMEM;

    /*
    ** Initialize the user range table.
    */
    if (pst_rginit(&sess_cb->pss_usrrange) != E_DB_OK)
    {
	return (E_DB_FATAL);
    }

    /*
    ** Initialize the auxiliary range table.
    */
    if (pst_rginit(&sess_cb->pss_auxrng) != E_DB_OK)
    {
	return (E_DB_FATAL);
    }

    /*
    ** Open a memory stream for the symbol table.  The symbol table is
    ** composed of a list of blocks.  Allocate the symbol table at the
    ** same time (ULM_OPEN_AND_PALLOC).
    */
    ulm_rcb.ulm_facility = DB_PSF_ID;
    ulm_rcb.ulm_poolid = Psf_srvblk->psf_poolid;
    ulm_rcb.ulm_blocksize = sizeof(PSS_SYMBLK);
    ulm_rcb.ulm_memleft = &sess_cb->pss_memleft;
    /* Set pointer to stream handle for ULM */
    ulm_rcb.ulm_streamid_p = &sess_cb->pss_symstr;
    /* Open a private, thread-safe stream */
    ulm_rcb.ulm_flags = ULM_PRIVATE_STREAM | ULM_OPEN_AND_PALLOC;
    ulm_rcb.ulm_psize = sizeof(PSS_SYMBLK);
    if (ulm_openstream(&ulm_rcb) != E_DB_OK)
    {
	if (ulm_rcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    (VOID) psf_error(E_PS0F02_MEMORY_FULL, 0L, PSF_CALLERR,
		&err_code, &psq_cb->psq_error, 0);
	}
	else
	{
	    (VOID) psf_error(E_PS0A02_BADALLOC, ulm_rcb.ulm_error.err_code,
		PSF_INTERR, &err_code, &psq_cb->psq_error, 0);
	}
	/* a corrupt pool is fatal to the server; anything else kills only
	** this session */
	return((ulm_rcb.ulm_error.err_code == E_UL0004_CORRUPT) ? E_DB_FATAL
	    : E_DB_ERROR);
    }

    sess_cb->pss_symtab = (PSS_SYMBLK*) ulm_rcb.ulm_pptr;
    sess_cb->pss_symtab->pss_sbnext = (PSS_SYMBLK *) NULL;

    /*
    ** Allocate the YACC_CB.
    */
    if ((status = psl_yalloc(sess_cb->pss_symstr, &sess_cb->pss_memleft,
	(PTR *) &sess_cb->pss_yacc, &psq_cb->psq_error)) != E_DB_OK)
    {
	/*
	** If the allocation failed, remember to close the streams, so the
	** memory associated with it will be freed.
	*/
	(VOID) ulm_closestream(&ulm_rcb);
	return (status);
    }

    /*
    ** Fill in the control block header.
    */
    sess_cb->pss_next = (PSS_SESBLK *) NULL;
    sess_cb->pss_prev = (PSS_SESBLK *) NULL;
    sess_cb->pss_length = sizeof(PSS_SESBLK);
    sess_cb->pss_type = PSS_SBID;
    sess_cb->pss_owner = (PTR)DB_PSF_ID;
    sess_cb->pss_ascii_id = PSSSES_ID;

    /*
    ** Initialize the session control block.
    */
    /* Save the session id */
    sess_cb->pss_sessid = psq_cb->psq_sessid;
    /* Set pointer to session's ADF_CB */
    sess_cb->pss_adfcb = (ADF_CB *) psq_cb->psq_adfcb;
    /* No cursors yet */
    sess_cb->pss_numcursors = 0;
    /* Language has already been validated */
    sess_cb->pss_lang = psq_cb->psq_qlang;
    /* Decimal spec has already been validated */
    sess_cb->pss_decimal = psq_cb->psq_decimal.db_decimal;
    /* Distributed spec has already been validated */
    sess_cb->pss_distrib = psq_cb->psq_distrib;
    /* Save the database id */
    sess_cb->pss_dbid = psq_cb->psq_dbid;
    /* Save the unique database id */
    sess_cb->pss_udbid = psq_cb->psq_udbid;

    /* Initialize QSF_RCB for use by psfmem.c functions */
    sess_cb->pss_qsf_rcb.qsf_type = QSFRB_CB;
    sess_cb->pss_qsf_rcb.qsf_ascii_id = QSFRB_ASCII_ID;
    sess_cb->pss_qsf_rcb.qsf_length = sizeof(sess_cb->pss_qsf_rcb);
    sess_cb->pss_qsf_rcb.qsf_owner = (PTR)DB_PSF_ID;
    sess_cb->pss_qsf_rcb.qsf_sid = sess_cb->pss_sessid;

    /*
    ** so session reset all bit flags
    */
    sess_cb->pss_stmt_flags = sess_cb->pss_stmt_flags2 =
	sess_cb->pss_dbp_flags = sess_cb->pss_ses_flag = 0L;
    sess_cb->pss_flattening_flags = 0;

    /*
    ** Default table create semantics are to assume journaling unless
    ** the PSF_NO_JNL_DEFAULT override is set.
    */
    if ((Psf_srvblk->psf_flags & PSF_NO_JNL_DEFAULT) == 0)
	sess_cb->pss_ses_flag |= PSS_JOURNALING;

    /* catalog update flag */
    if (psq_cb->psq_flag & PSQ_CATUPD)
	sess_cb->pss_ses_flag |= PSS_CATUPD;

    /* warnings on unsupported commands */
    if (psq_cb->psq_flag & PSQ_WARNINGS)
	sess_cb->pss_ses_flag |= PSS_WARNINGS;

    /* INDICATE if the DBA may DROP everyone's tables */
    if (psq_cb->psq_flag & PSQ_DBA_DROP_ALL)
	sess_cb->pss_ses_flag |= PSS_DBA_DROP_ALL;

    /* INDICATE if the session may SELECT everyone's tables */
    if (psq_cb->psq_flag & PSQ_SELECT_ALL)
	sess_cb->pss_ses_flag |= PSS_SELECT_ALL;

    /*
    ** indicate that the session is allowed to INSERT/DELETE/UPDATE an index
    ** which is a catalog (but not an extended catalog)
    */
    if (psq_cb->psq_flag & PSQ_REPAIR_SYSCAT)
	sess_cb->pss_ses_flag |= PSS_REPAIR_SYSCAT;

    /*
    ** indicate that the session allows $ingres to drop/add constraint on
    ** tables owned by other users
    */
    if (psq_cb->psq_flag & PSQ_INGRES_PRIV)
	sess_cb->pss_ses_flag |= PSS_INGRES_PRIV;

    if (psq_cb->psq_flag & PSQ_ROW_SEC_KEY)
	sess_cb->pss_ses_flag |= PSS_ROW_SEC_KEY;

    /* See if passwords, roles allowed */
    if (psq_cb->psq_flag & PSQ_PASSWORD_NONE)
	sess_cb->pss_ses_flag |= PSS_PASSWORD_NONE;
    if (psq_cb->psq_flag & PSQ_ROLE_NONE)
	sess_cb->pss_ses_flag |= PSS_ROLE_NONE;
    if (psq_cb->psq_flag & PSQ_ROLE_NEED_PW)
	sess_cb->pss_ses_flag |= PSS_ROLE_NEED_PW;

    /* remember whether we are running UPGRADEDB */
    if (psq_cb->psq_flag & PSQ_RUNNING_UPGRADEDB)
	sess_cb->pss_ses_flag |= PSS_RUNNING_UPGRADEDB;

    /* Pick up serverwide default for card check */
    if (Psf_srvblk->psf_flags & PSF_NOCHK_SINGLETON_CARD)
	sess_cb->pss_ses_flag |= PSS_NOCHK_SINGLETON_CARD;

    /* Initialize pss_project.
    */
    sess_cb->pss_ses_flag |= PSS_PROJECT;	/* pss_project = TRUE */

    /* init last statement */
    sess_cb->pss_last_sname[0] = EOS;
    /* batch optimization switch starts undefined */
    sess_cb->batch_copy_optim = PSS_BATCH_OPTIM_UNDEF;

    /*
    ** if starting a local session, determine whether the database is being
    ** journaled
    */
    if (~psq_cb->psq_distrib & DB_3_DDB_SESS)
    {
	DMC_CB		dmc_cb, *dmc = &dmc_cb;
	DMC_CHAR_ENTRY	dmc_char;

	MEfill(sizeof(dmc_cb), (u_char) 0, (PTR) dmc);
	dmc->type = DMC_CONTROL_CB;
	dmc->length = sizeof(*dmc);
	dmc->dmc_op_type = DMC_DATABASE_OP;
	dmc->dmc_session_id = (PTR) sess_cb->pss_sessid;
	dmc->dmc_flags_mask = DMC_JOURNAL;
	dmc->dmc_char_array.data_address= (PTR) &dmc_char;
	dmc->dmc_char_array.data_out_size = sizeof(dmc_char);
	dmc->dmc_db_id = (char *) sess_cb->pss_dbid;

	status = dmf_call(DMC_SHOW, (PTR) dmc);
	if (DB_FAILURE_MACRO(status))
	{
	    (VOID) psf_error(E_PS020E_CANT_GET_DB_JOUR_STATUS,
		dmc->error.err_code, PSF_INTERR, &err_code,
		&psq_cb->psq_error, 0);
	    return(status);
	}

	if (dmc_char.char_value == DMC_C_ON)
	{
	    sess_cb->pss_ses_flag |= PSS_JOURNALED_DB;
	}
    }

    /* Save the storage structure for indexes */
    sess_cb->pss_idxstruct = psq_cb->psq_idxstruct;

    /* Make session copy of parser compatability settings */
    sess_cb->pss_parser_compat = psq_cb->psq_parser_compat;

    /* remember if NLs inside string constants need to be stripped */
    if (psq_cb->psq_flag & PSQ_STRIP_NL_IN_STRCONST)
	sess_cb->pss_ses_flag |= PSS_STRIP_NL_IN_STRCONST;

    /* no rule tree yet */
    sess_cb->pss_row_lvl_usr_rules =
	sess_cb->pss_row_lvl_sys_rules =
	sess_cb->pss_stmt_lvl_usr_rules =
	sess_cb->pss_stmt_lvl_sys_rules =
	sess_cb->pss_row_lvl_usr_before_rules =
	sess_cb->pss_row_lvl_sys_before_rules =
	sess_cb->pss_stmt_lvl_usr_before_rules =
	sess_cb->pss_stmt_lvl_sys_before_rules = (PST_STATEMENT *) NULL;

    if (psq_cb->psq_flag & PSQ_RULE_DEL_PREFETCH)
	sess_cb->pss_ses_flag |= PSS_RULE_DEL_PREFETCH;

    /* NOTE: UPD prefetch is carried in psq_flag2, not psq_flag */
    if(psq_cb->psq_flag2 & PSQ_RULE_UPD_PREFETCH)
	sess_cb->pss_ses_flag |= PSS_RULE_UPD_PREFETCH;

    /* copy user status flags to session control block */
    sess_cb->pss_ustat = psq_cb->psq_ustat;

    /*
    ** Initialize lots of pointer to NULL because nothing is happening yet.
    */
    sess_cb->pss_qbuf = sess_cb->pss_nxtchar = sess_cb->pss_prvtok =
	sess_cb->pss_bgnstmt = sess_cb->pss_endbuf =
	sess_cb->pss_prvgoval = (u_char *) NULL;

    /* initialize pss_audit */
    sess_cb->pss_audit = NULL;

    /* no open cursors yet in the cursor hash table */
    for (i = 0; i < PSS_CURTABSIZE; i++)
    {
	sess_cb->pss_curstab.pss_curque[i] = (PSC_CURBLK *) NULL;
    }

    /* initialize prototype list for dynamic SQL */
    sess_cb->pss_proto = (PST_PROTO *) NULL;

    /*
    ** pss_dependencies_stream, when not NULL, is expected to point at a valid
    ** stream descriptor.  After closing the stream we always reset
    ** pss_dependencies_stream to NULL, but in some cases we may end up checking
    ** pss_dependencies_stream before ever opening (and closing it).  As a
    ** result, you may end up using invalid address as a stream pointer.
    ** Initializing it here to NULL will ensure that it is non-NULL iff it
    ** points at a valid open stream descriptor.
    */
    sess_cb->pss_dependencies_stream = (PSF_MSTREAM *) NULL;

    /* No trace flags set */
    /* expect lint message */
    ult_init_macro(&sess_cb->pss_trace, PSS_TBITS, PSS_TVALS, PSS_TVAO);

    /* Cursor id set to 0, no cursors open yet */
    sess_cb->pss_crsid = 0;

    sess_cb->pss_create_compression = Psf_srvblk->psf_create_compression;

    /* SCF can pass a client requested result_structure, but if it
    ** doesn't, init from server default.
    */
    if (psq_cb->psq_result_struct != 0)
    {
	sess_cb->pss_result_struct = psq_cb->psq_result_struct;
	sess_cb->pss_result_compression = psq_cb->psq_result_compression;
    }
    else
    {
	sess_cb->pss_result_struct = Psf_srvblk->psf_result_struct;
	sess_cb->pss_result_compression = Psf_srvblk->psf_result_compression;
    }

    /* installation-wide collation defaults unless the client supplied one */
    if (psq_cb->psq_def_coll > DB_NOCOLLATION)
	sess_cb->pss_def_coll = psq_cb->psq_def_coll;
    else
	sess_cb->pss_def_coll = Psf_srvblk->psf_def_coll;
    if (psq_cb->psq_def_unicode_coll > DB_NOCOLLATION)
	sess_cb->pss_def_unicode_coll = psq_cb->psq_def_unicode_coll;
    else
	sess_cb->pss_def_unicode_coll = Psf_srvblk->psf_def_unicode_coll;

    return (E_DB_OK);
}
VOID
r_p_let()
{
    /*
    ** Parse a report-writer .LET command of the form
    **		variable := expression    or    variable = expression
    ** driven by a small state machine over the token stream (r_g_eskip).
    ** On success builds a P_LET tcmd entry; on any syntax error reports
    ** via r_error and returns without building one.
    */
    i4		tok_type;	/* current token class from r_g_eskip */
    DB_DATA_VALUE exp_type;	/* type of the parsed expression */
    i4		state = WANT_VAR;	/* parser state: variable, ':', '=', expr */
    char	*name;		/* target variable identifier */
    ITEM	item;		/* parsed item (parameter, then expression) */
    DB_DATA_VALUE *dbdv;	/* value slot of the target variable */
    LET		*let;		/* command payload being built */
    i4		status;		/* result of r_p_tparam / r_g_expr */
    DB_DATA_VALUE *con;		/* constant built for a NULL expression */

    while ((tok_type = r_g_eskip()) != TK_ENDSTRING)
    {
	switch(state)
	{
	case WANT_VAR:
	    /* target must be an identifier, or a quoted name when
	    ** expanded-namespace mode (St_xns_given) is on */
	    if ((tok_type == TK_ALPHA) ||
		((tok_type == TK_QUOTE) && (St_xns_given)))
	    {
		name = r_g_ident(FALSE);
		_VOID_ IIUGdlm_ChkdlmBEobject(name,name,FALSE);
		status = r_p_tparam(name, FALSE, &item, &exp_type);
	    }
	    /* NOTE: `status` is only defined when the branch above ran;
	    ** the || short-circuit below guarantees `status == NO_EXP` is
	    ** evaluated only in that case, so there is no uninitialized
	    ** read here despite appearances. */
	    if (((tok_type != TK_ALPHA) && (tok_type != TK_QUOTE)) ||
		((tok_type == TK_QUOTE) && (!St_xns_given)) ||
		(status == NO_EXP))
	    {
		r_error(0x3C, NONFATAL, Cact_tname, Cact_attribute,
			Cact_command, Cact_rtext, NULL);
		return;
	    }
	    dbdv = &(item.item_val.i_v_par->par_value);
	    state = WANT_COLON;
	    break;

	case WANT_COLON:
	    /* ':' is optional — accept both `:=` and bare `=` */
	    if (tok_type == TK_COLON)
	    {
		Tokchar++;
		state = WANT_EQUAL;
		break;
	    }
	    /* fall through */
	case WANT_EQUAL:
	    if (tok_type != TK_EQUALS)
	    {
		r_error(0x3C, NONFATAL, Cact_tname, Cact_attribute,
			Cact_command, Cact_rtext, NULL);
		return;
	    }
	    Tokchar++;
	    state = WANT_EXPR;
	    break;

	case WANT_EXPR:
	    status = r_g_expr(&item, &exp_type);
	    switch (status)
	    {
	    case NO_EXP:
		r_error(0x3E, NONFATAL, Cact_tname, Cact_attribute, Tokchar,
			Cact_command, Cact_rtext, NULL);
		return;

	    case BAD_EXP:
		/* r_g_expr already reported the error */
		return;

	    case GOOD_EXP:
		/* boolean targets are not assignable in .LET */
		if (exp_type.db_datatype == DB_BOO_TYPE)
		{
		    IIUGerr(E_RW003D_r_p_let_No_boolean,UG_ERR_FATAL,0);
		}
		break;

	    case NULL_EXP:
		/* assigning NULL: target must be nullable; materialize an
		** empty (null-valued) constant of the target's type */
		if (!AFE_NULLABLE_MACRO(dbdv->db_datatype))
		{
		    r_error(0x41, NONFATAL, Cact_tname, Cact_attribute,
			    Cact_command, Cact_rtext, NULL);
		    return;
		}
		item.item_type = I_CON;
		con = (DB_DATA_VALUE *)
			MEreqmem(0,sizeof(DB_DATA_VALUE),TRUE, (STATUS *) NULL);
		item.item_val.i_v_con = con;
		con->db_datatype = dbdv->db_datatype;
		con->db_length = dbdv->db_length;
		con->db_prec = dbdv->db_prec;
		con->db_data = (PTR) MEreqmem(0,con->db_length,TRUE,
			(STATUS *) NULL);
		adc_getempty(Adf_scb, con);
		break;
	    }

	    /* expression parsed — emit the P_LET command */
	    Cact_tcmd->tcmd_code = P_LET;
	    let = (LET *) MEreqmem(0,sizeof(LET),TRUE,(STATUS *) NULL);
	    Cact_tcmd->tcmd_val.t_v_let = let;
	    let->let_left = dbdv;
	    MEcopy((PTR)&item, (u_i2)sizeof(ITEM), (PTR)&(let->let_right));
	    return;
	}
    }
}
/*
** qeu_15_views - insert one IIDD_VIEWS tuple per view-text segment.
**
** Builds a QEC_L17_VIEWS template from the DDL info (object name/owner,
** query language, check-option flag) and then, for each query-text chunk
** on the qeuq_qry_tup chain, fills in the sequence number and text and
** inserts the tuple into IIDD_VIEWS at the CDB.
**
** Returns E_DB_OK on success; E_QE0018_BAD_PARAM_IN_CB (via
** qed_u2_set_interr) for an unrecognized language or an oversized text
** segment; otherwise whatever qel_i1_insert returns.
*/
DB_STATUS
qeu_15_views(
QEF_RCB		*i_qer_p,
QEUQ_CB		*i_quq_p,
QEC_LINK	*v_lnk_p)
{
    QES_DDB_SES		*ses_p = & i_qer_p->qef_cb->qef_c2_ddb_ses;
    QED_DDL_INFO	*ddl_p = v_lnk_p->qec_1_ddl_info_p;
    DD_LDB_DESC		*cdb_p =
	& ses_p->qes_d4_ddb_p->dd_d3_cdb_info.dd_i1_ldb_desc;
    QEUQ_DDB_CB		*ddb_p = & i_quq_p->qeuq_ddb_cb;
    QEF_DATA		*cur_p = i_quq_p->qeuq_qry_tup;
    QEC_L17_VIEWS	view_tup, *tup_p = & view_tup;
    QEQ_1CAN_QRY	*ins_p = v_lnk_p->qec_22_insert_p;
    DB_STATUS		status;
    i4			tup_ix;

    /* 1. fields constant across every entry */
    qed_u0_trimtail( ddl_p->qed_d1_obj_name, DB_OBJ_MAXNAME,
	    tup_p->l17_1_tab_name);
    qed_u0_trimtail( ddl_p->qed_d2_obj_owner, DB_OWN_MAXNAME,
	    tup_p->l17_2_tab_owner);

    switch (ddb_p->qeu_1_lang)
    {
    case DB_SQL:
	tup_p->l17_3_dml[0] = 'S';
	break;
    case DB_QUEL:
	tup_p->l17_3_dml[0] = 'Q';
	break;
    default:
	return( qed_u2_set_interr(E_QE0018_BAD_PARAM_IN_CB,
		    & i_qer_p->error) );
    }
    tup_p->l17_3_dml[1] = EOS;

    tup_p->l17_4_chk_option[0] = ddb_p->qeu_2_view_chk_b ? 'Y' : 'N';
    tup_p->l17_4_chk_option[1] = EOS;

    for (tup_ix = 0; tup_ix < i_quq_p->qeuq_cq; tup_ix++)
    {
	/* 2. per-entry fields; sequence numbers start at 1 */
	tup_p->l17_5_sequence = tup_ix + 1;

	if (cur_p->dt_size > QEK_256_VIEW_SIZE)
	    return( qed_u2_set_interr(E_QE0018_BAD_PARAM_IN_CB,
			& i_qer_p->error) );

	MEcopy((char *) cur_p->dt_data, cur_p->dt_size,
		tup_p->l17_6_txt_seg);
	tup_p->l17_6_txt_seg[cur_p->dt_size] = EOS;
	tup_p->l17_7_txt_size = cur_p->dt_size;

	cur_p = cur_p->dt_next;			/* advance to next item */

	/* 3. insert into IIDD_VIEWS */
	ins_p->qeq_c1_can_id = INS_632_DD_VIEWS;
	ins_p->qeq_c3_ptr_u.l17_views_p = tup_p;
	ins_p->qeq_c4_ldb_p = cdb_p;
	status = qel_i1_insert(i_qer_p, v_lnk_p);
	if (status)
	    return(status);
    }
    return(E_DB_OK);
}
/*{
** Name: opa_obylist	- replace variables in outer by inner
**
** Description:
**      This procedure will attempt to replace the variables in the outer
**      aggregate by using bylist attributes of the inner aggregate.
**
** Inputs:
**      global                          global state variable
**      inner                           inner function aggregate subquery
**                                      whose bylist will be used to attempt
**                                      to replace the outer aggregate variables
**      outer                           outer aggregate subquery whose variables
**                                      will possibly be replaced by the inner
**
** Outputs:
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	15-apr-86 (seputis)
**          initial creation
**	16-may-96 (inkdo01)
**	    Change 409554 has been backed out to fix bug 74793.  It claimed to
**	    eliminate obsolete code (because of change 409457), but the code
**	    appears to have still been necessary for queries involving outer
**	    joins of aggregate views.
**	3-dec-02 (inkdo01)
**	    Changes for range table expansion.
**	23-nov-05 (inkdo01)
**	    Fix a bug in one of the more complex expressions that derived from
**	    the range table expansion.
[@history_line@]...
*/
static VOID
opa_obylist(
	OPS_STATE          *global,
	OPS_SUBQUERY       *inner,
	OPS_SUBQUERY	   *outer)
{
    OPV_GBMVARS		outermap;   /* var map of outer aggregate */

    opv_smap(outer);		/* get variable map of outer aggregate */
    MEcopy((char *)&outer->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm,
	sizeof(outermap), (char *)&outermap);
    BTor(OPV_MAXVAR,
	(char *)&outer->ops_root->pst_sym.pst_value.pst_s_root.pst_rvrm,
	(char *)&outermap);
    opv_smap(inner);		/* get variable map of inner aggregate */
    BTand(OPV_MAXVAR, (char *)&inner->ops_agg.opa_blmap, (char *)&outermap);
    if (BTcount((char *)&outermap, OPV_MAXVAR) == 0)
	/* if the outer aggregate and the inner aggregate do not have any
	** variables in common then there can be no replacement so return.
	*/
	return;

    BTor(OPV_MAXVAR,
	(char *)&inner->ops_root->pst_sym.pst_value.pst_s_root.pst_tvrm,
	(char *)&outer->ops_aggmap);
    BTor(OPV_MAXVAR,
	(char *)&inner->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm,
	(char *)&outer->ops_aggmap);
    BTor(OPV_MAXVAR,
	(char *)&inner->ops_root->pst_sym.pst_value.pst_s_root.pst_rvrm,
	(char *)&outer->ops_aggmap);
				/* this set of variables could be substituted
				** in the outer, so they are not to be
				** assumed to be in the from list for
				** a cartesian product as in the query
				** "select r.a from r,s" */
    {
	OPV_GBMVARS	usedmap;    /* var map of variables which were
				    ** replaced by substituting the
				    ** bylist attributes of the inner */
	OPV_GBMVARS	newmap;     /* var map of the outer aggregate
				    ** after the inner aggregate bylist
				    ** used to substitute expressions
				    ** in the outer */
	OPV_GBMVARS	tempmap;

	MEfill(sizeof(usedmap), 0, (char *)&usedmap); /* init var map */
	opa_checkopt(global, inner->ops_agg.opa_byhead->pst_left,
	    outer->ops_root, &usedmap, &newmap);
				/* this routine will return information
				** on what the query tree would
				** look like if the inner aggregate
				** was substituted (without actually
				** doing the substitution) */
	MEcopy((char *)&newmap, sizeof(newmap), (char *)&tempmap);
	BTand(OPV_MAXVAR, (char *)&usedmap, (char *)&tempmap);
	/* This replaces the old (32 bit varmap) test of "usedmap &&
	** !(newmap & usedmap)". */
	if (BTcount((char *)&usedmap, OPV_MAXVAR) != 0 &&
				/* non-zero implies some optimizations
				** were found */
	    BTcount((char *)&tempmap, OPV_MAXVAR) == 0)
				/* the substitution would eliminate
				** those variables entirely */
	{
	    /* COMMIT THE CHANGES
	    ** the usedmap is non-zero so some variable substitutions were
	    ** found.  Moreover, the substitutions would entirely eliminate
	    ** the variables.
	    **
	    ** First make a copy of the bylist for the outer aggregate
	    ** if it exists (and if it is not the main query).  This
	    ** is done to avoid the problem of optimizing away the links
	    ** made by the outer aggregate.
	    ** FIXME - OPA_LINK will copy the bylists anyways so this copy
	    ** is not needed
	    */
	    OPV_IGVARS	innervarno; /* var number of inner
				    ** aggregate which will be
				    ** referenced for substitution */

	    if (outer->ops_sqtype == OPS_MAIN)
		global->ops_gmask |= OPS_TCHECK;
	    else if (outer->ops_agg.opa_byhead)
				/* outer aggregate has a by list */
	    {
		PST_QNODE	*bylist; /* used to traverse the bylist */

		/* traverse the bylist and copy the subtrees */
		for ( bylist = outer->ops_agg.opa_byhead->pst_left;
		      bylist && bylist->pst_sym.pst_type != PST_TREE;
		      bylist = bylist->pst_left)
		    opv_copytree( global, &bylist->pst_right );
	    }

	    /* Traverse the tree and actually perform the substitutions
	    ** instead of only checking for them */
	    innervarno = (*inner->ops_agg.opa_graft)->pst_sym.pst_value.
		pst_s_var.pst_vno;
	    if (outer->ops_global->ops_qheader->pst_numjoins > 0)
	    {	/* make sure that all the outer join semantics are
		** the same for all the relations referenced, or else
		** semantics are lost, i.e. cannot substitute if variables
		** have different maps */
		PST_J_MASK	pinner;
		PST_J_MASK	pouter;
		bool		first_time;
		OPV_IGVARS	gvar;
		PST_J_MASK	*ijmaskp;
		PST_J_MASK	*ojmaskp;
		OPL_PARSER	*pinnerp;
		OPL_PARSER	*pouterp;

		first_time = TRUE;
		pinnerp = outer->ops_oj.opl_pinner;
		pouterp = outer->ops_oj.opl_pouter;
		for (gvar = -1;
		     (gvar = BTnext((i4)gvar, (char *)&usedmap,
			(i4)BITS_IN(usedmap))) >= 0;)
		{
		    if (first_time)
		    {	/* remember the first variable's masks, then compare
			** every subsequent variable against them */
			MEcopy((PTR)&pinnerp->opl_parser[gvar],
			    sizeof(pinner), (PTR)&pinner);
			MEcopy((PTR)&pouterp->opl_parser[gvar],
			    sizeof(pouter), (PTR)&pouter);
			/* BUG FIX: first_time was never cleared, so the
			** MEcmp consistency check below was dead code and
			** mismatched outer-join semantics went undetected;
			** clear the flag after capturing the first masks */
			first_time = FALSE;
		    }
		    else
		    {
			if (MEcmp((PTR)&pinnerp->opl_parser[gvar],
				(PTR)&pinner, sizeof(pinner))
			    ||
			    MEcmp((PTR)&pouterp->opl_parser[gvar],
				(PTR)&pouter, sizeof(pouter))
			   )
			    return; /* outer join semantics of
				    ** variables to be substituted are
				    ** different, FIXME, try to substitute
				    ** one variable instead of 2 */
		    }
		}
		/* copy the outer join semantics to the substituted variable */
		ijmaskp = &pinnerp->opl_parser[innervarno];
		ojmaskp = &pouterp->opl_parser[innervarno];
		if ((BTnext((i4)-1, (char *)ijmaskp,
			(i4)BITS_IN(*ijmaskp)) >= 0)
		    ||
		    (BTnext((i4)-1, (char *)ojmaskp,
			(i4)BITS_IN(*ojmaskp)) >= 0)
		   )
		    opx_error(E_OP0288_OJAGG);
				    /* should not already have an
				    ** outer join defined on this aggregate
				    ** in this query */
		MEcopy((PTR)&pinner, sizeof(*ijmaskp), (PTR)ijmaskp);
		MEcopy((PTR)&pouter, sizeof(*ojmaskp), (PTR)ojmaskp);
	    }
	    outer->ops_vmflag = FALSE;	/* bitmaps need to be updated if a
					** substitution on the outer is made */
	    opa_commit(global, inner->ops_agg.opa_byhead->pst_left,
		&outer->ops_root, innervarno);
				    /* this routine will traverse
				    ** the tree in the same way as
				    ** opa_checkopt except that
				    ** substitutions will actually be made */
	    global->ops_gmask &= (~OPS_TCHECK);
	}
    }
}
/*
** qeu_16_tree - insert one IIDD_DDB_TREE tuple per query-tree segment.
**
** Builds a QEC_D10_TREE template keyed by the object's table id and query
** id, then walks the qeuq_tre_tup chain inserting one tuple per tree
** segment into IIDD_DDB_TREE at the CDB.  Tree sequence numbers start
** from 0.
**
** Returns E_DB_OK on success; E_QE0018_BAD_PARAM_IN_CB (via
** qed_u2_set_interr) for an oversized segment; otherwise whatever
** qel_i1_insert returns.
*/
DB_STATUS
qeu_16_tree(
QEF_RCB		*i_qer_p,
QEUQ_CB		*i_quq_p,
QEC_LINK	*v_lnk_p)
{
    QES_DDB_SES		*ses_p = & i_qer_p->qef_cb->qef_c2_ddb_ses;
    DD_LDB_DESC		*cdb_p =
	& ses_p->qes_d4_ddb_p->dd_d3_cdb_info.dd_i1_ldb_desc;
    QEC_D6_OBJECTS	*obj_p = v_lnk_p->qec_13_objects_p;
    QEF_DATA		*cur_p = i_quq_p->qeuq_tre_tup;
    QEC_D10_TREE	tree_tup, *tup_p = & tree_tup;
    QEQ_1CAN_QRY	*ins_p = v_lnk_p->qec_22_insert_p;
    DB_STATUS		status;
    i4			seg_ix;

    /* 1. fields constant across every entry */
    tup_p->d10_1_treetabbase = obj_p->d6_3_obj_id.db_tab_base;
    tup_p->d10_2_treetabidx = obj_p->d6_3_obj_id.db_tab_index;
    tup_p->d10_3_treeid1 = (i4) obj_p->d6_4_qry_id.db_qry_high_time;
    tup_p->d10_4_treeid2 = (i4) obj_p->d6_4_qry_id.db_qry_low_time;
    tup_p->d10_6_treemode = DB_VIEW;
#ifdef DDB_1TV_VAX
    tup_p->d10_7_treevers = DD_1TV_VAX;	    /* VAX binary representation */
#else
    tup_p->d10_7_treevers = DD_0TV_UNKNOWN;
#endif

    for (seg_ix = 0; seg_ix < i_quq_p->qeuq_ct; seg_ix++)
    {
	/* 2. per-segment fields; sequence starts from 0 */
	tup_p->d10_5_treeseq = seg_ix;

	/* segment must fit the tuple buffer (2-byte length included) */
	if (cur_p->dt_size > QEK_1024_TREE_SIZE + 2)
	    return( qed_u2_set_interr(E_QE0018_BAD_PARAM_IN_CB,
			& i_qer_p->error) );

	MEcopy((char *) cur_p->dt_data, cur_p->dt_size,
		tup_p->d10_8_treetree);
	tup_p->d10_8_treetree[cur_p->dt_size] = EOS;
	tup_p->d10_9_treesize = cur_p->dt_size;

	cur_p = cur_p->dt_next;			/* advance to next item */

	/* 3. insert into IIDD_DDB_TREE */
	ins_p->qeq_c1_can_id = INS_619_DD_DDB_TREE;
	ins_p->qeq_c3_ptr_u.d10_tree_p = tup_p;
	ins_p->qeq_c4_ldb_p = cdb_p;
	status = qel_i1_insert(i_qer_p, v_lnk_p);
	if (status)
	    return(status);
    }
    return(E_DB_OK);
}
/*{ ** Name: psy_print - Format query text to send to user ** ** Description: ** This function decodes query text that comes from the iiqrytext relation ** and formats it to send to the user. It is useful for the "help permit", ** "help integrity", and "help view" commands. ** ** Query text stored in the iiqrytext relation consists of human-readable ** text and special symbols. Some of these special symbols are numbers ** and strings sent from EQUEL or ESQL programs. Others stand for table ** names, range variables, and column numbers. This functio decodes all ** this stuff, and puts the result into a chain of PSY_QTEXT blocks to be ** sent back to the user. ** ** Inputs: ** mstream Memory stream to allocate blocks from ** map map of range var numbers to those ** in rngtab. ** block Current query text block ** text Pointer to the query text ** length Length of the query text ** rngtab The range table (used for decoding the ** special symbol standing for range ** variable numbers, and column numbers). ** The result range variable should stand ** for the table we're getting help on. 
** err_blk			Filled in if an error happens
**
** Outputs:
**	err_blk				Filled in if an error happened
**
**	Returns:
**	    E_DB_OK			Success
**	    E_DB_ERROR			Non-catastrophic failure
**	    E_DB_FATAL			Catastrophic failure
**	Exceptions:
**	    none
**
** Side Effects:
**	Sends query text to user
**
** History:
**	15-jul-86 (jeff)
**	    written
**	14-jul-93 (ed)
**	    replacing <dbms.h> by <gl.h> <sl.h> <iicommon.h> <dbdbms.h>
*/
DB_STATUS
psy_print(
	PSF_MSTREAM	*mstream,
	i4		map[],
	PSY_QTEXT	**block,
	u_char		*text,
	i4		length,
	PSS_USRRANGE	*rngtab,
	DB_ERROR	*err_blk)
{
    /*
    ** Reconstructs readable query text from the stored byte stream 'text'
    ** (length bytes).  The stream is a mix of plain characters and embedded
    ** opcodes (PSQ_HVSTR/PSQ_HVF8/PSQ_HVI4/PSQ_HVI2 host variables, and
    ** DB_INTRO_CHAR-prefixed range-variable/column/table references); each
    ** piece is emitted to the output stream via psy_put().
    **
    ** 'buf' must hold the largest formatted fragment; names are at most
    ** DB_TAB_MAXNAME bytes plus a terminator.
    */
    char	buf[1024 + DB_TAB_MAXNAME];
    i4		i4align;	/* aligned copies for values embedded in the */
    i2		i2align;	/* byte stream, which may be misaligned      */
    f8		f8align;
    register u_char *p;
    register i4	j;
    register i4	i;
    PSS_RNGTAB	*lastvar = (PSS_RNGTAB *) NULL;	/* range var seen most
						** recently via DB_RNG_VAR;
						** used to resolve DB_COL_NUM */
    DB_STATUS	status;
    i4		slength;

    /* First emit a "range of <var> is <table>" statement for each range
    ** variable in use, one per line. */
    for (i = 0; i < PST_NUMVARS; i++)
    {
	/* Only look at range vars that are being used */
	if (!rngtab->pss_rngtab[i].pss_used ||
	    rngtab->pss_rngtab[i].pss_rgno < 0)
	{
	    continue;
	}

	status = psy_put(mstream, (char*) "range of ",
	    (i4) sizeof("range of ") - 1, block, err_blk);
	if (DB_FAILURE_MACRO(status))
	    return (status);

	/* NULL terminate and trim blanks from range variable name */
	MEcopy(rngtab->pss_rngtab[i].pss_rgname, DB_TAB_MAXNAME, buf);
	buf[DB_TAB_MAXNAME] = '\0';
	slength = STtrmwhite(buf);
	status = psy_put(mstream, buf, slength, block, err_blk);
	if (DB_FAILURE_MACRO(status))
	    return (status);

	status = psy_put(mstream, " is ", (i4) sizeof(" is ") - 1,
	    block, err_blk);
	if (DB_FAILURE_MACRO(status))
	    return (status);

	/* NULL terminate and trim blanks from table name */
	MEcopy((char *) &rngtab->pss_rngtab[i].pss_tabname,
	    DB_TAB_MAXNAME, buf);
	buf[DB_TAB_MAXNAME] = '\0';
	slength = STtrmwhite(buf);
	status = psy_put(mstream, buf, slength, block, err_blk);
	if (DB_FAILURE_MACRO(status))
	    return (status);

	/* Newline after every range statement */
	status = psy_put(mstream, (char*) "\n", (i4) 1, block, err_blk);
	if (DB_FAILURE_MACRO(status))
	    return (status);
    }

    /* Now walk the stored text, expanding embedded opcodes as we go.
    ** Note: p is advanced inside the cases by the size of each operand. */
    for (p = text; p < (u_char*) text + length;)
    {
	switch (*p++)
	{
	case PSQ_HVSTR:
	    /* String sent from user program: emit as a quoted string,
	    ** escaping quotes and octal-escaping non-printing characters.
	    ** NOTE(review): the value is assumed to be NUL-terminated in
	    ** the stream (STlength), and no closing quote is emitted here
	    ** -- presumably the closing quote follows in the stream;
	    ** confirm against the writer of this format. */
	    slength = STlength((char*) p);

	    /* Emit opening quote */
	    status = psy_put(mstream, (char *) "\"", 1, block, err_blk);
	    if (DB_FAILURE_MACRO(status))
		return (status);

	    for (j = slength; j > 0; j--, p++)
	    {
		if (*p == '"')
		{
		    /* Escape any quote characters */
		    status = psy_put(mstream, "\\\"", 2, block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		}
		else if (!CMprint(p))
		{
		    /* Non-printing characters show up as escape sequence */
		    STprintf(buf, "\\%o", *p);
		    status = psy_put(mstream, buf, (i4) STlength(buf),
			block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		}
		else
		{
		    status = psy_put(mstream, (char*) p, 1, block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		}
	    }
	    break;

	case PSQ_HVF8:
	    /* f8 sent from user program: copy out for alignment, format */
	    MEcopy((char *) p, sizeof(f8align), (char *) &f8align);
	    STprintf(buf, "%f", f8align);
	    status = psy_put(mstream, buf, (i4) STlength(buf), block,
		err_blk);
	    if (DB_FAILURE_MACRO(status))
		return (status);
	    p += sizeof(f8align);
	    break;

	case PSQ_HVI4:
	    /* i4 sent from user program */
	    MEcopy((char *) p, sizeof(i4align), (char *) &i4align);
	    CVla(i4align, buf);
	    status = psy_put(mstream, buf, (i4) STlength(buf), block,
		err_blk);
	    if (DB_FAILURE_MACRO(status))
		return (status);
	    p += sizeof(i4align);
	    break;

	case PSQ_HVI2:
	    /* i2 sent from user program */
	    MEcopy((char *) p, sizeof(i2align), (char *) &i2align);
	    CVla((i4) i2align, buf);
	    status = psy_put(mstream, buf, (i4) STlength(buf), block,
		err_blk);
	    if (DB_FAILURE_MACRO(status))
		return (status);
	    p += sizeof(i2align);
	    break;

	case DB_INTRO_CHAR:
	    /* Intro char for special sequence; next byte selects the kind */
	    switch (*p++)
	    {
	    case DB_RNG_VAR:
		/* Range-variable reference: operand is an i4 range var
		** number, remapped through map[] before lookup. */

		/* Put a blank before the table name */
		status = psy_put(mstream, " ", 1, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);

		/* Get the range variable number */
		MEcopy((char *) p, sizeof(i4align), (char *) &i4align);
		p += sizeof(i4align);
		i4align = map[i4align];

		/* Look up range variable number */
		for (j = 0; j < PST_NUMVARS; j++)
		{
		    if (rngtab->pss_rngtab[j].pss_used &&
			rngtab->pss_rngtab[j].pss_rgno == i4align)
		    {
			break;
		    }
		}

		/* If found, give variable name, otherwise question marks */
		if (j < PST_NUMVARS)
		{
		    /* trim trailing blanks and NULL terminate */
		    MEcopy(rngtab->pss_rngtab[j].pss_rgname,
			DB_TAB_MAXNAME, buf);
		    buf[DB_TAB_MAXNAME] = '\0';
		    slength = STtrmwhite(buf);
		    status = psy_put(mstream, buf, slength, block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		    /* Remember the range variable for when we get the
		    ** colnum */
		    lastvar = &rngtab->pss_rngtab[j];
		}
		else
		{
		    lastvar = (PSS_RNGTAB*) 0;
		    /* Put question marks if not found */
		    status = psy_put(mstream, "???", 3, block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		}
		break;

	    case DB_COL_NUM:
		/* Column number for a range var: resolve against the
		** range variable remembered from the preceding DB_RNG_VAR */

		/* Get the column number */
		MEcopy((char *) p, sizeof(i4align), (char *) &i4align);
		p += sizeof(i4align);

		/* If we have a range variable, emit its column name;
		** otherwise put question marks */
		if (lastvar != (PSS_RNGTAB *) NULL)
		{
		    status = psy_put(mstream,
			lastvar->pss_attdesc[i4align]->att_nmstr,
			lastvar->pss_attdesc[i4align]->att_nmlen,
			block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		}
		else
		{
		    /* Don't know column name, just give question marks */
		    status = psy_put(mstream, "???", 3, block, err_blk);
		    if (DB_FAILURE_MACRO(status))
			return (status);
		}

		/* Put a blank after the column name */
		status = psy_put(mstream, " ", 1, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);
		break;

	    case DB_TBL_NM:
		/* The table name is in the result slot of the range table */

		/* Put a blank before the table name */
		status = psy_put(mstream, " ", 1, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);

		/* NULL terminate and trim blanks from the table name.
		** NOTE(review): copies sizeof(DB_TAB_NAME) bytes but
		** terminates at DB_TAB_MAXNAME -- assumes the two are
		** equal; confirm. */
		MEcopy((char *) &rngtab->pss_rsrng.pss_tabname,
		    sizeof(DB_TAB_NAME), buf);
		buf[DB_TAB_MAXNAME] = '\0';
		slength = STtrmwhite(buf);
		status = psy_put(mstream, buf, slength, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);

		/* Put a blank after the table name */
		status = psy_put(mstream, " ", 1, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);
		break;

	    case DB_RES_COL:
		/* Result column: column in result table */

		/* Put a blank before the column name */
		status = psy_put(mstream, " ", 1, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);

		/* Get the column number for the result column */
		MEcopy((char *) p, sizeof(i4align), (char *) &i4align);
		p += sizeof(i4align);

		/* Get the column name from the result range variable */
		lastvar = &rngtab->pss_rsrng;
		status = psy_put(mstream,
		    lastvar->pss_attdesc[i4align]->att_nmstr,
		    lastvar->pss_attdesc[i4align]->att_nmlen,
		    block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);

		/* Put a blank after the column name */
		status = psy_put(mstream, " ", 1, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);
		break;

	    default:
		/* Unknown special sequence: just put out as it came in.
		** NOTE(review): p - 1 points at the unknown sequence byte,
		** so this emits that byte plus the byte AFTER it (which the
		** outer loop will consume again); "as it came in" suggests
		** p - 2 (intro char + sequence byte) was intended. Left
		** as-is -- confirm against the stream writer before
		** changing. */
		status = psy_put(mstream, (char*) p - 1, 2, block, err_blk);
		if (DB_FAILURE_MACRO(status))
		    return (status);
		break;
	    }
	    break;

	default:
	    /* No special case, just put the char (already consumed by the
	    ** switch, hence p - 1).  Failure here is caught on the next
	    ** psy_put call or at the final newline below. */
	    status = psy_put(mstream, (char*) p - 1, 1, block, err_blk);
	    break;
	}
    }

    /* Put a newline just after the statement */
    status = psy_put(mstream, "\n", 1, block, err_blk);

    return (status);
}
/*{
** Name: opv_smap	- map query tree associated with subquery
**
** Description:
**      This routine will map the query tree associated with the subquery. A
**      subquery is typically associated with a PST_AGHEAD, or PST_ROOT node.
**	A flag is check to see if the variable map was invalidated by any
**      previous substitution.  For now this will be a consistency check
**      and the tree will be mapped anyways.  FIXME later change this to
**      avoid mapping the tree if there has been no substitutions.
**
** Inputs:
**      subquery                        ptr to subquery which will be mapped
**
** Outputs:
**      subquery->ops_root              this PST_RT_NODE will have the bitmaps
**                                      updated
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	3-jul-86 (seputis)
**          initial creation
**	22-apr-90 (seputis)
**	    fix rohm-haas bug (no bug number), aggregate on a select distinct view
**	24-jun-91 (seputis)
**	    turn off consistency check to avoid traversal of parse tree
**	5-dec-02 (inkdo01)
**	    Changes for range table expansion.
**	31-Aug-2006 (kschendel)
**	    Watch for HFAGG as well as RFAGG.
*/
VOID
opv_smap(
	OPS_SUBQUERY       *subquery)
{
    OPV_GBMVARS		map;	/* range var map of query tree fragment */

    if (subquery->ops_vmflag)	/* TRUE if varmap is up-to-date */
    {
	/*
	** When E_OP0388_VARBITMAP is defined, an up-to-date map short-
	** circuits the routine (optionally cross-checked under xDEBUG);
	** when it is NOT defined, the 'return' below is compiled out and
	** the tree is remapped regardless (the original "consistency
	** check" behavior described in the header).
	*/
#ifdef E_OP0388_VARBITMAP
	/* check if this var map is valid as claimed */
#ifdef xDEBUG
	MEfill(sizeof(map), 0, (char *)&map);
	opv_mapvar(subquery->ops_root->pst_left, &map);
	if (MEcmp((char *)&map, (char *)&subquery->ops_root->pst_sym.pst_value.
	    pst_s_root.pst_lvrm, sizeof(map)) != 0)
	    opx_error( E_OP0388_VARBITMAP); /* bit map
				** inconsistent with left side */
	MEfill(sizeof(map), 0, (char *)&map);
	opv_mapvar(subquery->ops_root->pst_right, &map);
	if (MEcmp((char *)&map, (char *)&subquery->ops_root->pst_sym.pst_value.
	    pst_s_root.pst_rvrm, sizeof(map)) != 0)
	    opx_error( E_OP0388_VARBITMAP); /* bit map
				** inconsistent with right side */
#endif
	return;
#endif
    }

    if ((subquery->ops_sqtype == OPS_FAGG)
	||
	(subquery->ops_sqtype == OPS_HFAGG)
	||
	(subquery->ops_sqtype == OPS_RFAGG)
	)
    {	/* map the bylist for the function aggregate */
	MEfill(sizeof(map), 0, (char *)&map);
	opv_mapvar(subquery->ops_agg.opa_byhead->pst_left, &map); /* map
				** the bylist portion of the function
				** aggregate */
	MEcopy((char *)&map, sizeof(map),
	    (char *)&subquery->ops_agg.opa_blmap);
	if (subquery->ops_root->pst_left == subquery->ops_agg.opa_byhead)
	{
	    /* Left child is the byhead itself: left varmap is the bylist
	    ** map OR'ed with the map of the AOP (aggregate operator) node */
	    OPV_GBMVARS	    aopmap;	/* map of AOP operator */

	    MEfill(sizeof(aopmap), 0, (char *)&aopmap);
	    opv_mapvar(subquery->ops_agg.opa_aop, &aopmap); /* map the AOP node
				** of the function aggregate */
	    MEcopy((char *)&map, sizeof(map),
		(char *)&subquery->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm);
	    BTor(OPV_MAXVAR, (char *)&aopmap,
		(char *)&subquery->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm);
	}
	else
	{   /* non-printing resdoms exist above the byhead so
	    ** the entire tree needs to be scanned */
	    MEfill(sizeof(PST_J_MASK), 0,
		(char *)&subquery->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm);
	    opv_mapvar(subquery->ops_root->pst_left,
		&subquery->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm
		); /* map the AOP node
				** of the function aggregate */
	}
    }
    else
    {
	/* map the left side of the tree */
	MEfill(sizeof(map), 0, (char *)&map);
	opv_mapvar(subquery->ops_root->pst_left, &map);
	MEcopy((char *)&map, sizeof(map),
	    (char *)&subquery->ops_root->pst_sym.pst_value.pst_s_root.pst_lvrm);
    }

    /* map the right side of the tree */
    MEfill(sizeof(map), 0, (char *)&map);
    opv_mapvar(subquery->ops_root->pst_right, &map);
    MEcopy((char *)&map, sizeof(map),
	(char *)&subquery->ops_root->pst_sym.pst_value.pst_s_root.pst_rvrm);

    subquery->ops_vmflag = TRUE;	/* bit maps are now valid */
}
/*
** Name: ERsend	- send a message to the audit log, operator, or error log.
**
** Description:
**	Dispatches 'message' (msg_length bytes) according to 'flag':
**	ER_AUDIT_MSG (UNIX only) posts it to the security-audit SysV message
**	queue named by II_AUDIT_IPC; ER_OPER_MSG sends it to the operator
**	(NT event log / messenger service, or syslog elsewhere) and then
**	appends a host-tagged copy to the error log via ERlog().
**
** Inputs:
**	flag		ER_ERROR_MSG, ER_AUDIT_MSG or ER_OPER_MSG
**	message		message text (may be 0 with ER_AUDIT_MSG to mean
**			"connect to the audit queue only")
**	msg_length	length of message in bytes
**
** Outputs:
**	err_code	CL error description, filled in on failure
**
** Returns:
**	OK, ER_BADPARAM, ER_NO_AUDIT, ER_BADSEND, or ERlog() status.
**
** NOTE(review): for ER_OPER_MSG this writes message[msg_length] = EOS,
** i.e. one byte past the length the caller passed -- the caller's buffer
** is assumed to have room for the terminator; confirm.
*/
STATUS
ERsend(i4 flag, char *message, i4 msg_length, CL_ERR_DESC *err_code)
{
# ifdef NT_GENERIC
    static bool	er_init = FALSE;	/* one-time messenger-service setup */
    static bool	is_w95 = FALSE;		/* TRUE: no NT net APIs available */
# else /* !NT_GENERIC */
    static int	er_ifi = -2;
    static int	ar_ifi = -2;		/* audit queue id; -2 = not yet
					** looked up, -1 = lookup failed */
# endif /* !NT_GENERIC */
    STATUS	status;
    char	tmp_buf[ER_MAX_LEN];
    char*	logmsg = message;	/* what finally goes to ERlog() */

    /*	Check for bad paramters. */
    CL_CLEAR_ERR( err_code );

    if ((message == 0 || msg_length == 0) && flag != ER_AUDIT_MSG)
	return (ER_BADPARAM);
    if ((flag != ER_ERROR_MSG) && (flag != ER_AUDIT_MSG) &&
	( flag != ER_OPER_MSG))
	return (ER_BADPARAM);

# ifndef NT_GENERIC
    if (flag & ER_AUDIT_MSG)
    {
	key_t	msg_key;
	char	*ipc_number;
	struct
	{
	    long    mtype;
	    char    mtext[ER_MAX_LEN];
	} msg;

	/* Lazily attach to the audit message queue on first use */
	if (ar_ifi == -2)
	{
	    NMgtAt("II_AUDIT_IPC", &ipc_number);
	    if (ipc_number && ipc_number[0])
	    {
		CVal(ipc_number, &msg_key);
		ar_ifi = msgget(msg_key, 0);
		if (ar_ifi == -1)
		{
		    SETCLERR(err_code, 0, ER_open);
		    return(ER_NO_AUDIT);
		}
	    }
	    else
	    {
		SETCLERR(err_code, 0, ER_open);
		return(ER_NO_AUDIT);
	    }
	}

	/*	Handle special case to connect only but not send message. */
	if (msg_length == 0 && message == 0)
	    return (OK);

	MEcopy(message, msg_length, msg.mtext);
	msg.mtype = 1;
	if (msgsnd(ar_ifi, &msg, msg_length, 0))
	{
	    SETCLERR(err_code, 0, ER_open);
	    return(ER_BADSEND);
	}
	return (OK);
    }
    else
# endif /* ! NT_GENERIC */
    if (flag & ER_OPER_MSG)
    {
	char	hostname[GL_MAXNAME];
	STATUS	status;		/* NOTE(review): shadows the outer
				** 'status'; NT branch results are
				** discarded -- presumably intentional
				** best-effort reporting */

	/* NOTE(review): writes one byte past msg_length; see header note */
	message[msg_length] = EOS;
	TRdisplay("ER Operator:\"%s\"\n",message);
	if (!ERsysinit)
	    ERinitsyslog();
# ifdef NT_GENERIC
	{
	    wchar_t *wmessage = NULL;

	    /*
	    ** Update the ReportEvent to report information in the event log.
	    */
	    if ( ReportEvent( EventLog,
			(WORD) EVENTLOG_INFORMATION_TYPE,
			(WORD) 0,		/* event category */
			(DWORD) I_ING_INFO,	/* event identifier */
			(PSID) NULL,
			(WORD) 1,		/* number of strings */
			(DWORD) 0,
			&message,
			NULL ) == FALSE)
		status = GetLastError();

	    /* One-time probe for the messenger-service entry points in
	    ** netapi32.dll (absent on Windows 9x) */
	    if ( !er_init )
	    {
		char		VersionString[256];
		FUNC_EXTERN BOOL	GVosvers(char *OSVersionString);

		GVosvers(VersionString);
		is_w95 = ( STstrindex(VersionString, "Microsoft Windows 9",
				      0, FALSE) != NULL ) ? TRUE : FALSE;
		if ( !is_w95 )  /* netapi32 only on NT */
		{
		    HANDLE hDll;

		    if ((hDll = LoadLibrary(TEXT("netapi32.dll"))) != NULL)
		    {
			pNetMessageNameAdd = (NET_API_STATUS (*)(LPCWSTR,LPCWSTR))
			    GetProcAddress(hDll, TEXT("NetMessageNameAdd"));
			pNetMessageNameDel = (NET_API_STATUS (*)(LPCWSTR,LPCWSTR))
			    GetProcAddress(hDll, TEXT("NetMessageNameDel"));
			pNetMessageBufferSend = (NET_API_STATUS (*)(LPCWSTR,LPCWSTR,LPCWSTR,LPBYTE,DWORD))
			    GetProcAddress(hDll, TEXT("NetMessageBufferSend"));
		    }
		    /* if any problem, pretend we don't support it */
		    if ( pNetMessageNameAdd == NULL ||
			 pNetMessageNameDel == NULL ||
			 pNetMessageBufferSend == NULL )
			is_w95 = TRUE;
		}
	    }

	    if ( !is_w95 )
	    {
		/*
		** Now, send the message to the server console,
		** putting up a message box (if the messenger service
		** is running.  Everything must be in Unicode.
		*/
		if ( whostname[0] == 0 )
		{
		    unsigned int len = sizeof(hostname);

		    /*
		    ** get the hostname in Unicode format for use
		    ** by messenger service
		    */
		    GetComputerName( (char *)hostname, &len );
		    MultiByteToWideChar( GetACP(), 0,
					 hostname, sizeof(hostname),
					 whostname, sizeof(whostname) );
		}

		/* initialize the messenger service */
		status = (*pNetMessageNameAdd)( whostname, msgname );
		if ( status != NERR_Success )
		    status = GetLastError();

		/* Allocate a buffer for the Unicode */
		wmessage = (wchar_t *) MEreqmem( 0,
				msg_length * sizeof(wchar_t), TRUE, &status );
		if ( wmessage )
		{
		    /* copy the message to the Unicode buffer */
		    MultiByteToWideChar( GetACP(), 0,
					 message, msg_length,
					 wmessage, msg_length * sizeof(wchar_t) );
		    status = (*pNetMessageBufferSend)( whostname, msgname,
					NULL, (LPBYTE) wmessage,
					msg_length*sizeof(wchar_t) );
		    if ( status != NERR_Success )
			status = GetLastError();
		    MEfree( (PTR)wmessage );
		}

		/* re-initialize the messenger service */
		status = (*pNetMessageNameDel)( whostname, msgname );
		if ( status != NERR_Success )
		    status = GetLastError();
	    }
	}
# elif defined(OS_THREADS_USED) && defined(any_aix)
	/* NOTE(review): 'message' is passed as the syslog format string;
	** a '%' in the text would be interpreted -- syslog_r(pri, "%s",
	** message) would be safer.  Left unchanged. */
	syslog_r( LOG_ALERT|LOG_ERR, message );
# else
	/* NOTE(review): same format-string concern as above */
	syslog( LOG_ALERT|LOG_ERR, message );
# endif /* NT_GENERIC */
    }

    /* For operator messages, prefix a host/INGSYSLOG tag before the
    ** copy that goes to the error log */
    if (flag & ER_OPER_MSG)
    {
	i4 msglen = 0;
	char* host = PMhost();

	MEfill( ER_MAX_LEN, 0, tmp_buf );
	/*
	** Format the message string for the event log.  As the source is
	** not known a fixed string of INGSYSLOG is used.
	*/
	TRformat( NULL, 0, tmp_buf, ER_MAX_LEN - 1,
	    "%8.8t::[INGSYSLOG , 00000000]: %@ ",
	    STlength(host), host );
	msglen = STlength(tmp_buf);
	STcat( tmp_buf, message );	/* append original message */
	msg_length += msglen;
	logmsg = tmp_buf;
    }

    status = ERlog( logmsg, msg_length, err_code );
    return( status );
}
/*{
** Name: gwf_init - initialize the gateway facility
**
** Description:
**	This function performs general gateway initialization.  Facility global
**	structures are allocated and initialized.
**
**	A ULM memory stream is set up for GWF for allocating the various GWF
**	data structures.
**
**	Gateway initialization exits are called to initialize the gateway.  The
**	identity of the initialization exits are obtained from Gwf_itab.
**
** Inputs:
**	gw_rcb->		Standard GWF control block
**	    gwr_dmf_cptr	The address of function "dmf_call()", so that
**				we can call back to DMF for, e.g., extended
**				catalog access.
**
** Output:
**	gw_rcb->		Standard GWF control block
**	    gwr_out_vdata1	Release id of Gateway.
**	    gwr_scfcb_size	size of CB for SCF to allocate per session.
**	    gwr_server		set to the Gwf_facility, for SCF to know.
**	    error->
**		err_code	One of the following error numbers.
**				E_GW0200_GWF_INIT_ERROR
**				E_GW0600_NO_MEM
**
**      Returns:
**	    E_DB_OK		Function completed normally.
**	    E_DB_ERROR		Cannot allocate Gwf_facility.
**	    E_DB_WARN		Success, informational status sent back to DMF
**				in the error.err_code field (either that there
**				is no gateway initialized, or that none of the
**				gateways needs transaction notification).
**
** History:
**	21-Apr-1989 (alexh)
**	    Created.
**	14-Dec-1989 (linda)
**	    Extended catalog table names were being filled in incorrectly; see
**	    comments below.
**	23-dec-89 (paul)
**	    Changed memory allocation strategy for the gateway.  See comments
**	    embedded in code.
**	26-mar-90 (linda)
**	    Changed error handling.  Changed to have one return point.
**	5-apr-90 (bryanp)
**	    Added improved calculation of GWF memory pool.  Pool size is now
**	    scaled by number of users.
**	9-apr-90 (bryanp)
**	    This function is now called via gwf_call(), and takes a gw_rcb.
**	18-apr-90 (bryanp)
**	    If SCF says not enough memory, return proper error code.
**	27-sep-90 (linda)
**	    Set up pointer to tidp tuple for extended attribute tables, in
**	    support of gateway secondary indexes.
**	5-dec-90 (linda)
**	    Initialize the tcb semaphore.  We were using it for locking the tcb
**	    list -- but it hadn't been initialized so locking was not working.
**	4-nov-91 (rickh)
**	    Return release identifier string at server initialization time.
**	    SCF spits up this string when poked with
**	    "select dbmsinfo( '_version' )"
**	7-oct-92 (daveb)
**	    fill in gwr_scfcb_size and gwr_server at init time so SCF
**	    can treat us as a first class citizen and make the session
**	    init calls.  Prototyped.
**	23-Oct-1992 (daveb)
**	    name semaphore.
**	21-sep-92 (schang)
**	    initialize individual gateway specific server wide memory pointer
**	05-mar-97 (toumi01)
**	    initialize the global trace flags array
**	24-jul-97 (stial01)
**	    gwf_init() Set gwx_rcb.xrcb_gchdr_size before calling gateway init.
*/
DB_STATUS
gwf_init( GW_RCB *gw_rcb )
{
    i4		i;
    SCF_CB	scf_cb;
    DB_STATUS	status;
    STATUS	cl_status;

    /* zero out the release id descriptor */
    MEfill(sizeof( DM_DATA ), 0, (PTR)&gw_rcb->gwr_out_vdata1 );

    /* Single-exit error handling: every failure path sets status and
    ** gw_rcb->gwr_error.err_code, then breaks out of this one-trip loop. */
    for (;;)	/* Something to break out of... */
    {
	/* allocate Gwf_facility */
	scf_cb.scf_type = SCF_CB_TYPE;
	scf_cb.scf_length = sizeof(SCF_CB);
	scf_cb.scf_session = DB_NOSESSION;
	scf_cb.scf_facility = DB_GWF_ID;
	scf_cb.scf_scm.scm_functions = 0;
	scf_cb.scf_scm.scm_in_pages = (sizeof(GW_FACILITY)/SCU_MPAGESIZE+1);
	if ((status = scf_call(SCU_MALLOC, &scf_cb)) != E_DB_OK)
	{
	    gwf_error(scf_cb.scf_error.err_code, GWF_INTERR, 0);
	    gwf_error(E_GW0300_SCU_MALLOC_ERROR, GWF_INTERR, 1,
		      sizeof(scf_cb.scf_scm.scm_in_pages),
		      &scf_cb.scf_scm.scm_in_pages);
	    /* Map memory-exhaustion errors to E_GW0600_NO_MEM */
	    switch (scf_cb.scf_error.err_code)
	    {
		case E_SC0004_NO_MORE_MEMORY:
		case E_SC0005_LESS_THAN_REQUESTED:
		case E_SC0107_BAD_SIZE_EXPAND:
		    gw_rcb->gwr_error.err_code = E_GW0600_NO_MEM;
		    break;

		default:
		    gw_rcb->gwr_error.err_code = E_GW0200_GWF_INIT_ERROR;
		    break;
	    }
	    break;
	}

	Gwf_facility = (GW_FACILITY *)scf_cb.scf_scm.scm_addr;
	Gwf_facility->gwf_tcb_list = NULL;

	/* Initialize the TCB-list semaphore before anyone can use it */
	cl_status = CSw_semaphore(&Gwf_facility->gwf_tcb_lock, CS_SEM_SINGLE,
				  "GWF TCB sem" );
	if (cl_status != OK)
	{
	    gwf_error(cl_status, GWF_INTERR, 0);
	    gw_rcb->gwr_error.err_code = E_GW0200_GWF_INIT_ERROR;
	    status = E_DB_ERROR;
	    break;
	}

	/*
	** Initialize memory allocation scheme for GWF.  We have the following
	** memory allocation scheme.
	**
	**	1. TCB
	**	    Allocated directly by SCF.  Allocation and deallocation
	**	    is controlled directly by GWF.  It loks like TCB's are
	**	    held until they are no longer valid (due to a DROP or
	**	    REGISTER INDEX) or until the server shuts down.  It's
	**	    not clear this is the best allocation strategy.
	**
	**	2. SCB
	**	    The session control block is allocated within its own
	**	    ULM memory stream.  Since there is no other information
	**	    that lives for the entire session, this is the only
	**	    information handled by this memory stream.  The stream
	**	    id is stored in the SCB.
	**
	**	3. RSB
	**	    The record control blocks containing information for a
	**	    particular access to a gateway table are allocated from
	**	    a separate stream initialized at the time the table is
	**	    "opened" for access and deleted at the time the table
	**	    is "closed".  The stream id is stored in the RSB.
	**
	**	4. Temporary Memory
	**	    Memory needed for a single operation such as
	**	    registering a table is allocated from a ULF memory
	**	    stream.  Such srteams must be opened and closed within a
	**	    single invocation of the GWF.
	**
	** At this time we initialize the pool from which ULM streams will be
	** allocated.
	*/
	Gwf_facility->gwf_ulm_rcb.ulm_facility = DB_GWF_ID;
	Gwf_facility->gwf_ulm_rcb.ulm_blocksize = SCU_MPAGESIZE;
	Gwf_facility->gwf_ulm_rcb.ulm_sizepool = gwf_def_pool_size();

	status = ulm_startup(&Gwf_facility->gwf_ulm_rcb);
	if (status != E_DB_OK)
	{
	    gwf_error(Gwf_facility->gwf_ulm_rcb.ulm_error.err_code,
		      GWF_INTERR, 0);
	    gwf_error(E_GW0310_ULM_STARTUP_ERROR, GWF_INTERR, 1,
		      sizeof(Gwf_facility->gwf_ulm_rcb.ulm_sizepool),
		      &Gwf_facility->gwf_ulm_rcb.ulm_sizepool);
	    if (Gwf_facility->gwf_ulm_rcb.ulm_error.err_code == E_UL0005_NOMEM)
		gw_rcb->gwr_error.err_code = E_GW0600_NO_MEM;
	    else
		gw_rcb->gwr_error.err_code = E_GW0200_GWF_INIT_ERROR;
	    break;
	}

	Gwf_facility->gwf_gw_active = 0;	/* assume no gateways. */
	Gwf_facility->gwf_gw_xacts = 0;		/* and no transaction handling */

	/* initialize the global trace flags array */
	MEfill(sizeof(Gwf_facility->gwf_trace), 0,
	       (PTR)Gwf_facility->gwf_trace);

	/* initialize each gateway's exit vector */
	for (i=0; i < GW_GW_COUNT; ++i)
	{
	    GWX_RCB	gwx_rcb;

	    gwx_rcb.xrcb_gwf_version = GWX_VERSION;
	    gwx_rcb.xrcb_exit_table =
		(GWX_VECTOR *)&Gwf_facility->gwf_gw_info[i].gwf_gw_exits[0];
	    gwx_rcb.xrcb_dmf_cptr = gw_rcb->gwr_dmf_cptr;
	    gwx_rcb.xrcb_gca_cb = gw_rcb->gwr_gca_cb;

	    /*
	    ** schang: init new field xrcb_xhandle, this field passes
	    **  individual gateway specific, server wide memory
	    **  pointer (sep-21-1992)
	    **  initialize xrcb_xbitset (aug-12-93)
	    */
	    gwx_rcb.xrcb_xhandle = NULL;
	    gwx_rcb.xrcb_xbitset = 0;
	    MEfill(sizeof( DM_DATA ), 0, (PTR)&gwx_rcb.xrcb_var_data1 );

	    /* refer to Gwf_itab to decide which initializations are required */
	    if (Gwf_itab[i] == NULL)
	    {
		Gwf_facility->gwf_gw_info[i].gwf_gw_exist = 0;
	    }
	    else if ((status = (*Gwf_itab[i])(&gwx_rcb)) == E_DB_OK)
	    {
		/* schang : new memory pointer initialized */
		Gwf_facility->gwf_gw_info[i].gwf_xhandle =
		    gwx_rcb.xrcb_xhandle;
		Gwf_facility->gwf_gw_info[i].gwf_xbitset =
		    gwx_rcb.xrcb_xbitset;
		Gwf_facility->gwf_gw_info[i].gwf_rsb_sz =
		    gwx_rcb.xrcb_exit_cb_size;
		Gwf_facility->gwf_gw_info[i].gwf_xrel_sz =
		    gwx_rcb.xrcb_xrelation_sz;
		Gwf_facility->gwf_gw_info[i].gwf_xatt_sz =
		    gwx_rcb.xrcb_xattribute_sz;
		Gwf_facility->gwf_gw_info[i].gwf_xidx_sz =
		    gwx_rcb.xrcb_xindex_sz;
		Gwf_facility->gwf_gw_info[i].gwf_gw_exist = 1;

		/* initialize extended catalog names ("iigwNN_relation" etc.,
		** NN = gateway index) */
		STprintf((char *)&Gwf_facility->gwf_gw_info[i].gwf_xrel_tab_name,
			 "iigw%02d_relation", i);
		STprintf((char *)&Gwf_facility->gwf_gw_info[i].gwf_xatt_tab_name,
			 "iigw%02d_attribute", i);
		STprintf((char *)&Gwf_facility->gwf_gw_info[i].gwf_xidx_tab_name,
			 "iigw%02d_index", i);

		/* pass the release identifier up to SCF */
		if ( gwx_rcb.xrcb_var_data1.data_address != 0 )
		{
		    MEcopy( (PTR)&gwx_rcb.xrcb_var_data1, sizeof( DM_DATA ),
			    (PTR)&gw_rcb->gwr_out_vdata1 );
		}

		/*
		** Now set up pointer to tidp tuple for this gateway's extended
		** attribute catalog, to support gateway secondary indexes.
		*/
		Gwf_facility->gwf_gw_info[i].gwf_xatt_tidp =
		    gwx_rcb.xrcb_xatt_tidp;

		/*
		** Note, if >1 gateway is initialized, then if any gateway needs
		** transaction notification, DMF will always notify.  Also note,
		** we check error.err_code here even though status is E_DB_OK.
		** Not great, but I can't think of a better way...
		*/
		if (gwx_rcb.xrcb_error.err_code == E_GW0500_GW_TRANSACTIONS)
		    Gwf_facility->gwf_gw_xacts = 1;

		Gwf_facility->gwf_gw_active = 1;
	    }
	    else    /* status != E_DB_OK */
	    {
		gwf_error(gwx_rcb.xrcb_error.err_code, GWF_INTERR, 0);
		gw_rcb->gwr_error.err_code = E_GW0200_GWF_INIT_ERROR;
		break;
	    }
	}

	/* Let SCF know our per-session CB size and server handle even if a
	** gateway exit failed above */
	gw_rcb->gwr_scfcb_size = sizeof(GW_SESSION);
	gw_rcb->gwr_server = (PTR)Gwf_facility;

	if (status != E_DB_OK)
	    break;  /* gateway exit failed */

	/*
	** Now that we're ready to go, assign global Dmf_cptr its value (== the
	** address of function dmf_call()).  We need to do this to remove
	** explicit calls to the DMF facility, resolving circular references of
	** shareable libraries when building.
	*/
	Dmf_cptr = gw_rcb->gwr_dmf_cptr;

	break;
    }

    if (status != E_DB_OK)
    {
	return(status);
    }
    else
    {
	gw_rcb->gwr_error.err_code = E_DB_OK;
	return(E_DB_OK);
    }
}
/*
** Name: sc0a_qrytext(), write query text in one or more segments
**	 to the audit log.
**
** Description:
**	This routine is an interface to SXF to write audit records
**	from SCF.  Query text is accumulated across calls in the
**	session's SCS_QTACB buffer (qta_buff/qta_len) and flushed to
**	SXF in SCS_QTA_BUFSIZE-sized chunks, each tagged with an
**	incrementing sequence number (qta_seq).
**
** Inputs:
**	scb		SCB
**
**	qrytext		Query text to be written
**
**	qrylen		Length of qrytext in bytes
**
**	ops		Operations (start/end/more): bitmask of
**			SC0A_QT_START (reset sequence), SC0A_QT_NEWREC
**			(flush pending data as its own record),
**			SC0A_QT_END (flush remainder and reset).
**
** Outputs:
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	none
**
** History:
**	6-jul-93 (robf)
**	    Created.
**	23-dec-93 (robf)
**          Changed QUERY_TEXT to QUERYTEXT for consistency with
**	    other messages.
*/
DB_STATUS
sc0a_qrytext(
	SCD_SCB		*scb,
	char		*qrytext,
	i4		qrylen,
	i4		ops
)
{
    SXF_RCB	sxf_rcb;
    DB_STATUS	status=E_DB_OK;
    DB_STATUS	(*sxfptr)();	/* NOTE(review): unused in this routine */
    SCS_QTACB	*qtacb= &scb->scb_sscb.sscb_qtacb;
    char	*tptr;		/* read cursor into qrytext */
    char	*bufptr;	/* write cursor into qta_buff */
    DB_ERROR	err_code;
    CL_SYS_ERR	syserr;
    i4		local_error;

    if (!(Sc_main_cb->sc_capabilities & SC_C_C2SECURE))
    {
	/*
	** This is not a C2 server so can just return
	*/
	return E_DB_OK;
    }
    if (!(scb->scb_sscb.sscb_facility & (1 << DB_SXF_ID)))
    {
	/*
	** SXF not initialized yet so quietly return,
	*/
	return E_DB_OK;
    }
    if (!(scb->scb_sscb.sscb_ics.ics_rustat&DU_UAUDIT_QRYTEXT))
    {
	/*
	** Session doesn't have query text auditing so don't audit
	*/
	return E_DB_OK;
    }
    /*
    ** If starting reset the audit sequence
    */
    if(ops& SC0A_QT_START)
    {
	qtacb->qta_seq=1;
	qtacb->qta_len=0;
    }
    /*
    ** If a new record is wanted flush the current one if any
    */
    if((ops&SC0A_QT_NEWREC) && qtacb->qta_len>0)
    {
	status=write_audit(scb, SXF_E_QRYTEXT,
		SXF_A_QRYTEXT|SXF_A_SUCCESS,
		"QUERYTEXT",
		sizeof("QUERYTEXT")-1,
		scb->scb_sscb.sscb_ics.ics_eusername,
		I_SX2729_QUERY_TEXT,
		FALSE,
		qtacb->qta_buff,
		qtacb->qta_len,
		qtacb->qta_seq,
		0,
		&err_code
		);
	if(status!=E_DB_OK)
	{
	    /* Log the failure but carry on accumulating */
	    _VOID_ uleFormat(&err_code, 0, &syserr,
			ULE_LOG, NULL, (char *)0, 0L, (i4 *)0,
			&local_error, 0);
	    _VOID_ uleFormat(NULL, E_SC023F_QRYTEXT_WRITE, NULL,
			ULE_LOG, NULL, (char *)0, 0L, (i4 *)0,
			&local_error, 0);
	}
	/*
	** And reset length for new record, increment sequence count
	*/
	qtacb->qta_len=0;
	qtacb->qta_seq++;
    }
    /*
    ** Build the text into the audit buffer
    */
    tptr=qrytext;
    bufptr=qtacb->qta_buff+qtacb->qta_len;
    while(qrylen>0)
    {
	/*
	** Fill the current buffer
	*/
	if(qtacb->qta_len<SCS_QTA_BUFSIZE)
	{
	    u_i2 copy_bytes;
	    /* Copy as much of the remaining text as fits */
	    if(qtacb->qta_len+qrylen>SCS_QTA_BUFSIZE)
		copy_bytes=SCS_QTA_BUFSIZE-qtacb->qta_len;
	    else
		copy_bytes=qrylen;
	    /*
	    ** Put the data in the buffer
	    */
	    MEcopy(tptr,copy_bytes,bufptr);
	    qrylen-=copy_bytes;
	    qtacb->qta_len+=copy_bytes;
	    bufptr+=copy_bytes;
	    tptr+=copy_bytes;
	}
	/*
	** Check if buffer is full
	*/
	if(qtacb->qta_len==SCS_QTA_BUFSIZE)
	{
	    /*
	    ** Buffer is full, write this chunk out
	    */
	    status=write_audit(scb, SXF_E_QRYTEXT,
		SXF_A_QRYTEXT|SXF_A_SUCCESS,
		"QUERYTEXT",
		sizeof("QUERYTEXT")-1,
		scb->scb_sscb.sscb_ics.ics_eusername,
		I_SX2729_QUERY_TEXT,
		FALSE,
		qtacb->qta_buff,
		qtacb->qta_len,
		qtacb->qta_seq,
		0,
		&err_code
		);
	    if(status!=E_DB_OK)
	    {
		_VOID_ uleFormat(&err_code, 0, &syserr,
			ULE_LOG, NULL, (char *)0, 0L, (i4 *)0,
			&local_error, 0);
		_VOID_ uleFormat(NULL, E_SC023F_QRYTEXT_WRITE, NULL,
			ULE_LOG, NULL, (char *)0, 0L, (i4 *)0,
			&local_error, 0);
		/* Give up on the remaining text for this call */
		break;
	    }
	    /*
	    ** Reset for next chunk
	    */
	    qtacb->qta_seq++;		/* Next sequence */
	    qtacb->qta_len=0;		/* Start of buffer */
	    bufptr=qtacb->qta_buff;
	}
    }
    /*
    ** At end flush any remaining buffer
    */
    if(ops& SC0A_QT_END)
    {
	if(qtacb->qta_len)
	{
	    /*
	    ** Need to flush data
	    */
	    status=write_audit(scb, SXF_E_QRYTEXT,
		SXF_A_QRYTEXT|SXF_A_SUCCESS,
		"QUERYTEXT",
		sizeof("QUERYTEXT")-1,
		scb->scb_sscb.sscb_ics.ics_eusername,
		I_SX2729_QUERY_TEXT,
		FALSE,
		qtacb->qta_buff,
		qtacb->qta_len,
		qtacb->qta_seq,
		0,
		&err_code
		);
	    if(status!=E_DB_OK)
	    {
		_VOID_ uleFormat(&err_code, 0, &syserr,
			ULE_LOG, NULL, (char *)0, 0L, (i4 *)0,
			&local_error, 0);
		_VOID_ uleFormat(NULL, E_SC023F_QRYTEXT_WRITE, NULL,
			ULE_LOG, NULL, (char *)0, 0L, (i4 *)0,
			&local_error, 0);
	    }
	}
	/*
	** And reset
	*/
	qtacb->qta_seq=1;
	qtacb->qta_len=0;
    }
    return status;
}
/*{
** Name: psy_dgroup - Removes members from a group
**
** INTERNAL PSF call format: status = psy_dgroup(&psy_cb, sess_cb);
**
** Description:
**	Removes members from existing group identifiers by issuing
**	RDF_UPDATE requests against iiusergroup.  Every group on the
**	psy_tblq queue is processed.  If the member queue (psy_usrq) is
**	empty, ALL members of each group are purged (RDR_PURGE);
**	otherwise each listed member is deleted individually (RDR_DELETE).
**	A missing member produces a warning from RDF, but processing of
**	the remaining members continues; the worst status seen is
**	accumulated and returned.  SQL language only.
**
** Inputs:
**	psy_cb
**	    .psy_tblq			head of group queue
**	    .psy_usrq			head of member queue
**	sess_cb				Pointer to session control block
**					(Can be NULL)
**
** Outputs:
**	psy_cb
**	    .psy_error			Filled in if error happens
**	Returns:
**	    E_DB_OK			Function completed normally.
**	    E_DB_INFO			One or more members were rejected.
**	    E_DB_WARN			One or more groups were rejected.
**	    E_DB_ERROR			Function failed; non-catastrophic.
**	Exceptions:
**	    none
**
** Side Effects:
**	Removes tuples from iiusergroup.
**
** History:
**	13-mar-89 (ralph)
**	    written
**	20-may-89 (ralph)
**	    Allow multiple groups to be specified.
**	12-mar-90 (andre)
**	    set rdr_2types_mask to 0.
**	22-may-90 (teg)
**	    init rdr_instr to RDF_NO_INSTR
*/
DB_STATUS
psy_dgroup(
	PSY_CB		*psy_cb,
	PSS_SESBLK	*sess_cb)
{
    RDF_CB		rdf_cb;
    register RDR_RB	*rdf_rb = &rdf_cb.rdf_rb;
    DB_USERGROUP	ugtuple;
    register DB_USERGROUP *ugtup = &ugtuple;
    DB_STATUS		worst_stat;	/* worst status over all requests */
    DB_STATUS		call_stat;	/* status of the most recent request */
    PSY_TBL		*grp;		/* current group queue entry */
    PSY_USR		*member;	/* current member queue entry */

    /* This code is called for SQL only */

    /*
    ** Fill in the part of RDF request block that will be constant
    ** across all requests; the group/member fields of the qrytuple
    ** are refreshed per iteration below.
    */
    pst_rdfcb_init(&rdf_cb, sess_cb);
    rdf_rb->rdr_update_op = RDR_DELETE;
    rdf_rb->rdr_status = DB_SQL;
    rdf_rb->rdr_types_mask = RDR_GROUP;
    rdf_rb->rdr_qrytuple = (PTR) ugtup;
    rdf_rb->rdr_qtuple_count = 1;

    MEfill(sizeof(ugtup->dbug_reserve),
	   (u_char)' ',
	   (PTR)ugtup->dbug_reserve);

    worst_stat = E_DB_OK;

    /* Walk the queue of group names */
    for (grp = (PSY_TBL *) psy_cb->psy_tblq.q_next;
	 grp != (PSY_TBL *) &psy_cb->psy_tblq;
	 grp = (PSY_TBL *) grp->queue.q_next)
    {
	MEcopy((PTR)&grp->psy_tabnm,
	       sizeof(ugtup->dbug_group),
	       (PTR)&ugtup->dbug_group);

	call_stat = E_DB_OK;

	if ((PSY_USR *) psy_cb->psy_usrq.q_next ==
	    (PSY_USR *) &psy_cb->psy_usrq)
	{
	    /* Member queue is empty: DROP ALL -- purge every member
	    ** of this group in one request */
	    MEfill(sizeof(DB_OWN_NAME),
		   (u_char)' ',
		   (PTR)&ugtup->dbug_member);
	    rdf_rb->rdr_update_op = RDR_PURGE;

	    call_stat = rdf_call(RDF_UPDATE, (PTR) &rdf_cb);
	    if (call_stat > worst_stat)
		worst_stat = call_stat;
	}
	else
	{
	    /* Delete each listed member individually; keep going past
	    ** warnings, stop this group's loop on anything worse */
	    for (member = (PSY_USR *) psy_cb->psy_usrq.q_next;
		 member != (PSY_USR *) &psy_cb->psy_usrq;
		 member = (PSY_USR *) member->queue.q_next)
	    {
		STRUCT_ASSIGN_MACRO(member->psy_usrnm, ugtup->dbug_member);

		call_stat = rdf_call(RDF_UPDATE, (PTR) &rdf_cb);
		if (call_stat > worst_stat)
		    worst_stat = call_stat;
		if (call_stat > E_DB_INFO)
		    break;
	    }
	}

	/* A hard failure on this group aborts the remaining groups */
	if (DB_FAILURE_MACRO(call_stat))
	    break;
    }

    if (DB_FAILURE_MACRO(worst_stat))
	(VOID) psf_rdf_error(RDF_UPDATE, &rdf_cb.rdf_error,
			     &psy_cb->psy_error);

    return (worst_stat);
}
/*{
** Name: dmve_del_location - The recovery of an delete location operation.
**
** Description:
**	This function performs the recovery of the delete location
**	update operation.  This is to update the config file
**	with any new locations deleted by unextenddb.
**	In the case of UNDO, reads the config file, if update has been
**	made, then it adds it otherwise it just closes and continues.
**	In case of DO, reads the config file, deletes and then closes.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The rename file log record.
**	    .dmve_action	    Should be DMVE_UNDO, DMVE_REDO or DMVE_DO.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	none
**
** History:
**	29-apr-2004 (gorvi01)
**	    Created for UNEXTENDDB.
**	09-Nov-2004 (jenjo02)
**	    Relocated misplaced logging of CLR from DMVE_DO
**	    to DMVE_UNDO
*/
DB_STATUS
dmve_del_location(
	DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		local_status = E_DB_OK;
    i4			error = E_DB_OK, local_error = E_DB_OK;
    DM0L_DEL_LOCATION	*log_rec = (DM0L_DEL_LOCATION *)dmve_cb->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->loc_header.lsn;
    DMP_DCB		*dcb;
    DM0C_CNF		*config = 0;	/* non-zero => cnf needs closing */
    DM0C_CNF		*cnf = 0;
    i4			lock_list;
    DMP_LOC_ENTRY	*l;
    i4			loc_count;
    i4			i;
    i4			recovery_action;
    i4			dm0l_flags;
    DB_ERROR		local_dberr;

    CLRDBERR(&dmve->dmve_error);

    /* One-trip loop: error paths break out to common cleanup below */
    for (;;)
    {
	if (log_rec->loc_header.length != sizeof(DM0L_DEL_LOCATION) ||
	    log_rec->loc_header.type != DM0LDELLOCATION)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	dcb = dmve->dmve_dcb_ptr;
	lock_list = dmve->dmve_lk_id;
	recovery_action = dmve->dmve_action;
	/* A CLR record is always recovered as an UNDO */
	if (log_rec->loc_header.flags & DM0L_CLR)
	    recovery_action = DMVE_UNDO;

	switch (recovery_action)
	{
	case DMVE_REDO:
	    /* Nothing to do for REDO */
	    break;

	case DMVE_DO:
	    /*
	    ** Remove the location entry from the DCB, if it exists.
	    */
	    if (dcb->dcb_ext && dcb->dcb_ext->ext_count)
		loc_count = dcb->dcb_ext->ext_count;
	    else
		loc_count = 0;
	    for (i = 0; i < loc_count; i++)
	    {
		l = &dcb->dcb_ext->ext_entry[i];
		if (MEcmp((char *)&l->logical,
			  (char *)&log_rec->loc_name,
			  sizeof(DB_LOC_NAME)) == 0)
		    break;
	    }
	    if (i >= loc_count)
	    {
		/* No entry found, nothing to remove. */
		;
#ifdef xDEBUG
		TRdisplay(
	    "dmve_del_location: UNDO location '%s' not found in DCB.\n",
		    (char *)&log_rec->loc_name);
#endif
	    }
	    else if (i == (loc_count - 1))
	    {
		/* This is last entry, easy. */
		dcb->dcb_ext->ext_entry[i].phys_length = 0;
		dcb->dcb_ext->ext_count--;
	    }
	    else
	    {
		/* In middle of list, compress. */
		loc_count--;
		MEcopy((char *)&dcb->dcb_ext->ext_entry[i+1].logical,
		       sizeof(DMP_LOC_ENTRY) * (loc_count-i),
		       (char *)&dcb->dcb_ext->ext_entry[i].logical);
		/* Mark the end of list. */
		dcb->dcb_ext->ext_entry[loc_count].phys_length = 0;
		dcb->dcb_ext->ext_count--;
	    }

	    /*
	    ** Open the configuration file.
	    */
	    status = dm0c_open(dcb, DM0C_NOLOCK, lock_list, &cnf,
			       &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = cnf;

	    /*
	    ** Delete this entry from the list.
	    */
	    loc_count = cnf->cnf_dsc->dsc_ext_count;
	    for (i = 0; i < loc_count; i++)
	    {
		l = &cnf->cnf_ext[i].ext_location;
		if (MEcmp((char *)&l->logical,
			  (char *)&log_rec->loc_name,
			  sizeof(DB_LOC_NAME)) == 0)
		    break;
	    }
	    if (i >= loc_count)
	    {
		/* No entry found, nothing to undo. */
		break;
	    }
	    if (i == (loc_count - 1))
	    {
		/* This is last entry, easy. */
		cnf->cnf_ext[i].length = 0;
		cnf->cnf_ext[i].type = 0;
		cnf->cnf_dsc->dsc_ext_count--;
	    }
	    else
	    {
		/* In middle of list, compress. */
		loc_count--;
		MEcopy((char *)&cnf->cnf_ext[i+1].ext_location.logical,
		       sizeof(DMP_LOC_ENTRY)*(loc_count-i),
		       (char *)&cnf->cnf_ext[i].ext_location.logical);
		/* Mark the end of list. */
		cnf->cnf_ext[loc_count].length = 0;
		cnf->cnf_ext[loc_count].type = 0;
		cnf->cnf_dsc->dsc_ext_count--;
	    }

	    /* Close the configuration file. */
	    status = dm0c_close(cnf, DM0C_UPDATE | DM0C_COPY,
				&dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = 0;
	    break;

	case DMVE_UNDO:
	    /*
	    ** Write CLR if necessary
	    */
	    if ((dmve->dmve_logging) &&
		((log_rec->loc_header.flags & DM0L_CLR) == 0))
	    {
		dm0l_flags = log_rec->loc_header.flags | DM0L_CLR;
		status = dm0l_del_location(dmve->dmve_log_id, dm0l_flags,
		    log_rec->loc_type, &log_rec->loc_name,
		    log_rec->loc_l_extent, &log_rec->loc_extent,
		    log_lsn, &dmve->dmve_error);
		if (status != E_DB_OK)
		{
		    /* XXXX Better error message and continue after logging. */
		    TRdisplay(
	"dmve_del_location: dm0l_del_location error, status: %d, error: %d\n",
			status, dmve->dmve_error.err_code);
		    /*
		     * Bug56702: return logfull indication.
		     */
		    dmve->dmve_logfull = dmve->dmve_error.err_code;
		    break;
		}
	    }

	    /* If the location is already present in the DCB, the delete
	    ** was never applied; nothing to undo. */
	    l = dcb->dcb_ext->ext_entry;
	    loc_count = dcb->dcb_ext->ext_count;
	    for (i = 0; i < loc_count; i++, l++)
		if ((MEcmp((char *)&l->logical,
			   (char *)&log_rec->loc_name,
			   sizeof(DB_LOC_NAME)) == 0) &&
		    (l->flags == log_rec->loc_type))
		    break;
	    if (i < loc_count)
	    {
		/* Found this entry, return error. */
		SETDBERR(&dmve->dmve_error, 0, E_DM007E_LOCATION_EXISTS);
		break;
	    }

	    /* Open the configuration file. */
	    status = dm0c_open(dcb, 0, lock_list, &cnf, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = cnf;

	    /* Check if there is room; extend the config file if not. */
	    if (cnf->cnf_free_bytes < sizeof(DM0C_EXT))
	    {
		status = dm0c_extend(cnf, &dmve->dmve_error);
		if (status != E_DB_OK)
		{
		    SETDBERR(&dmve->dmve_error, 0,
			     E_DM0071_LOCATIONS_TOO_MANY);
		    break;
		}
	    }

	    /* Re-append the deleted location to the config extent list
	    ** and NUL-terminate the list with a zeroed entry */
	    i = cnf->cnf_dsc->dsc_ext_count++;
	    cnf->cnf_ext[i].length = sizeof(DM0C_EXT);
	    cnf->cnf_ext[i].type = DM0C_T_EXT;
	    MEcopy((char *)&log_rec->loc_name, sizeof(DB_LOC_NAME),
		   (char *)&cnf->cnf_ext[i].ext_location.logical);
	    MEcopy((char *)&log_rec->loc_extent, sizeof(DM_FILENAME),
		   (char *)&cnf->cnf_ext[i].ext_location.physical);
	    cnf->cnf_ext[i].ext_location.flags = log_rec->loc_type;
	    cnf->cnf_ext[i].ext_location.phys_length = log_rec->loc_l_extent;
	    cnf->cnf_ext[i+1].length = 0;
	    cnf->cnf_ext[i+1].type = 0;

	    /* Add new location info to DCB so RFP will be able to use it. */
	    dcb->dcb_ext->ext_count = cnf->cnf_dsc->dsc_ext_count;
	    STRUCT_ASSIGN_MACRO(cnf->cnf_ext[i].ext_location,
				dcb->dcb_ext->ext_entry[i]);

	    /* Close the configuration file. */
	    status = dm0c_close(cnf, DM0C_UPDATE, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = 0;
	    break;
	} /* end switch. */

	/* If a break above left the config file open, close it without
	** updating (best-effort; its status is ignored) */
	if (config != 0)
	{
	    (void) dm0c_close(cnf, 0, &local_dberr);
	}

	if (status != E_DB_OK)
	{
	    uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG,
		      NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    break;
	}

	return(E_DB_OK);
    } /* end for. */

    /* Map internal errors to the generic location-recovery error */
    if (dmve->dmve_error.err_code > E_DM_INTERNAL)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG,
		  NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	SETDBERR(&dmve->dmve_error, 0, E_DM9617_DMVE_LOCATION);
    }

    return(status);
}
/*{
** Name: ops_deallocate	- deallocate resources for an optimization
**
** Description:
**      This routine will deallocate the resources used for an optimization.
**      Resources include any memory requested from the optimizer memory pool
**      and any RDF cache objects which were locked in the global range
**      table
**
** Inputs:
**      global                          ptr to global state variable
**      report                          TRUE if errors should be reported
**                                      via the user's control block.
**      partial_dbp                     partial deallocation required for
**                                      statement within a procedure
**
** Outputs:
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    memory resources released, RDF unfixed,
**          QSF query tree memory released
**
** History:
**	29-jun-86 (seputis)
**          initial creation
**	8-nov-88 (seputis)
**          if no query run trace point is set then destroy the QP since
**          SCF assumes optimizer cleans up after error
**	8-nov-88 (seputis)
**          turn off CPU accounting if was off originally
**	28-jan-91 (seputis)
**	    added support for OPF ACTIVE flag
**	20-jul-93 (ed)
**	    changed name ops_lock for solaris, due to OS conflict
**	29-jul-93 (andre)
**	    rdr_types_mask must be initialized (to RDR_RELATION) before
**	    calling RDF_UNFIX.  Otherwise RDF may end up complaining because we
**	    ask it to destroy a relation cache entry while RDR_PROCEDURE bit is
**	    set.
**	12-aug-93 (swm)
**	    Cast first parameter of CSaltr_session() to CS_SID to match
**	    revised CL interface specification.
**	02-Jun-1997 (shero03)
**	    Update the saved rdf_info_block after calling RDF.
**	02-Aug-2001 (hanal04) Bug 105360 INGSRV 1505
**	    Plug the RDF memory leak introduced by inkdo01's new
**	    function oph_temphist().
**	17-Dec-2003 (jenjo02)
**	    Added (CS_SID)NULL to CScnd_signal prototype.
**	6-Feb-2006 (kschendel)
**	    Fix some squirrely looking code that purported to avoid dangling
**	    references, but didn't really.  (No symptoms known.)
**	14-nov-2007 (dougi)
**	    Add support for cached dynamic query plans.
**	20-may-2008 (dougi)
**	    Add support for table procedures.
**	29-may-2009 (wanfr01) Bug 122125
**	    Need to add dbid to cache_dynamic queries for db uniqueness
*/
VOID
ops_deallocate(
	OPS_STATE          *global,
	bool		   report,
	bool		   partial_dbp)
{
    DB_STATUS           finalstatus;	/* this status is returned to the user
                                        ** - it will contain the first error
                                        ** during resource deallocation
                                        */
    DB_ERROR            error;          /* error code from offending facility
                                        */

    finalstatus = E_DB_OK;
    error.err_code = 0;

    {	/* close any fixed RDF objects - deallocate prior to closing the
        ** global memory stream */
	OPV_IGVARS             gvar;	    /* index into global range variable
                                            ** table */
	OPV_GRT                *gbase;      /* ptr to base of array of ptrs
                                            ** to global range table elements
                                            */
	OPV_IGVARS	       maxgvar;     /* number of global range table
                                            ** elements allocated
                                            */
	RDF_CB                 *rdfcb;	    /* ptr to rdf control block used
                                            ** unfix the relation info */
	OPV_GBMVARS            *rdfmap;     /* ptr to map of global range
                                            ** variables which have RDF info
                                            ** fixed */

	gbase = global->ops_rangetab.opv_base;
	maxgvar = global->ops_rangetab.opv_gv;
	rdfcb = &global->ops_rangetab.opv_rdfcb;
	rdfmap = &global->ops_rangetab.opv_mrdf;

	/*
	** rdr_types_mask needs to be initialized - since we will be unfixing
	** relation entries, RDR_RELATION seems like a good choice, although 0
	** would suffice as well
	*/
	rdfcb->rdf_rb.rdr_types_mask = RDR_RELATION;

	if (global->ops_cstate.opc_relation)
	{   /* OPC allocates a RDF descriptor for cursors so deallocate
            ** if this is the case */
	    DB_STATUS	   opcrdfstatus; /* RDF return status */

	    rdfcb->rdf_info_blk = global->ops_cstate.opc_relation;
	    opcrdfstatus = rdf_call( RDF_UNFIX, (PTR)rdfcb );
	    /* First-error-wins: only capture the failure if no earlier
	    ** failure has been recorded. */
	    if ( (DB_FAILURE_MACRO(opcrdfstatus))
		&& (DB_SUCCESS_MACRO(finalstatus)) )
	    {
		finalstatus = opcrdfstatus;
		error.err_code = rdfcb->rdf_error.err_code;
	    }
	    global->ops_cstate.opc_relation = NULL;
	}

	if (maxgvar)
	{
	    /* Walk only the bits set in rdfmap - each set bit is a global
	    ** range variable with fixed RDF info. */
	    for ( gvar = -1;
		 (gvar = BTnext((i4)gvar, (char *)rdfmap, (i4)maxgvar)) >= 0;)
	    {
		OPV_GRV	    *gvarp;	    /* ptr to global range variable to
					    ** be deallocated */

		if ((gvarp = gbase->opv_grv[gvar]) /* NULL if not allocated */
		    &&
		    (gvarp->opv_relation)   /* not NULL if RDF has been
					    ** called for this range variable */
		    &&
		    !(gvarp->opv_gmask & OPV_TPROC) /* not table procedure */
		    )
		{	/* if this element has been allocated and if it has an RDF
			** cache element associated with it */
		    DB_STATUS	   rdfstatus; /* RDF return status */

		    gbase->opv_grv[gvar] = NULL; /* so we do not try to deallocate
					    ** twice in case of an error */
		    rdfcb->rdf_info_blk = gvarp->opv_relation;
		    rdfstatus = rdf_call( RDF_UNFIX, (PTR)rdfcb );
		    if ( (DB_FAILURE_MACRO(rdfstatus))
			&& (DB_SUCCESS_MACRO(finalstatus)) )
		    {
			finalstatus = rdfstatus;
			error.err_code = rdfcb->rdf_error.err_code;
		    }
		    gvarp->opv_relation = NULL;
		}
		/* Separately unfix the temp-table histogram model info,
		** if any (Bug 105360 leak fix). */
		if ((gvarp) && (gvarp->opv_ttmodel))
		{	/* if this element has been allocated and if it has an RDF
			** cache element associated with a persistent table
			** which provides histogram models. */
		    DB_STATUS	   rdfstatus; /* RDF return status */

		    rdfcb->rdf_info_blk = gvarp->opv_ttmodel;
		    gvarp->opv_ttmodel = NULL;
		    rdfstatus = rdf_call( RDF_UNFIX, (PTR)rdfcb );
		    if ( (DB_FAILURE_MACRO(rdfstatus))
			&& (DB_SUCCESS_MACRO(finalstatus)) )
		    {
			finalstatus = rdfstatus;
			error.err_code = rdfcb->rdf_error.err_code;
		    }
		}
	    }
	    global->ops_rangetab.opv_gv = 0;
	}
    }

    if (partial_dbp)
	return;				    /* only deallocate the global range table
                                            ** for DBP, and keep the memory streams
                                            ** until the end */

    if (global->ops_estate.opn_statistics
	&&
	global->ops_estate.opn_reset_statistics)
    {	/* statistics CPU accounting was turned on, and needs to be reset */
	STATUS	       cs_status;
	i4        turn_off;

	turn_off = FALSE;		    /* turn off accounting */
	global->ops_estate.opn_statistics = FALSE;
	cs_status = CSaltr_session((CS_SID)0, CS_AS_CPUSTATS, (PTR)&turn_off);
	if (cs_status != OK)
	{
	    finalstatus = E_DB_ERROR;
	    error.err_code = cs_status;
	}
    }

    /* deallocate ULM memory stream */
    if (global->ops_mstate.ops_streamid == NULL) /* non-zero if allocated */
    {	/* check if ULM stream does not exist then this deallocation has
        ** already occurred so just return */
	return;
    }
    else
    {
	DB_STATUS	       ulm1status;/* ULM return status */

	global->ops_mstate.ops_ulmrcb.ulm_streamid_p =
	    &global->ops_mstate.ops_streamid;	/* ulm will NULL ops_streamid */
	ulm1status = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
	if ( (DB_FAILURE_MACRO(ulm1status))
	    && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = ulm1status;
	    error.err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
	}
    }

    /* deallocate ULM temp buffer memory stream */
    if ( global->ops_mstate.ops_tstreamid )	/* non-zero if allocated */
    {
	DB_STATUS	       ulm2status;	/* ULM return status */

	global->ops_mstate.ops_ulmrcb.ulm_streamid_p =
	    &global->ops_mstate.ops_tstreamid;	/* ulm will NULL ops_tstreamid */
	ulm2status = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
	if ( (DB_FAILURE_MACRO(ulm2status))
	    && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = ulm2status;
	    error.err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
	}
    }

    /* deallocate OPC ULM buffer memory stream */
    if ( global->ops_mstate.ops_sstreamid )	/* non-zero if allocated */
    {
	DB_STATUS	       ulm3status;	/* ULM return status */

	global->ops_mstate.ops_ulmrcb.ulm_streamid_p =
	    &global->ops_mstate.ops_sstreamid;	/* ulm will NULL ops_sstreamid */
	ulm3status = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
	if ( (DB_FAILURE_MACRO(ulm3status))
	    && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = ulm3status;
	    error.err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
	}
    }

    if (!report
#ifdef    OPT_F032_NOEXECUTE
	||
	/* if trace flag is set then cleanup QSF memory since optimizer will
        ** generate an error to SCF and SCF assumes optimizer will cleanup */
	(global->ops_cb->ops_check
	&&
	    (opt_strace( global->ops_cb, OPT_F032_NOEXECUTE)
	    ||
	    opt_strace( global->ops_cb, OPT_F023_NOCOMP)
	    )
	)
#endif
	)
    {	/* an error or an asychronous abort has occurred so destroy the plan
        ** or shared plan , FIXME destroy the shared plan in the earlier
        ** exception handler */
	DB_STATUS	       qsfqpstatus; /* QSF return status */

	if(global->ops_qpinit)
	{   /* deallocate QSF object for query plan if another error has occurred
            ** - in this case OPC has already created a new QP handle and has
            ** gotten a lock on it */
	    STRUCT_ASSIGN_MACRO(global->ops_caller_cb->opf_qep,
		global->ops_qsfcb.qsf_obj_id);	/* get query plan id */
	    global->ops_qsfcb.qsf_lk_id = global->ops_qplk_id; /* get lock id for
					    ** QSF */
	    qsfqpstatus = ops_qsfdestroy(global); /* destroy the query plan */
	    if ( (DB_FAILURE_MACRO(qsfqpstatus))
		&& (DB_SUCCESS_MACRO(finalstatus)) )
	    {
		finalstatus = qsfqpstatus;
		error.err_code = global->ops_qsfcb.qsf_error.err_code;
	    }
	}
	else
	{   /* OPC has not been reached so need to check for shared query plan */
	    if (!global->ops_procedure)
	    {	/* get query tree if it has not already been retrieved */
		qsfqpstatus = ops_gqtree(global);
		if ( (DB_FAILURE_MACRO(qsfqpstatus))
		    && (DB_SUCCESS_MACRO(finalstatus)) )
		{
		    finalstatus = qsfqpstatus;
		    error.err_code = global->ops_qsfcb.qsf_error.err_code;
		}
	    }
	    if (global->ops_qheader
		&& (global->ops_qheader->pst_mask1 & PST_RPTQRY))
	    {	/* shared query plan possible */
		if (global->ops_procedure->pst_flags & PST_REPEAT_DYNAMIC)
		{   /* cached-dynamic plans: build the QSF object name from
		    ** the cursor id, the literal "qp", and the db id
		    ** (db id added for Bug 122125 uniqueness). */
		    char	*p;

		    global->ops_qsfcb.qsf_obj_id.qso_lname =
			sizeof(DB_CURSOR_ID) + sizeof(i4);
		    MEfill(sizeof(global->ops_qsfcb.qsf_obj_id.qso_name), 0,
			global->ops_qsfcb.qsf_obj_id.qso_name);
		    MEcopy((PTR)&global->ops_procedure->
			pst_dbpid.db_cursor_id[0],
			sizeof (global->ops_procedure->
			pst_dbpid.db_cursor_id[0]),
			(PTR)global->ops_qsfcb.qsf_obj_id.qso_name);
		    p = (char *) global->ops_qsfcb.qsf_obj_id.qso_name +
			2*sizeof(i4);
		    MEcopy((PTR)"qp", sizeof("qp"), p);
		    p = (char *) global->ops_qsfcb.qsf_obj_id.qso_name +
			sizeof(DB_CURSOR_ID);
		    I4ASSIGN_MACRO(global->ops_caller_cb->opf_udbid,
			*(i4 *) p);
		}
		else	/* must be proc or regular repeat query */
		{
		    global->ops_qsfcb.qsf_obj_id.qso_lname =
			sizeof (global->ops_procedure->pst_dbpid);
		    MEcopy((PTR)&global->ops_procedure->pst_dbpid,
			sizeof (global->ops_procedure->pst_dbpid),
			(PTR)&global->ops_qsfcb.qsf_obj_id.qso_name[0]);
		}
		global->ops_qsfcb.qsf_obj_id.qso_type = QSO_QP_OBJ;
		global->ops_qsfcb.qsf_lk_state = QSO_SHLOCK;
		qsfqpstatus = qsf_call(QSO_GETHANDLE, &global->ops_qsfcb);
		if (DB_SUCCESS_MACRO(qsfqpstatus))
		{
		    qsfqpstatus = ops_qsfdestroy( global );
		    if ( (DB_FAILURE_MACRO(qsfqpstatus))
			&& (DB_SUCCESS_MACRO(finalstatus)) )
		    {
			finalstatus = qsfqpstatus;
			error.err_code = global->ops_qsfcb.qsf_error.err_code;
		    }
		}
		else if (global->ops_qsfcb.qsf_error.err_code
		    != E_QS0019_UNKNOWN_OBJ)
		{   /* if object is not found then this is not a shared query */
		    finalstatus = qsfqpstatus;
		    error.err_code = global->ops_qsfcb.qsf_error.err_code;
		}
	    }
	}
    }

    /* release QSF memory allocated to query tree, make sure that this
    ** is done after the QP has been processed since pst_rptqry is still
    ** needed for above block */
    {
	DB_STATUS	       qsfstatus;   /* QSF return status */

	STRUCT_ASSIGN_MACRO(global->ops_caller_cb->opf_query_tree,
	    global->ops_qsfcb.qsf_obj_id);  /* get query tree id */
	global->ops_qsfcb.qsf_lk_id = global->ops_lk_id; /* get lock id for
					    ** QSF */
	qsfstatus = ops_qsfdestroy( global );
	if ( (DB_FAILURE_MACRO(qsfstatus))
	    && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = qsfstatus;
	    error.err_code = global->ops_qsfcb.qsf_error.err_code;
	}
    }

    /* signal that the session is exiting OPF and that another thread may enter */
    if (global->ops_cb->ops_smask & OPS_MCONDITION)
    {
	DB_STATUS   lockstatus;
	OPG_CB	    *servercb;

	servercb = global->ops_cb->ops_server;
	global->ops_cb->ops_smask &= (~OPS_MCONDITION);
	lockstatus = ops_exlock(global->ops_caller_cb,
	    &servercb->opg_semaphore);	    /* check if server
					    ** thread is available, obtain
					    ** semaphore lock on critical variable */
	servercb->opg_activeuser--;	    /* since exit is about to occur, and memory
					    ** has already been deallocated, allow another
					    ** user to enter OPF */
	servercb->opg_waitinguser--;	    /* since exit is about to occur, and memory
					    ** has already been deallocated, allow another
					    ** user to enter OPF */
	if (DB_FAILURE_MACRO(lockstatus) && (DB_SUCCESS_MACRO(finalstatus)))
	{
	    finalstatus = lockstatus;
	    error.err_code = global->ops_caller_cb->opf_errorblock.err_data;
	}
	else
	{
	    if (servercb->opg_waitinguser > servercb->opg_activeuser)
	    {	/* signal only if some users are waiting */
		STATUS     csstatus;

		csstatus = CScnd_signal(&servercb->opg_condition,
		    (CS_SID)NULL);
		if ((csstatus != OK) && (DB_SUCCESS_MACRO(finalstatus)))
		{
		    finalstatus = E_DB_ERROR;
		    error.err_code = csstatus;
		}
	    }
	    lockstatus = ops_unlock(global->ops_caller_cb,
		&servercb->opg_semaphore);  /* check if server
					    ** thread is available */
	    if (DB_FAILURE_MACRO(lockstatus) && (DB_SUCCESS_MACRO(finalstatus)))
	    {
		finalstatus = lockstatus;
		error.err_code =
		    global->ops_caller_cb->opf_errorblock.err_data;
	    }
	}
    }

    if (DB_FAILURE_MACRO(finalstatus))
    {
	if (report)
	    opx_verror( finalstatus, E_OP0084_DEALLOCATION,
		error.err_code);	    /* report error and generate an
					    ** exception */
	else
	    opx_rverror(global->ops_cb->ops_callercb, finalstatus,
		E_OP0084_DEALLOCATION, error.err_code);
					    /* report error only but do not generate an
					    ** exception */
    }
}
/*
** NOTE: in SQL grammar target_list of a subselect is processed BEFORE the
** from_list; consequently, data types of target list elements are not
** known when we build RESDOM nodes for the target list elements of form
** [<corr_name>.]<col_name>.  In psl_p_tlist(), we revisit the prototype
** tree and fill in the newly available information (type, length,
** precision, etc.)
**
** When making changes to pst_adresdom(), please take time to understand
** the effect these changes may have on the processing of prototype trees.
*/
/*
** pst_adresdom - add a PST_RESDOM (result domain) node to a query tree.
**
** Builds a RESDOM node for column 'attname', taking the column
** description from a source that depends on psq_cb->psq_mode:
**   PSQ_APPEND/PSQ_PROT - result range table entry,
**   PSQ_REPLACE         - the user range table's result entry,
**   PSQ_REPCURS         - the cursor control block's column list,
**   otherwise           - the right child's resolved data type.
** On success *newnode is the new node and cb->pss_tlist remembers it.
** Returns E_DB_OK or E_DB_ERROR (user error already reported via
** psf_error).
*/
DB_STATUS
pst_adresdom(
	char               *attname,
	PST_QNODE	   *left,
	PST_QNODE	   *right,
	PSS_SESBLK	   *cb,
	PSQ_CB		   *psq_cb,
	PST_QNODE	   **newnode)
{
    DB_STATUS           status;
    DMT_ATT_ENTRY	*coldesc;	/* points at the column description
					** chosen below; may point at the
					** local 'column' scratch entry */
    DMT_ATT_ENTRY	column;
    PSS_RNGTAB		*resrange;
    char		colname[sizeof(DB_ATT_NAME) + 1]; /* null term. */
    PST_RSDM_NODE	resdom;
    i4		err_code;
    PSC_RESCOL		*rescol;
    ADF_CB		*adf_scb;
    i2			null_adjust = 0;
    i4			temp_collID;

    /* Convert column name to a null-terminated string. */
    (VOID) MEcopy((PTR) attname, sizeof(DB_ATT_NAME), (PTR) colname);
    colname[sizeof(DB_ATT_NAME)] = '\0';
    (VOID) STtrmwhite(colname);

    /* For these operations, the result domain comes from the result table */
    if (psq_cb->psq_mode == PSQ_APPEND || psq_cb->psq_mode == PSQ_PROT)
    {
	/* Get the result range variable */
	if (psq_cb->psq_qlang == DB_SQL)
	{
	    resrange = &cb->pss_auxrng.pss_rsrng;
	}
	else
	{
	    resrange = &cb->pss_usrrange.pss_rsrng;
	}

	/* "tid" result column not allowed with these operations.
	** Compare against "TID" when regular identifiers are
	** upper-cased in this session (CUI_ID_REG_U). */
	if (!STcasecmp(((*cb->pss_dbxlate & CUI_ID_REG_U) ?
			"TID" : "tid"), colname ))
	{
	    psf_error(2100L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 4,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(sizeof(DB_TAB_NAME),
		    (char *) &resrange->pss_tabname),
		&resrange->pss_tabname,
		psf_trmwhite(sizeof(DB_OWN_NAME),
		    (char *) &resrange->pss_ownname),
		&resrange->pss_ownname,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}

	/* Get the column description */
	coldesc = pst_coldesc(resrange, (DB_ATT_NAME *) attname);
	if (coldesc == (DMT_ATT_ENTRY *) NULL)
	{
	    psf_error(2100L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 4,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(sizeof(DB_TAB_NAME),
		    (char *) &resrange->pss_tabname),
		&resrange->pss_tabname,
		psf_trmwhite(sizeof(DB_OWN_NAME),
		    (char *) &resrange->pss_ownname),
		&resrange->pss_ownname,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}

	/* System-maintained columns (e.g. logical keys) may not be
	** assigned to by the user. */
	if (coldesc->att_flags & DMU_F_SYS_MAINTAINED)
	{
	    psf_error(E_US1900_6400_UPD_LOGKEY, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 4,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(sizeof(DB_TAB_NAME),
		    (char *) &resrange->pss_tabname),
		&resrange->pss_tabname,
		psf_trmwhite(sizeof(DB_OWN_NAME),
		    (char *) &resrange->pss_ownname),
		&resrange->pss_ownname,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}
    }
    else if (psq_cb->psq_mode == PSQ_REPLACE)
    {
	/*
	** For the "replace" command, use the result range variable that's
	** in the normal user range table, not the special slot that's
	** reserved for the result table in the append command.
	*/

	/* Get the result range variable */
	resrange = cb->pss_resrng;

	/* "tid" result column not allowed with these operations */
	if (!STcasecmp(((*cb->pss_dbxlate & CUI_ID_REG_U) ?
			"TID" : "tid"), colname))
	{
	    psf_error(2100L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 4,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(sizeof(DB_TAB_NAME),
		    (char *) &resrange->pss_tabname),
		&resrange->pss_tabname,
		psf_trmwhite(sizeof(DB_OWN_NAME),
		    (char *) &resrange->pss_ownname),
		&resrange->pss_ownname,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}

	/* Get the column description */
	coldesc = pst_coldesc(resrange, (DB_ATT_NAME *) attname);
	if (coldesc == (DMT_ATT_ENTRY *) NULL)
	{
	    psf_error(2100L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 4,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(sizeof(DB_TAB_NAME),
		    (char *) &resrange->pss_tabname),
		&resrange->pss_tabname,
		psf_trmwhite(sizeof(DB_OWN_NAME),
		    (char *) &resrange->pss_ownname),
		&resrange->pss_ownname,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}

	if (coldesc->att_flags & DMU_F_SYS_MAINTAINED)
	{
	    psf_error(E_US1900_6400_UPD_LOGKEY, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 4,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(sizeof(DB_TAB_NAME),
		    (char *) &resrange->pss_tabname),
		&resrange->pss_tabname,
		psf_trmwhite(sizeof(DB_OWN_NAME),
		    (char *) &resrange->pss_ownname),
		&resrange->pss_ownname,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}
    }
    else if (psq_cb->psq_mode == PSQ_REPCURS)
    {
	/*
	** For the "replace cursor" command, the info comes from the cursor
	** control block.  Cursor column list and update map should always
	** specify same column set, so the second if statemnt (BTtest) could,
	** perhaps, be removed.
	*/
	rescol = psq_ccol(cb->pss_crsr, (DB_ATT_NAME *) attname);
	if (rescol == (PSC_RESCOL *) NULL)
	{
	    psf_error(2207L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 3,
		sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(DB_CURSOR_MAXNAME,
		    cb->pss_crsr->psc_blkid.db_cur_name),
		cb->pss_crsr->psc_blkid.db_cur_name,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}

	/* Make sure the column was declared "for update" */
	if (!BTtest((i4) rescol->psc_attid.db_att_id,
	    (char *) &cb->pss_crsr->psc_updmap))
	{
	    psf_error(2207L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 3,
		sizeof(cb->pss_lineno), &cb->pss_lineno,
		psf_trmwhite(DB_CURSOR_MAXNAME,
		    cb->pss_crsr->psc_blkid.db_cur_name),
		cb->pss_crsr->psc_blkid.db_cur_name,
		psf_trmwhite(sizeof(DB_ATT_NAME), attname), attname);
	    return (E_DB_ERROR);
	}

	/* Set up column descriptor in the local scratch entry. */
	coldesc = &column;
	MEcopy((char *) attname, sizeof(DB_ATT_NAME),
	    (char *) &coldesc->att_name);

#ifdef NO
	/*
	** Count columns.  Give error if too many.  One extra for tid.
	*/
	cb->pss_rsdmno++;
	if (cb->pss_rsdmno > (DB_MAX_COLS + 1))
	{
	    psf_error(2130L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 1,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno);
	    return (E_DB_ERROR);
	}
	coldesc->att_number = cb->pss_rsdmno;
#endif
	coldesc->att_number = rescol->psc_attid.db_att_id;
	coldesc->att_type = rescol->psc_type;
	coldesc->att_width = rescol->psc_len;
	coldesc->att_prec = rescol->psc_prec;
	coldesc->att_collID = -1;
	coldesc->att_geomtype = -1;
	coldesc->att_srid = -1;
	coldesc->att_encflags = 0;
	coldesc->att_encwid = 0;
    }
    else
    {
	/*
	** In all other cases, just take the datatype info
	** from the right child.
	*/
	coldesc = &column;
	MEcopy((char *) attname, sizeof(DB_ATT_NAME),
	    (char *) &coldesc->att_name);

	/*
	** Count columns.  Give error if too many.  One extra for tid.
	*/
	cb->pss_rsdmno++;
	if (cb->pss_rsdmno > (DB_MAX_COLS + 1))
	{
	    psf_error(2130L, 0L, PSF_USERERR, &err_code,
		&psq_cb->psq_error, 1,
		(i4) sizeof(cb->pss_lineno), &cb->pss_lineno);
	    return (E_DB_ERROR);
	}
	coldesc->att_number = cb->pss_rsdmno;

	status = pst_rsdm_dt_resolve(right, coldesc, cb, psq_cb);
	if (DB_FAILURE_MACRO(status))
	    return(status);
    }

    /* Copy attribute information into PST_RSDM_NODE */
    resdom.pst_rsno = coldesc->att_number;
    /* The two fields below are initialized for a common case.
    ** They are context sensitive and in many cases may have to be
    ** modified by the caller of this routine.
    */
    resdom.pst_ntargno = resdom.pst_rsno;
    resdom.pst_ttargtype = PST_USER;
    resdom.pst_dmuflags = 0;

    /* Don't bother with the conversion id for now */

    /* Not for update until we know otherwise */
    resdom.pst_rsupdt = FALSE;
    resdom.pst_rsflags = PST_RS_PRINT;
    MEcopy((char *) &coldesc->att_name, sizeof(DB_ATT_NAME),
	(char *) resdom.pst_rsname);
    temp_collID = coldesc->att_collID;

    /* If client can not handle i8 INTs downgrade to i4.
    ** A negative att_type denotes a nullable type (one extra byte);
    ** null_adjust compensates for that byte in the width test. */
    adf_scb = (ADF_CB *) cb->pss_adfcb;
    if ( !(adf_scb->adf_proto_level & AD_I8_PROTO) &&
	 (abs(coldesc->att_type) == DB_INT_TYPE) )
    {
	if(coldesc->att_type < 0)
	{
	    null_adjust = 1;
	}

	if((coldesc->att_width - null_adjust) == sizeof(i8))
	{
	    coldesc->att_width -= sizeof(i4);
	}
    }

    /* Now allocate the node */
    status = pst_node(cb, &cb->pss_ostream, left, right, PST_RESDOM,
	(char *) &resdom, sizeof(PST_RSDM_NODE),
	(DB_DT_ID) coldesc->att_type, (i2) coldesc->att_prec,
	(i4) coldesc->att_width, (DB_ANYTYPE *) NULL, newnode,
	&psq_cb->psq_error, (i4) 0);
    if (status != E_DB_OK)
    {
	return (status);
    }
    (*newnode)->pst_sym.pst_dataval.db_collID = temp_collID;

    /* Remember the last result domain produced */
    cb->pss_tlist = *newnode;

    return (E_DB_OK);
}
/*
** qeu_d6_cre_view - register a distributed (STAR) view in the CDB catalogs.
**
** Validates the QEUQ_CB input, builds the DDL/link control information,
** registers read then update intentions with TPF against the CDB, and
** updates the IIDD_* catalogs (object base, objects, tables, columns,
** views, tree, dbdepends) inside a transaction.  If this routine started
** the transaction (xact_b) it commits/aborts it; otherwise errors mark
** qef_abort so the caller's transaction is aborted.
**
** Inputs:
**	qef_cb		QEF session control block.
**	i_quq_p		QEUQ_CB describing the view (query text, tree,
**			column count, DMU_CB with table name/owner).
** Outputs:
**	i_quq_p->error	reason for failure.
** Returns:
**	E_DB_OK / E_DB_ERROR (or status from transaction routines).
*/
DB_STATUS
qeu_d6_cre_view(
QEF_CB       *qef_cb,
QEUQ_CB	     *i_quq_p)
{
    DB_STATUS	    status, sav_status = E_DB_OK;
    DB_ERROR	    sav_error;
    QEF_RCB	    qer, *qer_p = & qer;
    QES_DDB_SES	    *dds_p = & qef_cb->qef_c2_ddb_ses;
    QED_DDL_INFO    *ddl_p = & qer_p->qef_r3_ddb_req.qer_d7_ddl_info;
    DD_LDB_DESC	    *cdb_p = & dds_p->qes_d4_ddb_p->
			dd_d3_cdb_info.dd_i1_ldb_desc;
    QEC_L16_TABLES  tables;		/* used for IITABLES-style info */
    QEC_D6_OBJECTS  objects;		/* used for object info */
    QEC_LINK	    link,		/* used as global control block */
		    *lnk_p = & link;
    QEQ_1CAN_QRY    ins, sel, upd;	/* used for ordering canned query */
    RQB_BIND	    rq_bind[QEC_CAT_COL_COUNT_MAX + 1];
    QEP_PTR_UNION   ptr_u;
    DMU_CB	    *dmu_p;
    SYSTIME	    now;
    bool	    xact_b = FALSE;	/* assume no begin transaction */
    i4		    xact_mode;

    ptr_u.qep_ptr_u.qep_1_ptr = i_quq_p->qeuq_dmf_cb;
    dmu_p = ptr_u.qep_ptr_u.qep_3_dmu_cb_p;

    /* 1.  check input information */

    if (i_quq_p->qeuq_type != QEUQCB_CB
	|| i_quq_p->qeuq_length != sizeof(*i_quq_p))
    {
	i_quq_p->error.err_code = E_QE0017_BAD_CB;
	return(E_DB_ERROR);
    }
    /* All of: query text tuples, tree tuples, db id, session id and
    ** the DMF control block must be supplied. */
    if ((i_quq_p->qeuq_cq == 0 || i_quq_p->qeuq_qry_tup == 0)
	||
	(i_quq_p->qeuq_ct == 0 || i_quq_p->qeuq_tre_tup == 0)
	||
	(i_quq_p->qeuq_db_id == 0)
	||
	(i_quq_p->qeuq_d_id == 0)
	||
	(i_quq_p->qeuq_dmf_cb == 0))
    {
	i_quq_p->error.err_code = E_QE0018_BAD_PARAM_IN_CB;
	return(E_DB_ERROR);
    }

    /* 2.  set up control information */

    MEfill(sizeof(qer), '\0', (PTR) & qer);
    qer_p->qef_cb = qef_cb;
    qer_p->error.err_code = 0;
    qer_p->qef_r3_ddb_req.qer_d13_ctl_info = QEF_00DD_NIL_INFO;
    qef_cb->qef_rcb = qer_p;

    /* Object name/owner come from the DMU control block. */
    MEcopy(dmu_p->dmu_table_name.db_tab_name,
	sizeof(ddl_p->qed_d1_obj_name), ddl_p->qed_d1_obj_name);
    MEcopy(dmu_p->dmu_owner.db_own_name,
	sizeof(ddl_p->qed_d2_obj_owner), ddl_p->qed_d2_obj_owner);
    ddl_p->qed_d3_col_count = i_quq_p->qeuq_ano;
    ddl_p->qed_d4_ddb_cols_pp = NULL;
    ddl_p->qed_d5_qry_info_p = NULL;
    ddl_p->qed_d6_tab_info_p = NULL;
    ddl_p->qed_d7_obj_id.db_tab_base = 0;
    ddl_p->qed_d7_obj_id.db_tab_index = 0;
    ddl_p->qed_d9_reg_info_p = NULL;

    sel.qeq_c2_rqf_bind_p = rq_bind;		/* must set up */
    ins.qeq_c2_rqf_bind_p = (RQB_BIND *) NULL;
    upd.qeq_c2_rqf_bind_p = (RQB_BIND *) NULL;

    /* Initialize the QEC_LINK "global" control block used by the
    ** qeu_1x catalog-update helpers below. */
    lnk_p->qec_1_ddl_info_p = ddl_p;
    lnk_p->qec_2_tableinfo_p = NULL;
    lnk_p->qec_3_ldb_id = 0;
    lnk_p->qec_4_col_cnt = 0;
    MEfill(DB_DB_MAXNAME, ' ', lnk_p->qec_5_ldb_alias);
    lnk_p->qec_6_select_p = & sel;
    lnk_p->qec_7_ldbids_p = NULL;
    lnk_p->qec_8_longnames_p = NULL;
    lnk_p->qec_9_tables_p = & tables;
    lnk_p->qec_10_haves = QEC_07_CREATE;
    lnk_p->qec_11_ldb_obj_cnt = 0;
    lnk_p->qec_12_indexes_p = NULL;
    lnk_p->qec_13_objects_p = & objects;
    lnk_p->qec_15_ndx_cnt = 0;
    lnk_p->qec_16_ndx_ids_p = NULL;
    lnk_p->qec_19_ldb_p = NULL;
    lnk_p->qec_20_rqf_bind_p = rq_bind;
    lnk_p->qec_21_delete_p = NULL;
    lnk_p->qec_22_insert_p = & ins;
    lnk_p->qec_23_update_p = & upd;

    status = qed_u8_gmt_now(qer_p, lnk_p->qec_24_cur_time);
    if (status)
    {
	STRUCT_ASSIGN_MACRO(qer_p->error, i_quq_p->error);
	return(status);
    }
    lnk_p->qec_24_cur_time[DD_25_DATE_SIZE] = EOS;
    lnk_p->qec_25_pre_mins_p = NULL;
    lnk_p->qec_26_aft_mins_p = NULL;
    TMnow(& now);
    lnk_p->qec_17_ts1 = now.TM_secs;
    lnk_p->qec_18_ts2 = now.TM_msecs;

    /* transaction semantics
    **
    **	1.  pre-processing:
    **
    **	    1.1  if no outstanding transaction, begin a transaction
    **		 and set xact_b to TRUE;
    **	    1.2  if not in auto-commit mode, escalate transaction to MST;
    **
    **	2.  post-processing:
    **
    **	    2.1  if processing terminates normally, commit transaction
    **		 only if xact_b is TRUE and not in auto-commit mode;
    **	    2.2  if processing terminates abnormally, abort transaction
    **		 only if xact_b is TRUE;
    */

    /* 2.  begin a transaction if necessary */

    if (qef_cb->qef_stat == QEF_NOTRAN)
    {
	status = qet_begin(qef_cb);
	if (status)
	    return(status);
	xact_b = TRUE;
    }

    if (qef_cb->qef_auto == QEF_OFF)
	qef_cb->qef_stat = QEF_MSTRAN;	    /* escalate to MST */

    /* 3.  inform TPF of read intention on CDB */

    status = qet_t5_register(qer_p, cdb_p, DB_SQL, QEK_2TPF_READ);
    if (status)
    {
	if (! xact_b)
	{
	    STRUCT_ASSIGN_MACRO(qer_p->error, i_quq_p->error);
	    return(status);		    /* ok to return */
	}
	/* fall thru to terminate SST */
    }

    if (status == E_DB_OK)
    {
	/* 4.  set up new object base, query id */

	status = qeu_10_get_obj_base(qer_p, lnk_p);
	if (status)
	{
	    if (! xact_b)
	    {
		STRUCT_ASSIGN_MACRO(qer_p->error, i_quq_p->error);
		return(status);		    /* ok to return */
	    }
	    /* fall thru to terminate SST */
	}
	else
	{
	    /* Query id is derived from the current time. */
	    objects.d6_4_qry_id.db_qry_high_time = now.TM_secs;
	    objects.d6_4_qry_id.db_qry_low_time = now.TM_msecs;
	}
    }

    if (status == E_DB_OK)
    {
	/* 5.  inform TPF of update intention on CDB */
	/* 5.1  2PC is required if DDL concurrency is off. */

	if (dds_p->qes_d9_ctl_info & QES_01CTL_DDL_CONCURRENCY_ON)
	    xact_mode = QEK_4TPF_1PC;
	else
	    xact_mode = QEK_3TPF_UPDATE;

	status = qet_t5_register(qer_p, cdb_p, DB_SQL, xact_mode);
	if (status)
	{
	    if (! xact_b)
	    {
		STRUCT_ASSIGN_MACRO(qer_p->error, i_quq_p->error);
		return(status);		    /* ok to return */
	    }
	    /* fall thru to terminate SST */
	}
    }

    /* Steps 6-12: catalog updates.  From here on a failure sets
    ** qef_abort so the transaction is rolled back below. */

    if (status == E_DB_OK)
    {
	/* 6.  update IIDD_DDB_OBJECT_BASE */

	status = qeu_11_object_base(qer_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    if (status == E_DB_OK)
    {
	/* 7.  update IIDD_DDB_OBJECTS */

	status = qeu_12_objects(qer_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    if (status == E_DB_OK)
    {
	/* 8.  update IIDD_IITABLES */

	status = qeu_13_tables(qer_p, i_quq_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    if (status == E_DB_OK)
    {
	/* 9.  update IIDD_COLUMNS */

	status = qeu_14_columns(qer_p, i_quq_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    if (status == E_DB_OK)
    {
	/* 10.  update IIDD_VIEWS */

	status = qeu_15_views(qer_p, i_quq_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    if (status == E_DB_OK)
    {
	/* 11.  update IIDD_DDB_TREE */

	status = qeu_16_tree(qer_p, i_quq_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    if (status == E_DB_OK)
    {
	/* 12.  update IIDD_DDB_DDB_DBDEPENDS */

	status = qeu_17_dbdepends(qer_p, i_quq_p, lnk_p);
	if (status)
	    qef_cb->qef_abort = TRUE;
    }

    /* 13.  end transaction if necessary */

    if (status)
    {
	/* error condition: remember the first error before the abort
	** call can overwrite it */
	sav_status = status;
	STRUCT_ASSIGN_MACRO(qer_p->error, sav_error);

	if (xact_b || qef_cb->qef_abort)    /* SST or abort */
	    status = qet_abort(qef_cb);
    }
    else
    {
	/* ok condition */
	if (xact_b && qef_cb->qef_auto == QEF_ON)
	    status = qet_commit(qef_cb);    /* commit SST */
	else
	{
	    /* send message to commit CDB association to avoid deadlocks */
	    status = qed_u11_ddl_commit(qef_cb->qef_rcb);
	}
    }

    if (sav_status)
    {
	/* returned saved error */
	STRUCT_ASSIGN_MACRO(sav_error, i_quq_p->error);
	return(sav_status);
    }
    else if (status)
    {
	/* return qef_cb error */
	STRUCT_ASSIGN_MACRO(qef_cb->qef_rcb->error, i_quq_p->error);
	return(status);
    }

    return(E_DB_OK);
}
/*{ ** Name: dmr_get - Get a record. ** ** INTERNAL DMF call format: status = dmr_get(&dmr_cb); ** ** EXTERNAL call format: status = dmf_call(DMR_GET,&dmr_cb); ** ** Description: ** This function gets a record from a table. It can either get a record ** by tuple identifier, re-get the last record returned, or get the ** next record that passes the qualification specified by the dmr_position ** operation. If no morerecords meet the qualification the caller is ** returned a error indicating no next record. ** ** As a special case for aggregate optimization, this function may ** be called to obtain the count of records in the table, which prior ** to this change required getting all records with multiple calls ** to this function. ** ** Note: When a B1 secure server is running, this interface only ** returns records that pass the B1 MAC assurances. Namely, only ** records with a security label that is dominated by the security ** label of the requester are returned. ** ** Inputs: ** dmr_cb ** .type Must be set to DMR_RECORD_CB. ** .length Must be at least ** sizeof(DMR_RECORD_CB) bytes. ** .dmr_flags_mask Must be DMR_NEXT, DMR_PREV, ** DMR_CURRENT_POS or DMR_BY_TID. ** .dmr_access_id Record access identifer returned ** from DMT_OPEN that identifies a ** table. ** .dmr_tid If dmr_flags_mask = DMR_BY_TID, then ** field is used as a tuple identifer. ** .dmr_data.data_address Pointer to area to return the ** requested record. ** .dmr_data.data_in_size Size of area for record. ** ** Outputs: ** dmr_cb ** .dmr_tid The tuple identifier of the record ** being returned. ** .dmr_data.data_address The record is stored here. ** .dmr_data.data_out_size The size of the returned record. ** .error.err_code One of the following error numbers. 
** E_DM0000_OK ** E_DM000B_BAD_CB_LENGTH ** E_DM000C_BAD_CB_TYPE ** E_DM000F_BAD_DB_ACCESS_MODE ** E_DM0011_BAD_DB_NAME ** E_DM001A_BAD_FLAG ** E_DM001D_BAD_LOCATION_NAME ** E_DM002B_BAD_RECORD_ID ** E_DM003C_BAD_TID ** E_DM0042_DEADLOCK ** E_DM0044_DELETED_TID ** E_DM0047_UPDATED_TUPLE ** E_DM004A_INTERNAL_ERROR ** E_DM004B_LOCK_QUOTA_EXCEEDED ** E_DM004D_LOCK_TIMER_EXPIRED ** E_DM0055_NONEXT ** E_DM0065_USER_INTR ** E_DM0064_USER_ABORT ** E_DM0073_RECORD_ACCESS_CONFLICT ** E_DM0074_NOT_POSITIONED ** E_DM008A_ERROR_GETTING_RECORD ** E_DM0100_DB_INCONSISTENT ** E_DM010C_TRAN_ABORTED ** E_DM0112_RESOURCE_QUOTA_EXCEEDED ** E_DM006E_NON_BTREE_GETPREV ** ** Returns: ** E_DB_OK Function completed normally. ** E_DB_WARN Function completed normally with a ** termination status which is in ** dmr_cb.err_code. ** E_DB_ERROR Function completed abnormally ** with a ** termination status which is in ** dmr_cb.err_code. ** E_DB_FATAL Function completed with a fatal ** error which must be handled ** immediately. The fatal status is in ** dmr_cb.err_code. ** History: ** 01-sep-85 (jennifer) ** Created new for jupiter. ** 17-dec-1985 (derek) ** Completed code. ** 28-jul-1989 (mikem) ** Added logging of database, table, and owner when we get an internal ** error. ** 15-aug-1989 (rogerk) ** Added support for Non-SQL Gateway. If getting record from a ** gateway secondary index, then make sure that record buffer is ** large enough to hold a record from the base table, as that is ** what the gateway returns. This is somewhat hokey, and would be ** better if the secondary index could actually describe the records ** being returned back, but... ** 15-oct-90 (linda) ** Integrate bug fix for gateway secondary index support: perform ** sanity check on table width *after* switching tcb's. ** 11-feb-1991 (linda) ** Check for dmr_char->char_id == DMR_TIDJOIN, if it does then set ** rcb->rcb_tidjoin = RCB_TIDJOIN. Part of gateway secondary index ** support. 
** 22-apr-92 (schang) ** GW merge ** 30-apr-1991 (rickh) ** Removed the 11-feb-1991 tidjoin logic. Let stand the change in ** where table width calculation occurs. ** 22-jul-1991 (rickh) ** And now remove the table width calculation change that went in ** with the 11-feb-1991 tidjoin logic. ** 28-may-1993 (robf) ** Secure 2.0: Reworked old ORANGE code. ** 23-aug-1993 (bryanp) ** Fix a few cut-and-paste errors in some error message parameters. ** 31-jan-1994 (bryanp) B58487 ** Handle failures in both dm2r_get and dm2r_unfix_pages. ** 30-aug-1994 (cohmi01) ** Add DMR_PREV support for FASTPATH rel. Error if not btree. ** 22-may-1995 (cohmi01) ** Add support for count-only, for aggregate optimisation. ** 21-aug-1995 (cohmi01) ** count-only aggregate code moved to dml!dmragg.c ** 22-nov-96 (stial01,dilma04) ** Row Locking Project: ** Unfix all pages before leaving DMF if row locking. ** 14-may-97 (dilma04) ** Cursor Stability Project: ** - upgrade isolation level from CS to RR, if DMR_SORT flag is set; ** - if isolation level is CS or RR, set RCB_CSRR_LOCK locking mode ** for the time of dm2r_get() call. ** 21-may-97 (stial01) ** Row locking: No more LK_PH_PAGE locks, so page(s) can stay fixed. ** 19-dec-97 (inkdo01) ** Changes for sorts which do NOT materialize results in temp tables. ** get is now directed straight to DMF sorter. ** 08-oct-98 (stial01) ** Deallocate load context after all records read from DMF sorter. ** 09-dec-98 (stial01) ** DMR_PKEY_PROJECTION: check for relspec BTREE not table_type which ** may be GATEWAY ** 11-aug-2003 (chash01) ** For RMS Gateway index table, add specific test to make sure ** dmr->dmr_data.data_in_size + sizeof(DM_TID) ** is no more than the value in table_width ** 12-Feb-2004 (schka24) ** Defend against someone doing a get on a partitioned master. ** 03-Nov-2004 (jenjo02) ** Relocated CSswitch from dmf_call to here; don't waste the ** call if Factotum thread. 
** 11-Nov-2005 (jenjo02) ** Replaced dmx_show() with the more robust ** dmxCheckForInterrupt() to standardize external ** interrupt handling. ** 11-Sep-2006 (jonj) ** Don't dmxCheckForInterrupt if extended table as txn is ** likely in a recursive call and not at an atomic ** point in execution as required for LOGFULL_COMMIT. ** 13-Feb-2007 (kschendel) ** Replace CSswitch with cancel check. ** 11-Apr-2008 (kschendel) ** Roll arithmetic exceptions into caller specified ADFCB. ** This is part of getting DMF qual context out of QEF. ** 15-Jan-2010 (jonj) ** SIR 121619 MVCC: Don't change isolation level if crow_locking() ** 03-Mar-2010 (jonj) ** SIR 121619 MVCC, blob support: ** Set rcb_dmr_opcode here; dmpe bypasses dmf_call, ** which used to set it. */ DB_STATUS dmr_get( DMR_CB *dmr_cb) { DMR_CB *dmr = dmr_cb; DMP_RCB *rcb; DMP_TCB *tcb; DML_XCB *xcb; i4 flag; i4 table_width; DB_STATUS status, local_status; i4 error, local_error; DB_ERROR local_dberr; CLRDBERR(&dmr->error); for (status = E_DB_ERROR;;) { rcb = (DMP_RCB *)dmr->dmr_access_id; if (dm0m_check((DM_OBJECT *)rcb, (i4)RCB_CB) == E_DB_OK) { if (rcb == NULL) { uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, sizeof("record")-1, "record"); SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER); break; } rcb->rcb_dmr_opcode = DMR_GET; tcb = rcb->rcb_tcb_ptr; if (tcb == NULL) { uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, sizeof("table")-1, "table"); SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER); break; } if (tcb->tcb_rel.relstat & TCB_IS_PARTITIONED) { uleFormat(&dmr->error, E_DM0022_BAD_MASTER_OP, NULL, ULE_LOG, NULL, NULL, 0, NULL, &error, 3, 0, "dmrget", sizeof(DB_OWN_NAME),tcb->tcb_rel.relowner.db_own_name, sizeof(DB_TAB_NAME),tcb->tcb_rel.relid.db_tab_name); break; } xcb = rcb->rcb_xcb_ptr; if (xcb == NULL) { uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, sizeof("transaction")-1, "transaction"); SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER); break; } if (dmr->dmr_flags_mask & DMR_NEXT) if (dmr->dmr_flags_mask & DMR_SORTGET) ; else flag = DM2R_GETNEXT; else if (dmr->dmr_flags_mask & DMR_BY_TID) flag = DM2R_BYTID; else if (dmr->dmr_flags_mask & DMR_CURRENT_POS) { /* flag = DM2R_BYPOSITION; */ flag = DM2R_BYTID; dmr->dmr_tid = rcb->rcb_currenttid.tid_i4; } else if (dmr->dmr_flags_mask & DMR_PREV) { flag = DM2R_GETPREV; if (dmr->dmr_flags_mask & DMR_RAAT) flag |= DM2R_RAAT; if (tcb->tcb_table_type != TCB_BTREE) { SETDBERR(&dmr->error, 0, E_DM006E_NON_BTREE_GETPREV); break; } } else { SETDBERR(&dmr->error, 0, E_DM001A_BAD_FLAG); break; } /* Check for btree primary key projection */ if (dmr->dmr_flags_mask & DMR_PKEY_PROJECTION) { if ((tcb->tcb_rel.relspec == TCB_BTREE) && ((tcb->tcb_rel.relstat & TCB_INDEX) == 0) && (flag == DM2R_GETNEXT || flag == DM2R_GETPREV)) flag |= DM2R_PKEY_PROJ; else { SETDBERR(&dmr->error, 0, E_DM001A_BAD_FLAG); break; } } if (xcb->xcb_scb_ptr == NULL ) { uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, sizeof("session")-1, "session"); SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER); break; } /* Check for external interrupts */ if ( xcb->xcb_scb_ptr->scb_ui_state && !tcb->tcb_extended ) dmxCheckForInterrupt(xcb, &error); if (xcb->xcb_state == 0) { /* ** Make sure the caller's buffer is large enough to hold ** a record from this table. ** ** Note that for Gateway secondary index's, retrieves done ** will actually return records from the base table!!!! So ** if this is a get on a gateway 2nd index, make sure the ** buffer is large enough. ** Aug-4-2003 (chash01) The value relwid is sizeof(DM_TID) + ** sizeof(index column's length)., but the value in ** data_in_size is the size of base table columns. 
This leads ** to serious problem (looping) in DMFCALL() if the size of ** base table columns is less than the value in relwid. This ** RMS gateway specific problem will be tested specifically. */ table_width = tcb->tcb_rel.relwid; if ( (dmr->dmr_data.data_address) && ( (tcb->tcb_rel.relgwid != GW_RMS && dmr->dmr_data.data_in_size >= table_width) || ( tcb->tcb_rel.relgwid == GW_RMS && dmr->dmr_data.data_in_size + sizeof(DM_TID) >= table_width) ) ) { dmr->dmr_data.data_out_size = table_width; /* Upgrade isolation level to repeatable read if a ** cursor stability transaction is getting tuples ** to sort them for further update of this table, ** but not if MVCC crow_locking(). */ if ( !crow_locking(rcb) ) { if (dmr->dmr_flags_mask & DMR_SORT && rcb->rcb_access_mode == RCB_A_WRITE && rcb->rcb_iso_level == RCB_CURSOR_STABILITY) { rcb->rcb_iso_level = RCB_REPEATABLE_READ; } if (rcb->rcb_iso_level == RCB_CURSOR_STABILITY || rcb->rcb_iso_level == RCB_REPEATABLE_READ) { rcb->rcb_state |= RCB_CSRR_LOCK; } } /* ** Quick troll for external interrupts. */ CScancelCheck(rcb->rcb_sid); /* If this is a SORTGET, call DMF sorter to retrieve ** next row. */ if (dmr->dmr_flags_mask & DMR_SORTGET) { DM2R_L_CONTEXT *lct; lct = (DM2R_L_CONTEXT *)tcb->tcb_lct_ptr; status = dmse_get_record(lct->lct_srt, &lct->lct_record, &dmr->error); if (status == E_DB_OK) { MEcopy((PTR)lct->lct_record, dmr->dmr_data.data_in_size, (PTR)dmr->dmr_data.data_address); } else { /* eof or error, call dmse to finish up. */ local_status = dmse_end(lct->lct_srt, &local_dberr); if (local_status != E_DB_OK) { dmr->error = local_dberr; status = local_status; } /* Deallocate load context */ if (lct->lct_mct.mct_buffer != (PTR)0) { dm0m_deallocate((DM_OBJECT **)&lct->lct_mct.mct_buffer); } dm0m_deallocate((DM_OBJECT **)&tcb->tcb_lct_ptr); tcb->tcb_lct_ptr = 0; rcb->rcb_state &= ~RCB_LSTART; } return(status); } /* Regular ol' gets are handled here. 
*/ xcb->xcb_scb_ptr->scb_qfun_errptr = &dmr->error; status = dm2r_get(rcb, (DM_TID*)&dmr->dmr_tid, flag, dmr->dmr_data.data_address, &dmr->error); xcb->xcb_scb_ptr->scb_qfun_errptr = NULL; /* If any arithmetic warnings to the RCB ADFCB during ** position, roll them into the caller's ADFCB. */ if (dmr->dmr_q_fcn != NULL && dmr->dmr_qef_adf_cb != NULL && rcb->rcb_adf_cb->adf_warncb.ad_anywarn_cnt > 0) dmr_adfwarn_rollup((ADF_CB *)dmr->dmr_qef_adf_cb, rcb->rcb_adf_cb); if ((tcb->tcb_rel.relstat & TCB_CONCUR)) { local_status = dm2r_unfix_pages(rcb, &local_dberr); if (local_status != E_DB_OK) { if (status == E_DB_OK) { status = local_status; dmr->error = local_dberr; } else { uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, (char * )NULL, (i4)0, (i4 *)NULL, &local_error, 0); } } } rcb->rcb_state &= ~RCB_CSRR_LOCK; if (status == E_DB_OK) return (status); } else SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER); } else { if (xcb->xcb_state & XCB_USER_INTR) SETDBERR(&dmr->error, 0, E_DM0065_USER_INTR); else if (xcb->xcb_state & XCB_FORCE_ABORT) SETDBERR(&dmr->error, 0, E_DM010C_TRAN_ABORTED); else if (xcb->xcb_state & XCB_ABORT) SETDBERR(&dmr->error, 0, E_DM0064_USER_ABORT); else if (xcb->xcb_state & XCB_WILLING_COMMIT) SETDBERR(&dmr->error, 0, E_DM0132_ILLEGAL_STMT); } } else SETDBERR(&dmr->error, 0, E_DM002B_BAD_RECORD_ID); break; } if (dmr->error.err_code == E_DM0022_BAD_MASTER_OP || dmr->error.err_code == E_DM004B_LOCK_QUOTA_EXCEEDED || dmr->error.err_code == E_DM0112_RESOURCE_QUOTA_EXCEED) { rcb->rcb_xcb_ptr->xcb_state |= XCB_STMTABORT; } else if (dmr->error.err_code == E_DM0042_DEADLOCK || dmr->error.err_code == E_DM004A_INTERNAL_ERROR || dmr->error.err_code == E_DM0100_DB_INCONSISTENT) { rcb->rcb_xcb_ptr->xcb_state |= XCB_TRANABORT; } else if (dmr->error.err_code == E_DM010C_TRAN_ABORTED) { rcb->rcb_xcb_ptr->xcb_state |= XCB_FORCE_ABORT; } else if (dmr->error.err_code == E_DM0065_USER_INTR) { rcb->rcb_xcb_ptr->xcb_state |= XCB_USER_INTR; rcb->rcb_state 
&= ~RCB_POSITIONED; *(rcb->rcb_uiptr) &= ~SCB_USER_INTR; } else if (dmr->error.err_code > E_DM_INTERNAL) { uleFormat( &dmr->error, 0, NULL, ULE_LOG , NULL, (char * )NULL, 0L, (i4 *)NULL, &error, 0); uleFormat(NULL, E_DM904C_ERROR_GETTING_RECORD, NULL, ULE_LOG, NULL, (char * )NULL, 0L, (i4 *)NULL, &error, 3, sizeof(DB_DB_NAME), &rcb->rcb_tcb_ptr->tcb_dcb_ptr->dcb_name, sizeof(DB_OWN_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relowner, sizeof(DB_TAB_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relid ); SETDBERR(&dmr->error, 0, E_DM008A_ERROR_GETTING_RECORD); } return (status); }
/*{
** Name: psq_rptqry_text - add a piece of text to the text chain for a repeat
**			   query
**
** Description:
**	Add a piece of text to the text chain for a repeat query.  It is
**	imperative that text of all repeat queries be stored in a
**	uniform fashion so that comparing two stored query texts would
**	serve as a reliable indicator of their sameness.  Each piece
**	will be preceded with a blank except for a PERIOD.  Neither the
**	piece consisting of PERIOD nor the following piece will be
**	preceded with a blank.
**
** Inputs:
**	header				Pointer to chain header
**	piece				Pointer to piece of text
**	size				Size of piece
**	result				Place to put pointer to new piece
**	err_blk				Filled in if an error happens
**
** Outputs:
**	result				Filled in with pointer to chain element
**	err_blk				Filled in if an error happens
**	Returns:
**	    E_DB_OK			Success
**	    E_DB_ERROR			Non-catastrophic failure
**	    E_DB_FATAL			Catastrophic failure
**	Exceptions:
**	    none
**
** Side Effects:
**	Allocates memory
**
** History:
**	24-jan-90 (andre)
**	    Plagiarized from psq_tadd().
**	28-jan-91 (andre)
**	    Do not insert a space if a piece is immediately following a piece
**	    consisting of a $.
*/ DB_STATUS psq_rptqry_text( PTR header, u_char *piece, i4 size, PTR *result, DB_ERROR *err_blk) { PSQ_THEAD *hp = (PSQ_THEAD *) header; PSQ_TEXT *tp; i4 err_code; bool leading_blank; char *txt; DB_STATUS status; /* ** Allocate enough space for PSQ_TEXT structure containing piece: ** all pieces will be preceeded with a blank with the following exceptions: ** - piece consisting of PERIOD will not be preceeeded with a blank; ** - piece which immediately follows a piece consisting of PERIOD; ** - piece starting with a "white" character will not be preceeded with a ** blank; ** - piece which immediately follows a piece consisting of $ (preceeded by ** a blank which was inserted by this function) */ if ( size == CMbytecnt(".") && !CMcmpcase(piece, ".") || CMwhite(piece)) { /* ** piece consists of a period or starts with a "white" character - no ** leading blanks will be added */ leading_blank = FALSE; } else if ( hp->psq_last != (PSQ_TEXT *) NULL && (( hp->psq_last->psq_psize == CMbytecnt(".") && !CMcmpcase(hp->psq_last->psq_tval, ".") ) || ( hp->psq_last->psq_psize == CMbytecnt(" ") + CMbytecnt("$") && !CMcmpcase(hp->psq_last->psq_tval, " ") && !CMcmpcase((hp->psq_last->psq_tval + CMbytecnt(" ")), "$") ) ) ) { /* ** previous piece consists of a period or of a $ preceeded by a blank ** inserted by this function - no leading blanks will be added */ leading_blank = FALSE; } else { /* insert a blank before the piece */ leading_blank = TRUE; } hp->psq_tmem.ulm_psize = (leading_blank) ? 
size + sizeof(PSQ_TEXT) - 1 + CMbytecnt(" ") : size + sizeof(PSQ_TEXT) - 1; if ((status = ulm_palloc(&hp->psq_tmem)) != E_DB_OK) { if (hp->psq_tmem.ulm_error.err_code == E_UL0005_NOMEM) { (VOID) psf_error(E_PS0F02_MEMORY_FULL, 0L, PSF_CALLERR, &err_code, err_blk, 0); } else { (VOID) psf_error(E_PS0371_ALLOC_TEXT_CHAIN, hp->psq_tmem.ulm_error.err_code, PSF_INTERR, &err_code, err_blk, 0); } return (status); } *result = hp->psq_tmem.ulm_pptr; tp = (PSQ_TEXT*) *result; /* Fill in text piece */ txt = (char *) tp->psq_tval; /* insert a leading blank if necessary */ if (leading_blank) { CMcpychar(" ", txt); txt += CMbytecnt(" "); } MEcopy((char *) piece, size, txt); tp->psq_psize = (leading_blank) ? size + CMbytecnt(" ") : size; /* Hook it up to the chain */ tp->psq_next = (PSQ_TEXT *) NULL; if (hp->psq_last != (PSQ_TEXT *) NULL) { hp->psq_last->psq_next = tp; tp->psq_prev = hp->psq_last; } else { tp->psq_prev = NULL; } hp->psq_last = tp; if (hp->psq_first == (PSQ_TEXT *) NULL) hp->psq_first = tp; /* Add in the length to the total for the chain */ hp->psq_tsize += tp->psq_psize; return (E_DB_OK); }
STATUS ERslookup( i4 msg_number, CL_ERR_DESC *clerror, i4 flags, char *sqlstate, char *msg_buf, i4 msg_buf_size, i4 language, i4 *msg_length, CL_ERR_DESC *err_code, i4 num_param, ER_ARGUMENT *param ) { i4 erindex; /* index of ERmulti table */ i4 status; i4 length = 0; ER_ARGUMENT *p; ER_ARGUMENT hidden[CLE_INFO_ITEMS]; /* to access info in clerror */ char tempbuf[ER_MAX_LEN+ER_MAX_NAME+2]; i4 templen; char *p_msg_buf; char *p_tempbuf; SYSTIME stime; char langbuf[ER_MAX_LANGSTR]; EX_CONTEXT context; ER_SEMFUNCS *sems; #define D_WIDTH 23 #define F_WIDTH 20 #define X_WIDTH 18 /* Validate the parameters. */ if (msg_buf == 0 || msg_buf_size == 0 || msg_length == 0) { return (ER_BADPARAM); } if (language != -1 && ERlangstr(language,langbuf) != OK) { return (ER_BADLANGUAGE); } if (!(flags & ER_NAMEONLY)) { EXdump(msg_number,0); } /* Insert timestamp if requested. */ if (flags & ER_TIMESTAMP) { if (msg_buf_size < 21) { return (ER_TOOSMALL); } TMnow(&stime); TMstr(&stime,msg_buf); length = (i4)STlength(msg_buf); msg_buf[length++] = ' '; } /* ** if (clerror && msg_number) ** look up msg_number, optional parameters in clerror->moreinfo ** else if (clerror) ** { ** if (clerror->intern) ** look up clerror.intern, optional params in clerror->moreinfo ** if (clerror->callid) ** look up system error message ** } */ if (clerror) { if (msg_number) /* Look up message after system error */ { /* ** Set up an ER_ARGUMENT that references system-dependent ** information in `clerror', and point `param' at it. */ i4 i; for (i = 0; i < CLE_INFO_ITEMS; ++i) { /* "...all of whose members begin at offset 0..." 
(K&R) */ hidden[i].er_value = (PTR)&clerror->moreinfo[i].data._i4; hidden[i].er_size = clerror->moreinfo[i].size; } param = &hidden[0]; num_param = CLE_INFO_ITEMS; } else /* retrieve system-dependent error messages */ { i4 len; ER_ARGUMENT argv[3]; if (clerror->intern) /* look up internal CL error */ { i4 i; for (i = 0; i < CLE_INFO_ITEMS; ++i) { argv[i].er_value = (PTR)&clerror->moreinfo[i].data._i4; argv[i].er_size = clerror->moreinfo[i].size; } /* ** Don't timestamp on recursive call, since it's been done ** already (if requested). */ if ((status = ERslookup((i4) clerror->intern, (CL_ERR_DESC*) NULL, flags & ~ER_TIMESTAMP | ER_TEXTONLY, NULL, &msg_buf[length], msg_buf_size-length, language, &len, err_code, CLE_INFO_ITEMS, argv)) != OK) { return (status); } length += len; if (clerror->callid) msg_buf[length++] = '\n'; } if (clerror->callid) /* look up system error message text */ { DESCRIPTOR msg_desc; msg_desc.desc_length = sizeof(tempbuf) - 1; msg_desc.desc_value = tempbuf; if ((status = cer_sysgetmsg(clerror, &len, &msg_desc, err_code)) != OK) { return(status); } argv[0].er_size = argv[1].er_size = argv[2].er_size = ER_PTR_ARGUMENT; argv[0].er_value = (PTR)&clerror->errnum; argv[1].er_value = (PTR)ERNAME((i4) clerror->callid); argv[2].er_value = (PTR)tempbuf; if ((status = ERslookup(ER_UNIXERROR, (CL_ERR_DESC*) NULL, flags & ~ER_TIMESTAMP | ER_TEXTONLY, NULL, &msg_buf[length], msg_buf_size - length, language, &len,err_code, 3, argv)) != OK) { return (status); } length += len; } msg_buf[*msg_length = length] = EOS; return (OK); } } /* ** Check if error message file is already opened or not yet. ** First see if the language is initialized. If not, initialize ** it and the message files. ** If it is already opened, cer_fndindex function returns the index of ** ERmulti table that internal language code is parameter 'language'. ** If not yet, it returns '-1'. */ if (cer_issem(&sems)) { if (((sems->sem_type & MU_SEM) ? 
(*sems->er_p_semaphore)(&sems->er_mu_sem) : (*sems->er_p_semaphore)(1, &sems->er_sem)) != OK) { sems = NULL; } } if ((erindex = cer_fndindex(language)) == -1) { if ((status = cer_nxtindex(language,&erindex)) != OK) { /* Error in initializing the language */ if (sems) { if (sems->sem_type & MU_SEM) _VOID_ (*sems->er_v_semaphore)(&sems->er_mu_sem); else _VOID_ (*sems->er_v_semaphore)(&sems->er_sem); } return (status); } } /* If the error message file is not opened, open the message file. */ if (!cer_isopen(erindex,ER_SLOWSIDE)) { if ((status = cer_sinit(language,msg_number,erindex,err_code)) != OK) { if (sems) { if (sems->sem_type & MU_SEM) _VOID_ (*sems->er_v_semaphore)(&sems->er_mu_sem); else _VOID_ (*sems->er_v_semaphore)(&sems->er_sem); } return (status); } } /* If not open then just return. */ if (!cer_isopen(erindex,ER_SLOWSIDE)) { if (sems) { if (sems->sem_type & MU_SEM) _VOID_ (*sems->er_v_semaphore)(&sems->er_mu_sem); else _VOID_ (*sems->er_v_semaphore)(&sems->er_sem); } /* ** As internal file id is '0', openning file will fail. ** In her,return status 'ER_BADOPEN' to show open fail. */ return (ER_BADOPEN); } /* ** Search message string from file and set to buffer. ** Error status on system call set to 'err_code'. */ status = cer_sstr(msg_number, sqlstate, tempbuf, msg_buf_size - length, erindex, err_code, flags & ER_TEXTONLY? ER_GET : ER_LOOKUP); if (sems) { if (sems->sem_type & MU_SEM) _VOID_ (*sems->er_v_semaphore)(&sems->er_mu_sem); else _VOID_ (*sems->er_v_semaphore)(&sems->er_sem); } if (status != OK) { return (status); } /* ** Format the text with parameters into the callers buffer. ** The message is truncated if it will not fit. 
*/ /* Insert part of name from temporary buffer to buffer */ status = OK; templen = (i4)STlength(tempbuf); p_msg_buf = &msg_buf[length]; p_tempbuf = tempbuf; if (!(flags & ER_TEXTONLY)) { while(*p_tempbuf != '\t') { CMcpyinc(p_tempbuf,p_msg_buf); } CMcpyinc(p_tempbuf,p_msg_buf); } /* ============================================ */ /* Copy text to message substituting arguments. */ /* -------------------------------------------- */ /* (But first, declare an exception handler to */ /* catch bad params that may access violate.) */ /* ============================================ */ if (EXdeclare(er_exhandler, &context)) { u_i4 res_len; u_i4 bytes_left_in_buf; bytes_left_in_buf = (u_i4)(msg_buf_size - (p_msg_buf - msg_buf)); res_len = STlen( STncpy(p_msg_buf, ERx("*** ERslookup() ERROR: Missing or bad parameter for this message. ***"), bytes_left_in_buf )); p_msg_buf[ bytes_left_in_buf - 1 ] = EOS; p_msg_buf += res_len; *msg_length = (i4)(p_msg_buf - msg_buf); EXdelete(); return (OK); } for( ;p_tempbuf - tempbuf < templen; CMnext(p_tempbuf)) { long number; u_long unumber; double fnumber; i4 i; i4 pnum; if ( (*p_tempbuf != '%') || (flags & ER_NOPARAM) ) { if ((p_msg_buf - msg_buf) >= msg_buf_size) break; CMcpychar(p_tempbuf,p_msg_buf); CMnext(p_msg_buf); continue; } if (p_tempbuf - tempbuf + 2 >= templen) continue; CMnext(p_tempbuf); if (*p_tempbuf == '!') { if ((p_msg_buf - msg_buf) + 3 >= msg_buf_size) continue; CMcpychar(ERx("\r"),p_msg_buf); CMnext(p_msg_buf); CMcpychar(ERx("\n"),p_msg_buf); CMnext(p_msg_buf); CMcpychar(ERx("\t"),p_msg_buf); CMnext(p_msg_buf); continue; } /* ** Only works for up to 10 parameters, and makes character set ** assumptions - should be fixed. 
*/ if ( *p_tempbuf < '0' || *p_tempbuf > '9' ) { /* treat any other character as a literal */ if ((p_msg_buf - msg_buf) >= msg_buf_size) break; if ( *p_tempbuf != '%' ) { CMcpychar("%",p_msg_buf); CMnext(p_msg_buf); } CMcpychar(p_tempbuf,p_msg_buf); CMnext(p_msg_buf); continue; } pnum = *p_tempbuf - '0'; if (pnum >= num_param) { EXdelete(); return(ER_BADPARAM); } p = ¶m[pnum]; CMnext(p_tempbuf); switch (*p_tempbuf) { case 'd': /* Convert an integer into the buffer with width D_WIDTH */ if (p->er_size == ER_PTR_ARGUMENT) /* this is ptr to i4 */ number = *(i4 *)p->er_value; else if (p->er_size == 0) /* this is a i4 */ number = (i4)(SCALARP)p->er_value; else if (p->er_size == 1) number = *(i1 *)p->er_value; else if (p->er_size == 2) number = *(i2 *)p->er_value; else if (p->er_size == 4) number = *(i4 *)p->er_value; else if (p->er_size == 8) number = *(i8 *)p->er_value; else continue; if (p_msg_buf - msg_buf + D_WIDTH >= msg_buf_size) continue; if (p->er_size == 8) { CVla8(number, p_msg_buf); } else { CVla((i4)number, p_msg_buf); } while (*p_msg_buf) CMnext(p_msg_buf); continue; case 'u': /* Convert an integer into the buffer with width D_WIDTH */ if (p->er_size == ER_PTR_ARGUMENT) /* this is ptr to u_i4 */ number = *(u_i4 *)p->er_value; else if (p->er_size == 0) /* this is a u_i4 */ number = (u_i4)(SCALARP)p->er_value; else if (p->er_size == 1) number = *(u_i1 *)p->er_value; else if (p->er_size == 2) number = *(u_i2 *)p->er_value; else if (p->er_size == 4) number = *(u_i4 *)p->er_value; else if (p->er_size == 8) number = *(u_i8 *)p->er_value; else continue; if (p_msg_buf - msg_buf + D_WIDTH >= msg_buf_size) continue; if (p->er_size == 8) { CVula8(number, p_msg_buf); } else { CVula((u_i4)number, p_msg_buf); } while (*p_msg_buf) CMnext(p_msg_buf); continue; case 'f': { i2 res_width; /* Convert a float into the buffer with width F_WIDTH */ if (p->er_size == ER_PTR_ARGUMENT) /* Pointer to a double */ fnumber = *(double *)p->er_value; else if (p->er_size == 4) fnumber = 
*(f4 *)p->er_value; else if (p->er_size == 8) fnumber = *(f8 *)p->er_value; else continue; if (p_msg_buf - msg_buf + F_WIDTH >= msg_buf_size) continue; /* Always convert to 'e' format. */ CVfa(fnumber, (i4) 20, (i4) 5, 'e', '.', p_msg_buf, &res_width); p_msg_buf += F_WIDTH; continue; } case 'c': /* Convert a character array into buffer. */ if (p->er_value == 0) p->er_value = (PTR)ERx("<missing>"); if ((p->er_size == 0) || (p->er_size == ER_PTR_ARGUMENT)) { for (i = 0; ((char *)p->er_value)[i]; i++) ; p->er_size = i; } if (p_msg_buf - msg_buf + p->er_size >= msg_buf_size) continue; if (p->er_size > msg_buf_size - (p_msg_buf - msg_buf)) p->er_size = (i4)(msg_buf_size - (p_msg_buf - msg_buf)); /* p->er_size=STtrmwhite(p_msg_buf);*/ MEcopy(p->er_value, p->er_size, p_msg_buf); p->er_size = (i4)STtrmnwhite(p_msg_buf, p->er_size); p_msg_buf += p->er_size; continue; case 'x': /* Convert an integer into the buffer with width D_WIDTH */ if (p->er_size == ER_PTR_ARGUMENT) unumber = *(u_i4 *)p->er_value; else if (p->er_size == 0) unumber = (u_i4)(SCALARP)p->er_value; else if (p->er_size == 1) unumber = *(u_i1 *)p->er_value; else if (p->er_size == 2) unumber = *(u_i2 *)p->er_value; else if (p->er_size == 4) unumber = *(u_i4 *)p->er_value; else if (p->er_size == 8) unumber = *(u_i8 *)p->er_value; if (p_msg_buf - msg_buf + X_WIDTH >= msg_buf_size) continue; for (i = 8; --i >= 0; ) { /* {@fix_me@} ** This is *NOT* machine independent. This relys on an ** ASCII-like character set, where the digits '0'-'9' are ** contiguous and sequential, and the characters 'A'-'F' ** are contiguous and sequential. Both ASCII and EBCDIC ** happen to be this way. */ if ((*(p_msg_buf + i) = (unumber & 0x0f) + '0') > '9') *(p_msg_buf + i) += 'A' - '9' - 1; unumber >>= 4; } p_msg_buf += 8; continue; default: continue; } } *msg_length = (i4)(p_msg_buf - msg_buf); *p_msg_buf = EOS; EXdelete(); return (OK); }