DB_STATUS adu_uuid_compare( ADF_CB *adf_scb, DB_DATA_VALUE *dv1, DB_DATA_VALUE *dv2, DB_DATA_VALUE *rdv) { i4 d; DB_DATA_VALUE exp; DB_STATUS db_stat; UUID u1, u2; UUID *uuid1, *uuid2; /* Verify the result is an integer and both operands are UUIDs */ if ((dv1->db_datatype != DB_BYTE_TYPE) || (dv2->db_datatype != DB_BYTE_TYPE) || (rdv->db_datatype != DB_INT_TYPE)) { db_stat = adu_error(adf_scb, E_AD9998_INTERNAL_ERROR, 2, 0, "uuid_compare type"); return(db_stat); } if ((ME_ALIGN_MACRO((PTR)dv1->db_data, sizeof(ALIGN_RESTRICT)) != (PTR)dv1->db_data) || (dv1->db_length != sizeof(UUID))) { uuid1 = &u1; uuid_format(dv1, uuid1); } else uuid1 = (UUID *)dv1->db_data; if ((ME_ALIGN_MACRO((PTR)dv2->db_data, sizeof(ALIGN_RESTRICT)) != (PTR)dv2->db_data) || (dv2->db_length != sizeof(UUID))) { uuid2 = &u2; uuid_format(dv2, uuid2); } else uuid2 = (UUID *)dv2->db_data; d = IDuuid_compare(uuid1, uuid2); if (rdv->db_datatype == DB_INT_TYPE && rdv->db_length == 4) *(i4 *)rdv->db_data = d; else if (rdv->db_datatype == DB_INT_TYPE && rdv->db_length == 2) *(i2 *)rdv->db_data = d; else { exp.db_datatype = DB_INT_TYPE; exp.db_length = 4; exp.db_data = (PTR) &d; if ((db_stat = adu_1int_coerce(adf_scb, rdv, &exp)) != E_DB_OK) return (db_stat); } return(E_DB_OK); }
DB_STATUS adu_uuid_to_char( ADF_CB *adf_scb, DB_DATA_VALUE *dv1, DB_DATA_VALUE *rdv) { char d[100]; DB_STATUS db_stat; UUID u1; UUID *uuid; /* Verify the operand is a UUID */ if (dv1->db_datatype != DB_BYTE_TYPE) { db_stat = adu_error(adf_scb, E_AD9998_INTERNAL_ERROR, 2, 0 ,"uuid_to_char type"); return(db_stat); } if ((ME_ALIGN_MACRO((PTR)dv1->db_data, sizeof(ALIGN_RESTRICT)) != (PTR)dv1->db_data) || (dv1->db_length != sizeof(UUID))) { uuid = &u1; uuid_format(dv1, uuid); } else uuid = (UUID *)dv1->db_data; db_stat = IDuuid_to_string(uuid, d); if (db_stat = adu_movestring(adf_scb, (u_char *)d,(i4) STlength(d), dv1->db_datatype, rdv)) return (db_stat); return(E_DB_OK); }
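/*
** Illustrative sketch only (not part of the original source): the
** alignment guard used by both adu_uuid_compare() and adu_uuid_to_char()
** above.  When the DB_BYTE_TYPE operand is not ALIGN_RESTRICT aligned
** (or is not exactly sizeof(UUID) bytes), the value is first expanded
** into an aligned local UUID via uuid_format() before it is handed to
** the IDuuid_*() routines.  A caller-style fragment, assuming "dv"
** holds a byte(16) UUID value:
**
**	UUID	local_uuid;
**	UUID	*uuid;
**
**	if ((ME_ALIGN_MACRO((PTR)dv->db_data, sizeof(ALIGN_RESTRICT)) !=
**		(PTR)dv->db_data) || (dv->db_length != sizeof(UUID)))
**	{
**	    uuid_format(dv, &local_uuid);    -- copy to aligned storage
**	    uuid = &local_uuid;
**	}
**	else
**	    uuid = (UUID *)dv->db_data;
*/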
/*{
** Name: dmd_lock_info - Format locking system information.
**
** Description:
**	This routine formats locking system information. It is used
**	by LOCKSTAT as well as error handling code triggered by certain
**	locking errors.
**
** Inputs:
**	options				which statistics to report:
**					DMTR_LOCK_SUMMARY -- just summary info
**					DMTR_LOCK_STATISTICS -- all lock statistics
**					DMTR_LOCK_LISTS -- locks by lock list
**					DMTR_LOCK_USER_LISTS -- locks by lock list
**					DMTR_LOCK_SPECIAL_LISTS -- locks by lock list
**					DMTR_LOCK_RESOURCES -- locks by resource
**					DMTR_LOCK_ALL -- all of the above
**					DMTR_SMALL_HEADER with any of the above
**					    to display a shorter header.
**					DMTR_LOCK_DIRTY -- dirty-read the lock
**					    structures (no semaphore protection).
**
** Outputs:
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	16-jun-1987 (Derek)
**	    Created for Jupiter.
**	28-Jul-1987 (ac)
**	    Added cluster support.
**	27-mar-1989 (rogerk)
**	    Added new lock list status values for Shared Buffer Manager.
**	    Added new lock types for shared buffer manager.
**	19-jun-1989 (rogerk)
**	    When formatting locks by resource, use RSB, not LKB for buffer
**	    manager locks.
**	21-feb-1990 (sandyh)
**	    Added LK_SUMM information call to gather quota statistics.
**	25-sep-1991 (mikem) integrated following change: 19-aug-1991 (ralph)
**	    Add support for LK_EVCONNECT
**	11-sep-1992 (jnash)
**	    Reduced logging project.
**	    - Add LK_ROW lock type.
**	14-dec-1992 (jnash)
**	    Add AUDIT lock type in TRdisplay.
**	10-dec-1992 (robf)
**	    Handle AUDIT locks, also ROW locks now correct.
**	24-may-1993 (bryanp)
**	    Added options field to enable selective lockstat.
**	    Fixed display of llb's because cluster and non-cluster LLB's now
**	    both have the same llb_status definitions.
**	    Added llb_pid field to the lock list information structure so that
**	    the lock list's creator PID could be displayed. Displayed the
**	    PID in hex for VMS, decimal for all other systems.
**	    Added lots of new locking system statistics.
**	21-jun-1993 (bryanp)
**	    Display ALL the lkb_attribute values.
**	    Added "-user_lists" and "-special_lists"
**	26-jul-1993 (bryanp)
**	    If a lock list is in LLB_INFO_EWAIT state, display event info.
**	22-nov-1993 (jnash)
**	    Moved from lockstat.c to here for use in deadlock diagnostic
**	    output. Rename to dmd_lock_info(). Rename options to
**	    include DMTR prefix.
**	7-jan-94 (robf)
**	    Add back changes made to lockstat.c:
**	    12-jul-93 (robf)
**	    Improve formatting for AUDIT locks, now shows what they are
**	31-jan-1994 (mikem)
**	    Bug #58407.
**	    Added support to display the newly added CS_SID info in the
**	    LK_LLB_INFO structure: llb_sid. This is used by lockstat and
**	    internal lockstat type info logging to print out session owning
**	    locklist.
**	    Given a pid and session id (usually available from errlog.log
**	    traces), one can now track down what locks are associated with
**	    that session.
**	31-mar-94 (robf)
**	    Added AUDIT QUEUE locks to trace
**	20-apr-1994 (mikem)
**	    bug #59490
**	    TRdisplay() CS_SID's as %p, not %x.
**	20-apr-1995 (nick)
**	    Add support for LK_CKP_TXN
**	12-Jan-1996 (jenjo02)
**	    Added TRdisplay of llb_highwater, which is NOT equal
**	    to lbk_highwater! Added DMTR_SMALL_HEADER option to print
**	    a header which excludes the date/time.
**	01-aug-1996 (nanpr01 for ICL keving)
**	    Display the number of granted LKBs with callbacks for RSBs.
** 22-nov-96 (dilma04) ** Row Locking Project: ** Add support for LK_PH_PAGE and LK_VAL_LOCK. ** 11-Apr-1997 (jenjo02) ** Added new stats for callback threads. ** 24-Apr-1997 (jenjo02) ** - Added DMTR_LOCK_DIRTY/LK_S_DIRTY flag to augment LKshow() option. ** This notifies LKshow() that locking structures are NOT to be ** semaphore protected, used as a debugging aid. ** - Fixed DMTR_LOCK_LISTS so that it'll now list all LKBs on a lock list. ** Previously, it'd quit if the LLB + LKBs filled the buffer but there ** were more LKBs on the lock list. ** 22-Jan-1998 (jenjo02) ** Added pid and sid to LK_LKB_INFO so that the true waiting session ** can be identified. The llb's pid and sid identify the lock list ** creator, not necessarily the lock waiter (LK_MULTITHREAD-type ** lock lists and requests). ** 11-Jun-1998 (jenjo02) ** Added ON_DLOCKQ, DEADLOCK, XSVDB, XROW states to display of ** llb_status. ** 03-Sep-1998 (jenjo02) ** DMTR_LOCK_RESOURCES was decrementing the size remaining in the ** buffer by sizeof(LK_LKB_INFO) instead of sizeof(LK_RSB_INFO), ** causing it to lose track of (and not display) the last (or only) ** LKB tied to a RSB. ** 28-Jul-1998 (strpa05) ** - Correct LOCKSTAT output heading (x-int from oping12) ** 22-may-2001 (devjo01) ** s103715 generic cluster stuff. ** 04-dec-2001 (devjo01) ** Use structures LK_S_LOCKS_HEADER and LK_S_RESOURCE_HEADER to ** calculate start of LKB_INFO portion of buffer. This technique ** assures that compilers on all platforms (even Tru64) will ** correctly align structures to prevent unaligned memory accesses ** regardless of platforms structure padding policy. ** 15-Feb-2002 (rigka01) bug#107029 ** print unsigned longnat/i4 data using %u instead of %d with TRdisplay ** 28-Feb-2002 (jenjo02) ** Added MULTITHREAD attribute to llb_status. LLB_WAITING is ** contrived based on llb_waiters. ** 05-Mar-2002 (jenjo02) ** Added LK_SEQUENCE lock type. ** 04-sep-2002 (devjo01) ** Take advantage of LKkey_to_string. ** 26-Oct-2007 (jonj) ** Update llb,lkb status bits, display ** lock requestor pid/sid if multithreaded list, ** all VMS pids in hex. ** 19-Nov-2007 (jonj) ** Include lkdef.h to pick up (new) status bits ** string defines. ** 04-Apr-2008 (jonj) ** Save local copies of llb_status, llb_pid, llb_sid ** as LLB gets overwritten with LKB if lots of locks. ** 15-Jan-2010 (jonj) ** SIR 121619 MVCC: add display of stats by lock type. 
*/ VOID dmd_lock_info(i4 options) { CL_ERR_DESC sys_err; LK_STAT stat; LK_SUMM summ; i4 status; i4 length; i4 count; i4 context; i4 lkb_context; i4 llb_id; i4 llb_lkb_count; u_i4 llb_status; PID llb_pid; CS_SID llb_sid; char keybuf[128]; char info_buffer[16384]; /* (wow) */ char *buffer; i4 buf_size; bool cluster; char *format_string; i4 sem_option = options & DMTR_LOCK_DIRTY; i4 i; cluster = CXcluster_enabled(); buffer = (PTR) ME_ALIGN_MACRO(info_buffer, sizeof(ALIGN_RESTRICT)); buf_size = sizeof(info_buffer) - (buffer - info_buffer); if (options & (DMTR_LOCK_SUMMARY|DMTR_LOCK_STATISTICS)) { status = LKshow(LK_S_SUMM | sem_option, 0, 0, 0, sizeof(summ), (PTR)&summ, (u_i4 *)&length, (u_i4 *)&context, &sys_err); if (status) { TRdisplay("Can't show locking configuration summary.\n"); return; } if (options & DMTR_SMALL_HEADER) TRdisplay("%29*- Locking System Summary %27*-\n\n"); else TRdisplay("%44*=%@ Locking System Summary%47*=\n\n"); TRdisplay(" Total Locks %8d", summ.lkb_size); TRdisplay("%8* Total Resources %8d\n", summ.rsb_size); TRdisplay("%41* Locks per transaction%8d\n", summ.lkbs_per_xact); TRdisplay(" Lock hash table %8d", summ.lkb_hash_size); TRdisplay("%8* Locks in use %8d\n", summ.lkbs_inuse); TRdisplay(" Resource hash table %8d", summ.rsb_hash_size); TRdisplay("%8* Resources in use %8d\n", summ.rsbs_inuse); TRdisplay(" Total lock lists %8d", summ.llb_size); TRdisplay("%8* Lock lists in use %8d\n", summ.llbs_inuse); } if (options & DMTR_LOCK_STATISTICS) { status = LKshow(LK_S_STAT | sem_option, 0, 0, 0, sizeof(stat), (PTR)&stat, (u_i4 *)&length, (u_i4 *)&context, &sys_err); if (status) { TRdisplay("Can't show locking statistics.\n"); return; } if (options & DMTR_SMALL_HEADER) TRdisplay("%29*- Locking System Statistics%24*-\n\n"); else TRdisplay("\n%44*=%@ Locking System Statistics%44*=\n\n"); TRdisplay(" Create lock list %10u", stat.create); TRdisplay("%8* Release lock list %10u\n", stat.destroy); TRdisplay(" Request lock %10u", stat.request); TRdisplay("%8* Re-request lock %10u\n", stat.re_request); TRdisplay(" Convert lock %10u", stat.convert); TRdisplay("%8* Release lock %10u\n", stat.release); TRdisplay(" Escalate %10u", stat.escalate); TRdisplay("%8* Lock wait %10u\n", stat.wait); TRdisplay(" Convert wait %10u", stat.con_wait); TRdisplay("%8* Convert Deadlock %10u\n", stat.con_dead); TRdisplay(" Deadlock Wakeups %10u", stat.dlock_wakeups); TRdisplay("%8* Max dlk queue len %10u\n", stat.dlock_max_q_len); TRdisplay(" Deadlock Search %10u", stat.dead_search); TRdisplay("%8* Deadlock %10u\n", stat.deadlock); TRdisplay(" Cancel %10u", stat.cancel); TRdisplay("%8* Convert Search %10u\n", stat.con_search); TRdisplay(" Allocate CB %10u", stat.allocate_cb); TRdisplay("%8* Deallocate CB %10u\n", stat.deallocate_cb); TRdisplay(" LBK Highwater %10u", stat.lbk_highwater); TRdisplay("%8* LLB Highwater %10u\n", stat.llb_highwater); TRdisplay(" SBK Highwater %10u", stat.sbk_highwater); TRdisplay("%8* LKB Highwater %10u\n", stat.lkb_highwater); TRdisplay(" RBK Highwater %10u", stat.rbk_highwater); TRdisplay("%8* RSB Highwater %10u\n", stat.rsb_highwater); TRdisplay(" Max Local dlk srch %10u", stat.max_lcl_dlk_srch); TRdisplay("%8* Dlk locks examined %10u\n", stat.dlock_locks_examined); TRdisplay(" Max rsrc chain len %10u", stat.max_rsrc_chain_len); TRdisplay("%8* Max lock chain len %10u\n", stat.max_lock_chain_len); TRdisplay(" Max locks per txn %10u", stat.max_lkb_per_txn); TRdisplay("%8* Callback Wakeups %10u\n", stat.cback_wakeups); TRdisplay(" Callbacks Invoked %10u", 
stat.cback_cbacks); TRdisplay("%8* Callbacks Ignored %10u\n", stat.cback_stale); if (cluster) { TRdisplay(" Enq %10u", stat.enq); TRdisplay("%8* Deq %10u\n", stat.deq); TRdisplay(" Sync Completions %10u", stat.synch_complete); TRdisplay("%8* Async Completions %10u\n", stat.asynch_complete); TRdisplay(" Gbl dlck srch pend %10u", stat.gdlck_search); TRdisplay("%8* Gbl deadlock %10u\n", stat.gdeadlock); TRdisplay(" Gbl grant pre-srch %10u", stat.gdlck_grant); TRdisplay("%8* Gbl dlck srch rqst %10u\n", stat.totl_gdlck_search); TRdisplay(" Gbl dlck srch calls %10u", stat.gdlck_call); TRdisplay("%8* Gbl dlck msgs sent %10u\n", stat.gdlck_sent); TRdisplay(" Con't gbl dlck calls %10u", stat.cnt_gdlck_call); TRdisplay("%8* Con't gbl dlck sent %10u\n", stat.cnt_gdlck_sent); TRdisplay(" Unsent dlck srch rqst%10u", stat.unsent_gdlck_search); TRdisplay("%8* Sent dlck srch rqst %10u\n", stat.sent_all); TRdisplay(" CSP IPC messages %10u", stat.csp_msgs_rcvd); TRdisplay("%8* CSP IPC wakeups %10u\n", stat.csp_wakeups_sent); } TRdisplay("\n Statistics by lock type:\n\n"); /* NB: tstat[0] are the counts for all types */ for ( i = 1; i <= LK_MAX_TYPE; i++ ) { LK_TSTAT *tstat = &stat.tstat[i]; if ( tstat->request_new ) { TRdisplay(" %12w Request lock %10u", LK_LOCK_TYPE_MEANING, i, tstat->request_new); TRdisplay("%8* Re-request lock %10u\n", tstat->request_convert); TRdisplay(" %13* Convert lock %10u", tstat->convert); TRdisplay("%8* Release lock %10u\n", tstat->release); TRdisplay(" %13* Lock wait %10u", tstat->wait); TRdisplay("%8* Convert wait %10u\n", tstat->convert_wait); TRdisplay(" %13* Deadlock %10u", tstat->deadlock); TRdisplay("%8* Convert deadlock %10u\n", tstat->convert_deadlock); } } TRdisplay("\n"); } if (options & DMTR_LOCK_LISTS) { if (options & DMTR_SMALL_HEADER) TRdisplay("%29*- Locks by lock list %30*-\n"); else TRdisplay("%55*-Locks by lock list%55*-\n"); /* Get information about lock lists and locks. */ for (context = 0;;) { LK_LLB_INFO *llb = (LK_LLB_INFO*)buffer; LK_LKB_INFO *lkb = ((LK_S_LOCKS_HEADER*)llb)->lkbi; status = LKshow(LK_S_LOCKS | sem_option, 0, 0, 0, buf_size, (PTR)buffer, (u_i4 *)&length, (u_i4 *)&context, &sys_err); if (length == 0) break; if ((options & DMTR_LOCK_USER_LISTS) == 0) { if ((llb->llb_status & LLB_INFO_NONPROTECT) == 0 && llb->llb_key[0] != 0) continue; /* this is a user lock list */ } if ((options & DMTR_LOCK_SPECIAL_LISTS) == 0) { if ((llb->llb_status & LLB_INFO_NONPROTECT) != 0 || llb->llb_key[0] == 0) continue; /* this is a non-user lock list */ } /* ** Format the LLB. The llb_tick field is displayed only on a ** VAXCluster. */ TRdisplay( "\nId: %x Tran_id: %x%x R_llb: %x R_cnt: %d S_llb: %x S_cnt: %d Wait: %x Locks: (%d,%d/%d)\n", llb->llb_id, llb->llb_key[0], llb->llb_key[1], llb->llb_r_llb_id, llb->llb_r_cnt, llb->llb_s_llb_id, llb->llb_s_cnt, llb->llb_wait_rsb_id, llb->llb_lkb_count, llb->llb_llkb_count, llb->llb_max_lkb); #ifdef VMS TRdisplay(" PID:%x SID:%p ", llb->llb_pid, llb->llb_sid); #else TRdisplay(" PID:%d SID:%p ", llb->llb_pid, llb->llb_sid); #endif /* VMS */ if ( cluster ) TRdisplay("Tick: %d ", llb->llb_tick); TRdisplay("Status: %v ", LLB_STATUS, llb->llb_status); TRdisplay("\n"); if (llb->llb_status & LLB_EWAIT) format_event_wait_info(&llb->llb_event, llb->llb_evflags); TRdisplay("\n"); /* ** If there are more LKBs on this list than will fit in our ** buffer, we'll need to make multiple calls to LKshow() to ** get them all. 
** ** Note that between calls, the number of LKBs on the list may ** change and be more or less than that just reported in the LLB, ** so don't expect perfection here. */ llb_id = llb->llb_id; llb_lkb_count = llb->llb_lkb_count; llb_status = llb->llb_status; llb_pid = llb->llb_pid; llb_sid = llb->llb_sid; /* count = number of LKBs just returned */ count = (length - ((PTR)lkb - (PTR)llb)) / sizeof(LK_LKB_INFO); lkb_context = count; while (count) { /* Format each LKB. */ if (cluster) { TRdisplay(" Id:%x Rsb:%x Gr:%3w Req:%3w State:%2w ", lkb->lkb_id, lkb->lkb_rsb_id, LOCK_MODE, lkb->lkb_grant_mode, LOCK_MODE, lkb->lkb_request_mode, LKB_STATE, lkb->lkb_state); if (lkb->lkb_flags & LKB_PHYSICAL) TRdisplay("PHYS(%d) ", lkb->lkb_phys_cnt); else TRdisplay("%8* "); TRdisplay("Lkid:%x.%x V:%x%x ", lkb->lkb_dlm_lkid.lk_uhigh, lkb->lkb_dlm_lkid.lk_ulow, lkb->lkb_dlm_lkvalue[0], lkb->lkb_dlm_lkvalue[1]); TRdisplay("lk_attr: %v ", LKB_ATTRIBUTE, lkb->lkb_flags); } else { TRdisplay(" Id: %x Rsb: %x Gr: %3w Req: %3w State: %2w ", lkb->lkb_id, lkb->lkb_rsb_id, LOCK_MODE, lkb->lkb_grant_mode, LOCK_MODE, lkb->lkb_request_mode, LKB_STATE, lkb->lkb_state); if (lkb->lkb_flags & LKB_PHYSICAL) TRdisplay("PHYS(%d) ", lkb->lkb_phys_cnt); else TRdisplay("%8* "); } /* ** If not granted and the lock requestor is other than ** the lock list creator, show the pid and sid of the ** lock requestor. ** ** Also display pid/sid if multithreaded lock list. */ if ( (llb_status & LLB_MULTITHREAD || lkb->lkb_grant_mode != lkb->lkb_request_mode) && (lkb->lkb_pid != llb_pid || lkb->lkb_sid != llb_sid) ) { #ifdef VMS TRdisplay("Pid:%x Sid:%p ", lkb->lkb_pid, lkb->lkb_sid); #else TRdisplay("Pid:%d Sid:%p ", lkb->lkb_pid, lkb->lkb_sid); #endif /* VMS */ } TRdisplay("KEY(%s)\n", LKkey_to_string( (LK_LOCK_KEY *)&lkb->lkb_key[0], keybuf) ); lkb++; llb_lkb_count--; /* Do next in this buffer */ if (--count) continue; /* Get next block of LKBs if there appear to be more */ if (llb_lkb_count > 0) { status = LKshow(LK_S_LIST_LOCKS | sem_option, llb_id, 0, 0, buf_size, buffer, (u_i4 *)&length, (u_i4 *)&lkb_context, &sys_err); if (length != 0) { count = length / sizeof(LK_LKB_INFO); lkb = (LK_LKB_INFO *)buffer; } } } /* while (count) */ } /* for (context = 0;;) */ } if (options & DMTR_LOCK_RESOURCES) { if (options & DMTR_SMALL_HEADER) TRdisplay("%29*- Locks by resource %31*-\n"); else TRdisplay("%55*-Locks by resource%56*-\n"); /* Get information about resources and lock requests. */ for (context = 0;;) { LK_RSB_INFO *rsb = (LK_RSB_INFO*)buffer; LK_LKB_INFO *lkb = ((LK_S_RESOURCE_HEADER*)rsb)->lkbi; status = LKshow(LK_S_RESOURCE | sem_option, 0, 0, 0, buf_size, buffer, (u_i4 *)&length, (u_i4 *)&context, &sys_err); if (length == 0) break; /* Format the RSB. */ TRdisplay("\nId: %x Gr: %3w Conv: %3w Cbacks %d Value: <%x%x> %8w KEY(%s)\n", rsb->rsb_id, LOCK_MODE, rsb->rsb_grant, LOCK_MODE, rsb->rsb_convert, rsb->rsb_cback_count, rsb->rsb_value[0], rsb->rsb_value[1], ",INVALID", rsb->rsb_invalid, LKkey_to_string( (LK_LOCK_KEY *)&rsb->rsb_key[0], keybuf ) ); for (count = (length - ((PTR)lkb - (PTR)rsb)) / sizeof(LK_LKB_INFO); count; count--) { /* Format each LKB. */ TRdisplay(" Id: %x Llb: %x Gr: %3w Req: %3w State: %2w ", lkb->lkb_id, lkb->lkb_llb_id, LOCK_MODE, lkb->lkb_grant_mode, LOCK_MODE, lkb->lkb_request_mode, LKB_STATE, lkb->lkb_state); if (lkb->lkb_flags & LKB_PHYSICAL) TRdisplay("PHYS(%d) ", lkb->lkb_phys_cnt); /* Show the pid and sid of the lock requestor. 
*/ if (lkb->lkb_grant_mode != lkb->lkb_request_mode) #ifdef VMS TRdisplay("Pid:%x Sid:%p\n", lkb->lkb_pid, lkb->lkb_sid); #else TRdisplay("Pid:%d Sid:%p\n", lkb->lkb_pid, lkb->lkb_sid); #endif /* VMS */ else TRdisplay("\n"); lkb++; } } }
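/*
** Illustrative sketch only (not part of the original source): minimal
** callers of dmd_lock_info().  The DMTR_* masks are the ones documented
** in the function header; DMTR_SMALL_HEADER suppresses the date/time
** banner and DMTR_LOCK_DIRTY asks LKshow() not to take the locking
** semaphores while formatting.
**
**	dmd_lock_info(DMTR_LOCK_SUMMARY | DMTR_LOCK_STATISTICS |
**			DMTR_SMALL_HEADER);
**
**	dmd_lock_info(DMTR_LOCK_ALL | DMTR_LOCK_DIRTY);
*/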
/*{ ** Name: adt_utf8comp() - Compare 2 c/text/char/varchar values in UTF8 server ** ** Description: ** This routine compares two string data values. Before they are compared, ** they must be coerced to UCS2 and the comparison is done according to ** the Unicode Collation Algorithm. This necessitates a call to ** adu_nvchr_utf8comp() to perform the setup and comparison. This function ** presents the values as DB_DATA_VALUE structures, as expected by ** adu_nvchr_utf8comp(). ** ** Inputs: ** adf_scb Pointer to an ADF session control block. ** .adf_errcb ADF_ERROR struct. ** .ad_ebuflen The length, in bytes, of the buffer ** pointed to by ad_errmsgp. ** .ad_errmsgp Pointer to a buffer to put formatted ** error message in, if necessary. ** atr_bdt Data type to be compared. ** atr_len Length of comparands. ** d1 Pointer to first value. ** d2 Pointer to second value. ** ** Outputs: ** adf_scb Pointer to an ADF session control block. ** .adf_errcb ADF_ERROR struct. If an ** error occurs the following fields will ** be set. NOTE: if .ad_ebuflen = 0 or ** .ad_errmsgp = NULL, no error message ** will be formatted. ** .ad_errcode ADF error code for the error. ** .ad_errclass Signifies the ADF error class. ** .ad_usererr If .ad_errclass is ADF_USER_ERROR, ** this field is set to the corresponding ** user error which will either map to ** an ADF error code or a user-error code. ** .ad_emsglen The length, in bytes, of the resulting ** formatted error message. ** .adf_errmsgp Pointer to the formatted error message. ** status Status returned from ** adu_mvchr_utf8comp(), if called. ** ** The following DB_STATUS codes may be returned by ** adu_nvchr_utf8comp(): ** E_DB_OK, E_DB_WARN, E_DB_ERROR, E_DB_SEVERE, E_DB_FATAL ** ** If a DB_STATUS code other than E_DB_OK is returned, the caller ** can look in the field adf_scb.adf_errcb.ad_errcode to determine ** the ADF error code. The following is a list of possible ADF error ** codes that can be returned by this routine: ** ** E_AD0000_OK Operation succeeded. ** E_AD2004_BAD_DTID Datatype id unknown to ADF. ** E_AD2005_BAD_DTLEN Internal length is illegal for ** the given datatype. ** (Others not yet defined) ** ** Returns: ** i4 < 0 if 1st < 2nd ** = 0 if 1st = 2nd ** > 0 if 1st > 2nd ** ** Exceptions: ** none ** ** Side Effects: ** none ** ** History: ** 10-may-2007 (dougi) ** Written for UTF8-enabled server. ** 05-nov-2007 (gupsh01) ** pass in default for quel pattern matching to ** adu_nvchr_utf8comp. ** 19-mar-2008 (gupsh01) ** Use an aligned buffer for varchar and text types. ** 20-mar-2008 (gupsh01) ** Start with an aligned buffer to work with. ** 12-jul-2008 (gupsh01) ** Fix the size allocated for aligned buffer. 
*/ static i4 adt_utf8comp( ADF_CB *adf_scb, DB_ATTS *atr, i4 atr_bdt, i2 atr_len, char *d1, /* Ptr to 1st value */ char *d2, /* Ptr to 2nd value */ DB_STATUS *status) /* Status from adc_compare */ { i4 cur_cmp; /* Result of latest attr cmp */ DB_DATA_VALUE dv1; DB_DATA_VALUE dv2; ALIGN_RESTRICT temp_dv1[2048 / sizeof(ALIGN_RESTRICT)]; ALIGN_RESTRICT temp_dv2[2048 / sizeof(ALIGN_RESTRICT)]; char *dv1data = d1; char *dv2data = d2; u_char *tc1, *tc2, *dc1, *dc2; bool getdv1mem = FALSE; bool getdv2mem = FALSE; i4 reqlen; /* If varchar/text then use the aligned buffer */ if ((abs(atr_bdt) == DB_VCH_TYPE) || (abs(atr_bdt) == DB_TXT_TYPE)) { STATUS stat; /* ** Check for alignment */ reqlen = atr_len + DB_CNTSIZE + sizeof(ALIGN_RESTRICT); if(ME_ALIGN_MACRO((PTR)d1, sizeof(ALIGN_RESTRICT)) !=d1) { if (reqlen > sizeof(temp_dv1)) { dv1data = (char *)MEreqmem(0, reqlen, FALSE, &stat); if ((dv1data == NULL) || (stat != OK)) return (adu_error(adf_scb, E_AD2042_MEMALLOC_FAIL, 2, 0, (i4) sizeof(stat), (i4 *)&stat)); getdv1mem = TRUE; } else dv1data = (char *)&temp_dv1[0]; I2ASSIGN_MACRO(((DB_TEXT_STRING *)d1)->db_t_count, ((DB_TEXT_STRING *)dv1data)->db_t_count); tc1 = (u_char *)d1 + DB_CNTSIZE; dc1 = (u_char *)dv1data + DB_CNTSIZE; MEcopy ( tc1, atr_len, dc1); } if(ME_ALIGN_MACRO((PTR)d2, sizeof(ALIGN_RESTRICT)) !=d2) { if (reqlen > sizeof(temp_dv2)) { dv2data = (char *)MEreqmem(0, reqlen , FALSE, &stat); if ((dv2data == NULL) || (stat != OK)) return (adu_error(adf_scb, E_AD2042_MEMALLOC_FAIL, 2, 0, (i4) sizeof(stat), (i4 *)&stat)); getdv2mem = TRUE; } else dv2data = (char *)&temp_dv2[0]; I2ASSIGN_MACRO(((DB_TEXT_STRING *)d2)->db_t_count, ((DB_TEXT_STRING *)dv2data)->db_t_count); tc2 = (u_char *)d2 + DB_CNTSIZE; dc2 = (u_char *)dv2data + DB_CNTSIZE; MEcopy ( tc2, atr_len, dc2); } } /* Set up DB_DATA_VALUEs. */ dv1.db_datatype = dv2.db_datatype = (DB_DT_ID)atr_bdt; dv1.db_prec = dv2.db_prec = atr->precision; dv1.db_length = dv2.db_length = atr_len; dv1.db_collID = dv2.db_collID = atr->collID; dv1.db_data = dv1data; dv2.db_data = dv2data; *status = adu_nvchr_utf8comp(adf_scb, 0, &dv1, &dv2, &cur_cmp); if (getdv1mem && dv1data) MEfree (dv1data); if (getdv2mem && dv2data) MEfree (dv2data); return(cur_cmp); }
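/*
** Illustrative sketch only (assumed caller shape, not in the original
** source): how a tuple-compare routine in this module might dispatch a
** UTF8 char/varchar attribute to adt_utf8comp().  The DB_ATTS member
** names used here (type, length, offset) are assumptions for the
** example; cmp has the usual <0 / 0 / >0 meaning and status carries any
** error raised by adu_nvchr_utf8comp().
**
**	DB_STATUS	status = E_DB_OK;
**	i4		cmp;
**
**	cmp = adt_utf8comp(adf_scb, atr, (i4)atr->type, (i2)atr->length,
**				(char *)row1 + atr->offset,
**				(char *)row2 + atr->offset,
**				&status);
**	if (status != E_DB_OK)
**	    return (status);
*/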
/* Name: adu_readmap - Reads and processes a coercion map table. ** ** Description: ** This routine reads a compiled coercion map table and ** initializes the in memory table structures for the map ** table. ** ** Input: ** chset: character set to read from. ** ** Output: ** E_DB_OK if the maptable is opened and initialized. ** E_DB_ERROR if it fails. ** ** History: ** 23-Jan-2004 (gupsh01) ** Created. ** 18-Feb-2004 (gupsh01) ** Added handling of CM_DEFAULTFILE_LOC, so that ingbuld ** at install time when no characterset info is avaliable ** reads the default mapping file from $II_CONFIG location. ** 22-Oct-2004 (gupsh01) ** Fixed for correctly reading the mapping files. ** 11-May-2009 (kschendel) b122041 ** Compiler warning fixes. ** */ DB_STATUS adu_readmap(char *charset) { CL_ERR_DESC syserr; char *buf = NULL; char *bufptr = NULL; i4 bytes_read = 0; DB_STATUS stat = OK; ADU_MAP_HEADER *header; ADU_MAP_ASSIGNMENT *assignments; char *aptr; ADU_MAP_VALIDITY *validities; char *vptr; i4 buf_rem; i4 val_rem; i4 assn_rem; if (CMopen_col(charset, &syserr, CM_UCHARMAPS_LOC) != OK) { /* If we are opening the default file then check and return ** an ERROR if we are unable to open the file */ if (STbcompare (charset, 0, "default", 0, 1) == 0) { if (CMopen_col(charset, &syserr, CM_DEFAULTFILE_LOC) != OK) return (FAIL); } else return (FAIL); } /* allocate memory for buffer */ buf = MEreqmem(0, COL_BLOCK, TRUE, &stat); if (buf == NULL || stat != OK) { CMclose_col(&syserr, CM_UCHARMAPS_LOC); return (FAIL); } /* First file buffer has header information. */ stat = CMread_col(buf, &syserr); if (stat != OK) { MEfree((char *)buf); CMclose_col(&syserr, CM_UCHARMAPS_LOC); return (FAIL); } bytes_read = COL_BLOCK; header = (ADU_MAP_HEADER *) MEreqmem(0, sizeof(ADU_MAP_HEADER), TRUE, &stat); if (stat != OK) { MEfree((char *)buf); CMclose_col(&syserr, CM_UCHARMAPS_LOC); return FAIL; } MEcopy (buf, sizeof(ADU_MAP_HEADER), header); bufptr = buf + sizeof(ADU_MAP_HEADER); bufptr = ME_ALIGN_MACRO (bufptr, sizeof(PTR)); if (header->validity_size > 0) { validities = (ADU_MAP_VALIDITY *)MEreqmem(0, (u_i4) header->validity_size, FALSE, &stat); if (validities == NULL || stat != OK) { MEfree((char *)buf); CMclose_col(&syserr, CM_UCHARMAPS_LOC); return FAIL; } vptr = (char *) validities; } else { /* ERROR: validity information is required */ CMclose_col(&syserr, CM_UCHARMAPS_LOC); return (FAIL); } if (header->assignment_size > 0) { assignments = (ADU_MAP_ASSIGNMENT *)MEreqmem (0, (u_i4) header->assignment_size , FALSE, &stat); if (assignments == NULL || stat != OK) { MEfree((char *)buf); CMclose_col(&syserr, CM_UCHARMAPS_LOC); return FAIL; } aptr = (char *) assignments; } else { /* ERROR: assignment information is required */ CMclose_col(&syserr, CM_UCHARMAPS_LOC); return (FAIL); } buf_rem = COL_BLOCK - sizeof(ADU_MAP_HEADER); val_rem = header->validity_size; assn_rem = header->assignment_size; for ( ;(bytes_read < (header->validity_size + header->assignment_size + sizeof(ADU_MAP_HEADER))) || (assn_rem > 0); ) { if (val_rem > 0) { if (buf_rem >= val_rem) { /* Validation table was read completely */ MEcopy (bufptr, val_rem, vptr); bufptr += val_rem; bufptr = ME_ALIGN_MACRO (bufptr, sizeof(PTR)); buf_rem -= val_rem; val_rem = 0; if ((assn_rem > 0) && (buf_rem <= assn_rem)) { MEcopy (bufptr, buf_rem, aptr); bufptr = ME_ALIGN_MACRO (bufptr, sizeof(PTR)); bufptr += buf_rem; aptr +=buf_rem; assn_rem -= buf_rem; buf_rem = 0; } else { MEcopy (bufptr, assn_rem, aptr); bufptr += assn_rem; bufptr = ME_ALIGN_MACRO (bufptr, 
sizeof(PTR));
		    aptr += assn_rem;
		    buf_rem -= assn_rem;
		    assn_rem = 0;
		}
	    }
	    else
	    {
		/* read more validities before proceeding */
		MEcopy (bufptr, buf_rem, vptr);
		bufptr = ME_ALIGN_MACRO (bufptr, sizeof(PTR));
		vptr += buf_rem;
		val_rem -= buf_rem;
		buf_rem = 0;
	    }
	}
	else if (assn_rem > 0)
	{
	    if (buf_rem <= assn_rem)
	    {
		MEcopy (bufptr, buf_rem, aptr);
		aptr += buf_rem;
		assn_rem -= buf_rem;
		buf_rem = 0;
	    }
	    else
	    {
		MEcopy (bufptr, assn_rem, aptr);
		aptr += assn_rem;
		buf_rem -= assn_rem;
		assn_rem = 0;
	    }
	}

	/* read next buffer if more data is needed to be read */
	if (assn_rem > 0)
	{
	    stat = CMread_col(buf, &syserr);
	    if (stat != OK)
	    {
		/* FIXME: either we have got an error or we have found
		** the end of the file; in either case free the buf and
		** exit.  Should report an ERROR condition.
		*/
		break;
	    }
	    bufptr = buf;	/* initialize bufptr */
	    bytes_read += COL_BLOCK;
	    buf_rem = COL_BLOCK;
	}
    }

    /* check if we have read the whole file */
    if (bytes_read < (header->validity_size + header->assignment_size +
			sizeof(ADU_MAP_HEADER)))
    {
	/* we had to exit for some unknown reason */
	MEfree((char *)buf);
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return FAIL;
    }

    /* done with the file so close it now */
    if (CMclose_col(&syserr, CM_UCHARMAPS_LOC) != OK)
    {
	MEfree ((char *)buf);	/* free the buffer */
	return FAIL;
    }

    /* set up the pointers in the ADF memory
    ** from the information obtained from mapfile.
    */
    stat = adu_initmap (header, validities, assignments);

    /* free the local copies whether or not adu_initmap() succeeded */
    if (header)
	MEfree ((char *)header);
    if (validities)
	MEfree ((char *)validities);
    if (assignments)
	MEfree ((char *)assignments);
    if (buf)
	MEfree ((char *)buf);

    if (stat)
	return (E_DB_ERROR);

    return (E_DB_OK);
}
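/*
** Illustrative sketch only (not in the original source): typical startup
** use of adu_readmap(), loading the coercion map for the installation
** character set and falling back to the "default" map read from the
** II_CONFIG location (per the history note above).  The character set
** name used here is just an example value.
**
**	if (adu_readmap("utf8") != E_DB_OK &&
**	    adu_readmap("default") != E_DB_OK)
**	{
**	    -- neither map could be read; report that UTF8 coercion is
**	    -- unavailable
**	}
*/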
/*{ ** Name: LGK_initialize() - initialize the lg/lk shared mem segment. ** ** Description: ** This routine is called by the LGinitialize or LKinitialize routine. IT ** assumes that a previous caller has allocated the shared memory segment. ** ** If it discovers that the shared memory segment has not yet been ** initialized, it calls the LG and LK initialize-memory routines to do so. ** ** Inputs: ** flag - bit mask of: ** LOCK_LGK_MEMORY to lock the shared data segment ** LGK_IS_CSP if process is CSP process this node. ** ** Outputs: ** sys_err - place for system-specific error information. ** ** Returns: ** OK - success ** !OK - failure (CS*() routine failure, segment not mapped, ...) ** ** History: ** Summer, 1992 (bryanp) ** Working on the new portable logging and locking system. ** 19-oct-1992 (bryanp) ** Check memory version number when attaching. ** 22-oct-1992 (bryanp) ** Change LGLKDATA.MEM to lglkdata.mem. ** 23-Oct-1992 (daveb) ** name the semaphore too. ** 13-feb-1993 (keving) ** Remove support for II_LGK_MEMORY_SIZE. If II_LG_MEMSIZE ** is not set then calculate memory size from PM values. ** 24-may-1993 (bryanp) ** If the shared memory is the wrong version, don't install the ** at_exit handlers (the rundown routines won't be able to interpret ** the memory properly). ** 26-jul-1993 (jnash) ** Add 'flag' param lock the LGK data segment. ** 20-sep-1993 (bryanp) ** In addition to calling PCatexit, call (on VMS) sys$dclexh, since ** there are some situations (image death and image rundown without ** process rundown) which are caught neither by PCatexit (since ** PCexit isn't run) nor by check-dead threads (since process ** rundown never happened). This fixes a hole where an access- ** violating ckpdb or auditdb command never got cleaned up. ** 31-jan-1994 (bryanp) ** Back out a few "features" which are proving countereffective: ** 1) Don't bother checking mem_creator_pid to see if the previous ** creator of the shared memory has died. This was an attempt to ** gracefully re-use sticky shared memory following a system crash, ** but it is suspected as being the culprit in a series of system ** failures by re-initializing the shared memory at inopportune ** times. ** 2) Don't complain if the shared memory already exists but is of a ** different size than you expected. Just go ahead and try to use ** it anyway. ** 21-feb-1994 (bryanp) ** Reverse item (1) of the above 31-jan-1994 change and re-enable the ** graceful re-use of shared memory. People weren't happy with ** having to run ipcclean and csinstall all the time. ** 23-may-1994 (bryanp) ** On VMS, disable ^Y for LG/LK-aware processes. We don't want to allow ** ^Y because you might interrupt the process right in the middle ** of an LG or LK operation, while holding the shared memory ** semaphore, and this would then wedge the whole installation. ** ** 17-May-1994 (daveb) 59127 ** Attach lgk_mem semaphore if we're attaching to the segment. ** 30-jan-1995 (lawst01) bug 61984 ** Use memory needed calculation from the 'lgk_calculate_size' ** function to determine the size of the shared memory pool for ** locking and locking. If the II_LG_MEMSIZE variable is specified ** with a value larger than needed use the supplied value. If ** lgk_calculate_size is unable to calculate a size then use the ** magic number of 400000. In addition issue a warning message ** and continue executing in the event the number of pages ** allocated is less than the number requested. ** 24-apr-1997 (nanpr01) ** Reinstate Bryanp's change. 
In the process of fixing bug 61984
**	    by Steve Lawrence and subsequent undo of Steve's fix by Nick
**	    Ireland on 25-jun-96 (nick) caused the 'if 0' code to be removed.
**	    Part of Steve's change was not reinstated, such as not returning
**	    the status and exit and continue.
**	    1. Don't complain if the shared memory already exists but is of a
**	    different size than you expected. Just go ahead and try to use
**	    it.
**	18-aug-1998 (hweho01)
**	    Reclaim the kernel resource if LG/LK shared memory segment is
**	    reinitialized. If the shared segment is re-used (the previous creator
**	    of the shared segment has died), the cross-process semaphores get
**	    initialized more than once at the same locations. That causes
**	    kernel resource leaks on DG/UX (OS release 4.11MU04). To fix the
**	    problem, CS_cp_sem_cleanup() is called to destroy all the
**	    semaphores before the LG/LK shared segment gets recreated.
**	    CS_cp_sem_cleanup() is made dependent on xCL_NEED_SEM_CLEANUP and
**	    OS_THREADS_USED; it returns immediately for most platforms.
**	27-Mar-2000 (jenjo02)
**	    Added test for crossed thread types, refuse connection
**	    to LGK memory with E_DMA811_LGK_MT_MISMATCH.
**	18-apr-2001 (devjo01)
**	    s103715 (Portable cluster support)
**	    - Add CX mem requirement calculations.
**	    - Add LGK_IS_CSP flag to indicate that LGK memory is being
**	      initialized for a CSP process.
**	    - Add basic CX initialization.
**	19-sep-2002 (devjo01)
**	    If running NUMA clustered allocate memory out of local RAD.
**	30-Apr-2003 (jenjo02)
**	    Rearchitected to silence long-tolerated race conditions.
**	    BUG 110121.
**	27-feb-2004 (devjo01)
**	    Rework allocation of CX shared memory to be compatible
**	    with race condition fix introduced for bug 110121.
**	29-Dec-2008 (jonj)
**	    If lgk_calculate_size() returns FAIL, the total memory
**	    needed exceeds MAX_SIZE_TYPE and we can't continue, but
**	    tell what we can about the needs of the various bits of
**	    memory before quitting.
**	06-Aug-2009 (wanfr01)
**	    Bug 122418 - Return E_DMA812 if LOCK_LGK_MUST_ATTACH is
**	    passed in and memory segment does not exist
**	20-Nov-2009 (maspa05) bug 122642
**	    In order to synchronize creation of UUIDs across servers added
**	    a semaphore and a 'last time' variable into LGK memory.
**	14-Dec-2009 (maspa05) bug 122642
**	    #ifdef out the above change for Windows. The rest of the change
**	    does not apply to Windows so the variables aren't defined.
*/
STATUS
LGK_initialize(
i4		flag,
CL_ERR_DESC	*sys_err,
char		*lgk_info)
{
    PTR		ptr;
    SIZE_TYPE	memleft;
    SIZE_TYPE	size;
    STATUS	ret_val;
    STATUS	mem_exists;
    char	mem_name[15];
    SIZE_TYPE	allocated_pages;
    i4		me_flags;
    i4		me_locked_flag;
    SIZE_TYPE	memory_needed;
    char	*nm_string;
    SIZE_TYPE	pages;
    LGK_MEM	*lgk_mem;
    i4		err_code;
    SIZE_TYPE	min_memory;
    i4		retries;
    i4		i;
    i4		attached;
    PID		*my_pid_slot;
    i4		clustered;
    u_i4	nodes;
    SIZE_TYPE	cxmemreq;
    PTR		pcxmem;
    LGLK_INFO	lgkcount;
    char	instid[4];

    CL_CLEAR_ERR(sys_err);

    /*
    ** if LGK_base is set then this routine has already been called. It is
    ** set up so that both LGinitialize and LKinitialize call it, but only
    ** the first call does anything.
*/ if (LGK_base.lgk_mem_ptr) return(OK); PCpid( &LGK_my_pid ); memory_needed = 0; NMgtAt("II_LG_MEMSIZE", &nm_string); if (nm_string && *nm_string) #if defined(LP64) if (CVal8(nm_string, (long*)&memory_needed)) #else if (CVal(nm_string, (i4 *)&memory_needed)) #endif /* LP64 */ memory_needed = 0; /* Always calculate memory needed from PM resource settings */ /* and compare with supplied value, if supplied value is less */ /* than minimum then use minimum */ min_memory = 0; if ( OK == lgk_get_counts(&lgkcount, FALSE)) { if ( lgk_calculate_size(FALSE, &lgkcount, &min_memory) ) { /* ** Memory exceeds MAX_SIZE_TYPE, can't continue. ** ** Do calculation again, this time with "wordy" ** so user can see allocation bits, then quit. */ lgk_calculate_size(TRUE, &lgkcount, &min_memory); return (E_DMA802_LGKINIT_ERROR); } } if (min_memory) memory_needed = (memory_needed < min_memory) ? min_memory : memory_needed; else memory_needed = (memory_needed < 400000 ) ? 400000 : memory_needed; clustered = (i4)CXcluster_enabled(); cxmemreq = 0; if ( clustered ) { if ( OK != CXcluster_nodes( &nodes, NULL ) ) nodes = 0; cxmemreq = CXshm_required( 0, nodes, lgkcount.lgk_max_xacts, lgkcount.lgk_max_locks, lgkcount.lgk_max_resources ); if ( MAX_SIZE_TYPE - memory_needed < cxmemreq ) { /* ** Memory exceeds MAX_SIZE_TYPE, can't continue. ** ** Do calculation again, this time with "wordy" ** so user can see allocation bits, then quit. */ SIprintf("Total LG/LK/CX allocation exceeds max of %lu bytes by %lu\n" "Adjust logging/locking configuration values and try again\n", MAX_SIZE_TYPE, cxmemreq - (MAX_SIZE_TYPE - memory_needed)); lgk_calculate_size(TRUE, &lgkcount, &min_memory); return (E_DMA802_LGKINIT_ERROR); } memory_needed += cxmemreq; } if ( memory_needed < MAX_SIZE_TYPE - ME_MPAGESIZE ) pages = (memory_needed + ME_MPAGESIZE - 1) / ME_MPAGESIZE; else pages = memory_needed / ME_MPAGESIZE; /* ** Lock the LGK segment if requested to do so */ if (flag & LOCK_LGK_MEMORY) me_locked_flag = ME_LOCKED_MASK; else me_locked_flag = 0; me_flags = (me_locked_flag | ME_MSHARED_MASK | ME_IO_MASK | ME_CREATE_MASK | ME_NOTPERM_MASK | ME_MZERO_MASK); if (CXnuma_user_rad()) me_flags |= ME_LOCAL_RAD; STcopy("lglkdata.mem", mem_name); /* ** In general, we just want to attach to the shared memory and detect if ** we are the first process to do so. However, there are ugly race ** conditions to consider, as well as complications because the shared ** memory may be left around following a system crash. ** ** First we attempt to create the shared memory. Usually it already exists, ** so we check for and handle the case of "already exists". */ /* ** (jenjo02) ** ** Restructured to better handle all those ugly race conditions ** which are easily reproduced by running two scripts, one that ** continuously executes "lockstat" while the other is starting ** and stopping Ingres. ** ** For example, ** ** lockstat A acquires and init's the memory ** RCP attaches to "A" memory ** lockstat A terminates normally ** lockstat B attaches to "A" memory, sees that ** "A"s pid is no longer alive, and ** reinitializes the memory, much to ** the RCP's chagrin. ** or (more commonly) ** ** lockstat A acquires and begins to init the mem ** RCP attaches to "A" memory which is ** still being zero-filled by lockstat, ** checks the version number (zero), ** and fails with a E_DMA434 mismatch. 
** ** The fix utilizes the mem_ext_sem to synchronize multiple ** processes; if the semaphore hasn't been initialized or ** if mem_version_no is zero, we'll wait one second and retry, ** up to 60 seconds before giving up. This gives the creating ** process time to complete initialization of the memory. ** ** Up to LGK_MAX_PIDS are allowed to attach to the shared ** memory. When a process attaches it sets its PID in the ** first vacant slot in lgk_mem->mem_pid[]; if there are ** no vacant slots, the attach is refused. When the process ** terminates normally by calling LGK_rundown(), it zeroes ** its PID slot. ** ** When attaching to an existing segment, we check if ** there are any live processes still using the memory; ** if so, we can't destroy it (no matter who created it). ** If there are no live processes attached to the memory, ** we destroy and reallocate it (based on current config.dat ** settings). */ for ( retries = 0; ;retries++ ) { LGK_base.lgk_mem_ptr = (PTR)NULL; /* Give up if unable to get memory in one minute */ #if defined(conf_CLUSTER_BUILD) if (retries > 1) #else if ( retries ) #endif { if ( retries < 60 ) PCsleep(1000); else { /* Another process has it blocked way too long */ uleFormat(NULL, E_DMA800_LGKINIT_GETMEM, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0); /* Unable to attach allocated shared memory segment. */ return (E_DMA802_LGKINIT_ERROR); } } ret_val = MEget_pages(me_flags, pages, mem_name, (PTR*)&lgk_mem, &allocated_pages, sys_err); if ( mem_exists = ret_val ) { if (ret_val == ME_ALREADY_EXISTS) { ret_val = MEget_pages((me_locked_flag | ME_MSHARED_MASK | ME_IO_MASK), pages, mem_name, (PTR*)&lgk_mem, &allocated_pages, sys_err); #if defined(conf_CLUSTER_BUILD) if (ret_val && !retries) continue; /* try one more time */ #endif } if (ret_val) { uleFormat(NULL, ret_val, sys_err, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0); uleFormat(NULL, E_DMA800_LGKINIT_GETMEM, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0); /* Unable to attach allocated shared memory segment. */ return (E_DMA802_LGKINIT_ERROR); } } else if (flag & LOCK_LGK_MUST_ATTACH) { /* Do not use the shared segment you just allocated */ MEfree_pages((PTR)lgk_mem, allocated_pages, sys_err); return (E_DMA812_LGK_NO_SEGMENT); } size = allocated_pages * ME_MPAGESIZE; /* Expose this process to the memory */ LGK_base.lgk_mem_ptr = (PTR)lgk_mem; if ( mem_exists ) { /* ** Memory exists. ** ** Try to acquire the semaphore. If it's ** uninitialzed, retry from the top. ** ** If the version is zero, then another ** process is initializing the memory; ** keep retrying until the version is ** filled in. ** */ if ( ret_val = CSp_semaphore(1, &lgk_mem->mem_ext_sem) ) { if ( ret_val != E_CS000A_NO_SEMAPHORE ) { uleFormat(NULL, ret_val, sys_err, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0); ret_val = E_DMA802_LGKINIT_ERROR; break; } continue; } /* Retry if still being init'd by another process */ if ( !lgk_mem->mem_version_no ) { CSv_semaphore(&lgk_mem->mem_ext_sem); continue; } /* ** Check pids which appear to be attached to ** the memory: ** ** If any process is still alive, then we ** assume the memory is consistent and use it. ** ** If a process is now dead, it terminated ** without going through LGK_rundown ** to zero its PID slot, zero it now. ** ** If there are no live PIDs attached to ** the memory, we destroy and recreate it. 
*/ my_pid_slot = (PID*)NULL; attached = 0; for ( i = 0; i < LGK_MAX_PIDS; i++ ) { if ( lgk_mem->mem_pid[i] && PCis_alive(lgk_mem->mem_pid[i]) ) { attached++; } else { /* Vacate the slot */ if (lgk_mem->mem_pid[i]) { uleFormat(NULL, E_DMA499_DEAD_PROCESS_INFO, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 2, 0, lgk_mem->mem_pid[i], 0, lgk_mem->mem_info[i].info_txt); } lgk_mem->mem_pid[i] = (PID)0; lgk_mem->mem_info[i].info_txt[0] = EOS; /* Use first vacant slot for this process */ if ( !my_pid_slot ) { my_pid_slot = &lgk_mem->mem_pid[i]; LGK_base.lgk_pid_slot = i; } } /* Quit when both questions answered */ if ( attached && my_pid_slot ) break; } /* If no living pids attached, destroy/reallocate */ if ( !attached ) { CSv_semaphore(&lgk_mem->mem_ext_sem); if ( LGK_destroy(allocated_pages, sys_err) ) { ret_val = E_DMA802_LGKINIT_ERROR; break; } continue; } /* All attached pids alive? */ if ( !my_pid_slot ) { /* ... then there's no room for this process */ uleFormat(NULL, E_DMA80A_LGK_ATTACH_LIMIT, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 1, 0, attached); ret_val = E_DMA802_LGKINIT_ERROR; } else if (lgk_mem->mem_version_no != LGK_MEM_VERSION_CURRENT) { uleFormat(NULL, E_DMA434_LGK_VERSION_MISMATCH, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 2, 0, lgk_mem->mem_version_no, 0, LGK_MEM_VERSION_CURRENT); ret_val = E_DMA435_WRONG_LGKMEM_VERSION; } /* ** Don't allow mixed connections of MT/non-MT processes. ** Among other things, the mutexing mechanisms are ** incompatible! */ else if ( (CS_is_mt() && (lgk_mem->mem_status & LGK_IS_MT) == 0) || (!CS_is_mt() && lgk_mem->mem_status & LGK_IS_MT) ) { uleFormat(NULL, E_DMA811_LGK_MT_MISMATCH, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 2, 0, (lgk_mem->mem_status & LGK_IS_MT) ? "OS" : "INTERNAL", 0, (CS_is_mt()) ? "OS" : "INTERNAL"); ret_val = E_DMA802_LGKINIT_ERROR; } else { /* ** CX memory (if any) will lie immediately past LGK header. */ pcxmem = (PTR)(lgk_mem + 1); pcxmem = (PTR)ME_ALIGN_MACRO(pcxmem, sizeof(ALIGN_RESTRICT)); LGK_base.lgk_lkd_ptr = (char *)LGK_base.lgk_mem_ptr + lgk_mem->mem_lkd; LGK_base.lgk_lgd_ptr = (char *)LGK_base.lgk_mem_ptr + lgk_mem->mem_lgd; /* Stuff our pid in first vacant slot */ *my_pid_slot = LGK_my_pid; STlcopy(lgk_info, lgk_mem->mem_info[i].info_txt, LGK_INFO_SIZE-1); } #if defined(VMS) || defined(UNIX) /* set up pointers to reference the uuid mutex and last time * variable */ if (!ID_uuid_sem_ptr) ID_uuid_sem_ptr=&lgk_mem->id_uuid_sem; if (!ID_uuid_last_time_ptr) ID_uuid_last_time_ptr=&lgk_mem->uuid_last_time; if (!ID_uuid_last_cnt_ptr) ID_uuid_last_cnt_ptr=&lgk_mem->uuid_last_cnt; #endif CSv_semaphore(&lgk_mem->mem_ext_sem); } else { /* Memory did not exist */ /* Zero the version to keep other processes out */ lgk_mem->mem_version_no = 0; #if defined(VMS) || defined(UNIX) /* set up the uuid mutex and last time pointers to * reference the objects in shared memory */ { STATUS id_stat; ID_uuid_sem_ptr=&lgk_mem->id_uuid_sem; ID_uuid_last_time_ptr=&lgk_mem->uuid_last_time; ID_uuid_last_cnt_ptr=&lgk_mem->uuid_last_cnt; *ID_uuid_last_cnt_ptr=0; ID_UUID_SEM_INIT(ID_uuid_sem_ptr,CS_SEM_MULTI,"uuid sem", &id_stat); } #endif /* ... 
then initialize the mutex */ CSw_semaphore(&lgk_mem->mem_ext_sem, CS_SEM_MULTI, "LGK mem ext sem" ); /* Record if memory created for MT or not */ if ( CS_is_mt() ) lgk_mem->mem_status = LGK_IS_MT; /* ** memory is as follows: ** ** -----------------------------------------------------------| ** | LGK_MEM struct (keep track of this mem) | ** | | ** -----------------------------------------------------------| ** | If a clustered installation memory reserved for CX | ** | | ** ------------------------------------------------------------ ** | LKD - database of info for lk system | ** | | ** ------------------------------------------------------------ ** | LGD - database of info for lg system | ** | | ** ------------------------------------------------------------ ** | memory manipulated by LGKm_* routines for structures used | ** | by both the lk and lg systems. | ** | | ** ------------------------------------------------------------ */ /* put the LGK_MEM struct at head of segment leaving ptr pointing ** at next aligned piece of memory */ /* ** CX memory (if any) will lie immediately past LGK header. */ pcxmem = (PTR)(lgk_mem + 1); pcxmem = (PTR)ME_ALIGN_MACRO(pcxmem, sizeof(ALIGN_RESTRICT)); LGK_base.lgk_lkd_ptr = pcxmem + cxmemreq; LGK_base.lgk_lkd_ptr = (PTR) ME_ALIGN_MACRO(LGK_base.lgk_lkd_ptr, sizeof(ALIGN_RESTRICT)); lgk_mem->mem_lkd = (i4)((char *)LGK_base.lgk_lkd_ptr - (char *)LGK_base.lgk_mem_ptr); LGK_base.lgk_lgd_ptr = (PTR) ((char *) LGK_base.lgk_lkd_ptr + sizeof(LKD)); LGK_base.lgk_lgd_ptr = (PTR) ME_ALIGN_MACRO(LGK_base.lgk_lgd_ptr, sizeof(ALIGN_RESTRICT)); lgk_mem->mem_lgd = (i4)((char *)LGK_base.lgk_lgd_ptr - (char *)LGK_base.lgk_mem_ptr); /* now initialize the rest of memory for allocation */ /* how much memory is left? */ ptr = ((char *)LGK_base.lgk_lgd_ptr + sizeof(LGD)); memleft = size - (((char *) ptr) - ((char *) LGK_base.lgk_mem_ptr)); if ( (ret_val = lgkm_initialize_mem(memleft, ptr)) == OK && (ret_val = LG_meminit(sys_err)) == OK && (ret_val = LK_meminit(sys_err)) == OK ) { /* Clear array of attached pids and pid info */ for ( i = 0; i < LGK_MAX_PIDS; i++ ) { lgk_mem->mem_pid[i] = (PID)0; lgk_mem->mem_info[i].info_txt[0] = EOS; } /* Set the creator pid */ LGK_base.lgk_pid_slot = 0; lgk_mem->mem_creator_pid = LGK_my_pid; /* Set the version, releasing other processes */ lgk_mem->mem_version_no = LGK_MEM_VERSION_CURRENT; } else { uleFormat(NULL, ret_val, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0); ret_val = E_DMA802_LGKINIT_ERROR; /* Destroy the shared memory */ LGK_destroy(allocated_pages, sys_err); } } if ( ret_val == OK ) { PCatexit(LGK_rundown); if ( clustered ) { /* ** Perform preliminary cluster connection and CX memory init. */ /* Get installation code */ NMgtAt("II_INSTALLATION", &nm_string); if ( nm_string ) { instid[0] = *(nm_string); instid[1] = *(nm_string+1); } else { instid[0] = 'A'; instid[1] = 'A'; } instid[2] = '\0'; ret_val = CXinitialize( instid, pcxmem, flag & LGK_IS_CSP ); if ( ret_val ) { /* Report error returned from CX */ uleFormat(NULL, ret_val, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0 ); break; } } #ifdef VMS { static $EXHDEF exit_block; i4 ctrl_y_mask = 0x02000000; /* ** On VMS, programs like the dmfjsp and logstat run as images in ** the shell process. That is, the system doesn't start and stop ** a process for each invocation of the program, it just starts ** and stops an image in the same process. 
This means that if ** the program should die, the image may be rundown but the process ** will remain, which means that the check-dead threads of other ** processes in the installation will not feel that they need to ** rundown this process, since it's still alive. ** ** By declaring an exit handler, which will get a chance to run ** even if PCexit isn't called, we improve our chances of getting ** to perform rundown processing if we should die unexpectedly. ** ** Furthermore, we ask DCL to disable its ^Y processing, which ** lessens the chance that the user will interrupt us while we ** are holding the semaphore. */ exit_block.exh$g_func = LGK_rundown; exit_block.exh$l_argcount = 1; exit_block.exh$gl_value = &exit_block.exh$l_status; if (sys$dclexh(&exit_block) != SS$_NORMAL) ret_val = FAIL; lib$disable_ctrl(&ctrl_y_mask, 0); } #endif } break; } if ( ret_val ) LGK_base.lgk_mem_ptr = NULL; return(ret_val); }
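/*
** Illustrative sketch only (assumed caller shape, not in the original
** source): an LGinitialize()/LKinitialize() style call into
** LGK_initialize().  The info string only identifies the attaching
** process in its mem_info[] slot (it shows up in E_DMA499 diagnostics
** when a dead process's slot is reclaimed).
**
**	CL_ERR_DESC	sys_err;
**	STATUS		status;
**
**	status = LGK_initialize(0, &sys_err, "example process");
**	if (status != OK)
**	    return (status);
*/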
DB_STATUS adu_like_comp_uni( ADF_CB *adf_scb, DB_DATA_VALUE *pat_dv, DB_DATA_VALUE *ret_dv, DB_DATA_VALUE *esc_dv, u_i4 pat_flags) { DB_STATUS db_stat = E_DB_OK; AD_PAT_SEA_CTX _sea_ctx; AD_PAT_SEA_CTX *sea_ctx = &_sea_ctx; AD_PATDATA *patdata = (AD_PATDATA*)ret_dv->db_data; AD_PATDATA _patdata; if (ME_ALIGN_MACRO(patdata, sizeof(i2)) != (PTR)patdata) { if ((i2)sizeof(_patdata) >= ret_dv->db_length) patdata = &_patdata; else { patdata = (AD_PATDATA*)MEreqmem(0, ret_dv->db_length, FALSE, &db_stat); if (!patdata || db_stat) return db_stat; } } patdata->patdata.length = ret_dv->db_length/sizeof(patdata->vec[0]); sea_ctx->patdata = patdata; /* ** To allow for default processing we have the user specified ** case flags to deal with. If AD_PAT_WITH_CASE or AD_PAT_WO_CASE ** is set then we use that setting to override any collation ** case request. If neither are set we obey the collation. */ if (pat_flags & AD_PAT_WITH_CASE) { pat_flags &= ~AD_PAT_WO_CASE; } else if (!(pat_flags & AD_PAT_WO_CASE) && pat_dv->db_collID == DB_UNICODE_CASEINSENSITIVE_COLL) { pat_flags |= AD_PAT_WO_CASE; } /* ** From this point on, the AD_PAT_WITH_CASE flag is ignored as ** its state has been folded into the AD_PAT_WO_CASE flag. */ if (ADU_pat_legacy == -1) pat_flags &= ~AD_PAT_WO_CASE; else if (ADU_pat_legacy == -2) pat_flags |= AD_PAT_WO_CASE; pat_flags |= (AD_PAT2_UNICODE<<16); /* Compile the input pattern into output parameter */ db_stat = adu_patcomp_like_uni(adf_scb, pat_dv, esc_dv, pat_flags, sea_ctx); /* Map the forced fail so the executor sees it */ if (sea_ctx->force_fail) patdata->patdata.flags2 |= AD_PAT2_FORCE_FAIL; adu_patcomp_free(sea_ctx); if (patdata != (AD_PATDATA*)ret_dv->db_data) { MEcopy((PTR)patdata, patdata->patdata.length*sizeof(i2), ret_dv->db_data); if (patdata != &_patdata) MEfree((PTR)patdata); } return db_stat; }
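/*
** Illustrative sketch only (not in the original source): compiling a
** Unicode LIKE pattern into a DB_PAT_TYPE result with
** adu_like_comp_uni().  ret_dv->db_data receives the AD_PATDATA vector
** that adu_like_all() later executes; pat_flags carries the AD_PAT_*
** bits, for example:
**
**	db_stat = adu_like_comp_uni(adf_scb, pat_dv, ret_dv, esc_dv,
**					AD_PAT_FORM_LIKE | AD_PAT_WO_CASE);
*/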
DB_STATUS adu_like_all( ADF_CB *adf_scb, DB_DATA_VALUE *src_dv, DB_DATA_VALUE *pat_dv, DB_DATA_VALUE *esc_dv, u_i4 pat_flags, i4 *rcmp) { DB_STATUS db_stat = E_DB_OK; AD_PAT_SEA_CTX _sea_ctx; AD_PAT_SEA_CTX *sea_ctx = &_sea_ctx; AD_PATDATA _patdata; AD_PATDATA *patdata; AD_PAT_DA_CTX da_ctx; DB_DATA_VALUE dv_tmp1; DB_DATA_VALUE dv_tmp2; DB_DATA_VALUE *s1 = src_dv, *p1 = pat_dv; i4 rcmp1 = 0; i4 long_seen = 0; DB_STATUS db_stat1 = E_DB_OK; char tmp[2000]; i2 saved_uninorm_flag = adf_scb->adf_uninorm_flag; static struct { ADU_PATCOMP_FUNC *compile; ADU_PATEXEC_FUNC *execute; } rtns[] = { {adu_patcomp_like, adu_pat_execute}, {adu_patcomp_like, adu_pat_execute_col}, {adu_patcomp_like_uni, adu_pat_execute_uni}, }; enum rtns_idx { LIKE, LIKE_COLLATION, LIKE_UNICODE } form = LIKE; if (pat_dv->db_datatype == DB_PAT_TYPE) { patdata = (AD_PATDATA*)pat_dv->db_data; if (ME_ALIGN_MACRO(patdata, sizeof(i2)) != (PTR)patdata) { if ((i2)sizeof(_patdata) >= pat_dv->db_length) patdata = &_patdata; else { patdata = (AD_PATDATA*)MEreqmem(0, pat_dv->db_length, FALSE, &db_stat); if (!patdata || db_stat) return db_stat; } MEcopy(pat_dv->db_data, pat_dv->db_length, patdata); } pat_flags = patdata->patdata.flags| (patdata->patdata.flags2<<16); /* Pre-compiled pattern */ { i4 i; /* ->patdata is passed in preset with a valid [PATDATA_LENGTH] - ** save it now as we will clear the lot */ MEfill(sizeof(*sea_ctx), 0, (PTR)sea_ctx); sea_ctx->patdata = patdata; /*sea_ctx->buffer = NULL;*/ /*sea_ctx->bufend = NULL;*/ /*sea_ctx->buftrueend = NULL;*/ /*sea_ctx->seg_offset = 0;*/ /*sea_ctx->buflen = 0;*/ /*sea_ctx->at_bof = FALSE;*/ /*sea_ctx->at_eof = FALSE;*/ /*sea_ctx->trace = FALSE;*/ /*sea_ctx->force_fail = FALSE;*/ /*sea_ctx->cmplx_lim_exc = FALSE;*/ /*sea_ctx->stalled = NULL;*/ /*sea_ctx->pending = NULL;*/ /*sea_ctx->free = NULL;*/ /*sea_ctx->setbuf = NULL;*/ #if PAT_DBG_TRACE>0 /*sea_ctx->nid = 0;*/ /*sea_ctx->infile = NULL;*/ /*sea_ctx->outfile = NULL;*/ #endif /*sea_ctx->nctxs_extra = 0;*/ sea_ctx->nctxs = DEF_THD; for (i = DEF_THD-1; i >= 0; i--) { sea_ctx->ctxs[i].next = sea_ctx->free; sea_ctx->free = &sea_ctx->ctxs[i]; } } db_stat = adu_patcomp_set_pats(sea_ctx); /* If prior patcomp flagged force_fail - obey it */ if (patdata->patdata.flags2 & AD_PAT2_FORCE_FAIL) sea_ctx->force_fail = TRUE; } else { patdata = &_patdata; /* Tell compiler size we are prepared for */ patdata->patdata.length = sizeof(_patdata)/sizeof(_patdata.vec[0]); sea_ctx->patdata = patdata; /* ** To allow for default processing we have the user specified ** case flags to deal with. If AD_PAT_WITH_CASE or AD_PAT_WO_CASE ** is set then we use that setting to override any collation ** case request. If neither are set we obey the collation. */ if (pat_flags & AD_PAT_WITH_CASE) { pat_flags &= ~AD_PAT_WO_CASE; } else if (!(pat_flags & AD_PAT_WO_CASE) && (src_dv->db_collID == DB_UNICODE_CASEINSENSITIVE_COLL || pat_dv->db_collID == DB_UNICODE_CASEINSENSITIVE_COLL)) { pat_flags |= AD_PAT_WO_CASE; } /* ** From this point on, the AD_PAT_WITH_CASE flag is ignored as ** its state has been folded into the AD_PAT_WO_CASE flag. 
*/ } if (ADU_pat_legacy == -1) pat_flags &= ~AD_PAT_WO_CASE; else if (ADU_pat_legacy == -2) pat_flags |= AD_PAT_WO_CASE; dv_tmp1.db_data = NULL; dv_tmp2.db_data = NULL; switch (abs(src_dv->db_datatype)) { case DB_LNVCHR_TYPE: case DB_LNLOC_TYPE: long_seen = 1; case DB_NVCHR_TYPE: case DB_NCHR_TYPE: case DB_UTF8_TYPE: case DB_NQTXT_TYPE: form = LIKE_UNICODE; /* All handled directly by DA */ break; case DB_LVCH_TYPE: case DB_LCLOC_TYPE: if (adf_scb->adf_utf8_flag & AD_UTF8_ENABLED) form = LIKE_UNICODE; case DB_LBYTE_TYPE: case DB_LBLOC_TYPE: long_seen = 1; case DB_BYTE_TYPE: case DB_VBYTE_TYPE: /* All handled directly by DA */ break; case DB_CHR_TYPE: pat_flags |= AD_PAT_BIGNORE; /* Ignore blanks */ /*FALLTHROUGH*/ case DB_CHA_TYPE: case DB_VCH_TYPE: case DB_TXT_TYPE: case DB_LTXT_TYPE: if (adf_scb->adf_utf8_flag & AD_UTF8_ENABLED) { form = LIKE_UNICODE; dv_tmp1.db_datatype = DB_NVCHR_TYPE; dv_tmp1.db_length = src_dv->db_length * 4 + DB_CNTSIZE; dv_tmp1.db_prec = 0; dv_tmp1.db_collID = -1; if (dv_tmp1.db_length < (i2)sizeof(tmp)) dv_tmp1.db_data = tmp; else { dv_tmp1.db_data = (char *) MEreqmem (0, dv_tmp1.db_length, TRUE, &db_stat); if (db_stat) return db_stat; } adf_scb->adf_uninorm_flag = AD_UNINORM_NFC; db_stat = adu_nvchr_fromutf8(adf_scb, src_dv, &dv_tmp1); adf_scb->adf_uninorm_flag = saved_uninorm_flag; src_dv = &dv_tmp1; } break; default: return(adu_error(adf_scb, E_AD9999_INTERNAL_ERROR, 0)); } /* ** See how the pattern looks and coerce appropriatly */ switch (abs(pat_dv->db_datatype)) { case DB_LNVCHR_TYPE: case DB_LNLOC_TYPE: long_seen = 1; case DB_NVCHR_TYPE: case DB_NCHR_TYPE: case DB_UTF8_TYPE: case DB_NQTXT_TYPE: if (form != LIKE_UNICODE) { DB_DATA_VALUE dv_tmp3; form = LIKE_UNICODE; dv_tmp1.db_datatype = DB_NVCHR_TYPE; dv_tmp1.db_length = src_dv->db_length * 3 + DB_CNTSIZE; dv_tmp1.db_prec = 0; dv_tmp1.db_collID = -1; dv_tmp3.db_datatype = DB_NVCHR_TYPE; dv_tmp3.db_length = src_dv->db_length * 3 + DB_CNTSIZE; dv_tmp3.db_prec = 0; dv_tmp3.db_collID = -1; if (dv_tmp1.db_length < (i2)sizeof(tmp)) dv_tmp1.db_data = tmp; else { dv_tmp1.db_data = (char *) MEreqmem (0, dv_tmp1.db_length, TRUE, &db_stat); if (db_stat) return db_stat; } dv_tmp3.db_data = (char *) MEreqmem (0, dv_tmp3.db_length, TRUE, &db_stat); if (db_stat) return db_stat; if (db_stat = adu_nvchr_coerce(adf_scb, src_dv, &dv_tmp3)) { if (dv_tmp1.db_data && dv_tmp1.db_data != tmp) MEfree((char *)dv_tmp1.db_data); return db_stat; } adf_scb->adf_uninorm_flag = AD_UNINORM_NFC; db_stat = adu_unorm(adf_scb, &dv_tmp3, &dv_tmp1); adf_scb->adf_uninorm_flag = saved_uninorm_flag; MEfree((char *)dv_tmp3.db_data); if (db_stat) { if (dv_tmp1.db_data && dv_tmp1.db_data != tmp) MEfree((char *)dv_tmp1.db_data); return db_stat; } src_dv = &dv_tmp1; } break; case DB_CHR_TYPE: pat_flags |= AD_PAT_BIGNORE; /* Ignore blanks */ /*FALLTHROUGH*/ case DB_LVCH_TYPE: case DB_LCLOC_TYPE: case DB_LBYTE_TYPE: case DB_LBLOC_TYPE: case DB_BYTE_TYPE: case DB_VBYTE_TYPE: case DB_CHA_TYPE: case DB_VCH_TYPE: case DB_TXT_TYPE: case DB_LTXT_TYPE: if (form == LIKE_UNICODE) { DB_DATA_VALUE dv_tmp3; dv_tmp2.db_datatype = DB_NVCHR_TYPE; dv_tmp2.db_length = pat_dv->db_length * 4 + DB_CNTSIZE; dv_tmp2.db_prec = 0; dv_tmp2.db_collID = -1; dv_tmp3.db_datatype = DB_NVCHR_TYPE; dv_tmp3.db_length = pat_dv->db_length * 3 + DB_CNTSIZE; dv_tmp3.db_prec = 0; dv_tmp3.db_collID = -1; if (dv_tmp2.db_length < (i2)sizeof(tmp) && dv_tmp1.db_data != tmp) dv_tmp2.db_data = tmp; else { dv_tmp2.db_data = (char *) MEreqmem (0, dv_tmp2.db_length, TRUE, &db_stat); if (db_stat) return 
db_stat; } if (adf_scb->adf_utf8_flag & AD_UTF8_ENABLED) { adf_scb->adf_uninorm_flag = AD_UNINORM_NFC; db_stat = adu_nvchr_fromutf8(adf_scb, pat_dv, &dv_tmp2); adf_scb->adf_uninorm_flag = saved_uninorm_flag; } else { dv_tmp3.db_data = (char *) MEreqmem (0, dv_tmp3.db_length, TRUE, &db_stat); if (db_stat) return db_stat; if (db_stat = adu_nvchr_coerce(adf_scb, pat_dv, &dv_tmp3)) { if (dv_tmp2.db_data && dv_tmp2.db_data != tmp) MEfree((char *)dv_tmp2.db_data); return db_stat; } adf_scb->adf_uninorm_flag = AD_UNINORM_NFC; db_stat = adu_unorm(adf_scb, &dv_tmp3, &dv_tmp2); adf_scb->adf_uninorm_flag = saved_uninorm_flag; MEfree((char *)dv_tmp3.db_data); if (db_stat) { if (dv_tmp2.db_data && dv_tmp2.db_data != tmp) MEfree((char *)dv_tmp2.db_data); return db_stat; } } pat_dv = &dv_tmp2; } break; case DB_PAT_TYPE: if (patdata->patdata.flags2 & AD_PAT2_UNICODE) form = LIKE_UNICODE; else if (patdata->patdata.flags2 & AD_PAT2_COLLATE) form = LIKE_COLLATION; break; default: return(adu_error(adf_scb, E_AD9999_INTERNAL_ERROR, 0)); } if (abs(pat_dv->db_datatype) != DB_PAT_TYPE) { if (form == LIKE && adf_scb->adf_collation) { form = LIKE_COLLATION; pat_flags |= (AD_PAT2_COLLATE<<16); } else if (form == LIKE_UNICODE) pat_flags |= (AD_PAT2_UNICODE<<16); if (ADU_pat_legacy > 0 && !long_seen && (pat_flags & AD_PAT_FORM_MASK) == AD_PAT_FORM_LIKE) { if (form == LIKE_UNICODE) db_stat1 = adu_ulike(adf_scb, s1, p1, (UCS2*)(esc_dv?esc_dv->db_data:0), &rcmp1); else db_stat1 = adu_like(adf_scb, s1, p1, (u_char*)(esc_dv?esc_dv->db_data:0), &rcmp1); if (ADU_pat_legacy == 3) { /* Look no further */ *rcmp = rcmp1; if (dv_tmp1.db_data && dv_tmp1.db_data != tmp) MEfree((char *)dv_tmp1.db_data); if (dv_tmp2.db_data && dv_tmp2.db_data != tmp) MEfree((char *)dv_tmp2.db_data); return db_stat1; } } /* Compile the input pattern */ db_stat = (rtns[form].compile)(adf_scb, pat_dv, esc_dv, pat_flags, sea_ctx); } if (!db_stat && sea_ctx) { if (sea_ctx->force_fail) *rcmp = 1; else { /* Init the data access */ if (!(db_stat = adu_patda_init(adf_scb, src_dv, sea_ctx, &da_ctx))) { /* Do the search */ db_stat = (rtns[form].execute)(sea_ctx, &da_ctx, rcmp); } /* Cleanup the data access */ (VOID)adu_patda_term(&da_ctx); if (!db_stat && sea_ctx->cmplx_lim_exc) db_stat = adu_error(adf_scb, E_AD1026_PAT_TOO_CPLX, 0); } } adu_patcomp_free(sea_ctx); if (dv_tmp1.db_data && dv_tmp1.db_data != tmp) MEfree((char *)dv_tmp1.db_data); if (dv_tmp2.db_data && dv_tmp2.db_data != tmp) MEfree((char *)dv_tmp2.db_data); if (patdata != &_patdata && (PTR)patdata != pat_dv->db_data) MEfree((PTR)patdata); if (ADU_pat_legacy > 0 && !long_seen && (pat_flags & AD_PAT_FORM_MASK) == AD_PAT_FORM_LIKE) { if (db_stat1 && db_stat) { /* Old unsupported? 
- just report */ TRdisplay("%s old fail - %d %d osts=%d nsts=%d\n", pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE", s1->db_datatype, p1->db_datatype, db_stat1, db_stat); } else if (db_stat1 && !db_stat) { /* NEW SOLUTION - */ TRdisplay("%s new support - %d %d osts=%d\n", pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE", s1->db_datatype, p1->db_datatype, db_stat1); } else if (!db_stat1 && db_stat) { i4 sl, pl; char *s, *p; /*NEW PROB - report & fixup */ adu_lenaddr(adf_scb, s1, &sl, &s); adu_lenaddr(adf_scb, p1, &pl, &p); TRdisplay("%s new problem - %d %d nsts=%d ores=%d nres=%d '%.#s' '%.#s'\n", pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE", s1->db_datatype, p1->db_datatype, db_stat, rcmp1, *rcmp, sl,s,pl,p); if (ADU_pat_legacy > 1) { *rcmp = rcmp1; db_stat = db_stat1; } } else if (*rcmp != rcmp1) { i4 sl, pl; char *s, *p; /*NEW PROB - report & fixup */ adu_lenaddr(adf_scb, s1, &sl, &s); adu_lenaddr(adf_scb, p1, &pl, &p); TRdisplay("%s bad? %d %d ores=%d nres=%d '%.#s' '%.#s'\n", pat_flags & AD_PAT_WO_CASE?"ILIKE":"LIKE", s1->db_datatype, p1->db_datatype, rcmp1, *rcmp, sl,s,pl,p); if (ADU_pat_legacy > 1) { *rcmp = rcmp1; db_stat = db_stat1; } } } return db_stat; }
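/*
** The tail of the routine above drives every pattern form through the same
** three phases: compile the pattern, stream the source operand through the
** compiled pattern, then free the compiled state.  The fragment below is a
** minimal sketch of just that sequence, with the coercion and legacy
** cross-checking stripped out.  It is kept under "#if 0" because the
** declarations of sea_ctx/da_ctx and their exact context types are
** assumptions inferred from the calls above, not a second supported
** entry point.
*/
#if 0
    /* Phase 1: compile the pattern operand into a search context */
    db_stat = (rtns[form].compile)(adf_scb, pat_dv, esc_dv, pat_flags, sea_ctx);

    if (!db_stat && sea_ctx)
    {
        if (sea_ctx->force_fail)
            *rcmp = 1;                  /* pattern can never match */
        else
        {
            /* Phase 2: bind the source operand, then run the pattern */
            if (!(db_stat = adu_patda_init(adf_scb, src_dv, sea_ctx, &da_ctx)))
                db_stat = (rtns[form].execute)(sea_ctx, &da_ctx, rcmp);
            (VOID)adu_patda_term(&da_ctx);
        }
    }

    /* Phase 3: always release the compiled pattern state */
    adu_patcomp_free(sea_ctx);
#endif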
/*
** Name: adu_getconverter - Obtain the name of the converter mapping file
**	 to use for unicode coercion.
**
** Description:
**	Obtains the mapping file to be used for carrying out unicode to
**	local character conversion. The following mechanism is used:
**
**	1. Check the symbol table for the user-defined converter setting
**	   II_UNICODE_CONVERTER. If it is set, return this setting.
**	2. If the variable is not set then
**	   2.a Get the platform character set.
**	   2.b Read the aliasmaptbl file.
**	   2.c Search the alias file for the platform charset.
**	3. If still not found, find the II_CHARSETxx value for the Ingres
**	   installation and search the alias file for this value.
**	4. If none of these attempts succeeds, return "default" and write
**	   a warning to the error log.
**
** Input:
**	converter	Placeholder for the output string. The area is
**			assumed to be at least MAX_LOC chars in size.
**
** Output:
**	converter	Pointer to the string where the output converter
**			name is stored.
**
** History:
**	22-jan-2004 (gupsh01)
**	    Added.
**	14-Jun-2004 (schka24)
**	    Safe charset name handling.
*/
STATUS
adu_getconverter(
	char	*converter)
{
    STATUS		stat;
    char		*tptr;
    char		*env = 0;
    char		chset[CM_MAXATTRNAME+1];
    char		norm_chset[CM_MAXATTRNAME+1];
    char		pcs[CM_MAXLOCALE+1];	/* platform character set */
    char		norm_pcs[CM_MAXLOCALE+1];
    CL_ERR_DESC		syserr;
    char		*alias_buffer = NULL;
    char		*bufptr = NULL;
    char		*buf = NULL;
    ADU_ALIAS_MAPPING	*aliasmapping;
    ADU_ALIAS_DATA	*aliasdata;
    char		*datasize;
    SIZE_TYPE		filesize = 0;
    SIZE_TYPE		sizemap = 0;
    SIZE_TYPE		sizedata = 0;
    i4			bytes_read;
    char		*abufptr;
    i4			i = 0;
    i4			index = 0;

    /* STEP 1: honor an explicit user setting if one exists */
    NMgtAt(ERx("II_UNICODE_CONVERTER"), &env);
    if (env && *env)
    {
	STlcopy(env, converter, MAX_LOC-1);
	return OK;
    }

    /* STEP 2: look the platform character set up in the alias map */
    stat = CM_getcharset(pcs);

    if (CMopen_col("aliasmaptbl", &syserr, CM_UCHARMAPS_LOC) != OK)
    {
	/* return an error if we are unable to open the file */
	return FAIL;
    }

    /* initialize buf to help read from the aliasmaptbl file */
    buf = (char *)MEreqmem(0, COL_BLOCK, TRUE, &stat);
    if (buf == NULL || stat != OK)
    {
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return (FAIL);
    }

    /* The first file buffer holds the size information. */
    stat = CMread_col(buf, &syserr);
    if (stat != OK)
    {
	MEfree((char *)buf);
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return (FAIL);
    }
    tptr = buf;
    bytes_read = COL_BLOCK;

    /* filesize is the first entry of the map file */
    filesize = *(SIZE_TYPE *) buf;
    tptr += sizeof(SIZE_TYPE);
    tptr = ME_ALIGN_MACRO(tptr, sizeof(PTR));

    /*
    ** Allocate working space for the data.  The file is copied into this
    ** area one full COL_BLOCK at a time, so round the allocation up to a
    ** whole number of blocks.
    */
    alias_buffer = (char *)MEreqmem(0,
		((filesize + COL_BLOCK - 1) / COL_BLOCK) * COL_BLOCK,
		TRUE, &stat);
    if (alias_buffer == NULL || stat != OK)
    {
	MEfree((char *)buf);
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return (FAIL);
    }
    abufptr = alias_buffer;
    MEcopy (buf, COL_BLOCK, abufptr);
    abufptr += COL_BLOCK;

    /* Read the rest of the file until it has been read completely */
    for ( ; bytes_read < filesize; )
    {
	stat = CMread_col(buf, &syserr);
	if (stat != OK)
	{
	    MEfree((char *)buf);
	    MEfree((char *)alias_buffer);
	    CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	    return (FAIL);
	}
	bytes_read += COL_BLOCK;
	MEcopy (buf, COL_BLOCK, abufptr);
	abufptr += COL_BLOCK;
    }

    if (bytes_read < filesize)
    {
	/* we had to exit for some unknown reason */
	MEfree((char *)buf);
	MEfree((char *)alias_buffer);
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return (FAIL);
    }

    tptr = alias_buffer;
    tptr += sizeof(SIZE_TYPE);
    tptr = ME_ALIGN_MACRO(tptr, sizeof(PTR));

    /* Read the size of the MappingArray nodes */
    sizemap = *(SIZE_TYPE *) tptr;
    tptr += sizeof(SIZE_TYPE);
    tptr = ME_ALIGN_MACRO(tptr, sizeof(PTR));

    /* Allocate the buffer for the ADU_ALIAS_MAPPING array */
    aliasmapping = (ADU_ALIAS_MAPPING *) MEreqmem(0, sizemap, TRUE, &stat);
    if (aliasmapping == NULL)
    {
	MEfree((char *)buf);
	MEfree((char *)alias_buffer);
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return (FAIL);
    }

    /* Copy data for the ADU_ALIAS_MAPPING array */
    MEcopy(tptr, sizemap, aliasmapping);
    tptr += sizemap;
    tptr = ME_ALIGN_MACRO(tptr, sizeof(PTR));

    /* Get the size of the aliasdata */
    sizedata = *(SIZE_TYPE *) tptr;
    tptr += sizeof(SIZE_TYPE);
    tptr = ME_ALIGN_MACRO(tptr, sizeof(PTR));

    /* Allocate the buffer for the ADU_ALIAS_DATA array */
    aliasdata = (ADU_ALIAS_DATA *) MEreqmem(0, sizedata, TRUE, &stat);
    if (aliasdata == NULL)
    {
	MEfree((char *)buf);
	MEfree((char *)alias_buffer);
	MEfree((char *)aliasmapping);
	CMclose_col(&syserr, CM_UCHARMAPS_LOC);
	return (FAIL);
    }

    /* Copy the ADU_ALIAS_DATA array */
    MEcopy(tptr, sizedata, aliasdata);
    tptr += sizedata;
    tptr = ME_ALIGN_MACRO(tptr, sizeof(PTR));

    /* Close the "aliasmaptbl" file */
    CMclose_col(&syserr, CM_UCHARMAPS_LOC);

    /* Normalize pcs */
    adu_csnormalize (pcs, STlength(pcs), norm_pcs);

    /* Look up the normalized platform character set */
    for (i = 0; i < sizedata/sizeof(ADU_ALIAS_DATA); i++)
    {
	if ((STcompare (aliasdata[i].aliasNameNorm, norm_pcs)) == 0)
	{
	    index = aliasdata[i].aliasMapId;	/* found */
	    STcopy (aliasmapping[index].mapping_id, converter);

	    /* cleanup */
	    MEfree((char *)buf);
	    MEfree((char *)alias_buffer);
	    MEfree((char *)aliasmapping);
	    MEfree((char *)aliasdata);
	    return (OK);
	}
    }

    /* STEP 3: fall back to the Ingres installation character set */
    STcopy("default", converter);
    CMget_charset_name(&chset[0]);
    if (STcasecmp(chset, "UTF8") != 0)
    {
	/* search the map table for the normalized installation charset */
	adu_csnormalize (chset, STlength(chset), norm_chset);
	for (i = 0; i < sizedata/sizeof(ADU_ALIAS_DATA); i++)
	{
	    if ((STcompare (aliasdata[i].aliasNameNorm, norm_chset)) == 0)
	    {
		index = aliasdata[i].aliasMapId;	/* found */
		STcopy (aliasmapping[index].mapping_id, converter);
		break;
	    }
	}
    }

    /* cleanup */
    MEfree((char *)buf);
    MEfree((char *)alias_buffer);
    MEfree((char *)aliasmapping);
    MEfree((char *)aliasdata);

    /* FIXME warning or error if still "default" ? */
    return (OK);
}
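/*
** The aliasmaptbl file read above is laid out as a sequence of
** pointer-aligned sections: the total file size, the size of the
** ADU_ALIAS_MAPPING array followed by that array, then the size of the
** ADU_ALIAS_DATA array followed by that array.  The fragment below shows
** how a caller would typically use adu_getconverter(); it is illustrative
** only (the surrounding declarations are hypothetical) and is therefore
** kept under "#if 0".
*/
#if 0
    char	converter[MAX_LOC];

    /* Resolve the converter mapping file name for the current charset */
    if (adu_getconverter(converter) == OK)
	TRdisplay("unicode converter mapping file: %s\n", converter);
    else
	TRdisplay("unable to determine unicode converter mapping file\n");
#endif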
/*
** Name: makeutable - make unicode collation element table
**
** Description:
**	Reads the input collation element text file and compiles a table.
**	It also reads the Unicode character database and adds attributes for
**	each character found. Currently we only take note of canonical
**	decomposition for normalization.
**
** Inputs:
**	desfile - collation element description file
**
** Outputs:
**	None
**
** Returns:
**	utbl - collation element table
**
** History:
**	14-mar-2001 (stephenb)
**	    Created.
**	5-apr-2001 (stephenb)
**	    Add code to read the unicode character database.
**	17-may-2001 (stephenb)
**	    Read in combining class from character database.
**	24-dec-2001 (somsa01)
**	    Modified such that the varsize which is placed in the Unicode
**	    table is now a SIZE_TYPE rather than an i4.
**	12-dec-2001 (devjo01)
**	    Make sure 1st overflow entry is properly aligned.
**	    Add messages to report if files cannot be opened.
**	17-jul-2001 (gupsh01)
**	    Modified the placing of the decomposition entry in the decomp
**	    structure in the file, to be the first entry.
**	04-Mar-2005 (hanje04)
**	    Add support for reverse hybrid builds, i.e. 64bit exe needs to
**	    call 32bit version.
**	17-May-2007 (gupsh01)
**	    Added support for reverse sorting of the accented characters.
**	    This is done to support the @backwards tag in a custom collation
**	    file.
**	08-Aug-2007 (kiria01) b118917
**	    Detect lines relating to conditional special casing and ignore
**	    them.
**	08-Aug-2007 (gupsh01)
**	    Fixed the typo for the CASE_IGNORABLE setting. Also fixed the
**	    handling of Final_Sigma in special_casing.txt.
**	08-Aug-2007 (gupsh01)
**	    Fixed the decomposition mappings which were not being set for
**	    composite characters.
**	24-Nov-2008 (gupsh01)
**	    When allocating the memory for the recombination table make sure
**	    we initialize the buffer.
*/ static ADUUCETAB * makeutable( char *desfile) { char buf[MAX_UCOL_REC]; FILE *fp; FILE *dfp; LOCATION loc; char table_vers[MAX_UVERS] = ""; i4 lno = 0; char *recptr; char *separator; ADUUCETAB *tab = &w.utab; i4 num_rvals = 0; ADUUCETAB *epointer = (ADUUCETAB *)w.buff; /* entries first */ /* then instructions */ char *ipointer; SIZE_TYPE *varsize; char *tptr; ADU_UCEFILE *rrec = NULL; ADU_UCEFILE *cerec = NULL; char csval[7]; char svalue[21]; char *ceptr; char *comment; u_i4 cval; i4 numce; bool combent; i4 i; /* Stuff to track the size of the recomb_tbl entries */ u_i2 **recomb_def2d; u_i2 *recomb_def1d; char *recombtbl; char *tracker; SIZE_TYPE *recomb_tbl_size; i4 tr = 0; i4 j = 0; STATUS stat; i4 rcbufsz; char *tempbuf; i4 current_bytes; char *upperval; char *lowerval; char *titleval; char *endval; i4 bts = 0; i4 lcnt = 0; i4 tcnt = 0; i4 ucnt = 0; i4 row = 0; i4 col = 0; bool backward_set = FALSE; /* open file */ STcopy(desfile, buf); LOfroms(FILENAME, buf, &loc); if (SIopen(&loc, "r", &fp) != OK) { SIprintf("aducompile: Unable to open input file \"%s\".\n", loc.string ); return NULL; } varsize = (SIZE_TYPE *)(w.buff + ENTRY_SIZE); recomb_tbl_size = (SIZE_TYPE *)(varsize + 1); ipointer = (char*)(recomb_tbl_size + 1); ipointer = ME_ALIGN_MACRO(ipointer, sizeof(PTR)); /* this is a sparse table, make sure we initialize it */ MEfill(INSTRUCTION_SIZE + ENTRY_SIZE, 0, w.buff); *varsize = 0; /* read data */ while (SIgetrec(buf, sizeof(buf), fp) != ENDFILE) { lno++; if (buf[0] == '#') /* comment line */ continue; (VOID)STtrmwhite(buf); if (STcompare(buf, "") == 0) /* blank line */ continue; /* should first find version */ if (table_vers[0] == EOS) { if (STbcompare(buf, 8, "@version", 8, TRUE) == 0) { /* we don't parse the version string, maybe we should */ STlcopy(buf+9, table_vers, MAX_UVERS); continue; } else { SIprintf("Syntax error on line %d, version line must be first \ non-comment line in the file, ignored\n", lno); continue; } } /* then alternate weights (optional), currently un-supported */ if (STbcompare(buf, 10, "@alternate", 10, TRUE) == 0) { SIprintf("Syntax error on line %d, alternate weights are not \ currently supported, ignored\n", lno); continue; } /* now backwards lines (also not currently supported) */ if (STbcompare(buf, 10, "@backwards", 10, TRUE) == 0) { backward_set = TRUE; continue; } /* and rearrange lines */ if (STbcompare(buf, 10, "@rearrange", 10, TRUE) == 0) { bool strend = FALSE; u_i4 rval; for (recptr = buf + 10;;) { /* skip blanks */ if ((recptr = STskipblank(recptr, STlength(recptr))) == NULL) { /* blank string, ignore */ SIprintf("Syntax error on line %d, no characters in \ rearrange list, ignoring\n", lno); strend = TRUE; break; } /* find next comma separator */ if ((separator = STindex(recptr, ",", 0)) == NULL) { strend = TRUE; separator = recptr + 4; } if (separator - recptr != 4) { SIprintf("Syntax error on line %d, characters in a rearrange\ line must be a comma separated list of 4 digit hex values, ABORTING\n", lno); tab = NULL; break; }