/* ** Name: CS_cp_mbx_create - mailbox creation and initialization ** ** Description: ** This subroutine is called from CSinitiate(). ** ** It does the following: ** a) establishes a mailbox, with the name II_CPRES_xx_pid, where ** xx is the (optional) installation code, and pid is the ** process ID in hex. ** b) queues a read on the mailbox, with completion routine set to ** CS_cp_mbx_complete ** c) deletes the mailbox, so it'll go away when the process dies. ** ** Inputs: ** num_sessions - Number of sessions for the process. ** ** Outputs: ** sys_err - reason for error ** ** Returns: ** OK, !OK ** ** Side Effects: ** Sets cpres_mbx_chan to the mailbox's channel ** Defines the system-wide logical name II_CPRES_xx_pid ** ** History: ** Summer, 1992 (bryanp) ** Working on the new portable logging and locking system. ** 08-Nov-2007 (jonj) ** Use of "num_sessions" is totally bogus. CS_cp_mbx_create() is called ** before the startup parms are determined from config.dat (where we'd ** find "connect_limit"), so SCD hard-codes num_sessions = 32, resulting ** in CS_CP_MIN_MSGS == 5 always being used, which is way too small. ** Instead, default to the (configurable) VMS sysgen parameter ** DEFMBXBUFQUO. ** Also, create mailbox as read-only. Writers will assign write-only ** channels. 
*/
STATUS
CS_cp_mbx_create(i4 num_sessions, CL_ERR_DESC *sys_err)
{
    struct dsc$descriptor_s name_desc;	/* VMS string descriptor wrapping the logical name */
    i4		vms_status;		/* raw status from VMS system services */
    char	mbx_name[100];		/* II_CPRES_[xx_]pid logical name buffer */
    char	*inst_id;		/* optional installation code from the environment */
    PID		pid;

    /* NOTE(review): num_sessions is intentionally unused -- see the 08-Nov-2007
    ** history note above: the mailbox buffer quota defaults to the sysgen
    ** parameter DEFMBXBUFQUO (quota argument 0) instead.
    */
    CL_CLEAR_ERR(sys_err);

    /*
    ** Build the mailbox logical name:
    */
    PCpid(&pid);
    NMgtAt("II_INSTALLATION", &inst_id);
    if (inst_id && *inst_id)
	STprintf(mbx_name, "II_CPRES_%s_%x", inst_id, (i4)pid);
    else
	STprintf(mbx_name, "II_CPRES_%x", (i4)pid);

    /* Fill in a static-class text descriptor pointing at mbx_name. */
    name_desc.dsc$a_pointer = mbx_name;
    name_desc.dsc$w_length = STlength(mbx_name);
    name_desc.dsc$b_dtype = DSC$K_DTYPE_T;
    name_desc.dsc$b_class = DSC$K_CLASS_S;

    /* Create the mailbox; the assigned channel lands in cpres_mbx_chan. */
    vms_status = sys$crembx(
		1,			/* Mailbox is "permanent" */
		&cpres_mbx_chan,	/* where to put channel */
		(i4)sizeof(CS_CP_WAKEUP_MSG),	/* maximum message size */
		0,			/* buffer quota (DEFMBXBUFQUO) */
		0,			/* prot mask = all priv */
		PSL$C_USER,		/* acmode */
		&name_desc,		/* logical name descriptor */
		CMB$M_READONLY,		/* flags */
		0);			/* nullarg */

    if ( vms_status != SS$_NORMAL )
    {
	/* Pass the raw VMS status back to the caller and map it to a CS code. */
	sys_err->error = vms_status;
	if (vms_status == SS$_NOPRIV)
	    return (E_CS00F8_CSMBXCRE_NOPRIV);
	else
	    return (E_CS00F7_CSMBXCRE_ERROR);
    }

    /* Hang a read (completion routine CS_cp_mbx_complete, per the header). */
    cpres_q_read_io();

    /* Mark for deletion, so it disappears when we exit. */
    sys$delmbx(cpres_mbx_chan);

    /* Reset the writer-channel bookkeeping used elsewhere in this module. */
    cpres_channels_sem = 0;
    cpres_num_channels_assigned = 0;

    return (OK);
}
/*
** Temp-file names have tight length limits, so only the shortest unique
** portion of the VMS pid is used to identify them. -tw
**
** The formatted suffix is computed once and cached; every call copies
** the cached string into the caller's buffer.
*/
void
PCunique( char *numb )
{
    static char	uniq_suffix[8] = "\0";	/* cached hex suffix */
    static bool	suffix_ready = FALSE;

    if (!suffix_ready)
    {
	PID	my_pid;

	PCpid(&my_pid);
	my_pid &= 0177777;	/* lower half is unique in VMS V3.x */
	STprintf(uniq_suffix, "%x", my_pid);
	suffix_ready = TRUE;
    }

    STcopy(uniq_suffix, numb);
}
/*{
** Name: ule_initiate - Provide server constant info for the server header
**
** Description:
**	Captures the node name, server name, process id, and the fixed
**	punctuation characters in the global message header (Ule_mhdr),
**	so later error messages can be prefixed with them cheaply.
**	Only the first call does any work; once Ule_started is set the
**	routine is a no-op.
**
** Inputs:
**	node_name	Pointer to node name, l_node_name characters long
**	l_node_name
**	server_name	Same for server name
**	l_server_name
**
** Outputs:
**	None.
** Returns:
**	VOID
**
** Side Effects:
**	Fills in the global Ule_mhdr and sets Ule_started.
**
** History:
**	06-Aug-1987 (fred)
**	    Created.
**	21-Mar-2005 (mutma03)
**	    cleanup of node translation code for clusters.
**	07-jan-2008 (joea)
**	    Undo 28-jun-2001 and 21-mar-2005 cluster nickname changes.
*/
VOID
ule_initiate( char *node_name, i4 l_node_name, char *server_name, i4 l_server_name )
{
    PID		pid;
    char	pid_fmt[20];

    if (Ule_started > 0)
	return;

    PCpid(&pid);

    /* Blank the whole header, then drop each field into place. */
    MEfill(sizeof(ULE_MHDR), (u_char)' ', (PTR)&Ule_mhdr);
    MEmove( l_node_name, (PTR) node_name, (char) ' ',
	    sizeof(Ule_mhdr.ule_node), (PTR) &Ule_mhdr.ule_node[0]);
    MEmove( l_server_name, (PTR) server_name, (char) ' ',
	    sizeof(Ule_mhdr.ule_server), (PTR) &Ule_mhdr.ule_server[0]);

    /* Session id is not known at this point; show it as asterisks. */
    MEfill( sizeof(Ule_mhdr.ule_session), (u_char) '*',
	    (PTR) &Ule_mhdr.ule_session[0]);

    /* Fixed punctuation surrounding the header fields. */
    Ule_mhdr.ule_pad1[0] = ':';
    Ule_mhdr.ule_pad1[1] = ':';
    Ule_mhdr.ule_pad1[2] = '[';
    Ule_mhdr.ule_pad2[0] = ',';
    Ule_mhdr.ule_pad2[1] = ' ';

    /* Format the pid using the platform's PID format string. */
    STprintf( pid_fmt, "%s", PIDFMT );
    STprintf(Ule_mhdr.ule_pid, pid_fmt, pid);

    Ule_mhdr.ule_pad3[0] = ',';
    Ule_mhdr.ule_pad3[1] = ' ';
    Ule_mhdr.ule_pad4[0] = ',';
    Ule_mhdr.ule_pad4[1] = ' ';

    /* Source field starts out blank. */
    MEfill( sizeof(Ule_mhdr.ule_source), (char) ' ',
	    (PTR) &Ule_mhdr.ule_source[0]);

    Ule_mhdr.ule_pad5[0] = ']';
    Ule_mhdr.ule_pad5[1] = ':';
    Ule_mhdr.ule_pad5[2] = ' ';

    Ule_started = 1;
}
/*
** Name: abort_handler - log an abort and terminate all active test children
**
** Description:
**	Invoked on abort: disables the test alarm, blocks status reports,
**	logs a C_ABORT record for this process, then walks every child of
**	every test.  Children already past their iteration limit are logged
**	as C_NO_KILL; all others are logged as C_KILL, forced to their
**	final iteration count, and killed via ACkillChild().
**
** NOTE(review): declared with implicit int return (pre-ANSI style); only
** the NT_GENERIC build actually returns a value.
*/
abort_handler()
{
    i4		i, j;
    HI_RES_TIME	now;
    ACTIVETEST	*curtest;
    PID		pid;

    /* Stop the periodic alarm and hold off status reporting during cleanup. */
    ACdisableAlarm();
    ACblock(STATRPT );

    ACgetTime(&now, (LO_RES_TIME *) NULL);
    PCpid(&pid);
    /* Record the abort itself (test/child indices -1/-1 mean "whole run"). */
    log_entry(&now, -1, -1, 0, pid, C_ABORT, 0, 0);

    for (i = 0 ; i < numtests ; i++)
	for (j = 0 ; j < tests[i]->t_nkids ; j++)
	{
	    curtest = &(tests[i]->t_children[j]);
	    /* Child already exceeded its iteration limit: log, don't kill. */
	    if ( (tests[i]->t_niters > 0) &&
		 (curtest->a_iters > tests[i]->t_niters) )
		log_entry(&now, i, j, 0, 0, C_NO_KILL, 0, 0);
	    else
	    {
		/* Log the kill with the platform-appropriate child pid. */
		log_entry(&now, i, j, curtest->a_iters,
#ifdef NT_GENERIC
			  curtest->a_processInfo.dwProcessId, C_KILL, 0, 0);
#else
			  curtest->a_pid, C_KILL, 0, 0);
#endif /* END #ifndef NT_GENERIC */
		/* Mark the child as finished before killing it. */
		curtest->a_iters = tests[i]->t_niters;
		ACkillChild(curtest);
	    }
	}
#ifdef NT_GENERIC
    return 0;
#else
    ACunblock();
#endif /* END #ifndef NT_GENERIC */
}
/*
** Name: qee_d1_qid - build the query ids for a DDB data segment header
**
** Description:
**	Fills in the "given" query id (qee_d4_given_qid) with the current
**	time in its numeric parts and a name of the form
**	"dd<msecs><secs><pid>" blank-padded to DB_CURSOR_MAXNAME, and
**	clears the "local" query id (qee_d5_local_qid).
*/
VOID
qee_d1_qid( QEE_DSH *v_dsh_p)
{
    QEE_DDB_CB	*ddb_cb = v_dsh_p->dsh_ddb_cb;
    DB_CURSOR_ID *qid_p;
    SYSTIME	now;
    PID		srv_pid;			/* an i4 */
    char	secs_ascii[QEK_015_LEN];
    char	msecs_ascii[QEK_015_LEN];
    char	pid_ascii[QEK_015_LEN];
    char	name_buf[QEK_050_LEN + DB_CURSOR_MAXNAME];

    /* Gather the id components: server pid and the current time. */
    PCpid(& srv_pid);			/* get process (server) id */
    CVla(srv_pid, pid_ascii);		/* convert to ascii */
    TMnow(& now);
    CVla(now.TM_secs, secs_ascii);
    CVla(now.TM_msecs, msecs_ascii);

    /* The "given" id: timestamp in the numeric slots, composite name. */
    qid_p = & ddb_cb->qee_d4_given_qid;
    qid_p->db_cursor_id[0] = now.TM_secs;
    qid_p->db_cursor_id[1] = now.TM_msecs;
    STpolycat((i4) 4,			/* 4 constituent pieces */
	"dd", msecs_ascii, secs_ascii, pid_ascii, name_buf);
    MEmove(STlength(name_buf), name_buf, ' ',
	DB_CURSOR_MAXNAME, qid_p->db_cur_name);

    /* The "local" id starts out empty. */
    qid_p = & ddb_cb->qee_d5_local_qid;
    qid_p->db_cursor_id[0] = 0;
    qid_p->db_cursor_id[1] = 0;
    MEfill(DB_CURSOR_MAXNAME, ' ', (PTR) qid_p->db_cur_name);
}
int main(int argc, char *argv[]) { #define MAXBUF 4095 char buf[ MAXBUF+1 ]; int iarg, ibuf, ichr; bool debug = FALSE; CL_ERR_DESC err_code; char *p1 = NULL; char pid[MAXBUF]; char *database = ERx(""); char *user = ERx(""); char *xmlfile = ERx(""); char sql_fname[LO_NM_LEN + 1]; char *work_dir = NULL; char directory[MAX_LOC + 1]; char *tmp_dir = NULL; char tmp_buf[MAX_LOC + 1]; char subdir_buf[MAX_LOC + 1]; char sql_loc_buf[MAX_LOC + 1]; char *progname; LOCATION tmp_dir_loc; LOCATION tmp_subdir_loc; LOCATION tmp_buff_loc; LOCATION curr_loc; LOCATION sql_file_loc; char *password = ERx(""); char *groupid = ERx(""); ARGRET rarg; i4 pos; LOCATION xmlfile_loc; FILE *xmlfile_read; STATUS stat = FAIL; char dbuf[256]; char encode[32]; u_i4 tmppid; TM_STAMP tm_stamp; /* Tell EX this is an ingres tool. */ (void) EXsetclient(EX_INGRES_TOOL); /* Call IIUGinit to initialize character set attribute table */ if ( IIUGinit() != OK) PCexit(FAIL); progname = ERget(F_XM0006_IMPXML); FEcopyright(progname, ERx("2001")); /* ** Get arguments from command line */ /* required parameters */ if (FEutaopen(argc, argv, ERx("xmlimport")) != OK) PCexit(FAIL); /* database name is required */ if (FEutaget(ERx("database"), 0, FARG_PROMPT, &rarg, &pos) != OK) PCexit(FAIL); database = rarg.dat.name; if (FEutaget(ERx("xmlfile"), 0, FARG_PROMPT, &rarg, &pos) != OK) PCexit(FAIL); xmlfile = rarg.dat.name; if (FEutaget(ERx("user"), 0, FARG_FAIL, &rarg, &pos) == OK) user = rarg.dat.name; if (FEutaget(ERx("password"), 0, FARG_FAIL, &rarg, &pos) == OK) { char *IIUIpassword(); if ((password = IIUIpassword(ERx("-P"))) == NULL) { FEutaerr(BADARG, 1, ERx("")); PCexit(FAIL); } } if (FEutaget(ERx("groupid"), 0, FARG_FAIL, &rarg, &pos) == OK) groupid = rarg.dat.name; if (FEutaget(ERx("debug"), 0, FARG_FAIL, &rarg, &pos) == OK) debug = TRUE; ibuf = STlength(buf); /* b121678: pid is no longer based on process id, but it's ** a random number instead. 
*/ PCpid(&tmppid); TMget_stamp(&tm_stamp); MHsrand2(tmppid * tm_stamp.tms_usec); STprintf(pid, "%x", MHrand2()); #ifdef xDEBUG SIprintf(" the pid is: %s \n", pid); #endif /* create the sql file */ /* Avoid a name like "foo.xml.sql" on VMS, use pid.sql instead */ STcopy(pid, sql_fname); STcat(sql_fname, ".sql"); /* ** create in the temp location a directory ** with the name pid. set this directory ** as the working directory for impxml */ NMloc (TEMP, PATH, NULL, &tmp_dir_loc); /* make a location for TMP loc */ /* print location name */ LOcopy (&tmp_dir_loc, tmp_buf, &tmp_buff_loc); LOtos (&tmp_buff_loc, &tmp_dir); #ifdef xDEBUG SIprintf ("temploc: %s \n", tmp_dir); #endif /* make a subdir location with filename, pid */ STcopy (pid, subdir_buf); /* Initialize result loc so that everyone is happy */ LOcopy (&tmp_dir_loc, sql_loc_buf, &sql_file_loc); /* Generate location for temp subdirectory */ if (LOfaddpath (&tmp_dir_loc, subdir_buf, &sql_file_loc) != OK) { IIUGerr(E_XM0007_Locname_Failed, UG_ERR_FATAL, 2, tmp_dir, subdir_buf); /* NOTREACHED */ } /* print the location name */ LOcopy (&sql_file_loc, tmp_buf, &tmp_buff_loc); LOtos (&tmp_buff_loc, &work_dir); #ifdef xDEBUG SIprintf ("work dir loc: %s \n", work_dir); #endif /* create the subdir */ if (LOcreate (&sql_file_loc) != OK) { IIUGerr(E_XM0008_Create_Temp_Dir, UG_ERR_ERROR, 1, work_dir); PCexit(FAIL); } STcopy(work_dir, directory); #ifdef xDEBUG SIprintf ("sql file name: %s \n", sql_fname); SIprintf ("xml file name: %s \n", xmlfile); #endif /* Execute the command impxml */ STprintf (buf, ERx( "impxml -d=\"%s\" -o=\"%s\" " ), directory, sql_fname); /* encoding? 
*/ if ( (LOfroms(PATH & FILENAME, xmlfile, &xmlfile_loc) != OK) || (SIopen(&xmlfile_loc, "r", &xmlfile_read) != OK) || (xmlfile_read == NULL) ) { IIUGerr(E_XM0009_Cannot_Open_File, UG_ERR_ERROR, 1, xmlfile); PCexit(FAIL); } /* scan XML declaration for encoding, if any */ if (stat = SIgetrec(dbuf, sizeof(dbuf) - 1, xmlfile_read) == OK) { char *d = dbuf; i4 i = 0; for (d = dbuf; d != (dbuf + sizeof(dbuf)); d++) { if (MEcmp(d, ERx("encoding="), sizeof(ERx("encoding=")) - 1) == 0) { d += sizeof(ERx("encoding=")); while (MEcmp (d, "\'", sizeof(char)) && MEcmp(d, "\"", sizeof(char))) MEcopy(d++, sizeof(char), &encode[i++]); encode[i++] = MIN_CHAR; encode[i] = EOS; STcat(buf, ERx("-x=")); STcat(buf, encode); break; } } } else if (stat != ENDFILE) { /* unable to read file, report error */ IIUGerr(E_XM000A_Cannot_Read_File, UG_ERR_ERROR, 1, xmlfile); PCexit(FAIL); } stat = SIclose(xmlfile_read); STcat(buf, xmlfile); #ifdef xDEBUG SIprintf ( " query send: %s \n", buf); #endif /* Execute the command. */ if( PCcmdline((LOCATION *) NULL, buf, PC_WAIT, (LOCATION *)NULL, &err_code) != OK ) { if (!debug) LOdelete(&sql_file_loc); PCexit(FAIL); } /* ** we should run the sql script ** sql dbname < new_filename */ /* save the current location */ LOcopy(&sql_file_loc, tmp_buf, &curr_loc); /* make a full location path to the location first */ LOfroms(FILENAME, sql_fname, &tmp_buff_loc); LOstfile(&tmp_buff_loc, &curr_loc); LOcopy (&curr_loc, tmp_buf, &tmp_buff_loc); LOtos (&tmp_buff_loc, &tmp_dir); #ifdef xDEBUG SIprintf ("sql file is: %s \n", tmp_dir); #endif /* No space between < and input file for VMS */ STprintf(buf, ERx( "sql -s %s %s %s %s <%s" ), database, user, password, groupid, tmp_dir); #ifdef xDEBUG SIprintf (" query send: %s \n", buf); #endif /* ** Execute the command. 
*/ if( PCcmdline((LOCATION *) NULL, buf, PC_WAIT, (LOCATION *)NULL, &err_code) != OK ) { if (!debug) LOdelete(&sql_file_loc); PCexit(FAIL); } /* ** Delete the location */ if (!debug) LOdelete(&sql_file_loc); PCexit(OK); }
/*{
** Name: RSstats_init - initialize monitor statistics
**
** Description:
**	Initializes the Replicator Server statistics memory segment.
**	If II.$.REPSERV.$.SHARED_STATISTICS is ON, the segment is a shared
**	memory region keyed by server number (so external monitors can map
**	it); otherwise it is a private heap allocation.  The segment is laid
**	out as one RS_MONITOR header followed, for each target connection,
**	by an RS_TARGET_STATS record and (shared case only) one
**	RS_TABLE_STATS record per replicated table.
**
** Inputs:
**	none
**
** Outputs:
**	none
**
** Returns:
**	OK	Function completed normally.
*/
STATUS
RSstats_init()
{
    STATUS		status;
    char		*val;
    i4			num_targs;	/* number of target connections */
    SIZE_TYPE		pages_alloc;
    u_i4		stats_size;	/* aligned size of the RS_MONITOR header */
    RS_TARGET_STATS	*targ;
    RS_TARGET_STATS	*targ_end;
    RS_TABLE_STATS	*tbl;
    RS_CONN		*conn;
    RS_TBLDESC		*tbl_info;
    CL_SYS_ERR		sys_err;
    char		server_num[4];	/* NOTE(review): holds at most 3 digits
					** + EOS; assumes RSserver_no <= 999 --
					** TODO confirm upper bound */

    /* Look up the SHARED_STATISTICS setting for this host/server number. */
    PMsetDefault(1, PMhost());
    STprintf(server_num, ERx("%d"), (i4)RSserver_no);
    PMsetDefault(3, server_num);
    status = PMget(ERx("II.$.REPSERV.$.SHARED_STATISTICS"), &val);
    if (status == OK && STbcompare(val, 0, ERx("ON"), 0, TRUE) == 0)
	shared_stats = TRUE;

    /* Compute aligned record sizes and the total page count.
    ** (Each "size + size % sizeof(ALIGN_RESTRICT)" pads the record so the
    ** next one starts on an alignment boundary.)
    */
    num_targs = RSnum_conns - TARG_BASE;
    stats_size = sizeof(RS_MONITOR) + sizeof(RS_MONITOR) %
	sizeof(ALIGN_RESTRICT);
    targ_size = sizeof(RS_TARGET_STATS) + sizeof(RS_TARGET_STATS) %
	sizeof(ALIGN_RESTRICT);
    if (shared_stats)
	tbl_size = sizeof(RS_TABLE_STATS) + sizeof(RS_TABLE_STATS) %
	    sizeof(ALIGN_RESTRICT);
    else
	tbl_size = 0;	/* per-table stats exist only in the shared case */
    num_pages = (stats_size + num_targs * (targ_size + RSrdf_svcb.num_tbls *
	tbl_size)) / ME_MPAGESIZE + 1;

    if (shared_stats)
    {
	/* Shared segment keyed by server number.  If a stale segment from a
	** previous incarnation exists, destroy and recreate it.
	*/
	STprintf(sm_key, ERx("%s.%03d"), RS_STATS_FILE, RSserver_no);
	status = MEget_pages(ME_SSHARED_MASK | ME_CREATE_MASK | ME_MZERO_MASK |
	    ME_NOTPERM_MASK, num_pages, sm_key, (PTR *)&mon_stats,
	    &pages_alloc, &sys_err);
	if (status == ME_ALREADY_EXISTS)
	{
	    status = MEsmdestroy(sm_key, &sys_err);
	    if (status == OK)
		status = MEget_pages(ME_SSHARED_MASK | ME_CREATE_MASK |
		    ME_MZERO_MASK | ME_NOTPERM_MASK, num_pages, sm_key,
		    (PTR *)&mon_stats, &pages_alloc, &sys_err);
	}
    }
    else
    {
	/* Private statistics: plain zeroed heap allocation. */
	mon_stats = (RS_MONITOR *)MEreqmem(0, num_pages * ME_MPAGESIZE, TRUE,
	    &status);
    }
    if (status != OK)
	return (status);

    /* Fill in the monitor header. */
    mon_stats->server_no = RSserver_no;
    mon_stats->local_db_no = RSlocal_conn.db_no;
    PCpid(&mon_stats->pid);
    mon_stats->startup_time = TMsecs();
    mon_stats->num_targets = num_targs;
    mon_stats->num_tables = RSrdf_svcb.num_tbls;
    STcopy(RSlocal_conn.vnode_name, mon_stats->vnode_name);
    STcopy(RSlocal_conn.db_name, mon_stats->db_name);

    /* Target records start right after the (aligned) header; each target
    ** record is followed by its table records (shared case), so the stride
    ** is targ_size + tbl_size * num_tables.
    */
    mon_stats->target_stats = (RS_TARGET_STATS *)((PTR)mon_stats + stats_size);
    targ_end = (RS_TARGET_STATS *)((PTR)mon_stats->target_stats +
	mon_stats->num_targets * (targ_size + tbl_size *
	mon_stats->num_tables));
    for (conn = &RSconns[TARG_BASE], targ = mon_stats->target_stats;
	 targ < targ_end;
	 ++conn, targ = (RS_TARGET_STATS *)((PTR)targ + targ_size + tbl_size *
	    mon_stats->num_tables))
    {
	targ->db_no = conn->db_no;
	STcopy(conn->vnode_name, targ->vnode_name);
	STcopy(conn->db_name, targ->db_name);
	if (shared_stats)
	{
	    /* Per-table stats live immediately after their target record. */
	    targ->table_stats = (RS_TABLE_STATS *)((PTR)targ + targ_size);
	    for (tbl_info = RSrdf_svcb.tbl_info, tbl = targ->table_stats;
		 tbl < targ->table_stats + mon_stats->num_tables;
		 ++tbl, ++tbl_info)
	    {
		tbl->table_no = tbl_info->table_no;
		STcopy(tbl_info->table_owner, tbl->table_owner);
		STcopy(tbl_info->table_name, tbl->table_name);
	    }
	}
    }
    return (OK);
}
/*{ ** Name: LGK_initialize() - initialize the lg/lk shared mem segment. ** ** Description: ** This routine is called by the LGinitialize or LKinitialize routine. IT ** assumes that a previous caller has allocated the shared memory segment. ** ** If it discovers that the shared memory segment has not yet been ** initialized, it calls the LG and LK initialize-memory routines to do so. ** ** Inputs: ** flag - bit mask of: ** LOCK_LGK_MEMORY to lock the shared data segment ** LGK_IS_CSP if process is CSP process this node. ** ** Outputs: ** sys_err - place for system-specific error information. ** ** Returns: ** OK - success ** !OK - failure (CS*() routine failure, segment not mapped, ...) ** ** History: ** Summer, 1992 (bryanp) ** Working on the new portable logging and locking system. ** 19-oct-1992 (bryanp) ** Check memory version number when attaching. ** 22-oct-1992 (bryanp) ** Change LGLKDATA.MEM to lglkdata.mem. ** 23-Oct-1992 (daveb) ** name the semaphore too. ** 13-feb-1993 (keving) ** Remove support for II_LGK_MEMORY_SIZE. If II_LG_MEMSIZE ** is not set then calculate memory size from PM values. ** 24-may-1993 (bryanp) ** If the shared memory is the wrong version, don't install the ** at_exit handlers (the rundown routines won't be able to interpret ** the memory properly). ** 26-jul-1993 (jnash) ** Add 'flag' param lock the LGK data segment. ** 20-sep-1993 (bryanp) ** In addition to calling PCatexit, call (on VMS) sys$dclexh, since ** there are some situations (image death and image rundown without ** process rundown) which are caught neither by PCatexit (since ** PCexit isn't run) nor by check-dead threads (since process ** rundown never happened). This fixes a hole where an access- ** violating ckpdb or auditdb command never got cleaned up. ** 31-jan-1994 (bryanp) ** Back out a few "features" which are proving countereffective: ** 1) Don't bother checking mem_creator_pid to see if the previous ** creator of the shared memory has died. 
This was an attempt to ** gracefully re-use sticky shared memory following a system crash, ** but it is suspected as being the culprit in a series of system ** failures by re-initializing the shared memory at inopportune ** times. ** 2) Don't complain if the shared memory already exists but is of a ** different size than you expected. Just go ahead and try to use ** it anyway. ** 21-feb-1994 (bryanp) ** Reverse item (1) of the above 31-jan-1994 change and re-enable the ** graceful re-use of shared memory. People weren't happy with ** having to run ipcclean and csinstall all the time. ** 23-may-1994 (bryanp) ** On VMS, disable ^Y for LG/LK-aware processes. We don't want to allow ** ^Y because you might interrupt the process right in the middle ** of an LG or LK operation, while holding the shared memory ** semaphore, and this would then wedge the whole installation. ** ** 17-May-1994 (daveb) 59127 ** Attach lgk_mem semaphore if we're attaching to the segment. ** 30-jan-1995 (lawst01) bug 61984 ** Use memory needed calculation from the 'lgk_calculate_size' ** function to determine the size of the shared memory pool for ** locking and locking. If the II_LG_MEMSIZE variable is specified ** with a value larger than needed use the supplied value. If ** lgk_calculate_size is unable to calculate a size then use the ** magic number of 400000. In addition issue a warning message ** and continue executing in the event the number of pages ** allocated is less than the number requested. ** 24-apr-1997 (nanpr01) ** Reinstate Bryanp's change. In the process of fixing bug 61984 ** by Steve Lawrence and subsequent undo of Steve's fix by Nick ** Ireland on 25-jun-96 (nick) caused the if 0 code removed. ** Part of the Steve's change was not reinstated such as not returning ** the status and exit and continue. ** 1. Don't complain if the shared memory already exists but is of a ** different size than you expected. Just go ahead and try to use ** it. 
** 18-aug-1998 (hweho01) ** Reclaim the kernel resource if LG/LK shared memory segment is ** reinitialized. If the shared segment is re-used (the previous creator ** of the shared segment has died), the cross-process semaphores get ** initialized more than once at the same locations. That cause the ** kernel resource leaks on DG/UX (OS release 4.11MU04). To fix the ** problem, CS_cp_sem_cleanup() is called to destroy all the ** semaphores before LG/LK shraed segment get recreated. ** CS_cp_sem_cleanup() is made dependent on xCL_NEED_SEM_CLEANUP and ** OS_THREADS_USED, it returns immediately for most platforms. ** 27-Mar-2000 (jenjo02) ** Added test for crossed thread types, refuse connection ** to LGK memory with E_DMA811_LGK_MT_MISMATCH. ** 18-apr-2001 (devjo01) ** s103715 (Portable cluster support) ** - Add CX mem requirement calculations. ** - Add LGK_IS_CSP flag to indicate that LGK memory is being ** initialized for a CSP process. ** - Add basic CX initialization. ** 19-sep-2002 (devjo01) ** If running NUMA clustered allocate memory out of local RAD. ** 30-Apr-2003 (jenjo02) ** Rearchitected to silence long-tolerated race conditions. ** BUG 110121. ** 27-feb-2004 (devjo01) ** Rework allocation of CX shared memory to be compatible ** with race condition fix introduced for bug 110121. ** 29-Dec-2008 (jonj) ** If lgk_calculate_size() returns FAIL, the total memory ** needed exceeds MAX_SIZE_TYPE and we can't continue, but ** tell what we can about the needs of the various bits of ** memory before quitting. ** 06-Aug-2009 (wanfr01) ** Bug 122418 - Return E_DMA812 if LOCK_LGK_MUST_ATTACH is ** is passed in and memory segment does not exist ** 20-Nov-2009 (maspa05) bug 122642 ** In order to synchronize creation of UUIDs across servers added ** a semaphore and a 'last time' variable into LGK memory. ** 14-Dec-2009 (maspa05) bug 122642 ** #ifdef out the above change for Windows. The rest of the change ** does not apply to Windows so the variables aren't defined. 
*/
STATUS
LGK_initialize(
i4		flag,
CL_ERR_DESC	*sys_err,
char		*lgk_info)
{
    PTR		ptr;
    SIZE_TYPE	memleft;
    SIZE_TYPE	size;
    STATUS	ret_val;
    STATUS	mem_exists;	/* non-zero when the segment already existed */
    char	mem_name[15];
    SIZE_TYPE	allocated_pages;
    i4		me_flags;
    i4		me_locked_flag;
    SIZE_TYPE	memory_needed;
    char	*nm_string;
    SIZE_TYPE	pages;
    LGK_MEM	*lgk_mem;	/* mapped segment header */
    i4		err_code;
    SIZE_TYPE	min_memory;
    i4		retries;
    i4		i;
    i4		attached;	/* count of live pids attached to the memory */
    PID		*my_pid_slot;	/* first vacant mem_pid[] slot for this process */
    i4		clustered;
    u_i4	nodes;
    SIZE_TYPE	cxmemreq;	/* extra bytes reserved for CX (cluster) */
    PTR		pcxmem;		/* start of the CX area within the segment */
    LGLK_INFO	lgkcount;	/* configured LG/LK resource counts */
    char	instid[4];

    CL_CLEAR_ERR(sys_err);

    /*
    ** if LGK_base is set then this routine has already been called. It is
    ** set up so that both LGinitialize and LKinitialize calls it, but only
    ** the first call does anything.
    */

    if (LGK_base.lgk_mem_ptr)
	return(OK);

    PCpid( &LGK_my_pid );

    /* Determine the segment size: start from II_LG_MEMSIZE if set ... */
    memory_needed = 0;
    NMgtAt("II_LG_MEMSIZE", &nm_string);
    if (nm_string && *nm_string)
#if defined(LP64)
	if (CVal8(nm_string, (long*)&memory_needed))
#else
	if (CVal(nm_string, (i4 *)&memory_needed))
#endif /* LP64 */
	    memory_needed = 0;

    /* Always calculate memory needed from PM resource settings */
    /* and compare with supplied value, if supplied value is less */
    /* than minimum then use minimum */

    min_memory = 0;
    if ( OK == lgk_get_counts(&lgkcount, FALSE))
    {
	if ( lgk_calculate_size(FALSE, &lgkcount, &min_memory) )
	{
	    /*
	    ** Memory exceeds MAX_SIZE_TYPE, can't continue.
	    **
	    ** Do calculation again, this time with "wordy"
	    ** so user can see allocation bits, then quit.
	    */
	    lgk_calculate_size(TRUE, &lgkcount, &min_memory);
	    return (E_DMA802_LGKINIT_ERROR);
	}
    }
    if (min_memory)
	memory_needed = (memory_needed < min_memory) ?
			 min_memory : memory_needed;
    else
	memory_needed = (memory_needed < 400000 ) ?
			 400000 : memory_needed;

    /* Add the cluster (CX) shared memory requirement, guarding overflow. */
    clustered = (i4)CXcluster_enabled();
    cxmemreq = 0;
    if ( clustered )
    {
	if ( OK != CXcluster_nodes( &nodes, NULL ) )
	    nodes = 0;
	cxmemreq = CXshm_required( 0, nodes,
	    lgkcount.lgk_max_xacts, lgkcount.lgk_max_locks,
	    lgkcount.lgk_max_resources );
	if ( MAX_SIZE_TYPE - memory_needed < cxmemreq )
	{
	    /*
	    ** Memory exceeds MAX_SIZE_TYPE, can't continue.
	    **
	    ** Do calculation again, this time with "wordy"
	    ** so user can see allocation bits, then quit.
	    */
	    SIprintf("Total LG/LK/CX allocation exceeds max of %lu bytes by %lu\n"
		     "Adjust logging/locking configuration values and try again\n",
		     MAX_SIZE_TYPE, cxmemreq - (MAX_SIZE_TYPE - memory_needed));
	    lgk_calculate_size(TRUE, &lgkcount, &min_memory);
	    return (E_DMA802_LGKINIT_ERROR);
	}
	memory_needed += cxmemreq;
    }

    /* Convert bytes to pages, rounding up when that cannot overflow. */
    if ( memory_needed < MAX_SIZE_TYPE - ME_MPAGESIZE )
	pages = (memory_needed + ME_MPAGESIZE - 1) / ME_MPAGESIZE;
    else
	pages = memory_needed / ME_MPAGESIZE;

    /*
    ** Lock the LGK segment if requested to do so
    */
    if (flag & LOCK_LGK_MEMORY)
	me_locked_flag = ME_LOCKED_MASK;
    else
	me_locked_flag = 0;

    me_flags = (me_locked_flag | ME_MSHARED_MASK | ME_IO_MASK |
		ME_CREATE_MASK | ME_NOTPERM_MASK | ME_MZERO_MASK);
    if (CXnuma_user_rad())
	me_flags |= ME_LOCAL_RAD;

    STcopy("lglkdata.mem", mem_name);

    /*
    ** In general, we just want to attach to the shared memory and detect if
    ** we are the first process to do so. However, there are ugly race
    ** conditions to consider, as well as complications because the shared
    ** memory may be left around following a system crash.
    **
    ** First we attempt to create the shared memory. Usually it already exists,
    ** so we check for and handle the case of "already exists".
    */

    /*
    ** (jenjo02)
    **
    ** Restructured to better handle all those ugly race conditions
    ** which are easily reproduced by running two scripts, one that
    ** continuously executes "lockstat" while the other is starting
    ** and stopping Ingres.
    **
    ** For example,
    **
    **		lockstat A	acquires and init's the memory
    **		RCP		attaches to "A" memory
    **		lockstat A	terminates normally
    **		lockstat B	attaches to "A" memory, sees that
    **			 	"A"s pid is no longer alive, and
    **				reinitializes the memory, much to
    **				the RCP's chagrin.
    ** or (more commonly)
    **
    **		lockstat A	acquires and begins to init the mem
    **		RCP		attaches to "A" memory which is
    **				still being zero-filled by lockstat,
    **				checks the version number (zero),
    **				and fails with a E_DMA434 mismatch.
    **
    ** The fix utilizes the mem_ext_sem to synchronize multiple
    ** processes; if the semaphore hasn't been initialized or
    ** if mem_version_no is zero, we'll wait one second and retry,
    ** up to 60 seconds before giving up. This gives the creating
    ** process time to complete initialization of the memory.
    **
    ** Up to LGK_MAX_PIDS are allowed to attach to the shared
    ** memory. When a process attaches it sets its PID in the
    ** first vacant slot in lgk_mem->mem_pid[]; if there are
    ** no vacant slots, the attach is refused. When the process
    ** terminates normally by calling LGK_rundown(), it zeroes
    ** its PID slot.
    **
    ** When attaching to an existing segment, we check if
    ** there are any live processes still using the memory;
    ** if so, we can't destroy it (no matter who created it).
    ** If there are no live processes attached to the memory,
    ** we destroy and reallocate it (based on current config.dat
    ** settings).
    */

    for ( retries = 0; ;retries++ )
    {
	LGK_base.lgk_mem_ptr = (PTR)NULL;

	/* Give up if unable to get memory in one minute */
#if defined(conf_CLUSTER_BUILD)
	if (retries > 1)
#else
	if ( retries )
#endif
	{
	    if ( retries < 60 )
		PCsleep(1000);
	    else
	    {
		/* Another process has it blocked way too long */
		uleFormat(NULL, E_DMA800_LGKINIT_GETMEM, (CL_ERR_DESC *)NULL,
			    ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		/* Unable to attach allocated shared memory segment. */
		return (E_DMA802_LGKINIT_ERROR);
	    }
	}

	/* Try to create the segment; ME_ALREADY_EXISTS means attach instead. */
	ret_val = MEget_pages(me_flags,
				pages, mem_name, (PTR*)&lgk_mem,
				&allocated_pages, sys_err);

	if ( mem_exists = ret_val )
	{
	    if (ret_val == ME_ALREADY_EXISTS)
	    {
		/* Attach (no ME_CREATE_MASK / ME_MZERO_MASK this time). */
		ret_val = MEget_pages((me_locked_flag |
				       ME_MSHARED_MASK | ME_IO_MASK),
				      pages, mem_name, (PTR*)&lgk_mem,
				      &allocated_pages, sys_err);
#if defined(conf_CLUSTER_BUILD)
		if (ret_val && !retries)
		    continue; /* try one more time */
#endif
	    }
	    if (ret_val)
	    {
		uleFormat(NULL, ret_val, sys_err, ULE_LOG, NULL, NULL, 0, NULL,
			    &err_code, 0);
		uleFormat(NULL, E_DMA800_LGKINIT_GETMEM, (CL_ERR_DESC *)NULL,
			    ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		/* Unable to attach allocated shared memory segment. */
		return (E_DMA802_LGKINIT_ERROR);
	    }
	}
	else if (flag & LOCK_LGK_MUST_ATTACH)
	{
	    /* Do not use the shared segment you just allocated */
	    MEfree_pages((PTR)lgk_mem, allocated_pages, sys_err);
	    return (E_DMA812_LGK_NO_SEGMENT);
	}

	size = allocated_pages * ME_MPAGESIZE;

	/* Expose this process to the memory */
	LGK_base.lgk_mem_ptr = (PTR)lgk_mem;

	if ( mem_exists )
	{
	    /*
	    ** Memory exists.
	    **
	    ** Try to acquire the semaphore. If it's
	    ** uninitialzed, retry from the top.
	    **
	    ** If the version is zero, then another
	    ** process is initializing the memory;
	    ** keep retrying until the version is
	    ** filled in.
	    **
	    */
	    if ( ret_val = CSp_semaphore(1, &lgk_mem->mem_ext_sem) )
	    {
		if ( ret_val != E_CS000A_NO_SEMAPHORE )
		{
		    uleFormat(NULL, ret_val, sys_err, ULE_LOG, NULL, NULL,
				0, NULL, &err_code, 0);
		    ret_val = E_DMA802_LGKINIT_ERROR;
		    break;
		}
		continue;
	    }

	    /* Retry if still being init'd by another process */
	    if ( !lgk_mem->mem_version_no )
	    {
		CSv_semaphore(&lgk_mem->mem_ext_sem);
		continue;
	    }

	    /*
	    ** Check pids which appear to be attached to
	    ** the memory:
	    **
	    ** If any process is still alive, then we
	    ** assume the memory is consistent and use it.
	    **
	    ** If a process is now dead, it terminated
	    ** without going through LGK_rundown
	    ** to zero its PID slot, zero it now.
	    **
	    ** If there are no live PIDs attached to
	    ** the memory, we destroy and recreate it.
	    */
	    my_pid_slot = (PID*)NULL;
	    attached = 0;

	    for ( i = 0; i < LGK_MAX_PIDS; i++ )
	    {
		if ( lgk_mem->mem_pid[i] &&
		     PCis_alive(lgk_mem->mem_pid[i]) )
		{
		    attached++;
		}
		else
		{
		    /* Vacate the slot */
		    if (lgk_mem->mem_pid[i])
		    {
			uleFormat(NULL, E_DMA499_DEAD_PROCESS_INFO,
				    (CL_ERR_DESC *)NULL,
				    ULE_LOG, NULL, NULL, 0, NULL, &err_code,
				    2,
				    0, lgk_mem->mem_pid[i],
				    0, lgk_mem->mem_info[i].info_txt);
		    }
		    lgk_mem->mem_pid[i] = (PID)0;
		    lgk_mem->mem_info[i].info_txt[0] = EOS;

		    /* Use first vacant slot for this process */
		    if ( !my_pid_slot )
		    {
			my_pid_slot = &lgk_mem->mem_pid[i];
			LGK_base.lgk_pid_slot = i;
		    }
		}
		/* Quit when both questions answered */
		if ( attached && my_pid_slot )
		    break;
	    }

	    /* If no living pids attached, destroy/reallocate */
	    if ( !attached )
	    {
		CSv_semaphore(&lgk_mem->mem_ext_sem);
		if ( LGK_destroy(allocated_pages, sys_err) )
		{
		    ret_val = E_DMA802_LGKINIT_ERROR;
		    break;
		}
		continue;
	    }

	    /* All attached pids alive? */
	    if ( !my_pid_slot )
	    {
		/* ... then there's no room for this process */
		uleFormat(NULL, E_DMA80A_LGK_ATTACH_LIMIT, (CL_ERR_DESC *)NULL,
			    ULE_LOG, NULL, NULL, 0, NULL, &err_code,
			    1, 0, attached);
		ret_val = E_DMA802_LGKINIT_ERROR;
	    }
	    else if (lgk_mem->mem_version_no != LGK_MEM_VERSION_CURRENT)
	    {
		uleFormat(NULL, E_DMA434_LGK_VERSION_MISMATCH,
			    (CL_ERR_DESC *)NULL,
			    ULE_LOG, NULL, NULL, 0, NULL, &err_code,
			    2, 0, lgk_mem->mem_version_no,
			    0, LGK_MEM_VERSION_CURRENT);
		ret_val = E_DMA435_WRONG_LGKMEM_VERSION;
	    }
	    /*
	    ** Don't allow mixed connections of MT/non-MT processes.
	    ** Among other things, the mutexing mechanisms are
	    ** incompatible!
	    */
	    else if ( (CS_is_mt() && (lgk_mem->mem_status & LGK_IS_MT) == 0) ||
		      (!CS_is_mt() && lgk_mem->mem_status & LGK_IS_MT) )
	    {
		uleFormat(NULL, E_DMA811_LGK_MT_MISMATCH, (CL_ERR_DESC *)NULL,
			    ULE_LOG, NULL, NULL, 0, NULL, &err_code,
			    2, 0, (lgk_mem->mem_status & LGK_IS_MT)
					? "OS" : "INTERNAL",
			       0, (CS_is_mt()) ? "OS" : "INTERNAL");
		ret_val = E_DMA802_LGKINIT_ERROR;
	    }
	    else
	    {
		/*
		** CX memory (if any) will lie immediately past LGK header.
		*/
		pcxmem = (PTR)(lgk_mem + 1);
		pcxmem = (PTR)ME_ALIGN_MACRO(pcxmem, sizeof(ALIGN_RESTRICT));

		/* LKD/LGD offsets were recorded by the creating process. */
		LGK_base.lgk_lkd_ptr = (char *)LGK_base.lgk_mem_ptr +
					lgk_mem->mem_lkd;
		LGK_base.lgk_lgd_ptr = (char *)LGK_base.lgk_mem_ptr +
					lgk_mem->mem_lgd;

		/* Stuff our pid in first vacant slot */
		*my_pid_slot = LGK_my_pid;
		STlcopy(lgk_info, lgk_mem->mem_info[i].info_txt,
			LGK_INFO_SIZE-1);
	    }
#if defined(VMS) || defined(UNIX)
	    /* set up pointers to reference the uuid mutex and last time
	     * variable */
	    if (!ID_uuid_sem_ptr)
		ID_uuid_sem_ptr=&lgk_mem->id_uuid_sem;
	    if (!ID_uuid_last_time_ptr)
		ID_uuid_last_time_ptr=&lgk_mem->uuid_last_time;
	    if (!ID_uuid_last_cnt_ptr)
		ID_uuid_last_cnt_ptr=&lgk_mem->uuid_last_cnt;
#endif
	    CSv_semaphore(&lgk_mem->mem_ext_sem);
	}
	else
	{
	    /* Memory did not exist */
	    /* Zero the version to keep other processes out */
	    lgk_mem->mem_version_no = 0;
#if defined(VMS) || defined(UNIX)
	    /* set up the uuid mutex and last time pointers to
	     * reference the objects in shared memory */
	    {
		STATUS id_stat;

		ID_uuid_sem_ptr=&lgk_mem->id_uuid_sem;
		ID_uuid_last_time_ptr=&lgk_mem->uuid_last_time;
		ID_uuid_last_cnt_ptr=&lgk_mem->uuid_last_cnt;
		*ID_uuid_last_cnt_ptr=0;
		ID_UUID_SEM_INIT(ID_uuid_sem_ptr,CS_SEM_MULTI,"uuid sem",
				&id_stat);
	    }
#endif
	    /* ... then initialize the mutex */
	    CSw_semaphore(&lgk_mem->mem_ext_sem, CS_SEM_MULTI,
				"LGK mem ext sem" );

	    /* Record if memory created for MT or not */
	    if ( CS_is_mt() )
		lgk_mem->mem_status = LGK_IS_MT;

	    /*
	    ** memory is as follows:
	    **
	    **	-----------------------------------------------------------|
	    **	| LGK_MEM struct (keep track of this mem)                  |
	    **	|                                                          |
	    **	-----------------------------------------------------------|
	    **	| If a clustered installation memory reserved for CX       |
	    **	|                                                          |
	    **	------------------------------------------------------------
	    **	| LKD - database of info for lk system                     |
	    **	|                                                          |
	    **	------------------------------------------------------------
	    **	| LGD - database of info for lg system                     |
	    **	|                                                          |
	    **	------------------------------------------------------------
	    **	| memory manipulated by LGKm_* routines for structures used|
	    **	| by both the lk and lg systems.                           |
	    **	|                                                          |
	    **	------------------------------------------------------------
	    */

	    /* put the LGK_MEM struct at head of segment leaving ptr pointing
	    ** at next aligned piece of memory
	    */

	    /*
	    ** CX memory (if any) will lie immediately past LGK header.
	    */
	    pcxmem = (PTR)(lgk_mem + 1);
	    pcxmem = (PTR)ME_ALIGN_MACRO(pcxmem, sizeof(ALIGN_RESTRICT));

	    LGK_base.lgk_lkd_ptr = pcxmem + cxmemreq;
	    LGK_base.lgk_lkd_ptr = (PTR) ME_ALIGN_MACRO(LGK_base.lgk_lkd_ptr,
						sizeof(ALIGN_RESTRICT));
	    lgk_mem->mem_lkd = (i4)((char *)LGK_base.lgk_lkd_ptr -
				    (char *)LGK_base.lgk_mem_ptr);

	    LGK_base.lgk_lgd_ptr = (PTR) ((char *) LGK_base.lgk_lkd_ptr +
					  sizeof(LKD));
	    LGK_base.lgk_lgd_ptr = (PTR) ME_ALIGN_MACRO(LGK_base.lgk_lgd_ptr,
						sizeof(ALIGN_RESTRICT));
	    lgk_mem->mem_lgd = (i4)((char *)LGK_base.lgk_lgd_ptr -
				    (char *)LGK_base.lgk_mem_ptr);

	    /* now initialize the rest of memory for allocation */

	    /* how much memory is left? */
	    ptr = ((char *)LGK_base.lgk_lgd_ptr + sizeof(LGD));
	    memleft = size - (((char *) ptr) - ((char *) LGK_base.lgk_mem_ptr));

	    if ( (ret_val = lgkm_initialize_mem(memleft, ptr)) == OK &&
		 (ret_val = LG_meminit(sys_err)) == OK &&
		 (ret_val = LK_meminit(sys_err)) == OK )
	    {
		/* Clear array of attached pids and pid info */
		for ( i = 0; i < LGK_MAX_PIDS; i++ )
		{
		    lgk_mem->mem_pid[i] = (PID)0;
		    lgk_mem->mem_info[i].info_txt[0] = EOS;
		}

		/* Set the creator pid */
		LGK_base.lgk_pid_slot = 0;
		lgk_mem->mem_creator_pid = LGK_my_pid;

		/* Set the version, releasing other processes */
		lgk_mem->mem_version_no = LGK_MEM_VERSION_CURRENT;
	    }
	    else
	    {
		uleFormat(NULL, ret_val, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
			    NULL, 0, NULL, &err_code, 0);
		ret_val = E_DMA802_LGKINIT_ERROR;

		/* Destroy the shared memory */
		LGK_destroy(allocated_pages, sys_err);
	    }
	}

	if ( ret_val == OK )
	{
	    /* Ensure LGK_rundown releases our pid slot on normal exit. */
	    PCatexit(LGK_rundown);

	    if ( clustered )
	    {
		/*
		** Perform preliminary cluster connection and CX memory init.
		*/

		/* Get installation code */
		NMgtAt("II_INSTALLATION", &nm_string);

		if ( nm_string )
		{
		    instid[0] = *(nm_string);
		    instid[1] = *(nm_string+1);
		}
		else
		{
		    instid[0] = 'A';
		    instid[1] = 'A';
		}
		instid[2] = '\0';
		ret_val = CXinitialize( instid, pcxmem, flag & LGK_IS_CSP );
		if ( ret_val )
		{
		    /* Report error returned from CX */
		    uleFormat(NULL, ret_val, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
				NULL, 0, NULL, &err_code, 0 );
		    break;
		}
	    }

#ifdef VMS
	    {
	    static $EXHDEF	exit_block;
	    i4		ctrl_y_mask = 0x02000000;

	    /*
	    ** On VMS, programs like the dmfjsp and logstat run as images in
	    ** the shell process. That is, the system doesn't start and stop
	    ** a process for each invocation of the program, it just starts
	    ** and stops an image in the same process. This means that if
	    ** the program should die, the image may be rundown but the process
	    ** will remain, which means that the check-dead threads of other
	    ** processes in the installation will not feel that they need to
	    ** rundown this process, since it's still alive.
	    **
	    ** By declaring an exit handler, which will get a chance to run
	    ** even if PCexit isn't called, we improve our chances of getting
	    ** to perform rundown processing if we should die unexpectedly.
	    **
	    ** Furthermore, we ask DCL to disable its ^Y processing, which
	    ** lessens the chance that the user will interrupt us while we
	    ** are holding the semaphore.
	    */
	    exit_block.exh$g_func = LGK_rundown;
	    exit_block.exh$l_argcount = 1;
	    exit_block.exh$gl_value = &exit_block.exh$l_status;

	    if (sys$dclexh(&exit_block) != SS$_NORMAL)
		ret_val = FAIL;

	    lib$disable_ctrl(&ctrl_y_mask, 0);
	    }
#endif
	}
	break;
    }

    if ( ret_val )
	LGK_base.lgk_mem_ptr = NULL;

    return(ret_val);
}
/*{
** Name: dmc_write_behind_common - the guts of a write behind thread
**
** Description:
**      The dmc_write_behind routine is used for implementing an asynchronous
**      write behind thread. It wakes up whenever signaled by an LK event
**      and writes dirty pages out of the cache to make room for new pages
**      to be read in.
**
**      The dmc_write_behind routine should only be called within a special
**      session that is dedicated for this purpose. This routine will not
**      return under normal circumstances until server shutdown time.
**
**      This routine uses two routines in DM0P to drive the write behind
**      thread:
**          DM0P_BMFLUSH_WAIT waits for a session in the buffer manager
**          to signal the event to wake up the write behind threads. This
**          is signalled when some specified percent of the buffer manager
**          is filled with dirty pages.
**
**          DM0P_FLUSH_PAGES goes through the buffer manager modified queue
**          in reverse priority order writing pages until some specified
**          percentage of the buffer manager is free.
**
**      This routine will return only if the event wait in DM0P_BMFLUSH_WAIT
**      is cancelled by an interrupt. At server shutdown time, the server
**      is expected to interrupt all the write behind threads.
**
**      This common code is executed by both Primary and Cloned
**      WriteBehind agents.
**
** Inputs:
**      i_am_a_clone        FALSE if this is the Primary WB Agent,
**                          TRUE if a Clone. A Clone runs the flush loop
**                          body exactly once (no event wait) and exits.
**      cfa                 Agent's data (opaque here; passed through to
**                          dm0p_wbflush_wait/dm0p_flush_pages).
**
** Outputs:
**      dmf_err
**          .error.err_code One of the following error numbers.
**                          E_DB_OK
**                          E_DM004A_INTERNAL_ERROR
**                          E_DM004B_LOCK_QUOTA_EXCEEDED
**                          E_DM0062_TRAN_QUOTA_EXCEEDED
**                          E_DM0117_WRITE_BEHIND
**
** Returns:
**      E_DB_OK
**      E_DB_FATAL
**
** History:
**      30-jun-1988 (rogerk)
**          Created for Jupiter.
**      30-Jan-1989 (ac)
**          Added arguments to LGbegin().
**      15-may-1989 (rogerk)
**          Return resource errors if resource limit is exceeded.
**      2-oct-1992 (ed)
**          Use DB_MAXNAME to replace hard coded numbers
**          - also created defines to replace hard coded character strings
**          dependent on DB_MAXNAME
**      18-oct-1993 (rogerk)
**          Add check for LOGFULL status. We don't execute write behind when
**          in logfull to avoid background log forces which wreak havoc on
**          the recovery logspace reservation algorithms.
**      10-oct-93 (swm)
**          Bug #56438
**          Put LG_DBID into automatic variable lg_dbid rather than
**          overloading dmc_cb->dmc_db_id.
**      31-jan-1994 (bryanp) B58380, B58381
**          Log LG/LK status code if LG or LK call fails.
**          Check return code from CSsuspend.
**      10-Mar-1998 (jenjo02)
**          Support for demand-driven WriteBehind threads. Changed prototype
**          to pass a boolean indicating whether this is the primary or
**          cloned WB thread and a pointer to DB_ERROR instead of a pointer
**          to DMC_CB.
**          Made this a common function called by Primary and Cloned threads.
*/
static DB_STATUS
dmc_write_behind_common(
i4          i_am_a_clone,
char        *cfa,
DB_ERROR    *dmf_err)
{
    /* NOTE(review): svcb, event_mask, events and wakeup_event are declared
    ** but never referenced in this routine (dmf_svcb is used directly below).
    */
    DM_SVCB         *svcb = dmf_svcb;
    DB_TRAN_ID      tran_id;
    LG_LXID         lx_id;
    DM0L_ADDDB      add_info;
    TIMERSTAT       stat_block;
    i4              lock_list;
    i4              len_add_info;
    i4              event_mask;
    i4              events, wakeup_event;
    /* Cleanup flags: each resource acquired below is released at the bottom
    ** only if its flag was set, so a partial start-up still unwinds cleanly.
    */
    i4              have_locklist = FALSE;
    i4              have_transaction = FALSE;
    i4              lg_added = FALSE;
    DB_STATUS       status = E_DB_OK;
    i4              wbcount = 0;        /* number of flush-loop iterations */
    i4              wait_time = 0;      /* stats: seconds spent waiting */
    i4              base_time = 0;      /* stats: epoch for interval timing */
    i4              flush_time, new_time;
    i4              length;
    i4              lgd_status;
    STATUS          stat;
    i4              error;
    CL_ERR_DESC     sys_err;
    DB_OWN_NAME     user_name;
    LG_DBID         lg_dbid;

#ifdef xDEBUG
    CS_SID          sid;
    i4              pid;

    PCpid(&pid);
    CSget_sid(&sid);
    TRdisplay("Starting Write Behind Thread %x in server process %d\n",
        sid, pid);
#endif

    CLRDBERR(dmf_err);

    if (status == E_DB_OK)
    {
        /*
        ** Add write behind thread to logging system.
        ** Write behind thread does not actually open a database, so use
        ** the LG_NOTDB flag.
        */
        STmove((PTR)DB_WRITEBEHIND_THREAD, ' ', sizeof(add_info.ad_dbname),
            (PTR) &add_info.ad_dbname);
        MEcopy((PTR)DB_INGRES_NAME, sizeof(add_info.ad_dbowner),
            (PTR) &add_info.ad_dbowner);
        MEcopy((PTR)"None", 4, (PTR) &add_info.ad_root);
        add_info.ad_dbid = 0;
        add_info.ad_l_root = 4;
        /* Only the first 4 bytes ("None") of ad_root are meaningful */
        len_add_info = sizeof(add_info) - sizeof(add_info.ad_root) + 4;

        stat = LGadd(dmf_svcb->svcb_lctx_ptr->lctx_lgid, LG_NOTDB,
            (char *)&add_info, len_add_info, &lg_dbid, &sys_err);
        if (stat != OK)
        {
            uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            uleFormat(NULL, E_DM900A_BAD_LOG_DBADD, &sys_err, ULE_LOG, NULL,
                (char *)NULL, 0L, (i4 *)NULL, &error, 4,
                0, dmf_svcb->svcb_lctx_ptr->lctx_lgid,
                sizeof(add_info.ad_dbname), (PTR) &add_info.ad_dbname,
                sizeof(add_info.ad_dbowner), (PTR) &add_info.ad_dbowner,
                4, (PTR) &add_info.ad_root);
            if (stat == LG_EXCEED_LIMIT)
                SETDBERR(dmf_err, 0, E_DM0062_TRAN_QUOTA_EXCEEDED);
            else
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
            status = E_DB_ERROR;
        }
        else
            lg_added = TRUE;
    }

    if (status == E_DB_OK)
    {
        /*
        ** Begin transaction in order to do LG and LK calls.
        ** Must specify NOPROTECT transaction so that LG won't pick us
        ** as a force-abort victim. Also, the Log File BOF can be advanced
        ** past this transaction's position in the log file, which means that
        ** the Write Behind thread should do no logging nor work that could
        ** require backout.
        */
        /* NOTE(review): DB_WRITEBEHIND_THROWN (THRead OWner Name?) differs
        ** from DB_WRITEBEHIND_THREAD used for the dbname above — presumed
        ** intentional (blank-padded owner-name variant); confirm in dm.h.
        */
        STmove((PTR)DB_WRITEBEHIND_THROWN, ' ', sizeof(DB_OWN_NAME),
            (PTR) &user_name);
        stat = LGbegin(LG_NOPROTECT, lg_dbid, &tran_id, &lx_id,
            sizeof(DB_OWN_NAME), user_name.db_own_name,
            (DB_DIS_TRAN_ID*)NULL, &sys_err);
        if (stat != OK)
        {
            uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            uleFormat(NULL, E_DM900C_BAD_LOG_BEGIN, &sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lg_dbid);
            if (stat == LG_EXCEED_LIMIT)
                SETDBERR(dmf_err, 0, E_DM0062_TRAN_QUOTA_EXCEEDED);
            else
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
            status = E_DB_ERROR;
        }
        else
            have_transaction = TRUE;
    }

    if (status == E_DB_OK)
    {
        /*
        ** Create locklist to use to wait for Write Behind event.
        */
        stat = LKcreate_list(LK_NONPROTECT, (i4) 0,
            (LK_UNIQUE *)&tran_id, (LK_LLID *)&lock_list, (i4)0,
            &sys_err);
        if (stat != OK)
        {
            uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            uleFormat(NULL, E_DM901A_BAD_LOCK_CREATE, &sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            if (stat == LK_NOLOCKS)
                SETDBERR(dmf_err, 0, E_DM004B_LOCK_QUOTA_EXCEEDED);
            else
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
            status = E_DB_ERROR;
        }
        else
            have_locklist = TRUE;
    }

    if (status == E_DB_OK)
    {
        /*
        ** Now begin loop of waiting for Write Behind event and flushing
        ** the buffer manager. A Clone executes the body once and leaves
        ** (the while condition below is FALSE for clones).
        */
        do
        {
            if (DMZ_ASY_MACRO(2))
            {
                /* Stats tracing enabled: account for time spent flushing
                ** (interval since base_time minus time spent waiting).
                */
                new_time = TMsecs();
                flush_time = new_time - base_time - wait_time;
                base_time = new_time;

                /* Write Write Behind thread statistics. */
                stat = CSstatistics(&stat_block, 0);
                TRdisplay("%22*- DMF Write Behind Thread statistics %21*-\n");
                TRdisplay(" Write Behind wakeups: %d Cpu : %d Dio : %d\n",
                    wbcount, stat_block.stat_cpu, stat_block.stat_dio);
                TRdisplay(" Time waiting for event: %d seconds\n", wait_time);
                TRdisplay(" Time to flush pages: %d seconds\n", flush_time);
                TRdisplay("%79*-\n");
            }

            /*
            ** Cloned threads don't wait for a signal, they just
            ** help flush the cache, then go away.
            */
            if (i_am_a_clone == FALSE)
            {
                /*
                ** Wait for the next signal that the buffer manager needs to
                ** have pages flushed.
                **
                ** This routine will also clear the event from the previous
                ** signal.
                */
                status = dm0p_wbflush_wait(cfa, lock_list, dmf_err);
                if (status != E_DB_OK)
                {
                    /*
                    ** If warning is returned, that's a signal that
                    ** this thread is to terminate.
                    */
                    if (status == E_DB_WARN)
                    {
                        status = E_DB_OK;
                        break;
                    }
                    else
                    {
                        /* Map internal errors to the generic WB error */
                        if (dmf_err->err_code > E_DM_INTERNAL)
                        {
                            uleFormat(dmf_err, 0, NULL, ULE_LOG, NULL,
                                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
                            SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                        }
                        break;
                    }
                }
            }

            /*
            ** Check LOGFULL status. We don't execute write behind when in
            ** logfull to avoid background log forces which wreak havoc on
            ** the recovery logspace reservation algorithms.
            */
            stat = LGshow(LG_S_LGSTS, (PTR)&lgd_status, sizeof(lgd_status),
                &length, &sys_err);
            if (stat != OK)
            {
                uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
                uleFormat(NULL, E_DM9017_BAD_LOG_SHOW, &sys_err, ULE_LOG, NULL,
                    (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, LG_S_LGSTS);
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                status = E_DB_ERROR;
                break;
            }

            /*
            ** If logfull, skip the cache flush.
            */
            if (lgd_status & LGD_LOGFULL)
            {
                /*
                ** Pause for a moment since the write-behind event will likely
                ** be immediately resignaled. We expect that this 5-second
                ** wait will return with "timed-out"; if it returns with
                ** "interrupted", then the server is being shut down. If it
                ** returns with any other return code, something is awry.
                */
                stat = CSsuspend(CS_TIMEOUT_MASK | CS_INTERRUPT_MASK, 5, 0);
                if (stat == E_CS0008_INTERRUPTED)
                {
                    /* Server shutdown: exit the loop cleanly */
                    status = E_DB_OK;
                    break;
                }
                if (stat != E_CS0009_TIMEOUT)
                {
                    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG,
                        NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
                    SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                    status = E_DB_ERROR;
                    break;
                }
            }
            else
            {
                /*
                ** Flush some dirty pages out of the Buffer Manager.
                */
                if (dmf_svcb->svcb_status & SVCB_IOMASTER)
                {
                    /* in IOMASTER server use same func as write-along thread */
                    i4      numforce;
                    u_i4    duty = 0xffffffff;  /* flush with no duty limit */

                    status = dm0p_write_along(lock_list, (i4)lx_id,
                                &numforce, duty, dmf_err);
                }
                else
                    status = dm0p_flush_pages(lock_list, (i4)lx_id, cfa,
                                dmf_err);

                if (status != E_DB_OK)
                {
                    if (dmf_err->err_code > E_DM_INTERNAL)
                    {
                        uleFormat(dmf_err, 0, NULL, ULE_LOG, NULL,
                            (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
                        SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                    }
                    break;
                }
            }

            /*
            ** If dumping statistics, save time for event to be signaled.
            */
            if (DMZ_ASY_MACRO(2))
                wait_time = TMsecs() - base_time;

            wbcount++;

        } while (i_am_a_clone == FALSE);
    }

    if (i_am_a_clone == FALSE)
    {
        /* Write Write Behind thread statistics.
        ** NOTE(review): the original comment said "Fast Commit thread" —
        ** presumably copy/paste from the fast-commit thread code.
        */
        stat = CSstatistics(&stat_block, 0);
        TRdisplay("\n%22*- DMF Write Behind Thread statistics %21*-\n");
        TRdisplay(" Write Behind wakeup: %d Cpu : %d Dio : %d\n",
            wbcount, stat_block.stat_cpu, stat_block.stat_dio);
        TRdisplay("%79*-\n");
    }

    /*
    ** Clean up transaction and/or lock list left hanging around.
    ** Each failure below is logged, but only overwrites dmf_err/status
    ** if no earlier error is already being reported.
    */
    if (have_transaction)
    {
        stat = LGend(lx_id, 0, &sys_err);
        if (stat != OK)
        {
            uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            uleFormat(NULL, E_DM900E_BAD_LOG_END, &sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lx_id);
            if ( status == E_DB_OK )
            {
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                status = E_DB_ERROR;
            }
        }
        have_transaction = FALSE;
    }

    if (have_locklist)
    {
        stat = LKrelease(LK_ALL, lock_list, (LK_LKID *)0,
            (LK_LOCK_KEY *)0, (LK_VALUE *)0, &sys_err);
        if (stat != OK)
        {
            uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            uleFormat(NULL, E_DM901B_BAD_LOCK_RELEASE, &sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lock_list);
            if ( status == E_DB_OK )
            {
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                status = E_DB_ERROR;
            }
        }
        have_locklist = FALSE;
    }

    if (lg_added)
    {
        stat = LGremove(lg_dbid, &sys_err);
        if (stat != OK)
        {
            uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
            uleFormat(NULL, E_DM9016_BAD_LOG_REMOVE, &sys_err, ULE_LOG, NULL,
                (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lg_dbid);
            if ( status == E_DB_OK )
            {
                SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
                status = E_DB_ERROR;
            }
        }
    }

    return (status);
}