Пример #1
0
/* decode semaphore error
** Report a failed CS_SEMAPHORE operation on stderr: prints the caller's
** message, the mapped error text, the calling philosopher's scb, and a
** dump of the semaphore's fields.  Diagnostic only; no value returned.
*/
void
semerr(
i4 rv,
CS_SEMAPHORE *sp,
char *msg)
{
    MY_SCB *scb;
#ifdef EX_DEBUG
    EX_CONTEXT context;

    /* Guard the diagnostic itself: if dumping the (possibly corrupt)
    ** semaphore raises an exception, report and bail out cleanly.
    */
    if (EXdeclare(ex_handler, &context) != OK) {
	/* some exception was raised */
	SIfprintf( stderr,"Error: unexpected exception in semerr()...");
	EXdelete();
	return;
    }
#endif

    CSget_scb( (CS_SCB **)&scb );
    SIfprintf(stderr, "%s %p returned %d (%s)\n", msg, sp, rv, maperr(rv) );
    SIfprintf(stderr, "\tPhilosopher %d, scb %p\n", scb->phil, scb );
    /* Dump each semaphore field with its address for post-mortem analysis */
    SIfprintf(stderr, "\t%p->cs_value %d\n", sp, sp->cs_value );
    SIfprintf(stderr, "\t%p->cs_count %d\n", sp, sp->cs_count );
    SIfprintf(stderr, "\t%p->cs_owner %x\n", sp, sp->cs_owner );
    SIfprintf(stderr, "\t%p->cs_list %p\n", sp, sp->cs_list );
    SIfprintf(stderr, "\t%p->cs_next %p\n", sp, sp->cs_next );

#ifdef EX_DEBUG
    EXdelete();
#endif
}
Пример #2
0
/*
** Name: scf_cut_test - test CUT
**
** Description:
**	Test the functionality of the CUT modules
**
** Inputs:
**	None
**
** Outputs:
**	None
**
** Returns:
**	None
**
** History:
**	2-jul-98 (stephenb)
**	    Created
*/
VOID
scf_cut_test()
{
    SCD_SCB	*self_scb;
    SCF_CB	scf_cb;
    SCF_FTC	thread_ctl;
    STATUS	status;
    char	thread_name[] = "<CUT Test Master>";

    CSget_scb((CS_SCB **)&self_scb);

    /*
    ** Describe the factotum thread to create: the base CUT test thread,
    ** which will in turn create the other test threads.
    */
    thread_ctl.ftc_facilities = (1 << DB_ADF_ID) | (1 << DB_SCF_ID) |
	(1 << DB_CUF_ID);
    thread_ctl.ftc_data = NULL;
    thread_ctl.ftc_data_length = 0;
    thread_ctl.ftc_thread_name = thread_name;
    thread_ctl.ftc_priority = SCF_CURPRIORITY;
    thread_ctl.ftc_thread_entry = thread_main;
    thread_ctl.ftc_thread_exit = NULL;

    /* Fill in the SCF request block and hand it the thread description */
    scf_cb.scf_type = SCF_CB_TYPE;
    scf_cb.scf_length = sizeof(SCF_CB);
    scf_cb.scf_session = DB_NOSESSION;
    scf_cb.scf_facility = DB_SCF_ID;
    scf_cb.scf_ptr_union.scf_ftc = &thread_ctl;

    (VOID)scs_atfactot(&scf_cb, self_scb);

    return;
}
Пример #3
0
/*{
** Name: scs_enable	- enable AIC delivery
**
** Description:
**      This function allows AIC delivery to commence.  If any AIC's 
**      have been queued since disabling, they are delivered now.
**
** Inputs:
**      SCS_ENABLE                      op code to scf_call()
**      scf_cb                          no fields used for input
**
** Outputs:
**      scf_cb
**          .error
**              .err_code               E_SC_OK or...
**                  E_SC002A_NOT_DISABLED not particularly a problem, but
**					    included for completeness and to
**					    cause tracking down since it is
**					    sloppy
**	Returns:
**	    E_DB_{OK, ERROR, FATAL}
**	Exceptions:
**	    none
**
** Side Effects:
**	    All queued AIC's are delivered now.
**
** History:
**	20-Jan-86 (fred)
**          Created for jupiter.
**	29-Jun-1993 (daveb)
**	    correctly cast arg to CSget_scb().
**	2-Jul-1993 (daveb)
**	    prototyped.
[@history_line@]...
*/
DB_STATUS
ascs_enable(SCF_CB *scf_cb )
{
    SCD_SCB	*scb;
    DB_STATUS	status;

    /* Use the proxy session block if one is set, else the current thread's */
    scb = Sc_main_cb->sc_proxy_scb;
    if (scb == NULL)
	CSget_scb((CS_SCB **)&scb);

    if (scb->scb_sscb.sscb_cfac >= 0)
    {
	/* AICs were not disabled: warn, but carry on anyway */
	scf_cb->scf_error.err_code = E_SC002A_NOT_DISABLED;
	status = E_DB_WARN;
    }
    else
    {
	scf_cb->scf_error.err_code = E_SC_OK;
	status = E_DB_OK;
    }

    /* Re-enable delivery by making the facility code non-negative again */
    scb->scb_sscb.sscb_cfac = abs(scb->scb_sscb.sscb_cfac);

    /* Deliver any interrupt that was queued while delivery was disabled */
    if (scb->scb_sscb.sscb_interrupt == SCS_PENDING_INTR)
    {
	ascs_attn(scb->scb_sscb.sscb_eid, scb);
    }
    return(status);
}
Пример #4
0
/*
** Name: ascs_avformat	- Show info on the current thread during an AV
**
** Description:
**	This routine calls scs_iformat to show what thread was running
**	when the server got an AV.
**
** Inputs:
**	None.
**
** Outputs:
**	Returns:
**	    STATUS
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	27-feb-1992 (rog)
**	    Created.
**	29-Jun-1993 (daveb)
**	    correctly cast arg to CSget_scb().
**	02-Jul-1993 (daveb)
**	    prototyped.
**	07-jul-1993 (rog)
**	    Changed the message from saying a fatal error occurred to just
**	    saying that an error occurred.
[@history_template@]...
*/
STATUS
ascs_avformat(void)
{
	STATUS		status;
	EX_CONTEXT	context;

    /* Protect the formatting below: if it raises, just report failure */
    if (EXdeclare(ascs_fhandler, &context) != OK)
    {
	status = FAIL;
    }
    else
    {
	SCD_SCB		*scb;
	i4		local_err;
	char 		*banner;
	i4		banner_len;

	banner = "An error occurred in the following session:";
	banner_len = STlength(banner);

	/* Log the banner, then format the current session's info */
	CSget_scb((CS_SCB **)&scb);
	ule_format(0, 0, ULE_MESSAGE, 0, banner, banner_len,0,&local_err,0);
	status = ascs_iformat(scb, 1, 0, 1);
    }

    EXdelete();
    return(status);
}
Пример #5
0
/*
** Name: CS_ef_wait - wait for specified event to occur.
**
** Description:
**	This routine suspends the current thread until a specified event
**	occurs.  The event must have been set using the CS_ef_set call before
**	the thread can call CS_ef_wait.  The event must be signaled by some
**	other thread calling CS_ef_wake with the same event flag.
**
**	The routines that drive the event mechanisms are:
**
**	    CS_ef_set	: set event wait flag for current session
**	    CS_ef_wait	: suspend current session until event is signalled
**	    CS_ef_wake	: signal event - wake up sessions waiting for event
**	    CS_ef_cancel: cancel current session's event wait flag
**
**	After calling CS_ef_set, the client must call either CS_ef_wait or
**	CS_ef_cancel.  Failing to do this will cause the system to get its
**	suspends and resumes out of sync.
**
**	The client of the event routines must be careful of the case where
**	the event desired occurs BEFORE the call to CS_ef_set.  If this
**	case cannot be handled by the client, then the calls to the event
**	routines should be protected by some sort of semaphore, as shown below.
**
**	The CS event routines are not intended to be high traffic - ultra fast
**	event drivers, they are used primarily to wait for out-of-resource
**	conditions to be resolved - these are not expected to happen very often.
**
**	Event flag values should be reserved in CS.H.
**	Event waits are NOT interruptable.
**
**	The protocols for using the CS event wait calls are described above
**	in the module header.
**
** Inputs:
**      none
**
** Outputs:
**      none
**
**	Returns:
**	    none
**
**	Exceptions:
**	    none
**
** Side Effects:
**	Thread is suspended until event is signalled by another session.
**
** History:
**      24-jul-89 (rogerk)
**          Created.
*/
VOID
CS_ef_wait()
{
    CS_SCB	*scb;

    /* NOTE(review): scb is fetched but never used in this routine;
    ** presumably kept for symmetry with the other CS_ef_* calls —
    ** confirm before removing.
    */
    CSget_scb(&scb);
    /* Suspend unconditionally (no mask, no timeout); per the module
    ** protocol the wakeup comes from another session calling CS_ef_wake.
    */
    CSsuspend(0, 0, 0);
}
Пример #6
0
DB_STATUS
sca_trace( i4  dispose_mask, i4  length, char *message )
{
    SCF_CB		*cb;
    SCD_SCB		*scb;
    DB_STATUS		status;

    /* Validate arguments: a message, a sane length, only known mask bits */
    if (    !message
	||  (length <= 0)
	||  (length > ER_MAX_LEN)
	||  (dispose_mask &
		~(ADD_FRONTEND_MASK | ADD_ERROR_LOG_MASK | ADD_TRACE_FILE_MASK)))
    {
	return(E_DB_ERROR);
    }

    /* Send to the frontend only once the server is fully operational */
    if ((dispose_mask & ADD_FRONTEND_MASK)
		&& (Sc_main_cb->sc_state == SC_OPERATIONAL))
    {
	CSget_scb((CS_SCB **)&scb);

	cb = scb->scb_sscb.sscb_scccb;
	cb->scf_length = sizeof(*cb);
	cb->scf_type = SCF_CB_TYPE;
	cb->scf_facility = DB_PSF_ID;
	cb->scf_len_union.scf_blength = (u_i4) length;
	cb->scf_ptr_union.scf_buffer = message;
	cb->scf_session = (SCF_SESSION)scb;	/* print to current session */

	status = scf_call(SCC_TRACE, cb);
	if (status != E_DB_OK)
	{
	    TRdisplay(
		"SCA_TRACE: SCF error %d. displaying query text to user\n",
		cb->scf_error.err_code);
	}
    }

    /* Trace file only when the error log was not also requested
    ** (logging below already echoes to the trace file in that case).
    */
    if ((dispose_mask & (ADD_TRACE_FILE_MASK | ADD_ERROR_LOG_MASK))
	    == ADD_TRACE_FILE_MASK)
    {
	TRdisplay("%t\n", length, message);
    }

    if (dispose_mask & ADD_ERROR_LOG_MASK)
    {
	sc0ePut(NULL, E_SC0318_SCA_USER_TRACE, 0, 1, length, message);
    }
    return(E_DB_OK);
}
Пример #7
0
/*{
** Name: check_list	 -  Return the number of outstanding
**			    I/O's on this thread.
**
** Description:
**	Checks for outstanding I/O's on this thread.
**
** Inputs:
**	None.
**      
** Outputs:
**      None.
**
** Returns:
**      i4 		   Number of outstanding I/O's.
**
** Exceptions:
**      none
**
** Side Effects:
**      None.
**
** History:
**	19-May-1999 (jenjo02)
**	    Created.
*/
static i4
check_list(void)
{
    DI_TGIO	*tgio;
    CS_SCB	*scb;

    CSget_scb(&scb);

    if ( (tgio = (DI_TGIO *)scb->cs_ditgiop) &&
	  tgio->tgio_scb == scb )
    {
	return(tgio->tgio_queued);
    }
    return(0);
}
Пример #8
0
/*
**  Name: CM_Initialize      - Starts the Connection Manager
**
**  Description:
**      Initialize the Connection Manager so it can accept Connect & Drop
**	During initialization the Connection Manager calls the HSH facility
**	HSH in turn allocates memory which will be freed when
**	CM_Terminate is called.  Otherwise a memory leak will occur.
**
**  Inputs:
**
**  Outputs:
**      err		- Pointer to a error block
**
**  Returns:
**      OK if successful.
**
**  History:
**      25-Feb-98 (shero03)
**          Created.
**	01-Apr-1999 (shero03)
**	    Fixed compiler errors after api change
*/
II_EXTERN STATUS II_EXPORT
IIcm_Initialize (PTR err)
{
    CS_SCB	*scb;
    i4 		hsh_flags;

    /* Refuse to initialize twice */
    if (pCM_cb)
        return(E_AP0006_INVALID_SEQUENCE);

    pCM_cb = &CM_cb;

    /* Reset statistics counters, flags and disconnect callbacks */
    CM_cb.cm_conn_ctr = 0;
    CM_cb.cm_reuse_ctr = 0;
    CM_cb.cm_drop_ctr = 0;
    CM_cb.cm_cleanup_ctr = 0;
    CM_cb.cm_flags = 0;
    CM_cb.cm_api_disconnect = NULL;
    CM_cb.cm_libq_disconnect = NULL;

    /* Record whether we are running with OS threads */
    CSget_scb(&scb);
    if (scb && CS_is_mt())
        CM_cb.cm_flags |= CM_FLAG_MULTITHREADED;

    /* FIXME - make the time interval user modifiable */
    CM_cb.cm_tm_next_purge = TMsecs() + 5 * 60;	/* 5 minutes */

    /* Initialize the Hash Indexes: keyed indexes first... */
    hsh_flags = HSH_SHARED | HSH_VARIABLE | HSH_STRKEY;
    CM_cb.cm_db_index = HSH_New(NUM_KEY_INDEX, CM_KEY_OFF,
				sizeof(CM_KEY_BLK) + 20,
				1, MAXI4, "CM_DB_SEM", hsh_flags);
    CM_cb.cm_userid_index = HSH_New(NUM_KEY_INDEX, CM_KEY_OFF,
				sizeof(CM_KEY_BLK) + 8,
				1, MAXI4, "CM_USERID_SEM", hsh_flags);

    /* ...then the connection indexes */
    hsh_flags = HSH_SHARED;
    CM_cb.cm_used_conn_index = HSH_New(NUM_CONN_INDEX, CM_CONN_OFF_HNDL,
				sizeof(CM_CONN_BLK),
				1, MAXI4, "CM_CONN_USED_SEM", hsh_flags);
    CM_cb.cm_free_conn_index = HSH_New(NUM_CONN_INDEX, CM_CONN_OFF_DB,
				sizeof(CM_CONN_BLK),
				0, 0, "CM_CONN_FREE_SEM", hsh_flags);

    return IIAPI_ST_SUCCESS;
}		/* proc  CM_Initialize */
Пример #9
0
STATUS
CS_mod_session(char *uns_decimal_session, CS_SCB ** scbp)
{
	u_i4            scb_as_ulong;
	CS_SCB         *target;
	CS_SCB         *scan;
	CS_SCB         *self;
	STATUS          stat;
	bool            on_known_list = FALSE;

	CSget_scb(&self);

	/* Convert the decimal session string back into an SCB address */
	CS_str_to_uns(uns_decimal_session, &scb_as_ulong);
	target = (CS_SCB *) scb_as_ulong;

	/* First try to find the address on the server's known-session list */
	for (scan = Cs_srv_block.cs_known_list->cs_next;
	     scan && scan != Cs_srv_block.cs_known_list;
	     scan = scan->cs_next) {
		if (scan == target) {
			on_known_list = TRUE;
			break;
		}
	}

	if (!on_known_list && (target = CS_find_scb(target)) == 0) {
		/* FIXME -- real error status */
		/* "Invalid session id" */

		stat = MO_NO_INSTANCE;
	} else if (MEcmp(target->cs_username, self->cs_username,
			 sizeof(target->cs_username))) {
		/* FIXME -- better error */
		/* "Superuser or owner status required to modify sessions" */

		stat = MO_NO_WRITE;
	} else {
		*scbp = target;
		stat = OK;
	}
	return (stat);
}
Пример #10
0
/*{
** Name: scs_disable	- disable AIC delivery
**
** Description:
**      This function temporarily disables AIC delivery for the entire 
**      session.  SCS_ENABLE's and SCS_DISABLE's do not nest. 
** 
**      In the event that a facility disables AIC's, all AIC's for 
**      that facility will be queued and delivered when AIC's are 
**      enabled again.
**
** Inputs:
**      SCS_DISABLE                     op code to scf_call()
**      scf_cb                          no fields used for input
**
** Outputs:
**      scf_cb
**          .error
**              .err_code               E_SC_OK or...
**                  E_SC0011_NESTED_DISABLE attempt to disable when
**					    already disabled.
**
**	Returns:
**	    E_DB_{OK, ERROR, FATAL}
**	Exceptions:
**	    none
**
** Side Effects:
**	    Note:  PAINE's are never disabled (otherwise they wouldn't be a pain)
**
** History:
**	20-Jan-86 (fred)
**          Created on Jupiter
**	29-Jun-1993 (daveb)
**	    correctly cast arg to CSget_scb().
**	2-Jul-1993 (daveb)
**	    prototyped.
*/
DB_STATUS
ascs_disable(SCF_CB *scf_cb )
{
    SCD_SCB		*scb;

    /* Use the proxy session block if one is set, else the current thread's */
    scb = Sc_main_cb->sc_proxy_scb;
    if (scb == NULL)
	CSget_scb((CS_SCB **)&scb);

    if (scb->scb_sscb.sscb_cfac < 0)
    {
	/* Already negative: a disable is outstanding, so this one nests */
	scf_cb->scf_error.err_code = E_SC0011_NESTED_DISABLE;
	return(E_DB_WARN);
    }

    /* Mark disabled by negating the current facility code */
    scb->scb_sscb.sscb_cfac = -scb->scb_sscb.sscb_cfac;
    scf_cb->scf_error.err_code = E_SC_OK;
    return(E_DB_OK);
}
Пример #11
0
static STATUS
CS_mod_session( char *uns_decimal_session, CS_SCB **scbp )
{
    u_i4 scb_as_ulong;
    CS_SID target_sid;
    CS_SCB *target;
    CS_SCB *self;

    CSget_scb(&self);

    /* Decimal session string -> SID -> SCB */
    CS_str_to_uns( uns_decimal_session, &scb_as_ulong );
    target_sid = (CS_SID)scb_as_ulong;
    target = CS_find_scb(target_sid);

    if (target == NULL)
    {
	/* FIXME -- real error status */
	/* "Invalid session id" */

	return( MO_NO_INSTANCE );
    }

    if (MEcmp(target->cs_username, self->cs_username,
	      sizeof(target->cs_username)))
    {
	/* FIXME -- better error */
	/* "Superuser or owner status required to modify sessions" */

	return( MO_NO_WRITE );
    }

    *scbp = target;
    return( OK );
}
Пример #12
0
/*{
** Name: force_list -  Force all outstanding writes to disc.
**
** Description:
**	This routine attempts to write all outstanding requests.
**
** Inputs:
**	none
**      
** Outputs:
**      err_code             Pointer to a variable used
**                           to return operating system 
**                           errors.
** Returns:
**      STATUS		     The outcome of the write requests.
** Exceptions:
**      none
**
** Side Effects:
**      None.
**
** History:
**	19-May-1999 (jenjo02)
**	    Created.
*/
static STATUS
force_list( CL_ERR_DESC *err_code )
{
    CS_SCB 	*scb;
    DI_TGIO 	*tgio;
    STATUS 	status = OK;

    CSget_scb(&scb);

    /* Nothing to do unless this thread owns a gather context with
    ** requests still queued.
    */
    tgio = (DI_TGIO *)scb->cs_ditgiop;
    if ( tgio && tgio->tgio_scb == scb && tgio->tgio_queued )
    {
	/* Write all remaining on queue */
	status = do_writev( tgio, err_code );

	/* Clear the thread's TGIO pointer, make it available to others */
	tgio->tgio_scb->cs_ditgiop = (PTR)NULL;
	tgio->tgio_state = TGIO_INACTIVE;
    }

    return( status );
}
Пример #13
0
/*
** Name: thread_main
**
** Description:
**	controling function for main CUT test thread. This function creates the
**	other test threads and also creates the communications buffers which
**	are used to coordinate with the other threads.
**
** Inputs:
**	ftx	Thread execution control block
**
** Outputs:
**	None
**
** Returns:
**	None
**
** History:
**	2-jul-98 (stephenb)
**	    Created
*/
static DB_STATUS
thread_main(SCF_FTX	*ftx)
{
    CUT_RCB		cut_buf[NUM_BUFS];
    CUT_RCB		*buf_ptr[NUM_BUFS];
    CUT_BUF_INFO	cut_buf_info;
    DB_STATUS		status;
    PTR			thread;
    PTR			buf_handle;
    DB_ERROR		error;
    SCF_CB		scf;
    SCD_SCB		*scb;
    SCF_FTC		ftc;
    BUF_LIST		*buf_list;
    i4			num_threads = 3;
    COMM_BUF		comm_buf;
    CS_SID		thread_id[2];
    i4			num_cells;
    STATUS		cl_stat;
    i4			i;
    PTR			buf_dat;
    char		child_name1[] = "<CUT Test Child 1>";
    char		child_name2[] = "<CUT Test Child 2>";

    CSget_scb((CS_SCB**)&scb);
    
    /*
    ** Initialize
    */
    STcopy("CUT test, read buffer", cut_buf[READ_BUF].cut_buf_name);
    cut_buf[READ_BUF].cut_cell_size = 10;
    cut_buf[READ_BUF].cut_num_cells = 10;
    cut_buf[READ_BUF].cut_buf_use = CUT_BUF_READ;
    
    STcopy("CUT test, write buffer", cut_buf[WRITE_BUF].cut_buf_name);
    cut_buf[WRITE_BUF].cut_cell_size = 10;
    cut_buf[WRITE_BUF].cut_num_cells = 5;
    cut_buf[WRITE_BUF].cut_buf_use = CUT_BUF_WRITE;

    STcopy("CUT test, command buffer", cut_buf[COMMAND_BUF].cut_buf_name);
    cut_buf[COMMAND_BUF].cut_cell_size = sizeof(comm_buf);
    cut_buf[COMMAND_BUF].cut_num_cells = 2;
    cut_buf[COMMAND_BUF].cut_buf_use = CUT_BUF_WRITE;

    TRdisplay("%@ CUT test: Master: Initializing...\n");
    status = cut_thread_init(&thread, &error);
    if (status != E_DB_OK)
    {
	if (error.err_code > E_CUF_INTERNAL)
	    sc0e_uput(error.err_code, 0, 0, 0, (PTR)0, 0, (PTR)0, 0, 
		(PTR)0, 0, (PTR)0, 0, (PTR)0, 0, (PTR)0 );
	TRdisplay("%@ CUT test: Master: Error initializing thread in CUT, \
test terminated\n");
	return(E_DB_ERROR);
    }

    /*
    ** Create buffers
    */
    for (i = 0; i < NUM_BUFS; i++)
    {
	buf_ptr[i] = &cut_buf[i];
    }
    status = cut_create_buf(NUM_BUFS, buf_ptr, &error);
    if (status != E_DB_OK)
    {
	if (error.err_code > E_CUF_INTERNAL)
	    sc0e_uput(error.err_code, 0, 0, 0, (PTR)0, 0, (PTR)0, 0, 
		(PTR)0, 0, (PTR)0, 0, (PTR)0, 0, (PTR)0 );
	TRdisplay("%@ CUT test: Master: Error creating CUT buffers, \
test terminated\n");
	(VOID)cut_thread_term(TRUE, &error);
	return(E_DB_ERROR);
    }

    /*
    ** get data area
    */
    buf_dat = MEreqmem(0, 100, TRUE, &cl_stat);
    if (cl_stat != OK || buf_dat == NULL)
    {
	sc0e_0_put(E_SC0023_INTERNAL_MEMORY_ERROR, 0);
	TRdisplay("%@ CUT Test: Master: Can't get memory, test terminated\n");
	(VOID)cut_thread_term(TRUE, &error);
	return(E_DB_ERROR);
    }

    /*
    ** Load up buffer list
    */
    buf_list = (BUF_LIST *)MEreqmem(0, (NUM_BUFS + 1) * sizeof(BUF_LIST),
	TRUE, &cl_stat);
    if (cl_stat != OK || buf_list == NULL)
    {
	sc0e_0_put(E_SC0023_INTERNAL_MEMORY_ERROR, 0);
	TRdisplay("%@ CUT Test: Master: Can't get memory, test terminated\n");
	(VOID)cut_thread_term(TRUE, &error);
	return(E_DB_ERROR);
    }
    buf_list[READ_BUF].buf_use = CUT_BUF_WRITE;
    buf_list[READ_BUF].buf_handle = cut_buf[READ_BUF].cut_buf_handle;
    buf_list[WRITE_BUF].buf_use = CUT_BUF_READ;
    buf_list[WRITE_BUF].buf_handle = cut_buf[WRITE_BUF].cut_buf_handle;
    buf_list[COMMAND_BUF].buf_use = CUT_BUF_READ;
    buf_list[COMMAND_BUF].buf_handle = cut_buf[COMMAND_BUF].cut_buf_handle;
    /* null terminated list */
    buf_list[NUM_BUFS].buf_use = 0;
    buf_list[NUM_BUFS].buf_handle = NULL;

    /*
    ** create threads and pass buffer address
    */
    scf.scf_type = SCF_CB_TYPE;
    scf.scf_length = sizeof(SCF_CB);
    scf.scf_session = DB_NOSESSION;
    scf.scf_facility = DB_SCF_ID;
    scf.scf_ptr_union.scf_ftc = &ftc;
    
    ftc.ftc_facilities = SCF_CURFACILITIES;
    ftc.ftc_data = (PTR)buf_list;
    ftc.ftc_data_length = (NUM_BUFS + 1) * sizeof(BUF_LIST);
    ftc.ftc_thread_name = child_name1;
    ftc.ftc_priority = SCF_CURPRIORITY;
    ftc.ftc_thread_entry = thread_child;
    ftc.ftc_thread_exit = NULL;

    status = scs_atfactot(&scf, scb);
    if (status != E_DB_OK)
    {
	TRdisplay("%@ CUT Test: Master: Can't create child threads, \
exiting tests...\n");
	(VOID)cut_thread_term(TRUE, &error);
	return(E_DB_ERROR);
    }
Пример #14
0
/*{
** Name: gather_list -  Gather write requests together.
**
** Description:
**	This routine batches up write requests for later submission via     
**	the writev() routine.
**
** Inputs:
**	DI_GIO * gio	  - gio Control block for write operation.
**      
** Outputs:
**      err_code             Pointer to a variable used
**                           to return operating system 
**                           errors.
**    Returns:
**          OK
**          DI_MEREQMEN_ERR       - MEreqmem failed.
**    Exceptions:
**        none
**
** Side Effects:
**        Will call do_writev if number of write requests has reached
**        GIO_MAX_QUEUED.
**
** History:
**	19-May-1999 (jenjo02)
**	    Created.
**	09-Jul-1999 (jenjo02)
**	    Watch I/O queue as it's being constructed. If pre-ordered,
**	    skip the quicksort.
**	25-Aug-2005 (schka24)
**	    Don't blindly lru-open each request;  instead, see if the
**	    file (DI_IO) is already on the queue, and share its fd with the
**	    queued request.  This is essential when doing fd-per-thread,
**	    as each call to lru-open would allocate a new fd, thus negating
**	    the ability to do a writev!  When not doing fd-per-thread,
**	    this change is effectively a no-op.
**	    Also, return without queueing if queue-flush fails.
**	25-Jan-2006 (jenjo02)
**	    Defer lru-open until do_writev to prevent hogging FDs
**	    while waiting for futhur writes.
*/
static STATUS
gather_list( DI_GIO *gio, i4 *uqueued, CL_ERR_DESC *err_code)
{
    DI_GIO	*qgio;			/* a GIO on the queue already */
    DI_TGIO     *tgio;
    STATUS 	status = OK;
    CS_SCB	*scb;

    /* Locate this thread's gather context via its SCB, if it owns one */
    CSget_scb(&scb);

    if ( (tgio = (DI_TGIO *)scb->cs_ditgiop) == (DI_TGIO *)NULL ||
	  tgio->tgio_scb != scb )
    {
	/*
	** No TGIO for this thread, so reuse an inactive one
	** or allocate a new one.
	*/
	CS_synch_lock( &GWthreadsSem );

	/* Scan the global list for an inactive TGIO to reuse */
	for ( tgio = GWthreads; 
	      tgio && tgio->tgio_state != TGIO_INACTIVE;
	      tgio = tgio->tgio_next );
    
	if (tgio == NULL)
	{
	    /* None free: allocate a zeroed one and push it on the list */
	    tgio = (DI_TGIO *)MEreqmem(0,
		    sizeof( DI_TGIO ),
		      TRUE, NULL);
	    if (tgio == NULL)
	    {
		CS_synch_unlock( &GWthreadsSem );
		return( DI_MEREQMEM_ERR);
	    }
	    tgio->tgio_next = GWthreads;
	    GWthreads = tgio;
	}

	/* Claim the TGIO for this thread and reset its queue counters */
	scb->cs_ditgiop = (PTR)tgio;
	tgio->tgio_scb = scb;
	tgio->tgio_uqueued = uqueued;
	*tgio->tgio_uqueued = tgio->tgio_queued = 0;

	tgio->tgio_state = TGIO_ACTIVE;

	CS_synch_unlock( &GWthreadsSem );
    }

    /* If the queue is full, force the writes.
    ** If this fails, we get blamed, but someone has to report it.
    */
    if ( tgio->tgio_queued == GIO_MAX_QUEUED )
    {
	status = do_writev( tgio, err_code );
	if (status != OK)
	    return (status);
    }

    /*
    ** Check for out of sequence GIO.
    ** If all I/O's are presented in file/offset order,
    ** a sort won't be needed.
    ** NOTE(review): tgio_state is assigned TGIO_ACTIVE above but OR'd
    ** with TGIO_UNORDERED here, i.e. used both as a value and as a bit
    ** mask — presumably the TGIO_* values are disjoint bits; confirm.
    */
    if ( (tgio->tgio_state & TGIO_UNORDERED) == 0 && tgio->tgio_queued )
    {
	qgio = tgio->tgio_queue[tgio->tgio_queued - 1];
	
	/* Compare against the last queued request: ordered by file
	** first, then by offset within the file.
	*/
	if ( gio->gio_f < qgio->gio_f ||
	    (gio->gio_f == qgio->gio_f &&
	     gio->gio_offset < qgio->gio_offset) )
	{
	    tgio->tgio_state |= TGIO_UNORDERED;
	}
    }

    /* Add this request to the queue */
    tgio->tgio_queue[tgio->tgio_queued++] = gio;
    
    /* Update caller's queued count */
    *tgio->tgio_uqueued = tgio->tgio_queued;

    return( status );
}
Пример #15
0
VOID
CS_sampler(void)
{
    CS_SCB	*an_scb;
    i4	sleeptime;
    i4	cs_thread_type;
    i4	cs_state;

    /*
    ** This thread goes into a loop:
    **	1. Lock the sampler block
    **	2. Do sampling
    **	3. Sleep for the specified interval
    ** The thread will exit normally when the sampler block pointer is NULL.
    ** The thread exits abnormally if it cannot lock the block.
    */

    for (;;)
    {
        /* Stop sampling if the sampler block is gone or this thread is
        ** dead / has an interrupt pending.
        */
        CSget_scb(&an_scb);
        if ( CsSamplerBlkPtr == NULL ||
                (an_scb->cs_mask & CS_DEAD_MASK) ||
                (an_scb->cs_mask & CS_IRPENDING_MASK))
        {
            return;
        }

        ++CsSamplerBlkPtr->numsamples;   /* Count the number of times we sample */

        /* Loop thru all the SCBs in the server */
        for (an_scb = Cs_srv_block.cs_known_list->cs_next;
                an_scb && an_scb != Cs_srv_block.cs_known_list;
                an_scb = an_scb->cs_next)
        {
            /* skip the sampler thread and the monitor thread */
            if (an_scb == &samp_scb ||
                    (an_scb->cs_mask & CS_MNTR_MASK) )
                continue;

            /* Clamp thread type and state into table range; out-of-range
            ** values are counted in the last ("invalid") slot.
            */
            if (an_scb->cs_thread_type >= -1 &&
                    an_scb->cs_thread_type <= MAXSAMPTHREADS - 1)
                cs_thread_type = an_scb->cs_thread_type;
            else
                cs_thread_type = MAXSAMPTHREADS - 1;	/* use the <invalid> type */

            if (an_scb->cs_state >= 0 &&
                    an_scb->cs_state <= MAXSTATES - 1)
                cs_state = an_scb->cs_state;
            else
                cs_state = MAXSTATES - 1;

            switch (cs_state)
            {
                i4  facility;

            case CS_COMPUTABLE:
                /* NOTE(review): '==' means the mutex bit must be the ONLY
                ** bit set in cs_mask to count as a mutex wait here —
                ** presumably intentional; confirm an '&' test isn't meant.
                */
                if ( an_scb->cs_mask == CS_MUTEX_MASK )
                {
                    /* really waiting on a mutex */
                    ++CsSamplerBlkPtr->
                    Thread[cs_thread_type].numthreadsamples;
                    ++CsSamplerBlkPtr->             /* count current state */
                    Thread[cs_thread_type].state[CS_MUTEX];
                    AddMutex( ((CS_SEMAPHORE *)an_scb->cs_sync_obj),
                              cs_thread_type );
                }
                else
                {
                    ++CsSamplerBlkPtr->
                    Thread[cs_thread_type].numthreadsamples;
                    ++CsSamplerBlkPtr->		/* count current state */
                    Thread[cs_thread_type].state[cs_state];
                    /* Attribute the computable thread to a facility */
                    facility = (*Cs_srv_block.cs_facility)(an_scb);
                    if (facility >= MAXFACS || facility < 0)
                        facility = MAXFACS - 1;
                    ++CsSamplerBlkPtr->		/* count current facility */
                    Thread[cs_thread_type].facility[facility];
                }
                break;

            case CS_FREE:
            case CS_STACK_WAIT:
            case CS_UWAIT:
                ++CsSamplerBlkPtr->
                Thread[cs_thread_type].numthreadsamples;
                ++CsSamplerBlkPtr->		/* count current state */
                Thread[cs_thread_type].state[cs_state];
                break;

            case CS_EVENT_WAIT:
                ++CsSamplerBlkPtr->
                Thread[cs_thread_type].numthreadsamples;
                ++CsSamplerBlkPtr->		/* count current state */
                Thread[cs_thread_type].state[cs_state];
                /* Classify the event being waited on by cs_memory bits:
                ** 0=DIO 1=BIO 2=lock 3=log 4=log event 5=lock event
                ** 6=unknown; user and system threads tallied separately.
                */
                switch (cs_thread_type)
                {
                case CS_USER_THREAD:
                    ++CsSamplerBlkPtr->numusereventsamples;
                    ++CsSamplerBlkPtr->		/* count event type */
                    userevent[(an_scb->cs_memory & CS_DIO_MASK ?
                               0 :
                               an_scb->cs_memory & CS_BIO_MASK ?
                               1 :
                               an_scb->cs_memory & CS_LOCK_MASK ?
                               2 :
                               an_scb->cs_memory & CS_LOG_MASK ?
                               3 :
                               an_scb->cs_memory & CS_LGEVENT_MASK ?
                               4 :
                               an_scb->cs_memory & CS_LKEVENT_MASK ?
                               5 :
                               /* else it is ...   unknown */
                               6)];
                    break;
                default:
                    ++CsSamplerBlkPtr->numsyseventsamples;
                    ++CsSamplerBlkPtr->		/* count event type */
                    sysevent[(an_scb->cs_memory & CS_DIO_MASK ?
                              0 :
                              an_scb->cs_memory & CS_BIO_MASK ?
                              1 :
                              an_scb->cs_memory & CS_LOCK_MASK ?
                              2 :
                              an_scb->cs_memory & CS_LOG_MASK ?
                              3 :
                              an_scb->cs_memory & CS_LGEVENT_MASK ?
                              4 :
                              an_scb->cs_memory & CS_LKEVENT_MASK ?
                              5 :
                              /* else it is ...   unknown */
                              6)];
                    break;
                } /* switch (cs_thread_type) */
                break;

            case CS_MUTEX:
                ++CsSamplerBlkPtr->
                Thread[cs_thread_type].numthreadsamples;
                ++CsSamplerBlkPtr->		/* count current state */
                Thread[cs_thread_type].state[cs_state];
                AddMutex( ((CS_SEMAPHORE *)an_scb->cs_sync_obj),
                          cs_thread_type );
                break;

            case CS_CNDWAIT:
                ++CsSamplerBlkPtr->
                Thread[cs_thread_type].numthreadsamples;
                ++CsSamplerBlkPtr->		/* count current state */
                Thread[cs_thread_type].state[cs_state];
                break;

            default:
                ++CsSamplerBlkPtr->
                Thread[cs_thread_type].numthreadsamples;
                ++CsSamplerBlkPtr->		/* count "invalid" state */
                Thread[cs_thread_type].state[MAXSTATES - 1];
                break;
            } /* switch (cs_state) */
        } /* for */


        sleeptime = CsSamplerBlkPtr->interval;

        CS_realtime_update_smclock();

        /* Sleep until the next sampling interval */
        CSms_thread_nap( sleeptime );
    } /* for (;;) */

} /* CS_sampler */
Пример #16
0
/*{
** Name: DI_inproc_write -   writes page(s) to a file on disk.
**
** Description:
**	This routine was created to make DIwrite more readable once
**	error checking had been added. See DIwrite for comments.
**
** Inputs:
**      f                    Pointer to the DI file
**                           context needed to do I/O.
**	diop		     Pointer to dilru file context.
**      buf                  Pointer to page(s) to write.
**      page                 Value indicating page(s) to write.
**	num_of_pages	     number of pages to write
**      
** Outputs:
**      err_code             Pointer to a variable used
**                           to return operating system 
**                           errors.
**    Returns:
**          OK
**	    other errors.
**    Exceptions:
**        none
**
** Side Effects:
**        none
**
** History:
**    30-nov-1992 (rmuth)
**	    Created.
**    03-jun-1996 (canor01)
**	    Note in the scb that this is a DI wait.
**    05-May-1997 (merja01)
**      Changed preprocessor stmt for pwrite.  Not all platforms
**      using OS_THREADS have a pwrite function.  This function
**      seems to only be available on Solaris 2.4 where async IO
**      is not yet supported.
**    14-July-1997 (schte01)
**      For those platforms that do direct i/o (where the
**      seek and the write are separate functions), do not release and
**      reaquire the semaphore on the DI_IO block. This will protect
**      against i/o being done by a different thread in between the 
**      lseek and the write.
**    14-Aug-1997 (schte01)    
**      Add xCL_DIRECT_IO as a condition to the 14-July-1997 change
**      instead of the test for !xCL_ASYNCH_IO.
**	22-Dec-1998 (jenjo02)
**	    If DI_FD_PER_THREAD is defined, call IIdio_write() instead of
**	    pwrite().
**	01-oct-1998 (somsa01)
**	    Return DI_NODISKSPACE when we are out of disk space.
**  01-Apr-2004 (fanch01)
**      Add O_DIRECT support on Linux depending on the filesystem
**      properties, pagesize.  Fixups for misaligned buffers on read()
**      and write() operations.
**    13-apr-04 (toumi01)
**	Move stack variable declaration to support "standard" C compilers.
**	29-Jan-2005 (schka24)
**	    Ditch attempt to gather diow timing stats, not useful in
**	    the real world and generates excess syscalls on some platforms.
**	15-Mar-2006 (jenjo02)
**	    io_sem is not needed with thread affinity.
**	6-Nov-2009 (kschendel) SIR 122757
**	    Make io-sem a SYNCH, avoid entirely if PRIVATE.
**	    Delete copy-to-align, caller is supposed to do it now.
**	    Don't attempt SCB updating if not backend.
*/
static STATUS
DI_inproc_write(
    DI_IO	*f,
    DI_OP	*diop,
    char        *buf,
    i4	page,
    i4	num_of_pages,
    CL_ERR_DESC *err_code )
{
    STATUS	status = OK;
    /* NULL-init so the post-I/O "Di_backend && scb" check below never
    ** reads an indeterminate pointer when the front-end path skips
    ** CSget_scb() (CERT EXP33-C); saved_state likewise zeroed.
    */
    CS_SCB	*scb = NULL;
    i4		saved_state = 0;

    /* unix variables */
    int		bytes_written;
    int		bytes_to_write;
    OFFSET_TYPE lseek_offset;

    /* 
    ** Compute the byte offset of the first page and the total transfer
    ** size; the seek is folded into the positioned write below.
    */
    lseek_offset = 
	(OFFSET_TYPE)(f->io_bytes_per_page) * (OFFSET_TYPE)page;

    bytes_to_write = (f->io_bytes_per_page * (num_of_pages));

    if (Di_backend)
    {
	/*
	** In a backend server, mark the session as I/O-waiting and
	** account the write in the server-wide wait statistics;
	** log-file and data-file writes are counted separately.
	*/
	CSget_scb(&scb);
	if ( scb )
	{
	    saved_state = scb->cs_state;
	    scb->cs_state = CS_EVENT_WAIT;

	    if (f->io_open_flags & DI_O_LOG_FILE_MASK)
	    {
		scb->cs_memory = CS_LIOW_MASK;
		scb->cs_liow++;
		Cs_srv_block.cs_wtstatistics.cs_liow_done++;
		Cs_srv_block.cs_wtstatistics.cs_liow_waits++;
		Cs_srv_block.cs_wtstatistics.cs_liow_kbytes
		    += bytes_to_write / 1024;
	    }
	    else
	    {
		scb->cs_memory = CS_DIOW_MASK;
		scb->cs_diow++;
		Cs_srv_block.cs_wtstatistics.cs_diow_done++;
		Cs_srv_block.cs_wtstatistics.cs_diow_waits++;
		Cs_srv_block.cs_wtstatistics.cs_diow_kbytes
		    += bytes_to_write / 1024;
	    }
	}
    }

# if  defined(OS_THREADS_USED) && defined(xCL_NO_ATOMIC_READ_WRITE_IO)
    /*
    ** No atomic positioned write on this platform: serialize on the
    ** DI_IO block unless each thread owns its own fd (thread affinity)
    ** or the file is private to this thread.
    */
    if ( !Di_thread_affinity && (f->io_fprop & FPROP_PRIVATE) == 0)
	CS_synch_lock( &f->io_sem );
# endif /* OS_THREADS_USED && xCL_NO_ATOMIC_READ_WRITE_IO */

# if  defined(OS_THREADS_USED) && !defined(xCL_NO_ATOMIC_READ_WRITE_IO)
    bytes_written =
#ifdef LARGEFILE64
 	 pwrite64( (int)diop->di_fd, buf, bytes_to_write, lseek_offset );
#else /*  LARGEFILE64 */
 	 pwrite( (int)diop->di_fd, buf, bytes_to_write, lseek_offset );
#endif /* LARGEFILE64 */
# else /* OS_THREADS_USED && !xCL_NO_ATOMIC_READ_WRITE_IO */
    bytes_written =
 	 IIdio_write( (int)diop->di_fd, buf, bytes_to_write,
 	 	       lseek_offset, 0, 
		       f->io_fprop,
		       err_code );
# endif /* OS_THREADS_USED */

    if ( bytes_written != bytes_to_write )
    {
	/* Map errno onto the closest DI status; errno 0 with a short
	** write means we simply ran off the end of the file.
	*/
	SETCLERR(err_code, 0, ER_write);

	switch( err_code->errnum )
	{
	case EFBIG:
	    status = DI_BADEXTEND;
	    break;
	case ENOSPC:
	    status = DI_NODISKSPACE;
	    break;
#ifdef EDQUOTA
	case EDQUOT:
	    status = DI_EXCEED_LIMIT;
	    break;
#endif
	default:
	    if (err_code->errnum == 0)
		status = DI_ENDFILE;
	    else
		status = DI_BADWRITE;
	    break;
	}
    }

# if  defined(OS_THREADS_USED) && defined(xCL_NO_ATOMIC_READ_WRITE_IO)
    if ( !Di_thread_affinity && (f->io_fprop & FPROP_PRIVATE) == 0)
	CS_synch_unlock( &f->io_sem );
# endif /* OS_THREADS_USED && xCL_NO_ATOMIC_READ_WRITE_IO */

    if ( Di_backend && scb )
    {
	/* Restore the session's pre-I/O wait state. */
	scb->cs_memory &= ~(CS_DIOW_MASK | CS_LIOW_MASK);
	scb->cs_state = saved_state;
    }

    return( status );
}
Пример #17
0
/*{
** Name: DI_async_write -   writes page(s) to a file on disk.
**
** Description:
**	This routine was created to interface with async io routines
**	where such routines are available
**
** Inputs:
**      f                    Pointer to the DI file
**                           context needed to do I/O.
**	diop		     Pointer to dilru file context.
**      buf                  Pointer to page(s) to write.
**      page                 Value indicating page(s) to write.
**	num_of_pages	     number of pages to write
**      
** Outputs:
**      err_code             Pointer to a variable used
**                           to return operating system 
**                           errors.
**    Returns:
**          OK
**	    other errors.
**    Exceptions:
**        none
**
** Side Effects:
**        none
**
** History:
**    20-jun-1995 (amo ICL)
**	    Created.
**	01-oct-1998 (somsa01)
**	    Return DI_NODISKSPACE when we are out of disk space.
*/
static STATUS
DI_async_write(
    DI_IO	*f,
    DI_OP	*diop,
    char        *buf,
    i4	page,
    i4	num_of_pages,
    CL_ERR_DESC *err_code )
{
    STATUS	status = OK;
    int		errnum;		/* NOTE(review): appears unused in this function */
    CS_SCB	*scb;
    int		saved_state;
    i4	start_time, elapsed;

    /* unix variables */
    OFFSET_TYPE lseek_offset;
    int		bytes_written;
    int		bytes_to_write;

    /* 
    ** seek to place to write 
    ** (offset and size are computed here; the seek itself is done by
    ** the positioned-write helper below)
    */
    lseek_offset = (OFFSET_TYPE)f->io_bytes_per_page * (OFFSET_TYPE)page;
    bytes_to_write = (f->io_bytes_per_page * (num_of_pages));

    /*
    ** Mark the session as I/O-waiting and account the write in the
    ** server-wide wait statistics; log-file and data-file writes are
    ** counted separately.  saved_state/start_time are only set (and
    ** later read) when a session SCB exists.
    */
    CSget_scb(&scb);
    if ( scb )
    {
	saved_state = scb->cs_state;
	scb->cs_state = CS_EVENT_WAIT;

	if (f->io_open_flags & DI_O_LOG_FILE_MASK)
	{
	    scb->cs_memory = CS_LIOW_MASK;
	    scb->cs_liow++;
	    Cs_srv_block.cs_wtstatistics.cs_liow_done++;
	    Cs_srv_block.cs_wtstatistics.cs_liow_waits++;
	    Cs_srv_block.cs_wtstatistics.cs_liow_kbytes
		+= bytes_to_write / 1024;
	}
	else
	{
	    scb->cs_memory = CS_DIOW_MASK;
	    scb->cs_diow++;
	    Cs_srv_block.cs_wtstatistics.cs_diow_done++;
	    Cs_srv_block.cs_wtstatistics.cs_diow_waits++;
	    Cs_srv_block.cs_wtstatistics.cs_diow_kbytes
		+= bytes_to_write / 1024;
	}
	start_time = CS_checktime();
    }

    /*
    ** Issue the write through the slave-thread path when OS threads
    ** are available without native async I/O, otherwise through the
    ** native aio path.
    */
# if defined(OS_THREADS_USED) && !defined(xCL_ASYNC_IO)
    bytes_written =
 	 DI_thread_rw( O_WRONLY, diop, buf, bytes_to_write,
 	 	       lseek_offset, (long*)0, err_code);
# else /* OS_THREADS_USED */
    bytes_written =
 	 DI_aio_rw( O_WRONLY, diop, buf, bytes_to_write,
 	 	       lseek_offset, (long*)0, err_code);
# endif /* OS_THREADS_USED */
    if ( bytes_written != bytes_to_write )
    {
	/* Map errno onto the closest DI status; errno 0 with a short
	** write means we ran off the end of the file.
	*/
	SETCLERR(err_code, 0, ER_write);

	switch( err_code->errnum )
	{
	case EFBIG:
	    status = DI_BADEXTEND;
	    break;
	case ENOSPC:
	    status = DI_NODISKSPACE;
	    break;
#ifdef EDQUOTA
	case EDQUOT:
	    status = DI_EXCEED_LIMIT;
	    break;
#endif
	default:
	    if (err_code->errnum == 0)
		status = DI_ENDFILE;
	    else
		status = DI_BADWRITE;
	    break;
	}

    }

    if ( scb )
    {
	/* Restore pre-I/O session state and record elapsed wait time. */
	elapsed = CS_checktime() - start_time;

	scb->cs_memory &= ~(CS_DIOW_MASK | CS_LIOW_MASK);
	scb->cs_state = saved_state;
	if (f->io_open_flags & DI_O_LOG_FILE_MASK)
	    Cs_srv_block.cs_wtstatistics.cs_liow_time +=
		elapsed;
	else
	    Cs_srv_block.cs_wtstatistics.cs_diow_time +=
		elapsed;
    }

    return( status );
}
Пример #18
0
/*{
** Name: DI_inproc_read -   read page(s) from a file on disk.
**
** Description:
**	This routine was created to make DIread more readable once
**	error checking had been added. See DIread for comments.
**
** Inputs:
**      f                    Pointer to the DI file
**                           context needed to do I/O.
**	diop		     Pointer to dilru file context.
**      buf                  Pointer to page(s) to read.
**      page                 Value indicating page(s) to read.
**	num_of_pages	     number of pages to read
**      
** Outputs:
**      err_code             Pointer to a variable used
**                           to return operating system 
**                           errors.
**    Returns:
**          OK
**	    other errors.
**    Exceptions:
**        none
**
** Side Effects:
**        none
**
** History:
**    30-nov-1992 (rmuth)
**	    Created.
**    03-jun-1996 (canor01)
**	    Note in the scb that this is a DI wait.
**    14-July-1997 (schte01)
**      For those platforms that do direct i/o (where the
**      seek and the read are separate functions), do not release and
**      reaquire the semaphore on the DI_IO block. This will protect
**      against i/o being done by a different thread in between the 
**      lseek and the read.
**    14-Aug-1997 (schte01)    
**      Add xCL_DIRECT_IO as a condition to the 14-July-1997 change
**      instead of the test for !xCL_ASYNCH_IO.
**	22-Dec-1998 (jenjo02)
**	    If DI_FD_PER_THREAD is defined, call IIdio_read() instead of
**	    pread().
**  01-Apr-2004 (fanch01)
**      Add O_DIRECT support on Linux depending on the filesystem
**      properties, pagesize.  Fixups for misaligned buffers on read()
**      and write() operations.
**    13-apr-04 (toumi01)
**	Move stack variable declaration to support "standard" C compilers.
**	29-Jan-2005 (schka24)
**	    Ditch attempt to gather dior timing stats, not useful in
**	    the real world and generates excess syscalls on some platforms.
**	15-Mar-2006 (jenjo02)
**	    io_sem is not needed with thread affinity.
**	6-Nov-2009 (kschendel) SIR 122757
**	    Remove copy to aligned buffer, caller is supposed to do it.
*/
static STATUS
DI_inproc_read(
    DI_IO	*f,
    DI_OP	*diop,
    char        *buf,
    i4	page,
    i4	num_of_pages,
    i4	*n,
    CL_ERR_DESC *err_code )
{
    STATUS	status = OK;
    CS_SCB	*scb;
    i4		saved_state;

    /* unix variables */
    int		unix_fd;
    int		bytes_read = 0;
    int		bytes_to_read;
    OFFSET_TYPE	lseek_offset;

    /*
    ** Seek to place to read
    ** (offset/size computed here; the seek is done by the positioned
    ** read below)
    */
    lseek_offset  = (OFFSET_TYPE)f->io_bytes_per_page * (OFFSET_TYPE)page;

    bytes_to_read = f->io_bytes_per_page * num_of_pages;
    unix_fd = diop->di_fd;

    if (Di_backend)
    {
	/*
	** In a backend server, mark the session as I/O-waiting and
	** account the read in the server-wide wait statistics;
	** log-file and data-file reads are counted separately.
	*/
	CSget_scb(&scb);
	if ( scb )
	{
	    saved_state = scb->cs_state;
	    scb->cs_state = CS_EVENT_WAIT;

	    if (f->io_open_flags & DI_O_LOG_FILE_MASK)
	    {
		scb->cs_memory = CS_LIOR_MASK;
		scb->cs_lior++;
		Cs_srv_block.cs_wtstatistics.cs_lior_done++;
		Cs_srv_block.cs_wtstatistics.cs_lior_waits++;
		Cs_srv_block.cs_wtstatistics.cs_lior_kbytes
		    += bytes_to_read / 1024;
	    }
	    else
	    {
		scb->cs_memory = CS_DIOR_MASK;
		scb->cs_dior++;
		Cs_srv_block.cs_wtstatistics.cs_dior_done++;
		Cs_srv_block.cs_wtstatistics.cs_dior_waits++;
		Cs_srv_block.cs_wtstatistics.cs_dior_kbytes
		    += bytes_to_read / 1024;
	    }
	}
    }

# if defined( OS_THREADS_USED ) && (defined (xCL_NO_ATOMIC_READ_WRITE_IO))
    /* No atomic positioned read: serialize seek+read on the DI_IO
    ** block unless per-thread fds or a thread-private file make the
    ** lock unnecessary.
    */
    if ( !Di_thread_affinity && (f->io_fprop & FPROP_PRIVATE) == 0)
    {
	CS_synch_lock( &f->io_sem );
    }
# endif /* OS_THREADS_USED && xCL_NO_ATOMIC_READ_WRITE_IO */

    /*
    ** CAUTION: the "if ( bytes_read != bytes_to_read )" brace below is
    ** opened inside BOTH branches of the #if/#else and closed by the
    ** single brace after the #endif.  Note the pread branch also calls
    ** SETCLERR here, while the IIdio_read branch relies on IIdio_read
    ** having filled in err_code itself.
    */
# if defined( OS_THREADS_USED ) && (! defined (xCL_NO_ATOMIC_READ_WRITE_IO))
#ifdef LARGEFILE64
    bytes_read = pread64( unix_fd, buf, bytes_to_read, lseek_offset );
#else /* LARGEFILE64 */
    bytes_read = pread( unix_fd, buf, bytes_to_read, lseek_offset );
#endif /* LARGEFILE64 */

    if ( bytes_read != bytes_to_read )
    {
	SETCLERR(err_code, 0, ER_read);
# else /* OS_THREADS_USED */

    bytes_read = IIdio_read( unix_fd, buf, bytes_to_read,
 	    			  lseek_offset, 0, 
				  f->io_fprop,
				  err_code );

    if ( bytes_read != bytes_to_read )
    {
# endif /* OS_THREADS_USED && ! xCL_NO_ATOMIC_READ_WRITE_IO */

	/* -1 is a hard I/O error; a short (non-negative) read means we
	** ran off the end of the file.
	*/
	if (bytes_read == -1)
	{
	    status = DI_BADREAD;
	}
	else
	{
	    status = DI_ENDFILE;
	}
    }
# if defined( OS_THREADS_USED ) && (defined (xCL_NO_ATOMIC_READ_WRITE_IO) )
    if ( !Di_thread_affinity && (f->io_fprop & FPROP_PRIVATE) == 0)
	CS_synch_unlock( &f->io_sem );
# endif /* OS_THREADS_USED && xCL_NO_ATOMIC_READ_WRITE_IO */

    if (Di_backend)
    {
	if ( scb )
	{
	    /* Restore the session's pre-I/O wait state. */
	    scb->cs_memory &= ~(CS_DIOR_MASK | CS_LIOR_MASK);
	    scb->cs_state = saved_state;
	}
    }

    /* Report number of whole pages actually read (even on DI_ENDFILE). */
    if ( bytes_read > 0 )
	*n = bytes_read / f->io_bytes_per_page;

    return(status);
}

# if defined(OS_THREADS_USED) || defined(xCL_ASYNC_IO)
/*{
** Name: DI_async_read -   read page(s) asynchronously from a file on disk.
**
** Description:
**	This routine was created to interface with async io routines
**	where such routines are available.
**
** Inputs:
**      f                    Pointer to the DI file
**                           context needed to do I/O.
**	diop		     Pointer to dilru file context.
**      buf                  Pointer to page(s) to read.
**      page                 Value indicating page(s) to read.
**	num_of_pages	     number of pages to read
**      
** Outputs:
**      err_code             Pointer to a variable used
**                           to return operating system 
**                           errors.
**    Returns:
**          OK
**	    other errors.
**    Exceptions:
**        none
**
** Side Effects:
**        none
**
** History:
**    20-jun-1995 (amo ICL)
**	    Created.
*/
static STATUS
DI_async_read(
    DI_IO	*f,
    DI_OP	*diop,
    char        *buf,
    i4	page,
    i4	num_of_pages,
    i4	*n,
    CL_ERR_DESC *err_code )
{
    STATUS	status = OK;
    CS_SCB	*scb;
    int		saved_state;
    i4 		start_time;

    /* unix variables */
    int		bytes_read = 0;
    int		bytes_to_read;
    OFFSET_TYPE	lseek_offset;

    /*
    ** Seek to place to read
    ** (offset/size computed here; the positioned-read helpers below
    ** perform the actual seek)
    */
    lseek_offset  = (OFFSET_TYPE)(f->io_bytes_per_page) * (OFFSET_TYPE)(page);
    bytes_to_read = f->io_bytes_per_page * num_of_pages;

    /*
    ** Mark the session as I/O-waiting and account the read in the
    ** server-wide wait statistics; log-file and data-file reads are
    ** counted separately.  saved_state/start_time are only set (and
    ** later read) when a session SCB exists.
    */
    CSget_scb(&scb);
    if ( scb )
    {
	saved_state = scb->cs_state;
	scb->cs_state = CS_EVENT_WAIT;

	if (f->io_open_flags & DI_O_LOG_FILE_MASK)
	{
	    scb->cs_memory = CS_LIOR_MASK;
	    scb->cs_lior++;
	    Cs_srv_block.cs_wtstatistics.cs_lior_done++;
	    Cs_srv_block.cs_wtstatistics.cs_lior_waits++;
	    Cs_srv_block.cs_wtstatistics.cs_lior_kbytes
		+= bytes_to_read / 1024;
	}
	else
	{
	    scb->cs_memory = CS_DIOR_MASK;
	    scb->cs_dior++;
	    Cs_srv_block.cs_wtstatistics.cs_dior_done++;
	    Cs_srv_block.cs_wtstatistics.cs_dior_waits++;
	    Cs_srv_block.cs_wtstatistics.cs_dior_kbytes
		+= bytes_to_read / 1024;
	}
	/* Clock the read */
	start_time = CS_checktime();
    }

    /*
    ** Issue the read through the slave-thread path when OS threads are
    ** available without native async I/O, otherwise through the native
    ** aio path.
    */
# if defined(OS_THREADS_USED) && !defined(xCL_ASYNC_IO)
    bytes_read = DI_thread_rw( O_RDONLY, diop, buf, bytes_to_read,
 	    			      lseek_offset, NULL, err_code);
# else /* OS_THREADS_USED */
    bytes_read = DI_aio_rw( O_RDONLY, diop, buf, bytes_to_read,
 	    			  lseek_offset, NULL, err_code);
# endif /* OS_THREADS_USED */
    if ( bytes_read != bytes_to_read )
    {
	SETCLERR(err_code, 0, ER_read);

	/* -1 is a hard I/O error; a short (non-negative) read means we
	** ran off the end of the file.
	*/
	if (bytes_read == -1)
	{
	    status = DI_BADREAD;
	}
	else
	{
	    status = DI_ENDFILE;
	}
    }

    if ( scb )
    {
	/* Restore pre-I/O session state and record elapsed wait time. */
	scb->cs_memory &= ~(CS_DIOR_MASK | CS_LIOR_MASK);
	scb->cs_state = saved_state;
	if (f->io_open_flags & DI_O_LOG_FILE_MASK)
	    Cs_srv_block.cs_wtstatistics.cs_lior_time 
		+= CS_checktime() - start_time;
	else
	    Cs_srv_block.cs_wtstatistics.cs_dior_time
		+= CS_checktime() - start_time;
    }

    /* Report number of whole pages actually read (even on DI_ENDFILE). */
    if ( bytes_read > 0 )
	*n = bytes_read / f->io_bytes_per_page;

    return(status);
}