/*
 *  cre_mbx - Create Mailbox (ITRON)
 *
 *  Creates the ITRON mailbox identified by mbxid using the creation
 *  packet pk_cmbx.  The mailbox is backed by a SuperCore message queue
 *  of T_MSG pointers.
 *
 *  Returns:  E_OK on success,
 *            E_PAR if pk_cmbx is NULL,
 *            E_RSATR if unsupported attribute bits are set,
 *            E_OBJ if the underlying message queue cannot be created,
 *            or an id-clarification error if allocation fails.
 */
ER cre_mbx(
  ID       mbxid,
  T_CMBX  *pk_cmbx
)
{
  register ITRON_Mailbox_Control *the_mailbox;
  CORE_message_queue_Attributes   the_mailbox_attributes;

  if ( !pk_cmbx )
    return E_PAR;

  /*
   *  Only TA_TPRI (tasks wait by priority) and TA_MPRI (messages are
   *  priority ordered) are supported; reject any attribute bits OUTSIDE
   *  that set.  The original test was missing the '~' and therefore
   *  rejected exactly the attributes this routine implements, leaving
   *  the TA_MPRI/TA_TPRI branches below unreachable.
   */
  if ( (pk_cmbx->mbxatr & ~(TA_TPRI | TA_MPRI)) != 0 )
    return E_RSATR;

  _Thread_Disable_dispatch();              /* protects object pointer */

  the_mailbox = _ITRON_Mailbox_Allocate( mbxid );
  if ( !the_mailbox ) {
    _Thread_Enable_dispatch();
    return _ITRON_Mailbox_Clarify_allocation_id_error( mbxid );
  }

  the_mailbox->count = pk_cmbx->bufcnt;

  if ( pk_cmbx->mbxatr & TA_MPRI )
    the_mailbox->do_message_priority = TRUE;
  else
    the_mailbox->do_message_priority = FALSE;

  if ( pk_cmbx->mbxatr & TA_TPRI )
    the_mailbox_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY;
  else
    the_mailbox_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  /*
   *  The mailbox stores pointers to T_MSG, not message bodies, so the
   *  per-message size is sizeof(T_MSG *).
   */
  if ( !_CORE_message_queue_Initialize(
           &the_mailbox->message_queue,
           OBJECTS_ITRON_MAILBOXES,
           &the_mailbox_attributes,
           the_mailbox->count,
           sizeof(T_MSG *),
           NULL                            /* Multiprocessing not supported */
       ) ) {
    _ITRON_Mailbox_Free( the_mailbox );
    _ITRON_return_errorno( E_OBJ );
  }

  _ITRON_Objects_Open( &_ITRON_Mailbox_Information, &the_mailbox->Object );

  /*
   *  If multiprocessing were supported, this is where we would announce
   *  the existence of the mailbox to the rest of the system.
   */
#if defined(RTEMS_MULTIPROCESSING)
#endif

  _ITRON_return_errorno( E_OK );
}
/*
 *  _POSIX_Message_queue_Create_support
 *
 *  Shared creation logic for named POSIX message queues.  Allocates a
 *  control block, applies the caller's (or default) mq_attr, creates
 *  the underlying SuperCore message queue, and opens the object under
 *  the given name.
 *
 *  Returns 0 and stores the new control block through message_queue on
 *  success; otherwise sets errno (EINVAL, ENFILE, or ENOSPC) and
 *  returns -1.
 */
int _POSIX_Message_queue_Create_support(
  const char                    *name,
  int                            pshared,
  unsigned int                   oflag,
  struct mq_attr                *attr_ptr,
  POSIX_Message_queue_Control  **message_queue
)
{
  POSIX_Message_queue_Control   *the_mq;
  CORE_message_queue_Attributes *the_mq_attr;
  struct mq_attr                 attr;

  _Thread_Disable_dispatch();

  /*
   *  There is no real basis for the default values.  They will work
   *  but were not compared against any existing implementation for
   *  compatibility.  See README.mqueue for an example program we
   *  think will print out the defaults.  Report anything you find with it.
   */
  if ( attr_ptr == NULL ) {
    attr.mq_maxmsg  = 10;
    attr.mq_msgsize = 16;
  } else {
    /*
     *  Reject zero as well as negative values:  a queue that can hold
     *  no messages (or only zero-length messages) is useless.  This
     *  also matches the <= 0 checks used by the newer implementation
     *  of this routine.
     */
    if ( attr_ptr->mq_maxmsg <= 0 ){
      _Thread_Enable_dispatch();
      set_errno_and_return_minus_one( EINVAL );
    }

    if ( attr_ptr->mq_msgsize <= 0 ){
      _Thread_Enable_dispatch();
      set_errno_and_return_minus_one( EINVAL );
    }

    attr = *attr_ptr;
  }

  the_mq = _POSIX_Message_queue_Allocate();
  if ( !the_mq ) {
    _Thread_Enable_dispatch();
    set_errno_and_return_minus_one( ENFILE );
  }

  /*
   *  Disabled multiprocessing support.  NOTE(review): moved after the
   *  allocation above so that, if this block is ever enabled, the_mq
   *  is valid here -- the original placement dereferenced the_mq
   *  before it was assigned.
   */
#if 0 && defined(RTEMS_MULTIPROCESSING)
  if ( pshared == PTHREAD_PROCESS_SHARED &&
       !( _Objects_MP_Allocate_and_open( &_POSIX_Message_queue_Information, 0,
                            the_mq->Object.id, FALSE ) ) ) {
    _POSIX_Message_queue_Free( the_mq );
    _Thread_Enable_dispatch();
    set_errno_and_return_minus_one( ENFILE );
  }
#endif

  the_mq->process_shared = pshared;
  the_mq->oflag          = oflag;
  the_mq->named          = TRUE;
  the_mq->open_count     = 1;
  the_mq->linked         = TRUE;

  /* XXX
   *
   *  Note that thread blocking discipline should be based on the
   *  current scheduling policy.
   */
  the_mq_attr = &the_mq->Message_queue.Attributes;
  the_mq_attr->discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( !_CORE_message_queue_Initialize(
           &the_mq->Message_queue,
           OBJECTS_POSIX_MESSAGE_QUEUES,
           the_mq_attr,
           attr.mq_maxmsg,
           attr.mq_msgsize,
#if 0 && defined(RTEMS_MULTIPROCESSING)
           _POSIX_Message_queue_MP_Send_extract_proxy
#else
           NULL
#endif
       ) ) {

#if 0 && defined(RTEMS_MULTIPROCESSING)
    if ( pshared == PTHREAD_PROCESS_SHARED )
      _Objects_MP_Close(
        &_POSIX_Message_queue_Information, the_mq->Object.id );
#endif

    _POSIX_Message_queue_Free( the_mq );
    _Thread_Enable_dispatch();
    set_errno_and_return_minus_one( ENOSPC );
  }

  _Objects_Open(
    &_POSIX_Message_queue_Information,
    &the_mq->Object,
    (char *) name
  );

  *message_queue = the_mq;

#if 0 && defined(RTEMS_MULTIPROCESSING)
  if ( pshared == PTHREAD_PROCESS_SHARED )
    _POSIX_Message_queue_MP_Send_process_packet(
      POSIX_MESSAGE_QUEUE_MP_ANNOUNCE_CREATE,
      the_mq->Object.id,
      (char *) name,
      0          /* Not used */
    );
#endif

  _Thread_Enable_dispatch();
  return 0;
}
/*
 *  _POSIX_Message_queue_Create
 *
 *  Allocates and initializes a named POSIX message queue from the
 *  already-validated name and the caller-supplied attributes, and
 *  opens it in the object name space.
 *
 *  Returns the new queue's object id, or MQ_OPEN_FAILED with errno set
 *  to EINVAL (bad attributes), ENFILE (no control blocks), ENOMEM
 *  (name duplication failed), or ENOSPC (core queue creation failed).
 */
static mqd_t _POSIX_Message_queue_Create(
  const char            *name_arg,
  size_t                 name_len,
  int                    oflag,
  const struct mq_attr  *attr
)
{
  POSIX_Message_queue_Control *mq;
  char                        *dup_name;

  /* length of name has already been validated */

  /* Both limits must be strictly positive. */
  if ( attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0 ) {
    rtems_set_errno_and_return_value( EINVAL, MQ_OPEN_FAILED );
  }

  mq = _POSIX_Message_queue_Allocate_unprotected();
  if ( mq == NULL ) {
    rtems_set_errno_and_return_value( ENFILE, MQ_OPEN_FAILED );
  }

  /*
   *  Keep a private copy of the caller's name string in case it was
   *  dynamically constructed.
   */
  dup_name = _Workspace_String_duplicate( name_arg, name_len );
  if ( dup_name == NULL ) {
    _POSIX_Message_queue_Free( mq );
    rtems_set_errno_and_return_value( ENOMEM, MQ_OPEN_FAILED );
  }

  mq->open_count = 1;
  mq->linked     = true;
  mq->oflag      = oflag;

  /*
   *  NOTE: That thread blocking discipline should be based on the
   *  current scheduling policy.
   *
   *  Joel: Cite POSIX or OpenGroup on above statement so we can determine
   *        if it is a real requirement.
   */
  if ( !_CORE_message_queue_Initialize(
          &mq->Message_queue,
          CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO,
          attr->mq_maxmsg,
          attr->mq_msgsize
       ) ) {
    _POSIX_Message_queue_Free( mq );
    _Workspace_Free( dup_name );
    rtems_set_errno_and_return_value( ENOSPC, MQ_OPEN_FAILED );
  }

  _Objects_Open_string(
    &_POSIX_Message_queue_Information,
    &mq->Object,
    dup_name
  );

  return mq->Object.id;
}
epos_status_code epos_message_queue_create( epos_name name, uint32_t count, size_t max_message_size, epos_attribute attribute_set, epos_id *id ) { register Message_queue_Control *the_message_queue; CORE_message_queue_Attributes the_msgq_attributes; #if defined(RTEMS_MULTIPROCESSING) bool is_global; #endif if ( !epos_is_name_valid( name ) ) return RTEMS_INVALID_NAME; if ( !id ) return RTEMS_INVALID_ADDRESS; #if defined(RTEMS_MULTIPROCESSING) if ( (is_global = _Attributes_Is_global( attribute_set ) ) && !_System_state_Is_multiprocessing ) return RTEMS_MP_NOT_CONFIGURED; #endif if ( count == 0 ) return RTEMS_INVALID_NUMBER; if ( max_message_size == 0 ) return RTEMS_INVALID_SIZE; #if defined(RTEMS_MULTIPROCESSING) #if 1 /* * I am not 100% sure this should be an error. * It seems reasonable to create a que with a large max size, * and then just send smaller msgs from remote (or all) nodes. */ if ( is_global && (_MPCI_table->maximum_packet_size < max_message_size) ) return RTEMS_INVALID_SIZE; #endif #endif _Thread_Disable_dispatch(); /* protects object pointer */ the_message_queue = _Message_queue_Allocate(); if ( !the_message_queue ) { _Thread_Enable_dispatch(); return RTEMS_TOO_MANY; } #if defined(RTEMS_MULTIPROCESSING) if ( is_global && !( _Objects_MP_Allocate_and_open( &_Message_queue_Information, name, the_message_queue->Object.id, false ) ) ) { _Message_queue_Free( the_message_queue ); _Thread_Enable_dispatch(); return RTEMS_TOO_MANY; } #endif the_message_queue->attribute_set = attribute_set; if (_Attributes_Is_priority( attribute_set ) ) the_msgq_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY; else the_msgq_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO; if ( ! 
_CORE_message_queue_Initialize( &the_message_queue->message_queue, &the_msgq_attributes, count, max_message_size ) ) { #if defined(RTEMS_MULTIPROCESSING) if ( is_global ) _Objects_MP_Close( &_Message_queue_Information, the_message_queue->Object.id); #endif _Message_queue_Free( the_message_queue ); _Thread_Enable_dispatch(); return RTEMS_UNSATISFIED; } _Objects_Open( &_Message_queue_Information, &the_message_queue->Object, (Objects_Name) name ); *id = the_message_queue->Object.id; #if defined(RTEMS_MULTIPROCESSING) if ( is_global ) _Message_queue_MP_Send_process_packet( MESSAGE_QUEUE_MP_ANNOUNCE_CREATE, the_message_queue->Object.id, name, 0 ); #endif _Thread_Enable_dispatch(); return RTEMS_SUCCESSFUL; }
/*
 *  _POSIX_Message_queue_Create_support
 *
 *  Shared creation logic for named POSIX message queues.  Applies the
 *  caller's (or default) mq_attr, allocates a control block, copies
 *  the name, creates the underlying SuperCore message queue, and opens
 *  the object under that name.
 *
 *  Returns 0 and stores the control block through message_queue on
 *  success; otherwise sets errno (EINVAL, ENFILE, ENOMEM, or ENOSPC)
 *  and returns -1.
 */
int _POSIX_Message_queue_Create_support(
  const char                    *name_arg,
  size_t                         name_len,
  int                            pshared,
  struct mq_attr                *attr_ptr,
  POSIX_Message_queue_Control  **message_queue
)
{
  POSIX_Message_queue_Control   *mq;
  CORE_message_queue_Attributes *core_attrs;
  struct mq_attr                 local_attr;
  char                          *dup_name;

  /* length of name has already been validated */

  _Thread_Disable_dispatch();

  /*
   *  There is no real basis for the default values.  They will work
   *  but were not compared against any existing implementation for
   *  compatibility.  See README.mqueue for an example program we
   *  think will print out the defaults.  Report anything you find with it.
   */
  if ( attr_ptr == NULL ) {
    local_attr.mq_maxmsg  = 10;
    local_attr.mq_msgsize = 16;
  } else {
    /* Both limits must be strictly positive. */
    if ( attr_ptr->mq_maxmsg <= 0 ){
      _Thread_Enable_dispatch();
      rtems_set_errno_and_return_minus_one( EINVAL );
    }

    if ( attr_ptr->mq_msgsize <= 0 ){
      _Thread_Enable_dispatch();
      rtems_set_errno_and_return_minus_one( EINVAL );
    }

    local_attr = *attr_ptr;
  }

  mq = _POSIX_Message_queue_Allocate();
  if ( mq == NULL ) {
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENFILE );
  }

  /*
   *  Keep a private copy of the caller's name string in case it was
   *  dynamically constructed.
   */
  dup_name = _Workspace_String_duplicate( name_arg, name_len );
  if ( dup_name == NULL ) {
    _POSIX_Message_queue_Free( mq );
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENOMEM );
  }

  mq->process_shared = pshared;
  mq->named          = true;
  mq->open_count     = 1;
  mq->linked         = true;

  /*
   *  NOTE: That thread blocking discipline should be based on the
   *  current scheduling policy.
   *
   *  Joel: Cite POSIX or OpenGroup on above statement so we can determine
   *        if it is a real requirement.
   */
  core_attrs = &mq->Message_queue.Attributes;
  core_attrs->discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( !_CORE_message_queue_Initialize(
          &mq->Message_queue,
          core_attrs,
          local_attr.mq_maxmsg,
          local_attr.mq_msgsize
       ) ) {
    _POSIX_Message_queue_Free( mq );
    _Workspace_Free( dup_name );
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENOSPC );
  }

  _Objects_Open_string(
    &_POSIX_Message_queue_Information,
    &mq->Object,
    dup_name
  );

  *message_queue = mq;

  _Thread_Enable_dispatch();
  return 0;
}