/**
 * This function deregisters a client from algo tuning variable logging
 *
 * @param[in] algo_data_logging_ptr pointer to the algo data logging context
 * @param[in] client_handle handle of the client to deregister
 * @return ADSPResult, returns success or failure
 *
 */
ADSPResult algo_data_logging_deregister(algo_data_logging_t *algo_data_logging_ptr,
                                        uint32_t client_handle)
{
   client_list_t          *cur_client_list_ptr  = NULL;
   client_list_t          *prev_client_list_ptr = NULL;
   algo_log_client_info_t *log_info_ptr         = NULL;
   bool_t                 is_dereg_done         = FALSE;

   cur_client_list_ptr = (client_list_t *)algo_data_logging_ptr->client_list_ptr;
   if(NULL == cur_client_list_ptr)
   {
      MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: client list ptr is NULL, failed to dereg client :0x%lx",
            client_handle);
      return ADSP_EFAILED;
   }

   // find the client and remove it
   while(cur_client_list_ptr)
   {
      log_info_ptr = (algo_log_client_info_t *)cur_client_list_ptr->element;

      if(log_info_ptr->client_handle == client_handle)
      {
         if(NULL == prev_client_list_ptr) // If first node is removed
         {
            algo_data_logging_ptr->client_list_ptr = cur_client_list_ptr->next;
         }
         else
         {
            prev_client_list_ptr->next = cur_client_list_ptr->next;
         }

         // commit the existing log buffer
         algo_log_commit_buf(log_info_ptr);
         qurt_elite_memory_free(log_info_ptr->log_buf_ptr);
         qurt_elite_memory_free(log_info_ptr);
         qurt_elite_memory_free(cur_client_list_ptr);
         is_dereg_done = TRUE;
         break;
      }

      prev_client_list_ptr = cur_client_list_ptr;
      cur_client_list_ptr = cur_client_list_ptr->next;
   }

   if(!is_dereg_done)
   {
      MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: failed to dereg client :0x%lx, not in client list",
            client_handle);
      return ADSP_EFAILED;
   }

   algo_data_logging_ptr->client_counter--;
   return ADSP_EOK;
}
ADSPResult HwdDevCfg_GetVirtAddress(qurt_mem_region_t *pQmem_region, uint32 phyAddr, uint32 regSize, uint32 *pVirtAddr)
{
   ADSPResult status = ADSP_EOK;
   qurt_mem_pool_t hwio_pool;

   memset((void *)pQmem_region,0,sizeof(qurt_mem_region_t));

   size_t   page_size = (size_t) regSize;
   qurt_mem_cache_mode_t cache_attribs =  QURT_MEM_CACHE_NONE;
   qurt_perm_t perm = (qurt_perm_t) (QURT_PERM_READ | QURT_PERM_WRITE | QURT_PERM_EXECUTE);

   if ( ADSP_EOK !=  (status = qurt_mem_map_static_query((qurt_addr_t *)pVirtAddr,
                              phyAddr,
                              page_size,
                              cache_attribs,
                              perm)))
   {
      if(ADSP_EOK != (status = qurt_mem_pool_attach("ADSP_DRIVER_POOL", &hwio_pool)))
      {
         MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "HwdDevCfg_GetVirtAddress: attach to driver pool failed\n");	  
      }
      else
      {
         qurt_mem_region_attr_t hwio_attr;

         qurt_mem_region_attr_init( &hwio_attr);
         qurt_mem_region_attr_set_cache_mode( &hwio_attr, QURT_MEM_CACHE_NONE);
         qurt_mem_region_attr_set_mapping  ( &hwio_attr, QURT_MEM_MAPPING_PHYS_CONTIGUOUS );
         qurt_mem_region_attr_set_physaddr ( &hwio_attr, phyAddr );

         /* create the memory region */
         if (ADSP_EOK != (status = qurt_mem_region_create(pQmem_region,
                               regSize,
                               hwio_pool,
                              &hwio_attr )))
         {
            MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "HwdDevCfg_GetVirtAddress: create mem region failed\n");
            return status;			
         }

         if (ADSP_EOK != (status = qurt_mem_region_attr_get(*pQmem_region, &hwio_attr)))
         {
            MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "HwdDevCfg_GetVirtAddress: get mem region attr failed\n");	  		 
         }
         else
         {
            unsigned int addr = 0;
            qurt_mem_region_attr_get_virtaddr(&hwio_attr, &addr);
            *pVirtAddr = (uint32) addr;
         }
      }
   }
   else
   {
      MSG_1(MSG_SSID_QDSP6, DBG_HIGH_PRIO, "HwdDevCfg_GetVirtAddress: already mapped statically 0x%x\n", (unsigned int)phyAddr);
   }
   return status;
}
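/* Illustrative usage sketch (not part of the original source): mapping a
 * hardware register block and reading one register through the returned
 * virtual address. The hwd_example_* wrapper and its arguments are
 * hypothetical placeholders. */
static ADSPResult hwd_example_read_first_reg(uint32 phys_addr, uint32 reg_size, uint32 *reg_val_ptr)
{
   qurt_mem_region_t region;
   uint32 virt_addr = 0;
   ADSPResult result;

   result = HwdDevCfg_GetVirtAddress(&region, phys_addr, reg_size, &virt_addr);
   if (ADSP_EOK != result)
   {
      return result;
   }

   /* Read the register at offset 0 through the mapped virtual address. */
   *reg_val_ptr = *((volatile uint32 *)virt_addr);
   return ADSP_EOK;
}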
/*===========================================================================
FUNCTION DSMBIT_BIT_COPY

DESCRIPTION
  Copies bitwise from one item into another.

DEPENDENCIES
  Only copies from one item into one item are supported.

PARAMETERS
  src_ptr     - DSM item to copy from
  dst_ptr     - DSM item to copy into
  src_offset  - Bit offset to start copying from
  dst_offset  - Bit offset to start copying into
  len         - Number of bits to copy

RETURN VALUE
  Number of bits copied

SIDE_EFFECTS

===========================================================================*/
uint16 dsmbiti_bit_copy
(
  dsm_item_type*  src_ptr,
  dsm_item_type*  dst_ptr,
  uint16          src_offset,
  uint16          dst_offset,
  uint16          len,
  const char * file,
  uint32 line
)
{
  int i;
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /* Sanity check on the item pointers */
  if ( (src_ptr == NULL) || (dst_ptr == NULL) )
  {
    MSG(MSG_SSID_DFLT, MSG_LEGACY_ERROR,"NULL dsm item pointer");
    return 0;
  }

  /* Sanity check that we stay within the items */
  if ( ((((src_offset + len) / 8) + 1) > src_ptr->used)
       ||
       ((((dst_offset + len) / 8) + 1) > DSMI_SIZE(dst_ptr) ) ) 
  {
    MSG_1(MSG_SSID_DFLT, MSG_LEGACY_ERROR,"Copy length %d exceeds item size", len);
    return 0;
  }

  /* Copy each bit */
  for ( i=0; i<len; i++ )
  {
    /* Null out destination bit */
    *(dst_ptr->data_ptr + ((dst_offset + i) / 8)) &= 
      ~dsmbit_bit_copy_mask[(dst_offset + i) % 8];

    /* Or source bit into the very spot */
    *(dst_ptr->data_ptr + ((dst_offset + i) / 8)) |= 
      ((*(src_ptr->data_ptr + ((src_offset + i) / 8))
        & dsmbit_bit_copy_mask[(src_offset + i) % 8]) 
       >> (7 - ((src_offset + i) % 8)))
      << (7 - ((dst_offset + i) % 8));
  }

  /* Update the destination item's used field */
  dst_ptr->used = ((dst_offset + len) / 8) + (((dst_offset + len) % 8) ? 1 : 0);

#ifdef FEATURE_DSM_MEM_CHK
  dsmi_touch_item(dst_ptr,file,line);
#endif

  return len;
}
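/* dsmbit_bit_copy_mask[] is referenced above but not shown in this snippet.
 * Given the shift arithmetic in the copy loop (index 0 pairs with a shift of
 * 7), it is presumably an MSB-first single-bit mask table along these lines: */
static const uint8 dsmbit_bit_copy_mask[8] =
{
  0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
};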
/**
 * This function deinitializes logging for algo tuning variables
 *
 * @param[in,out] algo_data_logging_pptr pointer to the algo data logging
 *                context pointer; set to NULL on return
 */
void algo_data_logging_deinit(algo_data_logging_t **algo_data_logging_pptr)
{
   if(!(algo_data_logging_pptr && *algo_data_logging_pptr))
   {
      MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: Null pointer!");
   }
   else
   {
      if((*algo_data_logging_pptr)->client_counter)
      {
         MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: %u clients not yet deregistered, failed!",
               (*algo_data_logging_pptr)->client_counter);
      }

      qurt_elite_memory_free(*algo_data_logging_pptr);
      *algo_data_logging_pptr = NULL;
   }
}
ADSPResult HwdDevCfg_DestoryVirtAddress(qurt_mem_region_t qmem_region)
{
   ADSPResult status = ADSP_EOK;

   if(qmem_region)
   {
      if(QURT_EOK != (status = qurt_mem_region_delete(qmem_region)))
      {
         MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "HwdDevCfg_DestoryVirtAddress: Failed to delete memory region [status:%d]", status);
      }
   }
   else
   {
      MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "HwdDevCfg_DestoryVirtAddress: no qmem_region to destroy");
   }

   return status;
}
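/* Illustrative sketch (not part of the original source): pairing the map and
 * unmap helpers above over a driver's lifetime. Note that when the physical
 * range is already statically mapped, HwdDevCfg_GetVirtAddress() leaves the
 * region handle zeroed, so HwdDevCfg_DestoryVirtAddress() only logs that there
 * is nothing to delete. The hwd_example_* names are hypothetical. */
static qurt_mem_region_t hwd_example_region;
static uint32            hwd_example_virt_addr;

static ADSPResult hwd_example_init(uint32 phys_addr, uint32 reg_size)
{
   return HwdDevCfg_GetVirtAddress(&hwd_example_region, phys_addr,
                                   reg_size, &hwd_example_virt_addr);
}

static ADSPResult hwd_example_deinit(void)
{
   return HwdDevCfg_DestoryVirtAddress(hwd_example_region);
}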
/*=========================================================================*/
static void sns_init_once( void )
{
  int      i;
  INT8U    err;
  OS_FLAGS flags = 0;

  const sns_init_fcn init_ptrs[] = SNS_INIT_FUNCTIONS;

  if ( SNS_SUCCESS != sns_heap_init()) {
      MSG(MSG_SSID_SNS, DBG_ERROR_PRIO, "Sensors Heap Init failed, using Default heap ID");
      sns_heap_id = QURT_ELITE_HEAP_DEFAULT;
  }

  sns_init_flag_grp = sns_os_sigs_create( SNS_INIT_FLAG_DONE, &err );

  SNS_ASSERT(NULL != sns_init_flag_grp);

  for( i = 0; NULL != init_ptrs[i]; i++ ) {
    //MSG_1(MSG_SSID_QDSP6, DBG_HIGH_PRIO, "Sensors Init : %d", i);
    if( SNS_SUCCESS != init_ptrs[i]() ) {
      /* Handle error */
      //MSG_1(MSG_SSID_QDSP6, DBG_HIGH_PRIO, "Sensors Init FAIL: %d", i);
      sns_init_done();
    }

    while( !(SNS_INIT_FLAG_DONE & flags) ) {
      /* Continue polling for the flag until module init is done */
      flags = sns_os_sigs_pend( sns_init_flag_grp,
                                SNS_INIT_FLAG_DONE,
                                OS_FLAG_WAIT_SET_ANY,
                                0,
                                &err );
      MSG_1(MSG_SSID_QDSP6, DBG_HIGH_PRIO, "Sensors Init : waiting(%x)", flags);
    }
    flags = 0;
  }

  MSG(MSG_SSID_QDSP6, DBG_HIGH_PRIO, "Sensors Init : ///////////init once completed///////////");
}
/*===========================================================================
FUNCTION diagcomm_smd_dsm_high_mem_event_cb

DESCRIPTION
   This function gets triggered and disables flow, when the high item count 
   for DSM_DIAG_SMD_TX_ITEM_POOL is reached.
   (DSM_DIAG_SMD_TX_ITEM_POOL is used by smd, to receive data.)
   
===========================================================================*/
static void diagcomm_smd_dsm_high_mem_event_cb (dsm_mempool_id_type pool_id,
                                                dsm_mem_level_enum_type mem_level,
                                                dsm_mem_op_enum_type mem_op)
{
   uint32 flow_cnt = 0;
   
   /* Check to be defensive */
   if(pool_id != DSM_DIAG_SMD_TX_ITEM_POOL || 
      mem_level != DSM_MEM_LEVEL_MANY ||
      mem_op != DSM_MEM_OP_NEW)
      return;

   if (TRUE == bAllowFlow)
   {
      diagcomm_io_disable_flow( &diagcomm_io_conn[DIAGCOMM_PORT_SMD][DIAGCOMM_PORT_1] );
      diagcomm_io_disable_flow( &diagcomm_io_conn[DIAGCOMM_PORT_SMD][DIAGCOMM_PORT_2] );

      bAllowFlow = FALSE;
      flow_cnt = diagcomm_incr_flow_ctrl_count(DIAGCOMM_PORT_SMD);
      MSG_1(MSG_SSID_DIAG, MSG_LEGACY_HIGH, "diagcomm_smd_dsm_high_mem_event_cb: disabled flow (cnt=%d)", flow_cnt);
   }

} /* diagcomm_smd_dsm_high_mem_event_cb */
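/* Illustrative sketch (not part of the original source): this callback is
 * presumably registered against DSM_DIAG_SMD_TX_ITEM_POOL with the DSM
 * memory-event API so that it fires when the pool reaches the MANY level on an
 * allocation. The exact registration call below is an assumption:
 *
 *   dsm_reg_mem_event_cb( DSM_DIAG_SMD_TX_ITEM_POOL,
 *                         DSM_MEM_LEVEL_MANY,
 *                         DSM_MEM_OP_NEW,
 *                         diagcomm_smd_dsm_high_mem_event_cb );
 */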
/*===========================================================================

FUNCTION  RFLL1X_SLEEP

DESCRIPTION
  This function completes transition of RF hardware to sleep state by
  turning off the Rx and Tx LDOs.

DEPENDENCIES
  rfll1x_sleep() must have already been called.
  
RETURN VALUE
  None

SIDE EFFECTS
  None

===========================================================================*/
uint32 rfll1x_sleep  //TODO_7600
(
  rfll_device_desc_type *dev_desc,      /*lint -e818 Device descriptor 
                                          could be declared as const */
  const void *user_data_ptr,            /* Pointer to user data passed into
                                         callback */
  rfcom_cb_handler_type cb_handler      /* Call back handler or NULL */  
)
{
  
  rf_chain_state_type *rf_chain_0 = &rf_chain_status[rfcom_to_path[RFCOM_TRANSCEIVER_0]];
  rf_chain_state_type *rf_chain_1 = &rf_chain_status[rfcom_to_path[RFCOM_RECEIVER_1]];
  rf_chain_state_type *dev_chain = &rf_chain_status[ rfcom_to_path[dev_desc->device]];

  if (user_data_ptr==NULL) 
  {
    // TODO_7600: CDMA 1X flag parameter (FALSE do nothing)
    return 0;
  }
  /* Put RF to sleep */
  if( dev_desc->device == RFCOM_RECEIVER_DIV || dev_desc->device == RFCOM_RECEIVER_DUAL)
  {
      if(rf_chain_0->rf_state == RF_STATE_CDMA_RX)
            rf_sleep_cdma1x(RFCOM_TRANSCEIVER_0);
      else
            MSG_1( MSG_SSID_RF, MSG_LEGACY_ERROR,
                   "rfll1x_sleep, RF Primary Chain in invalid state: %d",
                   rf_chain_0->rf_state );

      if(rf_chain_1->rf_state == RF_STATE_CDMA_RX ||
           #ifndef RF_HAS_BYPASS_RF_STATE_GPS_CHECK
           rf_chain_1->rf_state == RF_STATE_GPS ||    
           #endif
           rf_chain_1->rf_state == RF_STATE_DIVERSITY)
           rf_sleep_cdma1x(RFCOM_RECEIVER_1);
      else

           MSG_1( MSG_SSID_RF, MSG_LEGACY_ERROR,
                  "rfll1x_sleep, RF Secondary Chain in invalid state: %d", 
                  rf_chain_1->rf_state);

  }
  else if(dev_desc->device == RFCOM_TRANSCEIVER_0)
  {
      if(dev_chain->rf_state == RF_STATE_CDMA_RXTX)
      {
          rf_tx_shutdown();
          rf_sleep_cdma1x(dev_desc->device);
      }
      else if(dev_chain->rf_state == RF_STATE_CDMA_RX)
      {
          rf_sleep_cdma1x(dev_desc->device);
      }
      else

          MSG_2( MSG_SSID_RF, MSG_LEGACY_ERROR,
                 "rfll1x_sleep, RF Chain:%d in invalid state: %d",
                 dev_desc->device,dev_chain->rf_state); 
  }
  else
  {
      if(dev_chain->rf_state == RF_STATE_CDMA_RX)
          rf_sleep_cdma1x(dev_desc->device);
      else
          MSG_2( MSG_SSID_RF, MSG_LEGACY_ERROR,
                 "rfll1x_sleep, RF Chain:%d in invalid state: %d",
                 dev_desc->device,dev_chain->rf_state); 
  }
  
  return 0;
} /* rfll1x_sleep() */
/**
 * This function does the logging for algo tuning variables
 *
 * @param[in] algo_data_logging_ptr pointer to the algo data logging context
 * @param[in] log_buffer pointer to the buffer to be logged
 * @param[in] log_size logging size in bytes
 * @param[in] client_handle client handle returned at registration
 * @return ADSPResult, returns success or failure
 *
 */
ADSPResult algo_data_log_now(algo_data_logging_t *algo_data_logging_ptr,
                             int8_t *log_buffer,
                             uint32_t log_size,
                             uint32_t client_handle)
{
   uint32_t log_buf_size;
   uint32_t log_size_used = 0;
   int8_t *log_buf_ptr = NULL;

   algo_log_client_info_t *algo_log_client_info_ptr = NULL;

   if(NULL == (algo_log_client_info_ptr = algo_find_client_info(algo_data_logging_ptr, client_handle)))
   {
      MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: algo client is not found 0x%lx", client_handle);
      return ADSP_EFAILED;
   }

   /* Validate the log size */
   if(log_size != algo_log_client_info_ptr->log_size)
   {
      MSG_2(MSG_SSID_QDSP6, DBG_ERROR_PRIO,
            "Algo Log: log size registered %ld is different from log size %ld being logged",
            algo_log_client_info_ptr->log_size, log_size);
      return ADSP_EFAILED;
   }

   if(NULL != algo_log_client_info_ptr->log_buf_ptr)
   {
      log_buf_size  = algo_log_client_info_ptr->log_buf_size;
      log_size_used = algo_log_client_info_ptr->log_size_used;
      log_buf_ptr   = algo_log_client_info_ptr->log_buf_ptr;

      if((log_size_used + log_size) <= log_buf_size)
      {
         // destination buffer has (log_buf_size - log_size_used) bytes of free space
         memscpy(log_buf_ptr + (log_size_used), (log_buf_size - log_size_used), log_buffer,
               log_size);

         /* Update used log buffer size */
         algo_log_client_info_ptr->log_size_used += log_size;

         /* If the log buffer is completely filled, commit it. */
         if(algo_log_client_info_ptr->log_size_used >= (log_buf_size))
         {
            algo_log_commit_buf(algo_log_client_info_ptr);
         }
      }
      else
      {
         algo_log_commit_buf(algo_log_client_info_ptr);
         /* algo_log_client_info_ptr->log_size_used is reset to 0 inside the commit,
            so the whole buffer (log_buf_size bytes) is free again */
         memscpy(log_buf_ptr, log_buf_size, log_buffer, log_size);

         /* Update used log buffer size */
         algo_log_client_info_ptr->log_size_used += log_size;
      }
   }

   return ADSP_EOK;
}
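/* algo_find_client_info() is referenced above but not shown in this snippet.
 * A minimal sketch of what it presumably does, given the client_list_t and
 * algo_log_client_info_t structures used throughout this file; the real
 * implementation may differ. */
static algo_log_client_info_t *algo_example_find_client_info(algo_data_logging_t *algo_data_logging_ptr,
                                                             uint32_t client_handle)
{
   client_list_t *cur_ptr = (client_list_t *)algo_data_logging_ptr->client_list_ptr;

   while (cur_ptr)
   {
      algo_log_client_info_t *info_ptr = (algo_log_client_info_t *)cur_ptr->element;
      if (info_ptr->client_handle == client_handle)
      {
         return info_ptr;
      }
      cur_ptr = cur_ptr->next;
   }
   return NULL;
}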
/**
 * This function registers a client for logging algo tuning variables
 *
 * @param[in] algo_data_logging_ptr pointer to the algo data logging context
 * @param[in] log_interval_in_ms logging interval in milliseconds
 * @param[in] log_commit_interval_in_ms interval in milliseconds at which the
 *            log buffer is committed
 * @param[in] log_size logging size in bytes
 * @param[in] log_code QXDM log code to use for this client
 * @param[out] client_handle_ptr pointer to the returned client handle
 * @return ADSPResult, returns success or failure
 *
 */
ADSPResult algo_data_logging_register(algo_data_logging_t *algo_data_logging_ptr,
                                      uint16_t log_interval_in_ms,
                                      uint16_t log_commit_interval_in_ms,
                                      uint32_t log_size,
                                      uint32_t log_code,
                                      uint32_t *client_handle_ptr)
{
   client_list_t          *list_node                = NULL;
   algo_log_client_info_t *algo_log_client_info_ptr = NULL;
   int8_t                 *log_buf_ptr              = NULL;
   client_list_t          **client_list_pptr        = NULL;
   uint32_t               log_size_for_max_algo_interval;

   // return failure if log size is zero
   if(!log_size)
   {
      MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: Non-zero logging size must be provided %ld",
            log_size);
      return ADSP_EUNSUPPORTED;
   }

   // return failure if the log interval exceeds the commit interval
   if(log_interval_in_ms > log_commit_interval_in_ms)
   {
      MSG_1(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Algo Log: log interval is greater than the max log interval allowed %d",
            log_interval_in_ms);
      return ADSP_EUNSUPPORTED;
   }

   log_size_for_max_algo_interval = ((log_commit_interval_in_ms / log_interval_in_ms) * log_size);

   // Adding client node
   list_node = (client_list_t *)qurt_elite_memory_malloc((sizeof(client_list_t)), QURT_ELITE_HEAP_DEFAULT);
   if(NULL == list_node)
   {
      MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Out of memory creating client list node.");
      return ADSP_ENOMEMORY;
   }
   //update the client info here
   algo_log_client_info_ptr = (algo_log_client_info_t *)qurt_elite_memory_malloc(
         sizeof(algo_log_client_info_t), QURT_ELITE_HEAP_DEFAULT);
   if(NULL == algo_log_client_info_ptr)
   {
      MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, "Out of memory creating afe algo client list node.");
      qurt_elite_memory_free(list_node);
      return ADSP_ENOMEMORY;
   }

   list_node->element = (void*)algo_log_client_info_ptr;
   list_node->next = NULL;

   memset(algo_log_client_info_ptr, 0, sizeof(algo_log_client_info_t));

   log_buf_ptr = (int8_t *)qurt_elite_memory_malloc(log_size_for_max_algo_interval, QURT_ELITE_HEAP_DEFAULT);
   if(NULL == log_buf_ptr)
   {
      MSG(MSG_SSID_QDSP6, DBG_ERROR_PRIO, " Error allocating memory for algo log buffer");
      qurt_elite_memory_free(list_node);
      qurt_elite_memory_free(algo_log_client_info_ptr);
      return ADSP_ENOMEMORY;
   }

   algo_log_client_info_ptr->client_handle      = (uint32_t)algo_log_client_info_ptr;
   algo_log_client_info_ptr->log_buf_ptr        = log_buf_ptr;
   algo_log_client_info_ptr->log_buf_size       = log_size_for_max_algo_interval;
   algo_log_client_info_ptr->log_interval_in_ms = log_interval_in_ms;
   algo_log_client_info_ptr->log_size           = log_size;
   algo_log_client_info_ptr->log_size_used      = 0;
   algo_log_client_info_ptr->log_code           = (elite_qxdm_log_code)log_code;

   client_list_pptr = (client_list_t **)&algo_data_logging_ptr->client_list_ptr;

   // Adding the client to the front
   if(*client_list_pptr)
   {
      list_node->next = *client_list_pptr;
   }
   *client_list_pptr = list_node;

   *client_handle_ptr = algo_log_client_info_ptr->client_handle;
   algo_data_logging_ptr->client_counter++;

   return ADSP_EOK;
}
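/* Illustrative sketch (not part of the original source): a typical register ->
 * log -> deregister sequence using the functions above. The interval, size and
 * log code values, and the example wrapper itself, are hypothetical
 * placeholders. */
static void algo_logging_example_usage(algo_data_logging_t *logging_ptr,
                                       int8_t *sample_buf, uint32_t sample_size)
{
   uint32_t client_handle = 0;

   if (ADSP_EOK != algo_data_logging_register(logging_ptr,
                                              10,     /* log every 10 ms (hypothetical) */
                                              100,    /* commit every 100 ms (hypothetical) */
                                              sample_size,
                                              0x158A, /* hypothetical QXDM log code */
                                              &client_handle))
   {
      return;
   }

   /* Log one interval worth of data; size must match the registered log_size. */
   (void)algo_data_log_now(logging_ptr, sample_buf, sample_size, client_handle);

   /* Deregister commits any partially filled buffer and frees client state. */
   (void)algo_data_logging_deregister(logging_ptr, client_handle);
}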
/*===========================================================================

FUNCTION DIAGCOMM_BUFFER_ENQUEUE_STREAM

DESCRIPTION
This function enqueues data onto the sio_tx_wmq depending on the current mode.  
 
PARAMETERS
    item_ptr - pointer to the DSM item that has to be enqueued
    port_type - Control channel port type (SIO/SMD)
    stream_id - Specifies the stream on which the data should be enqueued
  
RETURN VALUE
  None
  
===========================================================================*/
void diagcomm_buffer_enqueue_stream( dsm_item_type ** item_ptr, 
                                     diagcomm_port_type port_type,
                                     uint8 stream_id )
{
  dsm_item_type * dsm_dequeue_item_ptr = NULL;
  dsm_item_type * dsm_chained_ptr = NULL;
  uint32 dsm_cnt = 0;
  uint32 dsm_dropped_cnt = 0;
  uint32 bytes_dropped_cnt = 0;
  uint32 dsm_alloc_cnt = 0;
  uint32 bytes_alloc_cnt = 0;
  uint8 stream_index = 0;

  if( (stream_id < DIAG_MIN_STREAM_ID) || (stream_id > DIAG_MAX_STREAM_ID) )
  {
    MSG_1(MSG_SSID_DIAG, MSG_LEGACY_ERROR, "diagcomm_buffer_enqueue_stream - Invalid stream_id (%d)", stream_id);
    return;
  }

  stream_index = DIAG_ID_TO_INDEX(stream_id);
    
  /* Dequeue and count dropped DSMs only in Buffered Cir mode. */
  if( diag_tx_mode[stream_index].mode == DIAG_TX_MODE_BUFFERED_CIR )
  {
    dsm_cnt = diag_buffering_pool_used_cnt( stream_id );
    diag_tx_mode[stream_index].cur_dsm_cnt[port_type] = dsm_cnt; // Save dsm count read

    if( DIAG_BUFFERED_DSM_CNT_TO_BYTES(dsm_cnt) >= diag_tx_mode[stream_index].buffered_many_bytes_mark )
    {
      if( port_type == DIAGCOMM_PORT_SMD )
        dsm_dequeue_item_ptr = diagcomm_io_dequeue_rx_wmq( &diagcomm_io_conn[port_type][DIAGCOMM_PORT_1] );
      else if( port_type == DIAGCOMM_PORT_SIO )
        dsm_dequeue_item_ptr = diagcomm_io_dequeue_tx_wmq( &diagcomm_io_conn[port_type][DIAGCOMM_PORT_1], 0 );

      ASSERT( dsm_dequeue_item_ptr != NULL );

      //bytes_dropped_cnt += dsm_length_packet( dsm_dequeue_item_ptr );
      dsm_chained_ptr = dsm_dequeue_item_ptr;
      do
      {
        dsm_dropped_cnt += 1;
        bytes_dropped_cnt += dsm_chained_ptr->used;

        // Point to next DSM in the chain
        dsm_chained_ptr = dsm_chained_ptr->pkt_ptr;

        // Continue to count chained DSMs
      } while( dsm_chained_ptr != NULL );
      //} while( (dsm_chained_ptr != NULL) && (dsm_chained_ptr->data_ptr != NULL) && (dsm_chained_ptr->used > 0) );

      // Update dropped count health statistics
      if( (diag_tx_mode[stream_index].dsm_dropped_cnt + dsm_dropped_cnt) >= MAX_VALUE_UINT32 )
        diag_tx_mode[stream_index].dsm_dropped_cnt = MAX_VALUE_UINT32;
      else
        diag_tx_mode[stream_index].dsm_dropped_cnt += dsm_dropped_cnt;

      if( (diag_tx_mode[stream_index].byte_dropped_cnt + bytes_dropped_cnt) >= MAX_VALUE_UINT32 )
        diag_tx_mode[stream_index].byte_dropped_cnt = MAX_VALUE_UINT32;
      else
        diag_tx_mode[stream_index].byte_dropped_cnt += bytes_dropped_cnt;

      // Free the dequeued DSM item, including its chained items
      dsm_free_packet( &dsm_dequeue_item_ptr );
    }
  }

  /* We don't drop/dequeue DSMs in Buffered Threshold mode. Just count alloc
     counts and enqueue. */
  if( (diag_tx_mode[stream_index].mode == DIAG_TX_MODE_BUFFERED_THRESH) || 
      (diag_tx_mode[stream_index].mode == DIAG_TX_MODE_BUFFERED_CIR) )
  {
    //bytes_alloc_cnt += dsm_length_packet( *item_ptr );
    dsm_chained_ptr = *item_ptr;
    do
    {
      dsm_alloc_cnt += 1;
      bytes_alloc_cnt += dsm_chained_ptr->used;

      // Point to next DSM in the chain
      dsm_chained_ptr = dsm_chained_ptr->pkt_ptr;

      // Continue to count chained DSMs
    } while( dsm_chained_ptr != NULL );

    // Update alloc count health statistics
    if( (diag_tx_mode[stream_index].dsm_alloc_cnt + dsm_alloc_cnt) >= MAX_VALUE_UINT32 )
      diag_tx_mode[stream_index].dsm_alloc_cnt = MAX_VALUE_UINT32;
    else
      diag_tx_mode[stream_index].dsm_alloc_cnt += dsm_alloc_cnt;

    if( (diag_tx_mode[stream_index].byte_alloc_cnt + bytes_alloc_cnt) >= MAX_VALUE_UINT32 )
      diag_tx_mode[stream_index].byte_alloc_cnt = MAX_VALUE_UINT32;
    else
      diag_tx_mode[stream_index].byte_alloc_cnt += bytes_alloc_cnt;

    #ifdef DIAG_SIO_SUPPORT
    if( port_type == DIAGCOMM_PORT_SMD )
      dsm_enqueue (diagcomm_io_conn[port_type][DIAGCOMM_PORT_1].open_params.sio_params.params.rx_queue, item_ptr);
    else if( port_type == DIAGCOMM_PORT_SIO )
      dsm_enqueue (diagcomm_io_conn[port_type][DIAGCOMM_PORT_1].open_params.sio_params.params.tx_queue, item_ptr);
    #elif defined (DIAG_SMDL_SUPPORT)
    dsm_enqueue (diagcomm_io_conn[port_type][DIAGCOMM_PORT_1].open_params.smdl_params.tx_queue, item_ptr);
    #endif
  }
  else
  {
    /* Streaming mode; just enqueue it. */
    #ifdef DIAG_SIO_SUPPORT
    if( port_type == DIAGCOMM_PORT_SMD )
      dsm_enqueue (diagcomm_io_conn[port_type][DIAGCOMM_PORT_1].open_params.sio_params.params.rx_queue, item_ptr);
    else if( port_type == DIAGCOMM_PORT_SIO )
      dsm_enqueue (diagcomm_io_conn[port_type][DIAGCOMM_PORT_1].open_params.sio_params.params.tx_queue, item_ptr);
    #elif defined (DIAG_SMDL_SUPPORT)
    dsm_enqueue (diagcomm_io_conn[port_type][DIAGCOMM_PORT_1].open_params.smdl_params.tx_queue, item_ptr);
    #endif
  }

} /* diagcomm_buffer_enqueue_stream */
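/* DIAG_ID_TO_INDEX() and DIAG_BUFFERED_DSM_CNT_TO_BYTES() are used above but
 * not shown in this snippet. Commented sketches consistent with how they are
 * used here (stream IDs starting at DIAG_MIN_STREAM_ID map to zero-based
 * indices; the buffered byte count is estimated from the per-item DSM size);
 * the real definitions may differ:
 *
 *   #define DIAG_ID_TO_INDEX( stream_id )  ((stream_id) - DIAG_MIN_STREAM_ID)
 *   #define DIAG_BUFFERED_DSM_CNT_TO_BYTES( cnt ) \
 *           ((cnt) * DSMI_DIAG_SMD_TX_ITEM_SIZ)
 */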