Example #1
struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
	      unsigned long in3, unsigned long in4, unsigned long in5,
	      unsigned long in6, unsigned long in7)
{
	struct ia64_sal_retval ret_stuff;
	unsigned long r9  = 0;
	unsigned long r10 = 0;
	long r11 = 0;
	long status;

	debugger_event(XEN_IA64_DEBUG_ON_SAL);

	status = 0;
	switch (index) {
	    case SAL_FREQ_BASE:
		if (likely(!running_on_sim))
			status = ia64_sal_freq_base(in1,&r9,&r10);
		else switch (in1) {
		      case SAL_FREQ_BASE_PLATFORM:
			r9 = 200000000;
			break;

		      case SAL_FREQ_BASE_INTERVAL_TIMER:
			r9 = 700000000;
			break;

		      case SAL_FREQ_BASE_REALTIME_CLOCK:
			r9 = 1;
			break;

		      default:
			status = -1;
			break;
		}
		break;
	    case SAL_PCI_CONFIG_READ:
		if (current->domain == dom0) {
			u64 value;
			// note that args 2&3 are swapped!!
			status = ia64_sal_pci_config_read(in1,in3,in2,&value);
			r9 = value;
		}
		else
		     printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
		break;
	    case SAL_PCI_CONFIG_WRITE:
		if (current->domain == dom0) {
			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
			    (in4 > 1) ||
			    (in2 > 8) || (in2 & (in2-1)))
				printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
					in1,in4,in2,in3);
			// note that args are in a different order!!
			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
		}
		else
		     printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
		break;
	    case SAL_SET_VECTORS:
 		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
 			if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
 				/* Sanity check: cs_length1 must be 0,
 				   second vector is reserved.  */
 				status = -2;
 			}
 			else {
				struct domain *d = current->domain;
				d->arch.sal_data->boot_rdv_ip = in2;
				d->arch.sal_data->boot_rdv_r1 = in3;
			}
 		}
 		else
		{
			if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
				BUG();
			sal_vectors[in1].vector_type	= in1;
			sal_vectors[in1].handler_addr1	= in2;
			sal_vectors[in1].gp1		= in3;
			sal_vectors[in1].handler_len1	= in4;
			sal_vectors[in1].handler_addr2	= in5;
			sal_vectors[in1].gp2		= in6;
			sal_vectors[in1].handler_len2	= in7;
		}
		break;
	    case SAL_GET_STATE_INFO:
		if (current->domain == dom0) {
			sal_queue_entry_t *e;
			unsigned long flags;
			struct smp_call_args_t arg;

			spin_lock_irqsave(&sal_queue_lock, flags);
			if (!sal_queue || list_empty(&sal_queue[in1])) {
				sal_log_record_header_t header;
				XEN_GUEST_HANDLE(void) handle =
					*(XEN_GUEST_HANDLE(void)*)&in3;

				IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
				               "no sal_queue entry found.\n",
				               rec_name[in1]);
				memset(&header, 0, sizeof(header));

				if (copy_to_guest(handle, &header, 1)) {
					printk("sal_emulator: "
					       "SAL_GET_STATE_INFO can't copy "
					       "empty header to user: 0x%lx\n",
					       in3);
				}
				status = IA64_SAL_NO_INFORMATION_AVAILABLE;
				r9 = 0;
				spin_unlock_irqrestore(&sal_queue_lock, flags);
				break;
			}
			e = list_entry(sal_queue[in1].next,
			               sal_queue_entry_t, list);

			list_del(&e->list);
			spin_unlock_irqrestore(&sal_queue_lock, flags);

			IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
			               "on CPU#%d.\n",
			               rec_name[e->sal_info_type],
			               rec_name[in1], e->cpuid);

			arg.type = e->sal_info_type;
			arg.target = in3;
			arg.corrected = !!((in1 != e->sal_info_type) && 
			                (e->sal_info_type == SAL_INFO_TYPE_MCA));
			arg.domain = current->domain;
			arg.status = 0;

			if (e->cpuid == smp_processor_id()) {
				IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
				get_state_info_on(&arg);
			} else {
				int ret;
				IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
				ret = smp_call_function_single(e->cpuid,
				                               get_state_info_on,
				                               &arg, 0, 1);
				if (ret < 0) {
					printk("SAL_GET_STATE_INFO "
					       "smp_call_function_single error:"
					       " %d\n", ret);
					arg.ret = 0;
					arg.status =
					     IA64_SAL_NO_INFORMATION_AVAILABLE;
				}
			}
			r9 = arg.ret;
			status = arg.status;
			if (r9 == 0) {
				xfree(e);
			} else {
				/* Re-add the entry to sal_queue */
				spin_lock_irqsave(&sal_queue_lock, flags);
				list_add(&e->list, &sal_queue[in1]);
				spin_unlock_irqrestore(&sal_queue_lock, flags);
			}
		} else {
Example #2
/**============================================================================
  @brief hdd_tx_fetch_packet_cbk() - Callback function invoked by TL to 
  fetch a packet for transmission.

  @param vosContext   : [in] pointer to VOS context  
  @param staId        : [in] Station for which TL is requesting a pkt
  @param ac           : [in] access category requested by TL
  @param pVosPacket   : [out] pointer to the VOS packet pointer
  @param pPktMetaInfo : [out] pointer to meta info for the pkt 
  
  @return             : VOS_STATUS_E_EMPTY if no packets to transmit
                      : VOS_STATUS_E_FAILURE if any errors encountered 
                      : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
VOS_STATUS hdd_tx_fetch_packet_cbk( v_VOID_t *vosContext,
                                    v_U8_t *pStaId,
                                    WLANTL_ACEnumType  ac,
                                    vos_pkt_t **ppVosPacket,
                                    WLANTL_MetaInfoType *pPktMetaInfo )
{
   VOS_STATUS status = VOS_STATUS_E_FAILURE;
   hdd_adapter_t *pAdapter = NULL;
   hdd_context_t *pHddCtx = NULL;
   hdd_list_node_t *anchor = NULL;
   skb_list_node_t *pktNode = NULL;
   struct sk_buff *skb = NULL;
   vos_pkt_t *pVosPacket = NULL;
   v_MACADDR_t* pDestMacAddress = NULL;
   v_TIME_t timestamp;
   WLANTL_ACEnumType newAc;
   v_SIZE_t size = 0;
   tANI_U8   acAdmitted, i;

   //Sanity check on inputs
   if ( ( NULL == vosContext ) || 
        ( NULL == pStaId ) || 
        ( NULL == ppVosPacket ) ||
        ( NULL == pPktMetaInfo ) )
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: Null Params being passed", __FUNCTION__);
      return VOS_STATUS_E_FAILURE;
   }

   //Get the HDD context.
   pHddCtx = (hdd_context_t *)vos_get_context( VOS_MODULE_ID_HDD, vosContext );
   if(pHddCtx == NULL)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: HDD adapter context is Null", __FUNCTION__);
      return VOS_STATUS_E_FAILURE;
   }
 
   pAdapter = pHddCtx->sta_to_adapter[*pStaId];
   if( NULL == pAdapter )
   {
      VOS_ASSERT(0);
      return VOS_STATUS_E_FAILURE;
   }

   ++pAdapter->hdd_stats.hddTxRxStats.txFetched;

   *ppVosPacket = NULL;

   //Make sure the AC being asked for is sane
   if( ac >= WLANTL_MAX_AC || ac < 0)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: Invalid AC %d passed by TL", __FUNCTION__, ac);
      return VOS_STATUS_E_FAILURE;
   }

   ++pAdapter->hdd_stats.hddTxRxStats.txFetchedAC[ac];

#ifdef HDD_WMM_DEBUG
   VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,"%s: AC %d passed by TL", __FUNCTION__, ac);
#endif // HDD_WMM_DEBUG

   // We find an AC with packets
   // or we determine we have no more packets to send
   // HDD is not allowed to change AC.

   // Has this AC been admitted?  EAPOL packets are still allowed through
   // when the station is not yet authenticated.
   if (unlikely((0==pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessAllowed) &&
                (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.uIsAuthenticated))
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchEmpty;
#ifdef HDD_WMM_DEBUG
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                 "%s: no packets pending", __FUNCTION__);
#endif // HDD_WMM_DEBUG
      return VOS_STATUS_E_FAILURE;
   }
      
   // do we have any packets pending in this AC?
   hdd_list_size( &pAdapter->wmm_tx_queue[ac], &size ); 
   if( size >  0 )
   {
       // yes, so process it
#ifdef HDD_WMM_DEBUG
       VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                       "%s: AC %d has packets pending", __FUNCTION__, ac);
#endif // HDD_WMM_DEBUG
   }
   else
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchEmpty;
#ifdef HDD_WMM_DEBUG
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                   "%s: no packets pending", __FUNCTION__);
#endif // HDD_WMM_DEBUG
      return VOS_STATUS_E_FAILURE;
   }

   //Get the vos packet. I don't want to dequeue and enqueue again if we are out of VOS resources 
   //This simplifies the locking and unlocking of Tx queue
   status = vos_pkt_wrap_data_packet( &pVosPacket, 
                                      VOS_PKT_TYPE_TX_802_3_DATA, 
                                      NULL, //OS Pkt is not being passed
                                      hdd_tx_low_resource_cbk, 
                                      pAdapter );

   if (status == VOS_STATUS_E_ALREADY || status == VOS_STATUS_E_RESOURCES)
   {
      //Remember VOS is in a low resource situation
      pAdapter->isVosOutOfResource = VOS_TRUE;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchLowResources;
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,"%s: VOSS in Low Resource scenario", __FUNCTION__);
      //TL will now think we have no more packets in this AC
      return VOS_STATUS_E_FAILURE;
   }

   //Remove the packet from the queue
   spin_lock_bh(&pAdapter->wmm_tx_queue[ac].lock);
   status = hdd_list_remove_front( &pAdapter->wmm_tx_queue[ac], &anchor );
   spin_unlock_bh(&pAdapter->wmm_tx_queue[ac].lock);

   if(VOS_STATUS_SUCCESS == status)
   {
      //If success then we got a valid packet from some AC
      pktNode = list_entry(anchor, skb_list_node_t, anchor);
      skb = pktNode->skb;
   }
   else
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError;
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "%s: Error in de-queuing "
         "skb from Tx queue status = %d", __FUNCTION__, status );
      vos_pkt_return_packet(pVosPacket);
      return VOS_STATUS_E_FAILURE;
   }

   //Attach skb to VOS packet.
   status = vos_pkt_set_os_packet( pVosPacket, skb );
   if (status != VOS_STATUS_SUCCESS)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,"%s: Error attaching skb", __FUNCTION__);
      vos_pkt_return_packet(pVosPacket);
      ++pAdapter->stats.tx_dropped;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError;
      kfree_skb(skb);
      return VOS_STATUS_E_FAILURE;
   }

   //Just being paranoid. To be removed later
   if(pVosPacket == NULL)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,"%s: VOS packet returned by VOSS is NULL", __FUNCTION__);
      ++pAdapter->stats.tx_dropped;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError;
      kfree_skb(skb);
      return VOS_STATUS_E_FAILURE;
   }

   //Return VOS packet to TL;
   *ppVosPacket = pVosPacket;

   //Fill out the meta information needed by TL
   //FIXME This timestamp is really the time stamp of wrap_data_packet
   vos_pkt_get_timestamp( pVosPacket, &timestamp );
   pPktMetaInfo->usTimeStamp = (v_U16_t)timestamp;
   
   if(pAdapter->sessionCtx.station.conn_info.uIsAuthenticated == VOS_TRUE)
      pPktMetaInfo->ucIsEapol = 0;       
   else 
      pPktMetaInfo->ucIsEapol = hdd_IsEAPOLPacket( pVosPacket ) ? 1 : 0;

#ifdef FEATURE_WLAN_WAPI
   // Override usIsEapol value when it is zero for the WAPI case
      pPktMetaInfo->ucIsWai = hdd_IsWAIPacket( pVosPacket ) ? 1 : 0;
#endif /* FEATURE_WLAN_WAPI */

   if ((HDD_WMM_USER_MODE_NO_QOS == pHddCtx->cfg_ini->WmmMode) ||
       (!pAdapter->hddWmmStatus.wmmQap))
   {
      // either we don't want QoS or the AP doesn't support QoS
      pPktMetaInfo->ucUP = 0;
      pPktMetaInfo->ucTID = 0;
   }
   else
   {
      /* 1. Check if ACM is set for this AC 
       * 2. If set, check if this AC had already admitted 
       * 3. If not already admitted, downgrade the UP to next best UP */
      if(!pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessRequired ||
         pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcTspecValid)
      {
        pPktMetaInfo->ucUP = pktNode->userPriority;
        pPktMetaInfo->ucTID = pPktMetaInfo->ucUP;
      }
      else
      {
        //Downgrade the UP
        acAdmitted = pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcTspecValid;
        newAc = WLANTL_AC_BK;
        for (i=ac-1; i>0; i--)
        {
            if (pAdapter->hddWmmStatus.wmmAcStatus[i].wmmAcAccessRequired == 0)
            {
                newAc = i;
                break;
            }
        }
        pPktMetaInfo->ucUP = hddWmmAcToHighestUp[newAc];
        pPktMetaInfo->ucTID = pPktMetaInfo->ucUP;
        VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,"Downgrading UP %d to UP %d ", pktNode->userPriority, pPktMetaInfo->ucUP);
      }
   }

   pPktMetaInfo->ucType = 0;          //FIXME Don't know what this is
   pPktMetaInfo->ucDisableFrmXtl = 0; //802.3 frame so we need to xlate
   if ( 1 < size )
   {
       pPktMetaInfo->bMorePackets = 1; //HDD has more packets to send
   }
   else
   {
       pPktMetaInfo->bMorePackets = 0;
   }

   //Extract the destination address from ethernet frame
   pDestMacAddress = (v_MACADDR_t*)skb->data;
   pPktMetaInfo->ucBcast = vos_is_macaddr_broadcast( pDestMacAddress ) ? 1 : 0;
   pPktMetaInfo->ucMcast = vos_is_macaddr_group( pDestMacAddress ) ? 1 : 0;

   

   // if we are in a backpressure situation see if we can turn the hose back on
   if ( (pAdapter->isTxSuspended[ac]) &&
        (size <= HDD_TX_QUEUE_LOW_WATER_MARK) )
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDePressured;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDePressuredAC[ac];
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
                 "%s: TX queue[%d] re-enabled", __FUNCTION__, ac);
      pAdapter->isTxSuspended[ac] = VOS_FALSE;      
      netif_tx_wake_queue(netdev_get_tx_queue(pAdapter->dev, 
                                        skb_get_queue_mapping(skb) ));
   }


   // We're giving the packet to TL so consider it transmitted from
   // a statistics perspective.  We account for it here instead of
   // when the packet is returned for two reasons.  First, TL will
   // manipulate the skb to the point where the len field is not
   // accurate, leading to inaccurate byte counts if we account for
   // it later.  Second, TL does not provide any feedback as to
   // whether or not the packet was successfully sent over the air,
   // so the packet counts will be the same regardless of where we
   // account for them
   pAdapter->stats.tx_bytes += skb->len;
   ++pAdapter->stats.tx_packets;
   ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeued;
   ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeuedAC[ac];

   if(pHddCtx->cfg_ini->thermalMitigationEnable)
   {
      if(mutex_lock_interruptible(&pHddCtx->tmInfo.tmOperationLock))
      {
         VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                    "%s: Tm Lock fail", __FUNCTION__);
         return VOS_STATUS_E_FAILURE;
      }
      if(WLAN_HDD_TM_LEVEL_1 < pHddCtx->tmInfo.currentTmLevel)
      {
         if(0 == pHddCtx->tmInfo.txFrameCount)
         {
            /* Just recovered from sleep timeout */
            pHddCtx->tmInfo.lastOpenTs = timestamp;
         }

         if(((timestamp - pHddCtx->tmInfo.lastOpenTs) > (pHddCtx->tmInfo.tmAction.txOperationDuration / 10)) &&
            (pHddCtx->tmInfo.txFrameCount >= pHddCtx->tmInfo.tmAction.txBlockFrameCountThreshold))
         {
            spin_lock(&pAdapter->wmm_tx_queue[ac].lock);
            /* During TX open duration, TX frame count is larger than threshold
             * Block TX during Sleep time */
            netif_tx_stop_all_queues(pAdapter->dev);
            spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);
            pHddCtx->tmInfo.lastblockTs = timestamp;
            if(VOS_TIMER_STATE_STOPPED == vos_timer_getCurrentState(&pHddCtx->tmInfo.txSleepTimer))
            {
               vos_timer_start(&pHddCtx->tmInfo.txSleepTimer, pHddCtx->tmInfo.tmAction.txSleepDuration);
            }
         }
         else if(((timestamp - pHddCtx->tmInfo.lastOpenTs) > (pHddCtx->tmInfo.tmAction.txOperationDuration / 10)) &&
                 (pHddCtx->tmInfo.txFrameCount < pHddCtx->tmInfo.tmAction.txBlockFrameCountThreshold))
         {
            /* During TX open duration, TX frame count is less than threshold
             * Reset count and timestamp to prepare next cycle */
            pHddCtx->tmInfo.lastOpenTs = timestamp;
            pHddCtx->tmInfo.txFrameCount = 0;
         }
         else
         {
            /* Do Nothing */
         }
         pHddCtx->tmInfo.txFrameCount++;
      }
      mutex_unlock(&pHddCtx->tmInfo.tmOperationLock);
   }


#ifdef HDD_WMM_DEBUG
   VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,"%s: Valid VOS PKT returned to TL", __FUNCTION__);
#endif // HDD_WMM_DEBUG

   return status;
}
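The UP-downgrade loop near the end of the QoS branch is easier to follow with concrete values. A hedged walk-through, assuming the usual access-category ordering (BK=0, BE=1, VI=2, VO=3); the scenario is illustrative, not taken from the driver:

/* Suppose the packet was queued on ac = VO(3), ACM is required for VO and
 * no valid TSPEC exists, so we take the downgrade branch:
 *   i = 2: VI also has wmmAcAccessRequired set  -> keep scanning
 *   i = 1: BE does not require admission        -> newAc = BE, break
 * ucUP/ucTID are then taken from hddWmmAcToHighestUp[newAc]; if every AC
 * from BE up to ac-1 requires admission, newAc keeps its WLANTL_AC_BK
 * default. */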
Example #3
/*
 * Parse a locale section in a localization file (UTF-8, no BOM)
 * NB: this call is reentrant for the "base" command support
 * TODO: Working on memory rather than on file would improve performance
 */
BOOL get_loc_data_file(const char* filename, loc_cmd* lcmd)
{
	size_t bufsize = 1024;
	static FILE* fd = NULL;
	static BOOL populate_default = FALSE;
	char *buf = NULL;
	size_t i = 0;
	int r = 0, line_nr_incr = 1;
	int c = 0, eol_char = 0;
	int start_line, old_loc_line_nr = 0;
	BOOL ret = FALSE, eol = FALSE, escape_sequence = FALSE, reentrant = (fd != NULL);
	long offset, cur_offset = -1, end_offset;
	// The default locale is always the first one
	loc_cmd* default_locale = list_entry(locale_list.next, loc_cmd, list);

	if ((lcmd == NULL) || (default_locale == NULL)) {
		uprintf("localization: no %slocale", (default_locale == NULL)?"default ":" ");
		goto out;
	}

	if (msg_table == NULL) {
		// Initialize the default message table (usually en-US)
		msg_table = default_msg_table;
		uprintf("localization: initializing default message table");
		populate_default = TRUE;
		get_loc_data_file(filename, default_locale);
		populate_default = FALSE;
	}

	if (reentrant) {
		// Called from a 'b' command - no need to reopen the file,
		// just save the current offset and current line number
		cur_offset = ftell(fd);
		old_loc_line_nr = loc_line_nr;
	} else {
		if ((filename == NULL) || (filename[0] == 0))
			return FALSE;
		if (!populate_default) {
			if (lcmd == default_locale) {
				// The default locale has already been populated => nothing to do
				msg_table = default_msg_table;
				return TRUE;
			}
			msg_table = current_msg_table;
		}
		free_dialog_list();
		fd = open_loc_file(filename);
		if (fd == NULL)
			goto out;
	}

	offset = (long)lcmd->num[0];
	end_offset = (long)lcmd->num[1];
	start_line = lcmd->line_nr;
	loc_line_nr = start_line;
	buf = (char*) malloc(bufsize);
	if (buf == NULL) {
		uprintf("localization: could not allocate line buffer\n");
		goto out;
	}

	if (fseek(fd, offset, SEEK_SET) != 0) {
		uprintf("localization: could not rewind\n");
		goto out;
	}

	do {	// custom readline handling for string collation, realloc, line numbers, etc.
		c = getc(fd);
		switch(c) {
		case EOF:
			buf[i] = 0;
			if (!eol)
				loc_line_nr += line_nr_incr;
			get_loc_data_line(buf);
			break;
		case '\r':
		case '\n':
			if (escape_sequence) {
				escape_sequence = FALSE;
				break;
			}
			// This assumes that the EOL sequence is always the same throughout the file
			if (eol_char == 0)
				eol_char = c;
			if (c == eol_char) {
				if (eol) {
					line_nr_incr++;
				} else {
					loc_line_nr += line_nr_incr;
					line_nr_incr = 1;
				}
			}
			buf[i] = 0;
			if (!eol) {
				// Strip trailing spaces (for string collation)
				for (r = ((int)i)-1; (r>0) && ((buf[r]==space[0])||(buf[r]==space[1])); r--);
				if (r < 0)
					r = 0;
				eol = TRUE;
			}
			break;
		case ' ':
		case '\t':
			if (escape_sequence) {
				escape_sequence = FALSE;
				break;
			}
			if (!eol) {
				buf[i++] = (char)c;
			}
			break;
		case '\\':
			if (!escape_sequence) {
				escape_sequence = TRUE;
				break;
			}
			// fall through on escape sequence
		default:
			if (escape_sequence) {
				switch (c) {
				case 'n':	// \n -> CRLF
					buf[i++] = '\r';
					buf[i++] = '\n';
					break;
				case '"':	// \" carried as is
					buf[i++] = '\\';
					buf[i++] = '"';
					break;
				case '\\':
					buf[i++] = '\\';
					break;
				default:	// ignore any other escape sequence
					break;
				}
				escape_sequence = FALSE;
			} else {
				// Collate multiline strings
				if ((eol) && (c == '"') && (buf[r] == '"')) {
					i = r;
					eol = FALSE;
					break;
				}
				if (eol) {
					get_loc_data_line(buf);
					eol = FALSE;
					i = 0;
					r = 0;
				}
				buf[i++] = (char)c;
			}
			break;
		}
		if ((c == EOF) || (ftell(fd) > end_offset))
			break;
		// Have at least 2 chars extra, for \r\n sequences
		if (i >= bufsize-2) {
			bufsize *= 2;
			if (bufsize > 32768) {
				uprintf("localization: requested line buffer is larger than 32K!\n");
				goto out;
			}
			buf = (char*) _reallocf(buf, bufsize);
			if (buf == NULL) {
				uprintf("localization: could not grow line buffer\n");
				goto out;
			}
		}
	} while(1);
	ret = TRUE;

out:
	// Don't close on a reentrant call
	if (reentrant) {
		if ((cur_offset < 0) || (fseek(fd, cur_offset, SEEK_SET) != 0)) {
			uprintf("localization: unable to reset reentrant position\n");
			ret = FALSE;
		}
		loc_line_nr = old_loc_line_nr;
	} else if (fd != NULL) {
		fclose(fd);
		fd = NULL;
	}
	safe_free(buf);
	return ret;
}
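To make the string collation and escape handling concrete, here is a hedged worked example (the input lines are illustrative, not from a real .loc file): a line ending in a double quote followed by a line starting with one is merged, the two adjoining quotes are dropped, and \n inside the text expands to CR+LF before the collated line is handed to get_loc_data_line():

/* Input lines:                           Collated buffer:
 *   t MSG_001 "first part "
 *   "and second part\n"          ->      t MSG_001 "first part and second part<CR><LF>"
 */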
Example #4
void del_node(list_head *entry) {
    list_del(entry);
    st_struct *s = list_entry(entry, st_struct, list);
    free(s);
}
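Every snippet in this collection leans on the same trick: list_entry() is container_of(), i.e. pointer arithmetic from an embedded list node back to the structure that contains it. A minimal, self-contained sketch of that mechanism in plain C (the macro below is the common offsetof() formulation, not copied from any of the projects quoted here):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Recover the enclosing struct by subtracting the member's offset. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct st_struct {
	int value;
	struct list_head list;	/* embedded node, as in del_node() above */
};

int main(void)
{
	struct st_struct s = { .value = 42 };
	struct list_head *node = &s.list;	/* all a list ever stores */
	struct st_struct *back = list_entry(node, struct st_struct, list);

	printf("%d\n", back->value);	/* prints 42 */
	return 0;
}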
Example #5
File: lib-msg.c Project: pscedu/pfl
void
lnet_finalize (__unusedx lnet_ni_t *ni, lnet_msg_t *msg, int status)
{
#ifdef __KERNEL__
        int                i;
        int                my_slot;
#endif
        lnet_libmd_t      *md;

        LASSERT (!in_interrupt ());

        if (msg == NULL)
                return;
#if 0
        CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
               lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
               msg->msg_target_is_router ? "t" : "",
               msg->msg_routing ? "X" : "",
               msg->msg_ack ? "A" : "",
               msg->msg_sending ? "S" : "",
               msg->msg_receiving ? "R" : "",
               msg->msg_delayed ? "d" : "",
               msg->msg_txcredit ? "C" : "",
               msg->msg_peertxcredit ? "c" : "",
               msg->msg_rtrcredit ? "F" : "",
               msg->msg_peerrtrcredit ? "f" : "",
               msg->msg_onactivelist ? "!" : "",
               msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
               msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
#endif
        LNET_LOCK();

        LASSERT (msg->msg_onactivelist);

        msg->msg_ev.status = status;

        md = msg->msg_md;
        if (md != NULL) {
                int      unlink;

                /* Now it's safe to drop my caller's ref */
                md->md_refcount--;
                LASSERT (md->md_refcount >= 0);

                unlink = lnet_md_unlinkable(md);

                msg->msg_ev.unlinked = unlink;

                if (md->md_eq != NULL)
                        lnet_enq_event_locked(md->md_eq, &msg->msg_ev);

                if (unlink)
                        lnet_md_unlink(md);

                msg->msg_md = NULL;
        }

        list_add_tail (&msg->msg_list, &the_lnet.ln_finalizeq);

        /* Recursion breaker.  Don't complete the message here if I am (or
         * enough other threads are) already completing messages */

#ifdef __KERNEL__
        my_slot = -1;
        for (i = 0; i < the_lnet.ln_nfinalizers; i++) {
                if (the_lnet.ln_finalizers[i] == cfs_current())
                        goto out;
                if (my_slot < 0 && the_lnet.ln_finalizers[i] == NULL)
                        my_slot = i;
        }
        if (my_slot < 0)
                goto out;

        the_lnet.ln_finalizers[my_slot] = cfs_current();
#else
        if (the_lnet.ln_finalizing)
                goto out;

        the_lnet.ln_finalizing = 1;
#endif

        while (!list_empty(&the_lnet.ln_finalizeq)) {
                msg = list_entry(the_lnet.ln_finalizeq.next,
                                 lnet_msg_t, msg_list);
                
                list_del(&msg->msg_list);

                /* NB drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too */
                lnet_complete_msg_locked(msg);
        }

#ifdef __KERNEL__
        the_lnet.ln_finalizers[my_slot] = NULL;
#else
        the_lnet.ln_finalizing = 0;
#endif

 out:
        LNET_UNLOCK();
}
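The "recursion breaker" above is a pattern worth isolating: queue the finished message, then drain the queue only if this thread can claim one of a fixed number of finalizer slots. A stripped-down sketch of the same idea (the names and the NSLOTS constant are illustrative, not LNet's; the caller is assumed to hold the lock that protects the slot array, as lnet_finalize() holds LNET_LOCK()):

#define NSLOTS 4
static void *finalizer_slots[NSLOTS];	/* protected by the caller's lock */

/* Returns the claimed slot index, or -1 if this thread is already draining
 * or every slot is taken -- in which case the work is simply left on the
 * queue for the threads that already hold slots. */
static int claim_finalizer_slot(void *self)
{
	int free_slot = -1;
	int i;

	for (i = 0; i < NSLOTS; i++) {
		if (finalizer_slots[i] == self)
			return -1;
		if (free_slot < 0 && finalizer_slots[i] == NULL)
			free_slot = i;
	}
	if (free_slot >= 0)
		finalizer_slots[free_slot] = self;
	return free_slot;
}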
Example #6
void dump_fd_list_item(item_t * it) {
	fd_item_t * fd_item = list_entry(it, fd_item_t, item);
	dump_fd_item(fd_item);
}
Example #7
/***********************************************************************
F* Function:     static int process_mon_chains(void) P*A*Z*
 *
P* Parameters:   none
P*
P* Returnvalue:  int
P*                - 0 if the function returns at all
 *
Z* Intention:    This is the core function of the chain functionality.
Z*               The list with the monitored chain is processed and
Z*               expired entries handled appropriately by stepping up
Z*               the escalation ladder.  The escalation actions are
Z*               triggered from here.
 *
D* Design:       [email protected]
C* Coding:       [email protected]
V* Verification: [email protected]
 ***********************************************************************/
static int process_mon_chains(void)
{
	struct list_head *ptr;
	monitored_chain_t *entry;
	int sig;

	spin_lock(&mon_lock);

	for (ptr = mon_list.next; ptr != &mon_list; ptr = ptr->next) {
		entry = list_entry(ptr, monitored_chain_t, list);
		if (time_after_eq(jiffies, entry->expires)) {
			debugk("%s: WD monitor expired for id %d\n",
				__FUNCTION__, entry->chainid);
			switch (entry->action[entry->escalation]) {
			case WD_ACTION_SIGNAL:
				debugk("WD: sending user signal for key "
					"%d...\n", entry->chainid);
				sig = (entry->signal) ? entry->signal : SIGTERM;
				if (entry->pid)
					kill_proc_info(sig, SEND_SIG_PRIV, entry->pid);
				break;
			case WD_ACTION_KILL:
				debugk("WD: sending KILL signal for key "
					"%d...\n", entry->chainid);
				if (entry->pid)
					kill_proc_info(SIGKILL, SEND_SIG_PRIV, entry->pid);
				break;
			case WD_ACTION_REBOOT:
				spin_unlock(&mon_lock);
				wd_unregister_mon_chain(entry->chainid);
				printk("WD: Rebooting system for key "
					"%d...\n", entry->chainid);
				flush_cache_all();
				/*
				 * XXX This is not safe to call in interrupt
				 * context.
				 */
				sys_reboot(LINUX_REBOOT_MAGIC1,
					LINUX_REBOOT_MAGIC2,
					LINUX_REBOOT_CMD_RESTART,
					NULL);
				break;
			case WD_ACTION_RESET:
				printk("WD: Resetting system for key "
					"%d...\n", entry->chainid);
				BUG_ON(wd_hw_functions.wd_machine_restart
					== NULL);
				wd_hw_functions.wd_machine_restart();
				break;

			default:
				debugk("WD: undefined action %d\n",
					entry->action[entry->escalation]);
				break;
			}
			entry->escalation++;
			entry->expires = jiffies + HZ * 
				entry->timer_count[entry->escalation];
			list_del(&entry->list);
			insert_mon_chain(entry);
		} else
			/* The list is sorted, so we can stop here */
			break;
	}

	spin_unlock(&mon_lock);

	return 0;
}
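A hedged illustration of how the escalation ladder plays out (the values are made up, not from the driver, and the initial expiry is assumed to have been armed with timer_count[0] at registration):

/* Example configuration:
 *   action[]      = { WD_ACTION_SIGNAL, WD_ACTION_KILL, WD_ACTION_REBOOT }
 *   timer_count[] = { 10, 5, 5 }        -- seconds per escalation step
 * If the chain is never refreshed: at t=10s action[0] sends the chain's
 * signal and escalation becomes 1; at t=15s action[1] sends SIGKILL and
 * escalation becomes 2; at t=20s action[2] reboots the system.  Each expiry
 * re-queues the entry with expires = jiffies + HZ * timer_count[escalation]. */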
Example #8
/*******************************************************************************
 *
 * Function   :  PlxNotificationWait
 *
 * Description:  Put the process to sleep until wake-up event occurs or timeout
 *
 ******************************************************************************/
PLX_STATUS
PlxNotificationWait(
    DEVICE_EXTENSION *pdx,
    VOID             *pUserWaitObject,
    PLX_UINT_PTR      Timeout_ms
    )
{
    long              Wait_rc;
    PLX_STATUS        rc;
    PLX_UINT_PTR      Timeout_sec;
    unsigned long     flags;
    struct list_head *pEntry;
    PLX_WAIT_OBJECT  *pWaitObject;


    // Find the wait object in the list
    spin_lock_irqsave(
        &(pdx->Lock_WaitObjectsList),
        flags
        );

    pEntry = pdx->List_WaitObjects.next;

    // Find the wait object and wait for wake-up event
    while (pEntry != &(pdx->List_WaitObjects))
    {
        // Get the wait object
        pWaitObject =
            list_entry(
                pEntry,
                PLX_WAIT_OBJECT,
                ListEntry
                );

        // Check if the object address matches the Tag
        if (pWaitObject == pUserWaitObject)
        {
            spin_unlock_irqrestore(
                &(pdx->Lock_WaitObjectsList),
                flags
                );

            DebugPrintf((
                "Waiting for Interrupt wait object (%p) to wake-up\n",
                pWaitObject
                ));

            /*********************************************************
             * Convert milliseconds to jiffies.  The following
             * formula is used:
             *
             *                      ms * HZ
             *           jiffies = ---------
             *                       1,000
             *
             *
             *  where:  HZ      = System-defined clock ticks per second
             *          ms      = Timeout in milliseconds
             *          jiffies = Timeout expressed in clock ticks
             *
             *  Note: Since the timeout is stored as a "long" integer,
             *        the conversion to jiffies is split into two operations.
             *        The first is on number of seconds and the second on
             *        the remaining millisecond precision.  This minimizes
             *        overflow when the specified timeout is large and also
             *        keeps millisecond precision.
             ********************************************************/
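            /*
             * Worked example (values are illustrative; HZ is platform
             * defined): with HZ = 250 and Timeout_ms = 4100,
             *     Timeout_sec = 4100 / 1000         = 4
             *     Timeout_ms  = 4100 - (4 * 1000)   = 100
             *     Timeout_sec = 4 * 250             = 1000 jiffies
             *     Timeout_ms  = (100 * 250) / 1000  = 25 jiffies
             *     total       = 1000 + 25           = 1025 jiffies
             */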

            // Perform conversion if not infinite wait
            if (Timeout_ms != PLX_TIMEOUT_INFINITE)
            {
                // Get number of seconds
                Timeout_sec = Timeout_ms / 1000;

                // Store milliseconds precision
                Timeout_ms = Timeout_ms - (Timeout_sec * 1000);

                // Convert to jiffies
                Timeout_sec = Timeout_sec * HZ;
                Timeout_ms  = (Timeout_ms * HZ) / 1000;

                // Compute total jiffies
                Timeout_ms = Timeout_sec + Timeout_ms;
            }

            // Timeout parameter is signed and can't be negative
            if ((signed long)Timeout_ms < 0)
            {
                // Shift out negative bit
                Timeout_ms = Timeout_ms >> 1;
            }

            // Increment number of sleeping threads
            atomic_inc( &pWaitObject->SleepCount );

            do
            {
                // Wait for interrupt event
                Wait_rc =
                    wait_event_interruptible_timeout(
                        pWaitObject->WaitQueue,
                        (pWaitObject->state != PLX_STATE_WAITING),
                        Timeout_ms
                        );
            }
            while ((Wait_rc == 0) && (Timeout_ms == PLX_TIMEOUT_INFINITE));

            if (Wait_rc > 0)
            {
                // Condition met or interrupt occurred
                DebugPrintf(("Interrupt wait object awakened\n"));
                rc = ApiSuccess;
            }
            else if (Wait_rc == 0)
            {
                // Timeout reached
                DebugPrintf(("Timeout waiting for interrupt\n"));
                rc = ApiWaitTimeout;
            }
            else
            {
                // Interrupted by a signal
                DebugPrintf(("Interrupt wait object interrupted by signal or error\n"));
                rc = ApiWaitCanceled;
            }

            // If the object is in the triggered state, reset it to the waiting state
            if (pWaitObject->state == PLX_STATE_TRIGGERED)
                pWaitObject->state = PLX_STATE_WAITING;

            // Decrement number of sleeping threads
            atomic_dec( &pWaitObject->SleepCount );

            return rc;
        }
Example #9
int main(int argc, char **argv)  
{   
#if 1
     if(0 == isArbiterExist())
    {
        marbit_send_log(ERROR,"arbiter is not start!\n");
        exit(1);
    }
#endif 
    //init sys info
    if(0 != init_sys_info())
    {
        marbit_send_log(ERROR,"Failed to init_sys_info!\n");
        exit(1);
    }
  
    //parse the parameters
    if(0 != parse_input_parameters(argc, argv))
    {
        destroy_info();
        
        input_error();
        
        exit(1);
    }

    //time_t begin_time = time(NULL);
  //  time_t do_process_data_stream_time= time(NULL);
   // time_t do_process_data_aggregation_time= time(NULL);
   // time_t do_process_sort_time = time(NULL);
   // time_t print_baselink_time = time(NULL);
    //time_t print_sorted_list_time = time(NULL);


    struct timeval start, getFromArbiter, printLink, dataAggregation, quickSort, printSort, end;
    gettimeofday( &start, NULL );
        
#if 1
    //data_stream, check need get data stream from arbiter or configure file
    if(0 == isNeedReadFromDB(DATA_STREAM_FILE_PATH, g_flush_interval))
    {  
        //set_timer();
        if(0 != do_process_from_arbiter())
        {
            marbit_send_log(ERROR,"Failed to get data stream from arbiter\n");
            exit(1);
        }
    }
    gettimeofday( &getFromArbiter, NULL );
#endif

    if(SORTED_BASE_LINK == g_sorted_list_index)
    {
        print_baselink_info(DATA_STREAM_FILE_PATH);
        gettimeofday( &printLink, NULL );
    }
    else if(SORTED_BASE_APP == g_sorted_list_index || SORTED_BASE_IP == g_sorted_list_index)
    {
        //data_aggregation
        do_process_data_aggregation();
        gettimeofday( &dataAggregation, NULL );

        #if 1
        
        MergeSort(&merglist);  
        gettimeofday( &quickSort, NULL );
        
        printMergeList(merglist);
         gettimeofday( &printSort, NULL );
         
        destroyMergelist(merglist);

        #else
        struct list_head *head = &sorted_list_arry[SORTED_BASE_AGG].list;
        //struct list_head *head = &sorted_list_arry[g_sorted_list_index].list;
        
        struct list_head *first = head->next;
        struct list_head *last = head->prev;

        sorted_node_t *pstHead = list_entry(head, sorted_node_t, list); 

        quick_sort(head, first, last);
        gettimeofday( &quickSort, NULL );

        print_sorted_list(SORTED_BASE_AGG);
        //print_sorted_list(g_sorted_list_index);
        gettimeofday( &printSort, NULL );
        #endif
    }

    destroy_info();
    gettimeofday( &end, NULL );

    if(logdebug.g_trace_enable_flag > 0)
    {
        marbit_send_log(INFO,"=======================================\n");
        int timeuse = 0;
        
        if(SORTED_BASE_LINK == g_sorted_list_index)
        {
            timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
            marbit_send_log(INFO,"all used time = %lu us\n", timeuse);

            timeuse = 1000000 * ( getFromArbiter.tv_sec - start.tv_sec ) + getFromArbiter.tv_usec - start.tv_usec;
            marbit_send_log(INFO,"getFromArbiter used time = %lu us\n", timeuse);


            timeuse = 1000000 * ( printLink.tv_sec - getFromArbiter.tv_sec ) + printLink.tv_usec - getFromArbiter.tv_usec;
            marbit_send_log(INFO,"printLink used time = %lu us\n", timeuse);

            timeuse = 1000000 * ( end.tv_sec - printLink.tv_sec ) + end.tv_usec - printLink.tv_usec;
            marbit_send_log(INFO,"destroy used time = %lu us\n", timeuse);
        }
        else
        {
             timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
            marbit_send_log(INFO,"all used time = %lu us\n", timeuse);

            timeuse = 1000000 * ( getFromArbiter.tv_sec - start.tv_sec ) + getFromArbiter.tv_usec - start.tv_usec;
            marbit_send_log(INFO,"getFromArbiter used time = %lu us\n", timeuse);

            timeuse = 1000000 * ( dataAggregation.tv_sec - getFromArbiter.tv_sec ) + dataAggregation.tv_usec - getFromArbiter.tv_usec;
            marbit_send_log(INFO,"dataAggregation used time = %lu us\n", timeuse);

            timeuse = 1000000 * ( quickSort.tv_sec - dataAggregation.tv_sec ) + quickSort.tv_usec - dataAggregation.tv_usec;
            marbit_send_log(INFO,"quickSort used time = %lu us\n", timeuse);


            timeuse = 1000000 * ( printSort.tv_sec - quickSort.tv_sec ) + printSort.tv_usec - quickSort.tv_usec;
            marbit_send_log(INFO,"printSort used time = %lu us\n", timeuse);

            timeuse = 1000000 * ( end.tv_sec - printSort.tv_sec ) + end.tv_usec - printSort.tv_usec;
            marbit_send_log(INFO,"destroy used time = %lu us\n", timeuse);
        }
    }
    
    
    return 0;  
}  
Example #10
#include "initium_elf.h"

/** Size to use for tag list area. */
#define INITIUM_TAGS_SIZE       12288

/**
 * Helper functions.
 */

/** Find a tag in the image tag list.
 * @param loader        Loader internal data.
 * @param type          Type of tag to find.
 * @return              Pointer to tag, or NULL if not found. */
void *initium_find_itag(initium_loader_t *loader, uint32_t type) {
  list_foreach(&loader->itags, iter) {
    initium_itag_t *itag = list_entry(iter, initium_itag_t, header);

    if (itag->type == type)
      return itag->data;
  }

  return NULL;
}

/** Get the next tag of the same type.
 * @param loader        Loader internal data.
 * @param data          Current tag.
 * @return              Pointer to next tag of same type, or NULL if none found. */
void *initium_next_itag(initium_loader_t *loader, void *data) {
  initium_itag_t *itag = container_of(data, initium_itag_t, data);
  uint32_t type = itag->type;
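A hedged usage sketch of how the two helpers chain together (the wrapper and its name are hypothetical, not from the Initium source; it relies only on the documented contract that initium_next_itag() returns the next tag of the same type, or NULL when there are no more):

static void for_each_itag_of_type(initium_loader_t *loader, uint32_t type) {
  void *data;

  for (data = initium_find_itag(loader, type);
       data != NULL;
       data = initium_next_itag(loader, data)) {
    /* process the tag payload pointed to by data */
  }
}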
Example #11
/*******************************************************************************
 *
 * Function   :  PlxPciPhysicalMemoryFree
 *
 * Description:  Free previously allocated physically contiguous page-locked memory
 *
 ******************************************************************************/
PLX_STATUS
PlxPciPhysicalMemoryFree(
    DEVICE_EXTENSION *pdx,
    PLX_PHYSICAL_MEM *pPciMem
    )
{
    struct list_head    *pEntry;
    PLX_PHYS_MEM_OBJECT *pMemObject;


    spin_lock(
        &(pdx->Lock_PhysicalMemList)
        );

    pEntry = pdx->List_PhysicalMem.next;

    // Traverse list to find the desired list object
    while (pEntry != &(pdx->List_PhysicalMem))
    {
        // Get the object
        pMemObject =
            list_entry(
                pEntry,
                PLX_PHYS_MEM_OBJECT,
                ListEntry
                );

        // Check if the physical addresses matches
        if (pMemObject->BusPhysical == pPciMem->PhysicalAddr)
        {
            // Remove the object from the list
            list_del(
                pEntry
                );

            spin_unlock(
                &(pdx->Lock_PhysicalMemList)
                );

            // Release the buffer
            Plx_dma_buffer_free(
                pdx,
                pMemObject
                );

            // Release the list object
            kfree(
                pMemObject
                );

            return ApiSuccess;
        }

        // Jump to next item in the list
        pEntry = pEntry->next;
    }

    spin_unlock(
        &(pdx->Lock_PhysicalMemList)
        );

    DebugPrintf(("ERROR - buffer object not found in list\n"));

    return ApiInvalidData;
}
Example #12
static inline talpa_mount_struct *next_slave(talpa_mount_struct *p)
{
        return list_entry(p->mnt_slave.next, talpa_mount_struct, mnt_slave);
}
Example #13
static inline talpa_mount_struct *next_peer(talpa_mount_struct *p)
{
        return list_entry(p->mnt_share.next, talpa_mount_struct, mnt_share);
}
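A hedged sketch of how such helpers are typically used (not taken from the Talpa source): mnt_share and mnt_slave are circular lists, so visiting every peer of a mount means stepping with next_peer() until the walk arrives back at its starting point.

static void for_each_peer(talpa_mount_struct *start,
                          void (*visit)(talpa_mount_struct *mnt))
{
        talpa_mount_struct *m = start;

        do {
                visit(m);
                m = next_peer(m);
        } while (m != start);
}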
Example #14
int iterateFilesystems(struct vfsmount* root, int (*callback) (struct vfsmount* mnt, unsigned long flags, bool fromMount))
{
    talpa_mount_struct *mnt, *nextmnt, *prevmnt;
    struct list_head *nexthead = NULL;
    int ret;
    unsigned m_seq = 1;

    mnt = real_mount(root);
    talpa_mntget(mnt); /* Take extra reference count for the loop */
    do
    {
        struct vfsmount* vfsmnt = vfs_mount(mnt);
        dbg("VFSMNT: 0x%p (at 0x%p), sb: 0x%p, dev: %s, flags: 0x%lx, fs: %s", mnt, mnt->mnt_parent,
                vfsmnt->mnt_sb, mnt->mnt_devname, vfsmnt->mnt_sb->s_flags, vfsmnt->mnt_sb->s_type->name);

        ret = callback(vfsmnt, vfsmnt->mnt_sb->s_flags, false);
        if (ret)
        {
            break;
        }

        talpa_vfsmount_lock(&m_seq); /* locks dcache_lock on 2.4 */

        /* Go down the tree for a child if there is one */
        if ( !list_empty(&mnt->mnt_mounts) )
        {
            nextmnt = list_entry(mnt->mnt_mounts.next, talpa_mount_struct, mnt_child);
        }
        else
        {
            nextmnt = mnt;
            /* If no children, go up until we found some. Abort on root. */
            while ( nextmnt != nextmnt->mnt_parent )
            {
                nexthead = nextmnt->mnt_child.next;
                /* Take next child if available */
                if ( nexthead != &nextmnt->mnt_parent->mnt_mounts )
                {
                    break;
                }
                /* Otherwise go up the tree */
                nextmnt = nextmnt->mnt_parent;
            }

            /* Abort if we are at the root */
            if ( nextmnt == nextmnt->mnt_parent )
            {
                talpa_vfsmount_unlock(&m_seq); /* unlocks dcache_lock on 2.4 */
                talpa_mntput(mnt);
                break;
            }

            /* Take next mount from the list */
            nextmnt = list_entry(nexthead, talpa_mount_struct, mnt_child);
        }

        talpa_mntget(nextmnt);
        prevmnt = mnt;
        mnt = nextmnt;
        talpa_vfsmount_unlock(&m_seq); /* unlocks dcache_lock on 2.4 */
        talpa_mntput(prevmnt);
    } while (mnt);

    /* Don't mntput root as we didn't take a reference for ourselves */

    return ret;
}
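The child/sibling/parent stepping above is the standard iterative pre-order walk over an intrusive tree. A minimal generic sketch of just that stepping rule (hypothetical node type; struct list_head, list_empty() and list_entry() as in the kernel headers; no locking or reference counting):

struct node {
    struct node *parent;
    struct list_head children;   /* child nodes, linked through their .sibling */
    struct list_head sibling;
};

static struct node *next_preorder(struct node *n, struct node *root)
{
    /* Go down to the first child if there is one */
    if (!list_empty(&n->children))
        return list_entry(n->children.next, struct node, sibling);

    /* Otherwise go up until some ancestor still has a next sibling */
    while (n != root)
    {
        if (n->sibling.next != &n->parent->children)
            return list_entry(n->sibling.next, struct node, sibling);
        n = n->parent;
    }

    return NULL;   /* back at the root: traversal finished */
}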
Example #15
ssize_t session_readlink (sid_t sid, lid_t lid, char *buf, size_t bufsize)
{
  struct list_elem *e;
  session_t * session = NULL;
  link_t * link = NULL;
  ssize_t ret_len;

  bool cb_done = 0;
  void *(*cb)(void *);
  cb_arg_t *arg;

 start:
  session = NULL;
  link = NULL;

  pthread_mutex_lock (&session_mutex);

  for (e = list_begin (&session_list); e != list_end (&session_list);
       e = list_next (e))
    {
      session_t *s = list_entry (e, session_t, elem);
      if (s->sid == sid)
	  session = s;
    }

  if (session == NULL) {
    pthread_mutex_unlock (&session_mutex);
    return -1;
  }
  
  for (e = list_begin (&session->link_list); e != list_end (&session->link_list);
       e = list_next (e))
    {
      link_t *l = list_entry (e, link_t, elem);
      if (l->lid == lid)
	  link = l;
    }

  if (link == NULL) {
    pthread_mutex_unlock (&session_mutex);
    return -1;
  }

  if (!cb_done) 
    {
      cb = session->readlink_cb;
      arg = session->arg;
      arg->lid = lid;
      arg->sid = sid;

      pthread_mutex_unlock (&session_mutex);
      
      if (cb != NULL) {
	pthread_t id;
	pthread_create (&id, NULL, cb, (void *) arg);
	pthread_detach (id);
      }
      
      cb_done = true;
      
      goto start;
    }
    
  pthread_mutex_lock (&link->mutex);
  pthread_mutex_unlock (&session_mutex);

  if (link->path == NULL) {
    link->waiter_cnt ++;
    pthread_cond_wait (&link->cond_set, &link->mutex);
    link->waiter_cnt --;
  }
  
  if (link->path == NULL) {
    pthread_mutex_unlock (&link->mutex);
    return -1;
  }

  if (strlen (link->path) >= bufsize-1) {
    pthread_mutex_unlock (&link->mutex);
    return -1;
  }

  ret_len = strlen (link->path);
  memcpy (buf, link->path, ret_len + 1);	/* copy only the string and its terminator */
  
  pthread_mutex_unlock (&link->mutex);
  
  return ret_len;  
}
Example #16
static int muticast_connect_manger_uphold_muti_list( bool muti_flags )
{
	int ret = -1;
	T_Ptrconference_recieve_model ptr_recv_model = NULL;
	uint32_t query_timeout = (uint32_t)gmuticast_manager_pro.mm_sys_flags.query_timeout*1000;	

	ptr_recv_model = list_entry( gmuticast_manager_pro.ptr_curcfc_recv_model, tconference_recieve_model, list );
	if( ptr_recv_model != NULL )
	{	
		if( ptr_recv_model->query_stop )
		{
			if( muti_flags )
			{/* start query */
				ptr_recv_model->query_stop = false;
				host_timer_start( query_timeout, &ptr_recv_model->muticast_query_timer );
			}
		}
		else if( !host_timer_timeout( &ptr_recv_model->muticast_query_timer ) )
		{
			if (host_timer_is_stop(&ptr_recv_model->muticast_query_timer))
			{
				if (ptr_recv_model->solid_pnode != NULL &&
				    ptr_recv_model->solid_pnode->solid.connect_flag == CONNECT)	// reconnect?
				{
					host_timer_start( query_timeout, &ptr_recv_model->muticast_query_timer );
				}
			}

			ret = -1;
		}
		else/* not end query update and timeout?*/
		{
			T_pInChannel_universe ptr_muti_Inchn = NULL;
			T_pccuTModel ptr_muticastor = NULL;
			T_pOutChannel pOutChannel = NULL;
			struct list_head *p_recv_muti_model = ptr_recv_model->p_ccu_muticast_channel;
			struct list_head *p_ccut_mo = gmuticast_manager_pro.ptr_muticastor;
			struct list_head *p_ccut_out = gmuticast_manager_pro.ptr_muticastor_output;

			if ((p_recv_muti_model != NULL) && (p_ccut_mo != NULL)
                                && (p_ccut_out != NULL))
			{
				muticast_connect_manager_inout_channel_entry_get( p_recv_muti_model,
													p_ccut_mo,
													p_ccut_out, 
													&ptr_muti_Inchn, 
													&ptr_muticastor, 
													&pOutChannel );
				assert( ptr_muti_Inchn  && ptr_muticastor && pOutChannel );
				if ((ptr_muti_Inchn != NULL) && (ptr_muticastor != NULL)
                                        && (pOutChannel != NULL))
				{
					if( muti_flags )
					{
						ret = muticast_connect_manger_pro_terminal_by_selfstate( ptr_muti_Inchn, 
							ptr_muticastor->tarker_id,
							ptr_recv_model->listener_id,
							gmuticast_manager_pro.muticast_exist, 
							pOutChannel,
							gmuticast_manager_pro.mm_sys_flags.offline_connect,
							gmuticast_manager_pro.mm_sys_flags.reconnect_self,
							gmuticast_manager_pro.mm_sys_flags.failed_connect_count,
							ptr_recv_model );

						host_timer_update( query_timeout, &ptr_recv_model->muticast_query_timer );
						ret = 0;
					}
					else
					{
						ret = muti_cnnt_mngr_unmutic_pro_tmnl_by_selfstate( ptr_muti_Inchn, 
							ptr_muticastor->tarker_id,
							ptr_recv_model->listener_id,
							gmuticast_manager_pro.muticast_exist, 
							pOutChannel,
							gmuticast_manager_pro.mm_sys_flags.offline_connect,
							gmuticast_manager_pro.mm_sys_flags.reconnect_self,
							gmuticast_manager_pro.mm_sys_flags.failed_connect_count,
							ptr_recv_model );
					}
				}
				else 
				{
					ret = -1;
				}
			}
			else 
				ret = -1;
		}
	}

	return ret;
}
Example #17
void request_done(ifxpcd_ep_t *_ifxep, ifxpcd_request_t *_ifxreq, int _status)
{
	IFX_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ifxep,_ifxreq);

	if(!_ifxep)
	{
		IFX_ERROR("%s() %d invalid _ifxep\n",__func__,__LINE__);
		return;
	}
	if(!_ifxreq)
	{
		IFX_ERROR("%s() %d invalid _ifxreq\n",__func__,__LINE__);
		return;
	}

	if(_ifxep->num==0)
	{
		request_done_ep0(_ifxreq, _status);
		return;
	}

	_ifxreq->sysreq.status = _status;

	if(_ifxep->type==IFXUSB_EP_TYPE_INTR)
	{
		list_del_init(&_ifxreq->trq);

		if(_ifxreq->sysreq.complete)
		{
			#ifdef __DO_PCD_UNLOCK__
				SPIN_UNLOCK(&ifxusb_pcd.lock);
			#endif

			_ifxreq->sysreq.complete(&_ifxep->sysep, &_ifxreq->sysreq);

			#ifdef __DO_PCD_UNLOCK__
				SPIN_LOCK(&ifxusb_pcd.lock);
			#endif
		}

	}
	else if(_ifxep->is_in) // Tx
	{
		#if defined(__GADGET_TASKLET_TX__)
			list_del_init(&_ifxreq->trq);
			list_add_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);

			if(!_ifxreq->sysreq.no_interrupt && !_ifxep->cmpt_tasklet_in_process)
			{
				#ifdef __GADGET_TASKLET_HIGH__
					tasklet_hi_schedule(&_ifxep->cmpt_tasklet);
				#else
					tasklet_schedule(&_ifxep->cmpt_tasklet);
				#endif
			}
		#else
			list_del_init(&_ifxreq->trq);

			if(!_ifxreq->sysreq.no_interrupt)
			{
				while (!list_empty(&_ifxep->queue_cmpt))
				{
					ifxpcd_request_t   *req;
					req = list_entry(_ifxep->queue_cmpt.next, ifxpcd_request_t,trq);
					list_del_init(&req->trq);

					if(req->sysreq.complete)
					{
						#ifdef __DO_PCD_UNLOCK__
							SPIN_UNLOCK(&ifxusb_pcd.lock);
						#endif
						req->sysreq.complete(&_ifxep->sysep, &req->sysreq);
						#ifdef __DO_PCD_UNLOCK__
							SPIN_LOCK(&ifxusb_pcd.lock);
						#endif
					}
					else
					{
						#ifdef __req_num_dbg__
							IFX_ERROR("%s() no complete EP%d Req%d\n",__func__,_ifxep->num, req->reqid);
						#else
							IFX_ERROR("%s() no complete EP%d Req %p\n",__func__,_ifxep->num, req);
						#endif
					}
				}

				if(_ifxreq->sysreq.complete)
				{
					#ifdef __DO_PCD_UNLOCK__
						SPIN_UNLOCK(&ifxusb_pcd.lock);
					#endif
					_ifxreq->sysreq.complete(&_ifxep->sysep, &_ifxreq->sysreq);
					#ifdef __DO_PCD_UNLOCK__
						SPIN_LOCK(&ifxusb_pcd.lock);
					#endif
				}
				else
				{
					#ifdef __req_num_dbg__
						IFX_ERROR("%s() no complete EP%d Req%d\n",__func__,_ifxep->num, _ifxreq->reqid);
					#else
						IFX_ERROR("%s() no complete EP%d Req %p\n",__func__,_ifxep->num, _ifxreq);
					#endif
				}

			}
			else
				list_add_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
		#endif
	}
	else  // Rx
	{
		#if defined(__GADGET_TASKLET_RX__)
			if(list_empty(&_ifxep->queue)) // Rx Empty, Reuse
			{
				_ifxreq->sysreq.actual=0;
				_ifxreq->sysreq.status=0;
			}
			else if(!_ifxreq->sysreq.no_interrupt && !_ifxep->cmpt_tasklet_in_process)
			{
				_ifxep->cmpt_tasklet_in_process=1;
				list_move_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
				#ifdef __GADGET_TASKLET_HIGH__
					tasklet_hi_schedule(&_ifxep->cmpt_tasklet);
				#else
					tasklet_schedule(&_ifxep->cmpt_tasklet);
				#endif
			}
			else
				list_move_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
		#else
			if(!_ifxreq->sysreq.no_interrupt)
			{
				ifxpcd_request_t   *req;	/* matches the uses of 'req' below */

				while (!list_empty(&_ifxep->queue_cmpt))
				{
					req = list_entry(_ifxep->queue_cmpt.next, ifxpcd_request_t,trq);
					list_del_init(&req->trq);

					if(req->sysreq.complete)
					{
						#ifdef __DO_PCD_UNLOCK__
							SPIN_UNLOCK(&ifxusb_pcd.lock);
						#endif
						req->sysreq.complete(&_ifxep->sysep, &req->sysreq);
						#ifdef __DO_PCD_UNLOCK__
							SPIN_LOCK(&ifxusb_pcd.lock);
						#endif
					}
					else
					{
					#ifdef __req_num_dbg__
						IFX_ERROR("%s() no complete EP%d Req%d\n",__func__,_ifxep->num, req->reqid);
					#else
						IFX_ERROR("%s() no complete EP%d Req %p\n",__func__,_ifxep->num, req);
					#endif
					}
				}

				if(list_empty(&_ifxep->queue)) // Rx Empty, Reuse
				{
					_ifxreq->sysreq.actual=0;
					_ifxreq->sysreq.status=0;
					list_add_tail(&_ifxreq->trq, &_ifxep->queue);
				}
				else
				{
					if(_ifxreq->sysreq.complete)
					{
						#ifdef __DO_PCD_UNLOCK__
							SPIN_UNLOCK(&ifxusb_pcd.lock);
						#endif
						_ifxreq->sysreq.complete(&_ifxep->sysep, &_ifxreq->sysreq);
						#ifdef __DO_PCD_UNLOCK__
							SPIN_LOCK(&ifxusb_pcd.lock);
						#endif
					}
					else
					{
						#ifdef __req_num_dbg__
							IFX_ERROR("%s() no complete EP%d Req%d\n",__func__,_ifxep->num, req->reqid);
						#else
							IFX_ERROR("%s() no complete EP%d Req %p\n",__func__,_ifxep->num, req);
						#endif
					}
				}
			}
Example #18
/*
************************************************************************************************************************
*                                    Update system tick time
*
* Description: This function is called to update system tick time.
*
* Arguments  : None
*
* Returns    : None
*
* Note(s)    : This function is called internally; users should not call it directly.
*
************************************************************************************************************************
*/
void tick_list_update(void)
{
	
	LIST     *tick_head_ptr;
	RAW_TASK_OBJ            *p_tcb;
	LIST                            *iter;
	LIST                            *iter_temp;

	RAW_U16   spoke;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();
	
	raw_tick_count++;                                                     
	spoke    = (RAW_U16)(raw_tick_count &  (TICK_HEAD_ARRAY - 1) );
	tick_head_ptr  = &tick_head[spoke];
	iter    = tick_head_ptr->next;
	
	while (RAW_TRUE) {

		/*search all the time list if possible*/
		if (iter != tick_head_ptr) {

			iter_temp =  iter->next;
			p_tcb =  list_entry(iter, RAW_TASK_OBJ, tick_list);

			/* Since the time list is sorted by remaining time, just compare the absolute time */
			if (raw_tick_count == p_tcb->tick_match) {
			
				switch (p_tcb->task_state) {
					case RAW_DLY:
						
						p_tcb->block_status = RAW_B_OK; 
						p_tcb->task_state = RAW_RDY;  
						tick_list_remove(p_tcb);
						add_ready_list(&raw_ready_queue, p_tcb);
						break; 

					case RAW_PEND_TIMEOUT:
						
						tick_list_remove(p_tcb);
						/*remove task on the block list because task is timeout*/
						list_delete(&p_tcb->task_list); 
						add_ready_list(&raw_ready_queue, p_tcb);
						
						#if (CONFIG_RAW_MUTEX > 0)
						mutex_state_change(p_tcb);
						#endif
						
						p_tcb->block_status = RAW_B_TIMEOUT; 
						p_tcb->task_state = RAW_RDY; 
						p_tcb->block_obj = 0;
						break;
						
					case RAW_PEND_TIMEOUT_SUSPENDED:

						tick_list_remove(p_tcb);
						/*remove task on the block list because task is timeout*/
						list_delete(&p_tcb->task_list); 

						#if (CONFIG_RAW_MUTEX > 0)
						mutex_state_change(p_tcb);
						#endif
						
						p_tcb->block_status = RAW_B_TIMEOUT; 
						p_tcb->task_state = RAW_SUSPENDED;  
						p_tcb->block_obj = 0;
						break;
					 
					case RAW_DLY_SUSPENDED:
										      
						p_tcb->task_state  =  RAW_SUSPENDED;
						p_tcb->block_status = RAW_B_OK; 
						tick_list_remove(p_tcb);                   
						break;

					default:
						
						RAW_ASSERT(0);
											
				}

				iter  = iter_temp;
			}

		/* if the current task's timeout tick does not equal the current system tick, just break because the timer list is sorted */
			else {
			
				break;

			}

		}

		
		/* finished searching the time list */
		
		else {
			
			break;
		}
		
	}

	RAW_CRITICAL_EXIT();
}
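The spoke selection at the top of the function is plain modular arithmetic on a power-of-two timer wheel. A quick worked example, assuming an illustrative TICK_HEAD_ARRAY of 8 (the real value is a build-time configuration constant):

/* raw_tick_count = 13  ->  spoke = 13 & (8 - 1) = 5
 * raw_tick_count = 16  ->  spoke = 16 & (8 - 1) = 0
 * A spoke therefore only holds tasks whose wake-up tick is congruent to the
 * spoke index modulo TICK_HEAD_ARRAY, and each spoke's list is kept sorted,
 * which is why the walk can stop at the first entry whose tick_match is not
 * the current tick. */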
Example #19
// For position update rules, every rule starts with a time window, so we
// test the begin time; if the current time is past the begin time, we have
// found the active rule.
// Tips: 
//       inzone : 1 --> in zone detect
//       inzone : 0 --> out zone detect
//
static void Get_Active_Cond(struct list_head *pHead, const GpsInfo *pGPS, Zone_Cond_Info *pInfo, int inzone)
{
	struct list_head 		*plist;
	Zone_Condition 	        *pCondition;
	Time_D					*pTime;
	
	int						mach = 0;
	int 					TimeSet_sum;
	int 					i;
	TimeSlot_Struct			*pTimeSlot;

	char					Zone_Shape;

	// first, we find an active time period

	if(pInfo->Time_active == 1){				// we already have a condition, so we check it
		
		pCondition = pInfo->pcond;
		pTime = (Time_D *)&((pCondition->TimeSet).BE_Time[(pInfo->time_set_index)].Etime);

		if( Is_BeyondTime_D( pTime ) == 1 ){				// out of the time

			DEBUG("%s : >>>> Out of the Etime...\n", __func__);
			
            pInfo->active         = 0;
			pInfo->Time_active    = 0;
			pInfo->SendCount      = 0;
            pInfo->last_send_time = 0;
			
            pCondition   = NULL;
			pInfo->pcond = NULL;
		}

	} else {	// otherwise, we search for an active condition

        memset(pInfo, 0, sizeof(Zone_Cond_Info));

        OutZone_Cond_lock();
		list_for_each(plist, pHead){

			pCondition = list_entry(plist, Zone_Condition, list);

			TimeSet_sum = pCondition->TimeSet.TimeSet_Count;

			for(i=0; i<TimeSet_sum; i++){
			
				pTimeSlot = &(pCondition->TimeSet.BE_Time[i]);

				if( (Is_BeyondTime_D( &(pTimeSlot->Btime) ) == 1) && 
					(Is_BeyondTime_D( &(pTimeSlot->Etime) ) == 0) ) {
					
					mach = 1;
					break;
				}
			}

			if(mach == 1){
				break;
			}

		}
        OutZone_Cond_unlock();


		if(plist!=pHead){	// we get the rule

			DEBUG("%s : > we get the rule...\n", __func__);
			pInfo->Time_active   = 1;
			pInfo->time_set_index = i;
			pInfo->pcond          = pCondition;
		}
	}
}
Example #20
0
RAW_U16 queue_buffer_post(RAW_QUEUE_BUFFER *q_b, RAW_VOID *p_void, MSG_SIZE_TYPE msg_size, RAW_U8 opt_send_method)
{

	LIST *block_list_head;
	RAW_TASK_OBJ *task_ptr;

 	RAW_SR_ALLOC();
	
	RAW_CRITICAL_ENTER();

	if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &q_b->common_block_obj.block_list;
	
	if (!is_queue_buffer_free(q_b,  msg_size)) {

		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_BUFFER_MAX(raw_task_active, q_b, p_void, msg_size, opt_send_method); 
		
		return RAW_QUEUE_BUFFER_FULL;
	}


	/* The queue buffer is not full at this point; handle the case where no receive task is blocked. */
	if (is_list_empty(block_list_head)) {        

		if (opt_send_method == SEND_TO_END)  { 
			msg_to_end_buffer(q_b, p_void, msg_size);
		}

		else {
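			/* other opt_send_method values (presumably a send-to-front option) are not handled in this excerpt */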


		}
		
		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_BUFFER_POST(raw_task_active, q_b, p_void, msg_size, opt_send_method);
		
		return RAW_SUCCESS;
	}
	
	task_ptr = list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
	
	raw_memcpy(task_ptr->msg, p_void, msg_size);
	task_ptr->qb_msg_size = msg_size;
	
	raw_wake_object(task_ptr);
		
	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_BUFFER_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, msg_size, opt_send_method);

	raw_sched();    
	return RAW_SUCCESS;
	

}
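
/*
 * Minimal usage sketch (illustrative only; it assumes a RAW_QUEUE_BUFFER
 * named q that was created elsewhere -- the creation call is not part of
 * the excerpt above):
 */
static void post_example(RAW_QUEUE_BUFFER *q)
{
	char msg[] = "hello";

	/* append the message to the end of the buffer, or hand it straight to a waiting receiver */
	if (queue_buffer_post(q, msg, sizeof(msg), SEND_TO_END) != RAW_SUCCESS) {
		/* RAW_QUEUE_BUFFER_FULL or RAW_ERROR_OBJECT_TYPE */
	}
}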
Example #21
0
/****************************************************************************
 *
 * FUNCTION:	bm_osl_read_event
 *
 * DESCRIPTION: Handles reads to the 'event' file by blocking user-mode
 *              threads until data (an event) is generated.
 *
 ****************************************************************************/
static ssize_t
bm_osl_read_event(
	struct file		*file,
	char			*buf,
	size_t			count,
	loff_t			*ppos)
{
	BM_OSL_EVENT		*event = NULL;
	unsigned long		flags = 0;
	static char		str[BM_MAX_STRING_LENGTH];
	static int		chars_remaining = 0;
	static char 		*ptr;

	if (!chars_remaining) {
		DECLARE_WAITQUEUE(wait, current);

		if (list_empty(&bm_event_list)) {

			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;

			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&bm_event_wait_queue, &wait);
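			/*
			 * Re-check the event list after queuing ourselves so a
			 * wakeup that raced with the first list_empty() check is
			 * not lost.
			 */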

			if (list_empty(&bm_event_list)) {
				schedule();
			}

			remove_wait_queue(&bm_event_wait_queue, &wait);
			set_current_state(TASK_RUNNING);

			if (signal_pending(current)) {
				return -ERESTARTSYS;
			}
		}

		spin_lock_irqsave(&bm_osl_event_lock, flags);
		event = list_entry(bm_event_list.next, BM_OSL_EVENT, list);
		list_del(&event->list);
		spin_unlock_irqrestore(&bm_osl_event_lock, flags);

		chars_remaining = sprintf(str, "%s %s %08x %08x\n",
			event->device_type, event->device_instance,
			event->event_type, event->event_data);
		ptr = str;

		acpi_os_free(event->device_type);
		acpi_os_free(event->device_instance);
		acpi_os_free(event);
	}

	if (chars_remaining < count)
		count = chars_remaining;
	
	if (copy_to_user(buf, ptr, count))
		return -EFAULT;

	*ppos += count;
	chars_remaining -= count;
	ptr += count;

	return count;
}
Example #22
0
static t_elt *currentContext() {
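	/* first element of the global 'pile' list -- presumably the current (top) context */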
	return list_entry(pile.next, t_elt, head);
}
Example #23
0
static ssize_t dabusb_read (struct file *file, char *buf, size_t count, loff_t * ppos)
{
	pdabusb_t s = (pdabusb_t) file->private_data;
	unsigned long flags;
	unsigned ret = 0;
	int rem;
	int cnt;
	pbuff_t b;
	struct urb *purb = NULL;

	dbg("dabusb_read");

	if (*ppos)
		return -ESPIPE;

	if (s->remove_pending)
		return -EIO;


	if (!s->usbdev)
		return -EIO;

	while (count > 0) {
		dabusb_startrek (s);
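		/* dabusb_startrek() presumably keeps the receive URBs queued; its body is not part of this excerpt */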

		spin_lock_irqsave (&s->lock, flags);

		if (list_empty (&s->rec_buff_list)) {

			spin_unlock_irqrestore(&s->lock, flags);

			err("error: rec_buf_list is empty");
			goto err;
		}
		
		b = list_entry (s->rec_buff_list.next, buff_t, buff_list);
		purb = b->purb;

		spin_unlock_irqrestore(&s->lock, flags);

		if (purb->status == -EINPROGRESS) {
			if (file->f_flags & O_NONBLOCK) {	// non-blocking read: don't wait
				if (!ret)
					ret = -EAGAIN;
				goto err;
			}

			interruptible_sleep_on (&s->wait);

			if (signal_pending (current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				goto err;
			}

			spin_lock_irqsave (&s->lock, flags);

			if (list_empty (&s->rec_buff_list)) {
				spin_unlock_irqrestore(&s->lock, flags);
				err("error: still no buffer available.");
				goto err;
			}
			spin_unlock_irqrestore(&s->lock, flags);
			s->readptr = 0;
		}
		if (s->remove_pending) {
			ret = -EIO;
			goto err;
		}

		rem = purb->actual_length - s->readptr;		// set remaining bytes to copy

		if (count >= rem)
			cnt = rem;
		else
			cnt = count;

		dbg("copy_to_user:%p %p %d",buf, purb->transfer_buffer + s->readptr, cnt);

		if (copy_to_user (buf, purb->transfer_buffer + s->readptr, cnt)) {
			err("read: copy_to_user failed");
			if (!ret)
				ret = -EFAULT;
			goto err;
		}

		s->readptr += cnt;
		count -= cnt;
		buf += cnt;
		ret += cnt;

		if (s->readptr == purb->actual_length) {
			// finished, take next buffer
			if (dabusb_add_buf_tail (s, &s->free_buff_list, &s->rec_buff_list))
				err("read: dabusb_add_buf_tail failed");
			s->readptr = 0;
		}
	}
      err:			//up(&s->mutex);
	return ret;
}
Example #24
0
}
#else
/** async signal number */
static const int snd_async_signo = SIGIO;
#endif

static LIST_HEAD(snd_async_handlers);

static void snd_async_handler(int signo ATTRIBUTE_UNUSED, siginfo_t *siginfo, void *context ATTRIBUTE_UNUSED)
{
	int fd;
	struct list_head *i;
	//assert(siginfo->si_code == SI_SIGIO);
	fd = siginfo->si_fd;
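	/* dispatch to every registered handler whose fd matches the signalling fd */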
	list_for_each(i, &snd_async_handlers) {
		snd_async_handler_t *h = list_entry(i, snd_async_handler_t, glist);
		if (h->fd == fd && h->callback)
			h->callback(h);
	}
}

/**
 * \brief Registers an async handler.
 * \param handler The function puts the pointer to the new async handler
 *                object at the address specified by \p handler.
 * \param fd The file descriptor to be associated with the callback.
 * \param callback The async callback function.
 * \param private_data Private data for the async callback function.
 * \result Zero if successful, otherwise a negative error code.
 *
 * This function associates the callback function with the given file,
Example #25
0
//-- Queuing  --//
void *
CommonQueueDispatch(void *queueContext)
{
    HFS_STATUS  ret;
    PHFS_QUEUE_PROCESSOR pQProcessor;
    PHFS_QUEUE_ITEM  pDeQueuedItem;
    struct list_head *pDeQueuedItemlh;

    HFS_ENTRY(QueueDispatch);
    pQProcessor = (PHFS_QUEUE_PROCESSOR)queueContext;


    if (NULL == pQProcessor || NULL == pQProcessor->process) {
        HFS_LOG_ERROR("Queue thread cannot be started invalid context passed");
        ret = HFS_INTERNAL_ERROR;
        goto leave;
    }

    HFS_LOG_INFO("Starting dispatch thread for Q %p", pQProcessor);
    while (QUEUE_STATE_STOPPED!=pQProcessor->state)
        {
            hfsSemWait(&pQProcessor->semPendingItems);
            // Ok We should now have at least one item to process //

            // Lock the queue
            ret = hfsMutexLock(&pQProcessor->queueLock);
            if (!HFS_SUCCESS(ret)) {
                HFS_LOG_ERROR("Processing thread cannot acquire mutex for q");
                ret = HFS_STATUS_LOCKING_ERROR;
                goto leave;
            }

            // Remove the element
            if (list_empty(&pQProcessor->lstAnchorPendingCmds))
                {
                    HFS_LOG_ERROR("Processing Thread Smells Fish for %p or last nudge",
                                  pQProcessor);
                    ret = hfsMutexUnlock(&pQProcessor->queueLock);
                    if (!HFS_SUCCESS(ret)) {
                        HFS_LOG_ERROR("Processing thread cannot Release mutex for q");
                        ret = HFS_STATUS_LOCKING_ERROR;
                        goto leave;
                    }
                    continue;
                }else {
                pDeQueuedItemlh = NULL;
                pDeQueuedItemlh = pQProcessor->lstAnchorPendingCmds.next;
                list_del_init(pDeQueuedItemlh);
                pDeQueuedItem = list_entry(pDeQueuedItemlh, HFS_QUEUE_ITEM, listHead);
            }

            // Unlock the queue
            ret = hfsMutexUnlock(&pQProcessor->queueLock);
            if (!HFS_SUCCESS(ret)) {
                HFS_LOG_ERROR("Processing thread cannot acquire mutex for q");
                ret = HFS_STATUS_LOCKING_ERROR;
                goto leave;
            }

            // Process the element
            ret = pQProcessor->process(pDeQueuedItem, HFS_STATUS_SUCCESS);
            if (!HFS_SUCCESS(ret)) {
                HFS_LOG_ERROR("Processing has failed on queue item %p", pDeQueuedItem);
                HFS_LOG_ERROR("Some client will face music");
            }
        }

    ret = HFS_STATUS_SUCCESS;
 leave:
    HFS_LOG_INFO("Exiting dispatch thread for Q %p", pQProcessor);
    HFS_LEAVE(QueueDispatch);
    return ((void *) ret);
}
Example #26
0
/**
 * There is a race condition here: if the libevent connection cannot be set up,
 * or an EOF event arrives before HD_CMD_SS5_ACT has been sent, the client is
 * left with a zombie trans connection. The client is currently single-threaded,
 * so this kind of waste must be eliminated.
 *
 * The approach considered so far: when tearing down the trans connection, also
 * send an extra command to the client over its main control channel.
 */
static void thread_process(int fd, short which, void *arg) 
{
    P_THREAD_OBJ p_threadobj = (P_THREAD_OBJ)arg; 
    P_TRANS_ITEM p_trans = NULL;
    P_SLIST_HEAD p_list = NULL;
    P_C_ITEM p_c_item = NULL;
    struct bufferevent *new_bev = NULL;
    char buf[1];
    CTL_HEAD head;

    if (read(fd, buf, 1) != 1)
    {
        st_d_error("Can't read from libevent pipe\n");
        return;
    }
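    /* Each queued task is announced by one byte on the notification pipe;
       the byte selects how the new connection is to be wired up. */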

    switch (buf[0]) 
    {
        case 'D':   // DAEMON->USR
            p_list = slist_fetch(&p_threadobj->conn_queue);
            if (!p_list)
            {
                st_d_error("无法从任务队列中获取任务!");
                return;
            }

            p_c_item = list_entry(p_list, C_ITEM, list);
            p_trans = (P_TRANS_ITEM)p_c_item->arg.ptr; 

            new_bev = 
                bufferevent_socket_new(p_threadobj->base, p_c_item->socket, BEV_OPT_CLOSE_ON_FREE); 
            bufferevent_setcb(new_bev, thread_bufferread_cb, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_bev, EV_READ|EV_WRITE);

            p_trans->bev_d = new_bev;
            free(p_c_item);

            if (p_trans->bev_u == NULL || p_trans->usr_lport == 0 || p_trans->p_activ_item == NULL) 
            {
                SYS_ABORT("USR SIDE SHOULD BE OK ALREAY!!!");
            }

            st_d_print("WORKTHREAD-> DAEMON_USR(%d) OK!", p_trans->usr_lport); 
            st_d_print("DDDDD: 当前活动连接数:[[[ %d ]]],任务队列:[[ %d ]]", 
                       slist_count(&p_trans->p_activ_item->trans), slist_count(&p_threadobj->conn_queue)); 

            st_d_print("激活客户端Bufferevent使能!");
            memset(&head, 0, CTL_HEAD_LEN);
            head.direct = USR_DAEMON; 
            head.cmd = HD_CMD_CONN_ACT; 
            head.extra_param = p_trans->usr_lport; 
            head.mach_uuid = p_trans->p_activ_item->mach_uuid; 
            bufferevent_write(p_trans->p_activ_item->bev_daemon, &head, CTL_HEAD_LEN); 
            head.direct = DAEMON_USR; 
            bufferevent_write(p_trans->p_activ_item->bev_usr, &head, CTL_HEAD_LEN); 

            break;

        case 'U':   //USR->DAEMON
            p_list = slist_fetch(&p_threadobj->conn_queue);
            if (!p_list)
            {
                st_d_error("无法从任务队列中获取任务!");
                return;
            }

            p_c_item = list_entry(p_list, C_ITEM, list);
            p_trans = (P_TRANS_ITEM)p_c_item->arg.ptr; 

            new_bev = 
                bufferevent_socket_new(p_threadobj->base, p_c_item->socket, BEV_OPT_CLOSE_ON_FREE); 
            bufferevent_setcb(new_bev, thread_bufferread_cb, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_bev, EV_READ|EV_WRITE);

            p_trans->bev_u = new_bev;
            free(p_c_item);

            st_d_print("WORKTHREAD-> USR_DAEMON(%d) OK!", p_trans->usr_lport); 

            break;

        case 'S':   // DAEMON->USR
            p_list = slist_fetch(&p_threadobj->conn_queue);
            if (!p_list)
            {
                st_d_error("无法从任务队列中获取任务!");
                return;
            }
            p_c_item = list_entry(p_list, C_ITEM, list);
            p_trans = (P_TRANS_ITEM)p_c_item->arg.ptr; 

            assert(p_trans->is_enc);
            assert(p_trans->dat); 

            encrypt_ctx_init(&p_trans->ctx_enc, p_trans->usr_lport, p_trans->p_activ_item->enc_key, 1); 
            encrypt_ctx_init(&p_trans->ctx_dec, p_trans->usr_lport, p_trans->p_activ_item->enc_key, 0);

            int remote_socket = 0;
            char* buf = (char *)p_trans->dat;

            if (buf[3] == 0x01)
            {
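                /* SOCKS5 ATYP 0x01: a 4-byte IPv4 address followed by a 2-byte port */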
                struct sockaddr_in sin;
                memset(&sin, 0, sizeof(sin));

                sin.sin_family = AF_INET;
                memcpy(&sin.sin_addr.s_addr, &buf[4], 4);
                memcpy(&sin.sin_port, &buf[4+4], 2);
                free(p_trans->dat);

                st_d_print("REQUEST: %s:%d", inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));

                remote_socket = ss_connect_srv(&sin);
                if (remote_socket == -1)
                {
                    free(p_c_item);
                    st_d_error("CONNECT ERROR!");
                    return;
                }
            }
            else
            {
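                /* Non-IPv4 address type, treated as a SOCKS5 domain name (ATYP 0x03):
                   buf[4] is the name length, the name follows, then a 2-byte port;
                   resolve it asynchronously via evdns before connecting. */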
                char remote_addr[128];
                unsigned short remote_port = 0;
                memset(remote_addr, 0, sizeof(remote_addr));
                strncpy(remote_addr, &buf[4+1], buf[4]);
                memcpy(&remote_port, &buf[4+1+buf[4]], 2);
                free(p_trans->dat);

                P_DNS_STRUCT p_dns = (P_DNS_STRUCT)calloc(sizeof(DNS_STRUCT), 1);
                if (!p_dns)
                {
                    st_d_error("申请内存失败:%d", sizeof(DNS_STRUCT));
                    free(p_c_item);
                    return;
                }

                st_d_print("REQUEST: %s:%d", remote_addr, ntohs(remote_port));

                strncpy(p_dns->hostname, remote_addr, sizeof(p_dns->hostname));
                p_dns->port = remote_port;
                p_dns->p_c_item = p_c_item;
                p_dns->p_threadobj = p_threadobj;
                p_dns->p_trans = p_trans;

                struct evutil_addrinfo hints;
                struct evdns_getaddrinfo_request *req;
                memset(&hints, 0, sizeof(hints));
                hints.ai_family = AF_INET;
                hints.ai_flags = EVUTIL_AI_CANONNAME;
                /* Unless we specify a socktype, we'll get at least two entries for
                 * each address: one for TCP and one for UDP. That's not what we
                 * want. */
                hints.ai_socktype = SOCK_STREAM;
                hints.ai_protocol = IPPROTO_TCP;


                req = evdns_getaddrinfo(
                    srvopt.evdns_base, remote_addr, NULL /* no service name given */, 
                                  &hints, dns_query_cb, p_dns);
                if (req == NULL) {
                  printf("    [request for %s returned immediately]\n", remote_addr);
                  /* No need to free user_data or decrement n_pending_requests; that
                   * happened in the callback. */
                }

                return;
            }

            evutil_make_socket_nonblocking(p_c_item->socket);
            struct bufferevent *new_bev = 
                bufferevent_socket_new(p_threadobj->base, p_c_item->socket, BEV_OPT_CLOSE_ON_FREE); 
            assert(new_bev);
            bufferevent_setcb(new_bev, thread_bufferread_cb_enc, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_bev, EV_READ|EV_WRITE);

            evutil_make_socket_nonblocking(remote_socket);
            struct bufferevent *new_ext_bev = 
                bufferevent_socket_new(p_threadobj->base, remote_socket , BEV_OPT_CLOSE_ON_FREE); 
            assert(new_ext_bev);
            bufferevent_setcb(new_ext_bev, thread_bufferread_cb_enc, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_ext_bev, EV_READ|EV_WRITE);

            p_trans->bev_d = new_bev;
            p_trans->bev_u = new_ext_bev;
            free(p_c_item);

            st_d_print("DDDDD: 当前活动连接数:[[[ %d ]]], 任务队列:[[ %d ]]", 
                       slist_count(&p_trans->p_activ_item->trans), slist_count(&p_threadobj->conn_queue)); 

            st_d_print("SS5激活客户端Bufferevent使能!");
            memset(&head, 0, CTL_HEAD_LEN);
            head.direct = USR_DAEMON; 
            head.cmd = HD_CMD_SS5_ACT; 
            head.extra_param = p_trans->usr_lport; 
            head.mach_uuid = p_trans->p_activ_item->mach_uuid; 
            bufferevent_write(p_trans->p_activ_item->bev_daemon, &head, CTL_HEAD_LEN); 
            break;

        default:
            SYS_ABORT("WHAT DO I GET: %c", buf[0]);
            break;
    }

    return;
}
Example #27
0
static int print_request_stats (threadinfo_t *ti, char *page, unsigned int skip_count, unsigned int max_count)
{
	struct list_head *head, *curr;
	tux_req_t *req;
	unsigned int count = 0, size, line_off, len;
	char stat_line [LINE_SIZE];

	if (!max_count)
		BUG();

	head = &ti->all_requests;
	curr = head->next;

	while (curr != head) {
		req = list_entry(curr, tux_req_t, all);
		curr = curr->next;
		count++;
		if (count <= skip_count)
			continue;
		line_off = 0;
#define SP(x...) \
	line_off += sprintf(stat_line + line_off, x)
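	/* SP() appends formatted text to stat_line and advances line_off */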

		if (req->proto == &tux_proto_http)
			SP("0 ");
		else
			SP("1 ");
	
		SP("%p ", req);
		SP("%d ", req->atom_idx);
		if (req->atom_idx >= 1)
			SP("%p ", req->atoms[0]);
		else
			SP("........ ");
		if (req->atom_idx >= 2)
			SP("%p ", req->atoms[1]);
		else
			SP("........ ");
		if (!list_empty(&req->work))	SP("W");	else SP(".");
		if (!list_empty(&req->free))	SP("F");	else SP(".");
		if (!list_empty(&req->lru))	SP("L");	else SP(".");
		if (req->keep_alive)		SP("K");	else SP(".");
		if (req->idle_input)		SP("I");	else SP(".");
		if (timer_pending(&req->keepalive_timer))
						SP("T(%lu/%lu)",jiffies,req->keepalive_timer.expires);	else SP(".");
		if (req->wait_output_space)	SP("O");	else SP(".");
		if (timer_pending(&req->output_timer))
						SP("T");	else SP(".");
		SP(" %d ", req->error);
		SP(" %d ", req->status);

#define SP_HOST(ip,port) \
		SP("%d.%d.%d.%d:%d ",NIPQUAD(ip),port)

		if (req->sock) {
			if (req->sock->sk)
				SP("%d:", req->sock->sk->state);
			else
				SP("-2:");
		} else
			SP("-1:");
		SP_HOST(req->client_addr, req->client_port);

		SP("%Ld ", req->total_file_len);
		SP("%Ld ", req->in_file.f_pos);
		if (req->proto == &tux_proto_http) {
			SP("%d ", req->method);
			SP("%d ", req->version);
		}
		if (req->proto == &tux_proto_ftp) {
			SP("%d ", req->ftp_command);
			if (req->data_sock) {
				if (req->data_sock->sk)
					SP("%d:",req->data_sock->sk->state);
				else
					SP("-2:");
				if (req->data_sock->sk)
					SP_HOST(req->data_sock->sk->daddr,
						req->data_sock->sk->dport);
				else
					SP("-1:-1 ");
			} else
				SP("-1 ");
		}
		SP("%p/%p %p/%p ", req->sock, req->sock ? req->sock->sk : (void *)-1, req->data_sock, req->data_sock ? req->data_sock->sk : (void *)-1);

		SP("%d\n", req->parsed_len);
		len = req->headers_len;
		if (len > 500)
			len = 500;
		SP("\n%d\n", len);
		memcpy(stat_line + line_off, req->headers, len);
		line_off += len;
		len = req->objectname_len;
		if (len > 100)
			len = 100;
		SP("\n%d\n", len);
		memcpy(stat_line + line_off, req->objectname, len);
		line_off += len;
		SP("\n\n<END>");
		if (line_off >= LINE_SIZE)
			BUG();
		Dprintk("printing req %p, count %d, page %p: {%s}.\n", req, count, page, stat_line);
		size = sprintf(page, "%-*s\n", LINE_SIZE-1, stat_line);
		if (size != LINE_SIZE)
			BUG();
		page += LINE_SIZE;
		if (count-skip_count >= max_count)
			break;
	}

	Dprintk("count: %d.\n", count-skip_count);
	return count - skip_count;
}
Example #28
0
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(drm_device_t * dev)
{
	drm_magic_entry_t *pt, *next;
	drm_map_list_t *r_list;
	drm_vma_entry_t *vma, *vma_next;
	int i;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique=NULL;
		dev->unique_len=0;
	}

	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear AGP information */
	if (drm_core_has_AGP(dev) && dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	if (dev->maplist) {
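		/* tear down every remaining mapping; drm_rmmap_locked() unlinks the
		   entry from the list, otherwise this loop would never terminate */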
		while (!list_empty(&dev->maplist->head)) {
			struct list_head *list = dev->maplist->head.next;
			r_list = list_entry(list, drm_map_list_t, head);
			drm_rmmap_locked(dev, r_list->map);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {

			if (dev->queuelist[i]) {
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist = NULL;
	}
	dev->queue_count = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_dma_takedown(dev);

	if (dev->lock.hw_lock) {
		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
		dev->lock.filp = NULL;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	DRM_DEBUG("lastclose completed\n");
	return 0;
}
Example #29
0
/**
   @fn Int32 KPDP_GetRsp()
*/
Int32 KPDP_GetRsp(struct file *filp, UInt32 cmd, UInt32 arg)
{
    Int32 rv = 0;
    KPDP_Param_t *priv = filp->private_data;
    KPDP_Response_t rsp;
    struct list_head *entry;
    KPDP_ResultQueue_t *buf_handle = NULL;
    UInt32    flags;

    if (copy_from_user(&rsp, (KPDP_Response_t*)arg, sizeof(rsp)))
    {
        KPDP_DEBUG(DBG_INFO, "KPDP_GetRsp() error in copy_from_user() \n");
        rv = -EFAULT;
        return rv;
    }

    /* We claim a mutex because we don't want two
       users getting something from the queue at a time.
       Since we have to release the spinlock before we can
       copy the data to the user, it's possible another
       user will grab something from the queue, too.  Then
       the messages might get out of order if something
       fails and the message gets put back onto the
       queue.  This mutex prevents that problem. */
    mutex_lock(&priv->recv_mutex);

    /* Grab the message off the list. */
    spin_lock_irqsave(&(priv->recv_lock), flags);
    if (list_empty(&(gKpdpResultQueue.list)))
    {
        spin_unlock_irqrestore(&(priv->recv_lock), flags);
        rv = -EAGAIN;
        KPDP_DEBUG(DBG_ERROR, "ERROR: KPDP Result List is empty %p\n", &(gKpdpResultQueue.list));
        goto recv_err;
    }
    entry = gKpdpResultQueue.list.next;
    buf_handle = list_entry(entry, KPDP_ResultQueue_t, list);
    list_del(entry);
    spin_unlock_irqrestore(&(priv->recv_lock), flags);

    if ((NULL == buf_handle->result_info.data) || (0 == buf_handle->result_info.datalen))
    {
        if ((NULL == buf_handle->result_info.data) != (0 == buf_handle->result_info.datalen))
        {
            KPDP_DEBUG(DBG_ERROR, "ERROR: KPDP Result data is Wrong %p, len %d\n", buf_handle->result_info.data, buf_handle->result_info.datalen);
            //Must enter data abort here
            goto recv_putback_on_err;
        }
    }

    rsp.result = buf_handle->result_info.result;
    rsp.CmdID = buf_handle->result_info.CmdID;
    rsp.datalen = buf_handle->result_info.datalen;

    if (0 != buf_handle->result_info.datalen)
    {
        if (copy_to_user(rsp.data, buf_handle->result_info.data, buf_handle->result_info.datalen))
        {
            rv = -EFAULT;
            KPDP_DEBUG(DBG_ERROR, "ERROR: KPDP copy response dara to user Fail\n");
            goto recv_putback_on_err;
        }
    }


    if (copy_to_user((KPDP_Response_t*)arg, &rsp, sizeof(KPDP_Response_t)))
    {
        rv = -EFAULT;
        KPDP_DEBUG(DBG_ERROR, "ERROR: KPDP copy response infor to user Fail\n");
        goto recv_putback_on_err;
    }
    if (0 != buf_handle->result_info.datalen && buf_handle->result_info.data )
    {
        kfree( buf_handle->result_info.data );
    }
    kfree(buf_handle);
    buf_handle = NULL;

    if (false == list_empty(&(gKpdpResultQueue.list))) //not empty
    {
        KPDP_DEBUG(DBG_INFO, "rsp continue read list:%p, next:%p\n", &(gKpdpResultQueue.list), gKpdpResultQueue.list.next);
        rv = RESULT_NOT_EMPTY;
    }
    mutex_unlock(&priv->recv_mutex);
    return rv;

recv_putback_on_err:
    /* If we got an error, put the message back onto
       the head of the queue. */
    //KPDP_DEBUG(DBG_INFO, "recv_putback_on_err handle s_addr:%p, d_addr:%p\n", entry, &(gKpdpResultQueue.list));
    spin_lock_irqsave(&(priv->recv_lock), flags);
    list_add(entry, &(gKpdpResultQueue.list));
    spin_unlock_irqrestore(&(priv->recv_lock), flags);
    mutex_unlock(&priv->recv_mutex);
    return rv;
 
recv_err:
    mutex_unlock(&priv->recv_mutex);
    return rv;
}
Example #30
0
RAW_U16 semaphore_put(RAW_SEMAPHORE *semaphore_ptr, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;
	
	RAW_SR_ALLOC();
	
	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &semaphore_ptr->common_block_obj.block_list;
	
	/* if no task is blocked on this semaphore, just bump the count (or report overflow) */
	if (is_list_empty(block_list_head)) {        
	    
		if (semaphore_ptr->count == RAW_SEMAPHORE_COUNT) {

			RAW_CRITICAL_EXIT();
			TRACE_SEMAPHORE_OVERFLOW(raw_task_active, semaphore_ptr);
			return RAW_SEMAPHORE_OVERFLOW;

		}
		/*increase resource*/
		semaphore_ptr->count++;                                      
	    
		RAW_CRITICAL_EXIT();
		
		/* if the semaphore has a registered notify function, call it */
		if (semaphore_ptr->semphore_send_notify) {

			semaphore_ptr->semphore_send_notify(semaphore_ptr);	
		}

		TRACE_SEMAPHORE_COUNT_INCREASE(raw_task_active, semaphore_ptr);
		return RAW_SUCCESS;
	}

	/* wake all tasks blocked on this semaphore */
	if (opt_wake_all) {

		while (!is_list_empty(block_list_head)) {
			
			raw_wake_object(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));

			
			TRACE_SEM_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
			
		}

	}

	else {
		
		/* wake up the highest-priority task blocked on the semaphore */
		raw_wake_object(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));

		TRACE_SEM_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
		
	}
	
	RAW_CRITICAL_EXIT();

	raw_sched();    

	return RAW_SUCCESS;



}
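
/*
 * Minimal usage sketch (illustrative only; it assumes a RAW_SEMAPHORE named
 * sem that was created elsewhere -- the creation call is not shown above):
 */
static void sem_put_example(RAW_SEMAPHORE *sem)
{
	/* passing 0 wakes only the highest-priority waiter;
	   a non-zero opt_wake_all releases every blocked task */
	if (semaphore_put(sem, 0) != RAW_SUCCESS) {
		/* RAW_SEMAPHORE_OVERFLOW or RAW_ERROR_OBJECT_TYPE */
	}
}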