Example No. 1
0
static int mcp_sa11x0_remove(struct platform_device *pdev)
{
	struct mcp *mcp = platform_get_drvdata(pdev);
	struct mcp_sa11x0 *priv = priv(mcp);
	struct resource *res_mem;

	platform_set_drvdata(pdev, NULL);
	mcp_host_unregister(mcp);

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mem)
		release_mem_region(res_mem->start, resource_size(res_mem));
	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_mem)
		release_mem_region(res_mem->start, resource_size(res_mem));
	iounmap(priv->mccr0_base);
	iounmap(priv->mccr1_base);
	return 0;
}
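For reference, the resource_size() calls above use the kernel helper that encapsulates the inclusive end - start + 1 span computation; its definition (from linux/ioport.h) is essentially:

/* Span of a resource, inclusive of both endpoints. */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}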
Example No. 2
0
bool ValidateECDSA(const byte *input, const size_t inputLength, const int secLevelIndex)
{
	string description = generateDetailedDescription("ECDSA", securityLevels[secLevelIndex], 1, inputLength);

	// from Sample Test Vectors for P1363
	GF2NT gf2n(191, 9, 0);
	byte a[]="\x28\x66\x53\x7B\x67\x67\x52\x63\x6A\x68\xF5\x65\x54\xE1\x26\x40\x27\x6B\x64\x9E\xF7\x52\x62\x67";
	byte b[]="\x2E\x45\xEF\x57\x1F\x00\x78\x6F\x67\xB0\x08\x1B\x94\x95\xA3\xD9\x54\x62\xF5\xDE\x0A\xA1\x85\xEC";
	EC2N ec(gf2n, PolynomialMod2(a,24), PolynomialMod2(b,24));

	EC2N::Point P;
	ec.DecodePoint(P, (byte *)"\x04\x36\xB3\xDA\xF8\xA2\x32\x06\xF9\xC4\xF2\x99\xD7\xB2\x1A\x9C\x36\x91\x37\xF2\xC8\x4A\xE1\xAA\x0D"
		"\x76\x5B\xE7\x34\x33\xB3\xF9\x5E\x33\x29\x32\xE7\x0E\xA2\x45\xCA\x24\x18\xEA\x0E\xF9\x80\x18\xFB", ec.EncodedPointSize());
	Integer n("40000000000000000000000004a20e90c39067c893bbb9a5H");
	Integer d("340562e1dda332f9d2aec168249b5696ee39d0ed4d03760fH");
	EC2N::Point Q(ec.Multiply(d, P));
	ECDSA<EC2N, SHA>::Signer priv(ec, P, n, d);
	ECDSA<EC2N, SHA>::Verifier pub(priv);

	bool pass = ProfileSignatureValidate(priv, pub, input, inputLength, description);
	assert(pass);

	return pass;
}
Example No. 3
0
void
O2Profile::
SetRSAKey(const byte *priv, size_t privlen, const byte *pub, size_t publen)
{
	bool valid = false;

	if (priv && privlen && pub && publen) {
		if (privlen == RSA_PRIVKEY_SIZE && publen == RSA_PUBKEY_SIZE) {
			PrivKey.assign(priv, privlen);
			PubKey.assign(pub, publen);
			valid = true;
		}
	}

	if (!valid) {
		GUID guid;
		CoCreateGuid(&guid);

		CryptoPP::RandomPool randpool;
		randpool.Put((byte*)&guid, sizeof(GUID));

		byte tmpPriv[RSA_PRIVKEY_SIZE];
		CryptoPP::RSAES_OAEP_SHA_Decryptor priv(randpool, RSA_KEYLENGTH);
		CryptoPP::ArraySink privArray(tmpPriv, RSA_PRIVKEY_SIZE);
		priv.DEREncode(privArray);
		privArray.MessageEnd();
		PrivKey.assign(tmpPriv, RSA_PRIVKEY_SIZE);

		byte tmpPub[RSA_PUBKEY_SIZE];
		CryptoPP::RSAES_OAEP_SHA_Encryptor pub(priv);
		CryptoPP::ArraySink pubArray(tmpPub, RSA_PUBKEY_SIZE);
		pub.DEREncode(pubArray);
		pubArray.MessageEnd();
		PubKey.assign(tmpPub, RSA_PUBKEY_SIZE);
	}
}
Example No. 4
0
/*===========================================================================*
 *			      do_endksig				     *
 *===========================================================================*/
int do_endksig(struct proc * caller, message * m_ptr)
{
/* Finish up after a kernel type signal, caused by a SYS_KILL message or a 
 * call to cause_sig by a task. This is called by a signal manager after
 * processing a signal it got with SYS_GETKSIG.
 */
  register struct proc *rp;
  int proc_nr;

  /* Get process pointer and verify that it had signals pending. If the 
   * process is already dead its flags will be reset. 
   */
  if(!isokendpt(m_ptr->m_sigcalls.endpt, &proc_nr))
	return EINVAL;

  rp = proc_addr(proc_nr);
  if (caller->p_endpoint != priv(rp)->s_sig_mgr) return(EPERM);
  if (!RTS_ISSET(rp, RTS_SIG_PENDING)) return(EINVAL);

  /* The signal manager has finished one kernel signal. Is the process ready? */
  if (!RTS_ISSET(rp, RTS_SIGNALED)) 		/* new signal arrived */
	RTS_UNSET(rp, RTS_SIG_PENDING);	/* remove pending flag */
  return(OK);
}
Example No. 5
0
static void profile_sample(struct proc * p, void * pc)
{
/* This executes on every tick of the CMOS timer. */

  /* Are we profiling, and profiling memory not full? */
  if (!sprofiling || sprof_info.mem_used == -1)
	  return;

  /* Check if enough memory available before writing sample. */
  if (sprof_info.mem_used + sizeof(sprof_info) +
		  2*sizeof(struct sprof_proc) +
		  2*sizeof(struct sprof_sample) > sprof_mem_size) {
	sprof_info.mem_used = -1;
	return;
  }

  /* Runnable system process? */
  if (p->p_endpoint == IDLE)
	sprof_info.idle_samples++;
  else if (p->p_endpoint == KERNEL ||
		(priv(p)->s_flags & SYS_PROC && proc_is_runnable(p))) {

	if (!(p->p_misc_flags & MF_SPROF_SEEN)) {
		p->p_misc_flags |= MF_SPROF_SEEN;
		sprof_save_proc(p);
	}

	sprof_save_sample(p, pc);
	sprof_info.system_samples++;
  } else {
	/* User process. */
	sprof_info.user_samples++;
  }
  
  sprof_info.total_samples++;
}
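The mem_used == -1 test above doubles as a latch: once the headroom check fails, the field is pinned to -1 and every later tick bails out with a single comparison. A minimal standalone sketch of that pattern (illustrative names, not MINIX API):

struct buf_state { int used; int cap; };

/* Reserve 'need' bytes; latch the buffer as full (-1) on failure. */
static int buf_reserve(struct buf_state *b, int need)
{
	if (b->used == -1 || b->used + need > b->cap) {
		b->used = -1;	/* latched: all later calls fail fast */
		return 0;
	}
	b->used += need;
	return 1;
}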
Example No. 6
0
/*
 * The boot processor's timer interrupt handler. In addition to the work
 * done on all CPUs, it keeps real time and notifies the clock task if
 * need be.
 */
PUBLIC int timer_int_handler(void)
{
	struct proc * p, * billp;

	/* FIXME watchdog for slave cpus! */
#ifdef CONFIG_WATCHDOG
	/*
	 * we need to know whether local timer ticks are happening or whether
	 * the kernel is locked up. We don't care about overflows as we only
	 * need to know that it's still ticking or not
	 */
	watchdog_local_timer_ticks++;
#endif

	if (cpu_is_bsp(cpuid))
		realtime++;

	/* Update user and system accounting times. Charge the current process
	 * for user time. If the current process is not billable, that is, if a
	 * non-user process is running, charge the billable process for system
	 * time as well.  Thus the unbillable process' user time is the billable
	 * user's system time.
	 */

	p = get_cpulocal_var(proc_ptr);
	billp = get_cpulocal_var(bill_ptr);

	p->p_user_time++;

	if (! (priv(p)->s_flags & BILLABLE)) {
		billp->p_sys_time++;
	}

	/* Decrement virtual timers, if applicable. We decrement both the
	 * virtual and the profile timer of the current process, and if the
	 * current process is not billable, the timer of the billed process as
	 * well.  If any of the timers expire, do_clocktick() will send out
	 * signals.
	 */
	if ((p->p_misc_flags & MF_VIRT_TIMER)){
		p->p_virt_left--;
	}
	if ((p->p_misc_flags & MF_PROF_TIMER)){
		p->p_prof_left--;
	}
	if (! (priv(p)->s_flags & BILLABLE) &&
			(billp->p_misc_flags & MF_PROF_TIMER)){
		billp->p_prof_left--;
	}

	/*
	 * Check if a process-virtual timer expired. Check current process, but
	 * also bill_ptr - one process's user time is another's system time, and
	 * the profile timer decreases for both!
	 */
	vtimer_check(p);

	if (p != billp)
		vtimer_check(billp);

	/* Update load average. */
	load_update();

	if (cpu_is_bsp(cpuid)) {
		/* if a timer expired, notify the clock task */
		if ((next_timeout <= realtime)) {
			tmrs_exptimers(&clock_timers, realtime, NULL);
			next_timeout = (clock_timers == NULL) ?
				TMR_NEVER : clock_timers->tmr_exp_time;
		}

		if (do_serial_debug)
			do_ser_debug();
	}

	return(1);					/* reenable interrupts */
}
Example No. 7
0
/*
 * Transmit a packet
 */
static int
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned int ptr, next_ptr;

	if (priv(dev)->broken) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		netif_start_queue(dev);
		return NETDEV_TX_OK;
	}

	length = (length + 1) & ~1;
	if (length != skb->len) {
		if (skb_padto(skb, length))
			goto out;
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;

	local_irq_save(flags);

	if (priv(dev)->tx_tail == next_ptr) {
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;	/* unable to queue */
	}

	ptr		 = 0x600 * priv(dev)->tx_head;
	priv(dev)->tx_head = next_ptr;
	next_ptr	*= 0x600;

#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)

	ether3_setbuffer(dev, buffer_write, next_ptr);
	ether3_writelong(dev, 0);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writelong(dev, 0);
	ether3_writebuffer(dev, skb->data, length);
	ether3_writeword(dev, htons(next_ptr));
	ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writeword(dev, htons((ptr + length + 4)));
	ether3_writeword(dev, TXHDR_FLAGS >> 16);
	ether3_ledon(dev);

	if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
		ether3_outw(ptr, REG_TRANSMITPTR);
		ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;
	local_irq_restore(flags);

	dev_kfree_skb(skb);

	if (priv(dev)->tx_tail == next_ptr)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
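A note on the index arithmetic above: tx_head and tx_tail index a 16-slot transmit ring, and the queue is treated as full when advancing the head would land on the tail (one slot is sacrificed so that full and empty remain distinguishable). As a standalone sketch (illustrative names, not driver API):

#define TX_RING_SLOTS 16

/* Full when the next head position would collide with the tail. */
static int tx_ring_full(unsigned int head, unsigned int tail)
{
	return ((head + 1) & (TX_RING_SLOTS - 1)) == tail;
}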
Example No. 8
0
static int __devinit ether3_init_2(struct net_device *dev)
{
	int i;

	priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8;
	priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC;
	priv(dev)->regs.command = 0;

	/*
	 * Set up our hardware address
	 */
	ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
	for (i = 0; i < 6; i++)
		ether3_outb(dev->dev_addr[i], REG_BUFWIN);

	if (dev->flags & IFF_PROMISC)
		priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
	else if (dev->flags & IFF_MULTICAST)
		priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
	else
		priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;

	/*
	 * There is a problem with the NQ8005 in that it occasionally loses the
	 * last two bytes.  To get round this problem, we receive the CRC as
	 * well.  That way, if we do lose the last two, then it doesn't matter.
	 */
	ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
	ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
	ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
	ether3_outw(0, REG_TRANSMITPTR);
	ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
	ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
	ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
	ether3_outw(priv(dev)->regs.command, REG_COMMAND);

	i = ether3_ramtest(dev, 0x5A);
	if(i)
		return i;
	i = ether3_ramtest(dev, 0x1E);
	if(i)
		return i;

	ether3_setbuffer(dev, buffer_write, 0);
	ether3_writelong(dev, 0);
	return 0;
}
/***
 * Starts up a SimpleGraspControlServer.
 * Launch this node with the following launch file (or include it in another launch file):
 *
 * `` \`rospack find grasp_execution\`/launch/simple_grasp_control_server.launch `` 
 *
 * Please also refer to this file (and the SimpleGraspControlServer header documentation)
 * for more details about the required parameters.
 *
 * \author Jennifer Buehler
 * \date March 2016
 */
int main(int argc, char **argv){
    ros::init(argc, argv, "simple_grasp_action");

    ros::NodeHandle priv("~");
    ros::NodeHandle pub;

    std::string JOINT_STATES_TOPIC = "/joint_states";
    priv.param<std::string>("joint_states_topic", JOINT_STATES_TOPIC, JOINT_STATES_TOPIC);

    std::string JOINT_CONTROL_TOPIC = "/joint_control";
    priv.param<std::string>("joint_control_topic", JOINT_CONTROL_TOPIC, JOINT_CONTROL_TOPIC);

    std::string GRASP_ACTION_TOPIC = "/grasp_control_action";
    priv.param<std::string>("grasp_control_action_topic", GRASP_ACTION_TOPIC, GRASP_ACTION_TOPIC);

    std::string ROBOT_NAMESPACE;
    if (!priv.hasParam("robot_namespace"))
    {
        ROS_ERROR_STREAM(ros::this_node::getName() << ": Must have at least 'robot_namespace' defined in private node namespace");
        return 1;
    }
    priv.param<std::string>("robot_namespace", ROBOT_NAMESPACE, ROBOT_NAMESPACE);

    double CHECK_FINGER_STATE_RATE = DEFAULT_CHECK_FINGER_STATE_RATE;
    priv.param<double>("check_movement_rate", CHECK_FINGER_STATE_RATE, CHECK_FINGER_STATE_RATE);

    double NO_MOVE_TOLERANCE = DEFAULT_NO_MOVE_TOLERANCE;
    priv.param<double>("no_move_tolerance", NO_MOVE_TOLERANCE, NO_MOVE_TOLERANCE);

    int NO_MOVE_STILL_CNT = DEFAULT_NO_MOVE_STILL_CNT;
    priv.param<int>("no_move_still_cnt", NO_MOVE_STILL_CNT, NO_MOVE_STILL_CNT);

    double GOAL_TOLERANCE = DEFAULT_GOAL_TOLERANCE;
    priv.param<double>("goal_tolerance", GOAL_TOLERANCE, GOAL_TOLERANCE);

    ROS_INFO("Launching arm components name manager");
    arm_components_name_manager::ArmComponentsNameManager jointsManager(ROBOT_NAMESPACE, false);
    float maxWait = 5;
    ROS_INFO("Waiting for joint info parameters to be loaded...");
    if (!jointsManager.waitToLoadParameters(1, maxWait, 1))
    {
        ROS_ERROR("Joint names (ArmComponentsNameManager) could not be launched due to missing ROS parameters.");
        return 1;
    }

    grasp_execution::SimpleGraspControlServer actionServer(
        pub,
        GRASP_ACTION_TOPIC,
        JOINT_STATES_TOPIC,
        JOINT_CONTROL_TOPIC,
        jointsManager,
        GOAL_TOLERANCE,
        NO_MOVE_TOLERANCE,
        NO_MOVE_STILL_CNT,
        CHECK_FINGER_STATE_RATE);

    actionServer.init();

    // ros::MultiThreadedSpinner spinner(4); // Use 4 threads
    // spinner.spin(); // spin() will not return until the node has been shutdown
    ros::spin();
    return 0;
}
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;


	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: some bits in CSR_INT are "OR"s of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(bus(trans), CSR_INT,
		trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
				inta, inta_mask);
	}
#endif

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* The interrupt has been saved in inta; now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		/* The driver only loads the ucode when setting the interface
		 * up, but allows loading it even if the radio is killed.
		 * Hence update the killswitch state here; the rfkill handler
		 * will take care of restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW,
					&trans->shrd->status);
			else
				clear_bit(STATUS_RF_KILL_HW,
					  &trans->shrd->status);
			iwl_set_hw_rfkill_state(priv(trans), hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < hw_params(trans).max_txq_num; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notifications), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
			CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(bus(trans), CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(bus(trans),
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an
		 * RX interrupt before the shared data reflects it; the
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
		iwl_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans->ucode_write_complete = 1;
		wake_up(&trans->shrd->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv(trans));
}
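The tasklet above follows a common interrupt-service pattern: each serviced condition ORs its bit into handled, and whatever remains in inta & ~handled is reported as unhandled. A condensed sketch of the pattern (BIT_A/BIT_B and the helper functions are hypothetical, for illustration only):

static void service_status(u32 status)
{
	u32 handled = 0;

	if (status & BIT_A) {
		handle_a();		/* hypothetical handler */
		handled |= BIT_A;
	}
	if (status & BIT_B) {
		handle_b();		/* hypothetical handler */
		handled |= BIT_B;
	}
	if (status & ~handled)
		report_unhandled(status & ~handled);	/* hypothetical */
}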
/**
 * iwl_print_event_log - Dump error event log to syslog
 *
 */
static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
			       u32 num_events, u32 mode,
			       int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;
	struct iwl_priv *priv = priv(trans);

	if (num_events == 0)
		return pos;

	base = priv->device_pointers.log_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		if (!base)
			base = priv->init_evtlog_ptr;
	} else {
		if (!base)
			base = priv->inst_evtlog_ptr;
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags);
	iwl_grab_nic_access(bus(trans));

	/* Set starting address; reads will auto-increment */
	iwl_write32(bus(trans), HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/* "time" is actually "data" for mode 0 (no timestamp).
	* place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
		time = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOG:0x%08x:%04u\n",
						time, ev);
			} else {
				trace_iwlwifi_dev_ucode_event(priv, 0,
					time, ev);
				IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOGT:%010u:0x%08x:%04u\n",
						 time, data, ev);
			} else {
				IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_dev_ucode_event(priv, time,
					data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_release_nic_access(bus(trans));
	spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags);
	return pos;
}
/*===========================================================================*
 *				do_irqctl				     *
 *===========================================================================*/
int do_irqctl(struct proc * caller, message * m_ptr)
{
  /* Dismember the request message. */
  int irq_vec;
  int irq_hook_id;
  int notify_id;
  int r = OK;
  int i;
  irq_hook_t *hook_ptr;
  struct priv *privp;

  /* Hook identifiers start at 1 and end at NR_IRQ_HOOKS. */
  irq_hook_id = m_ptr->m_lsys_krn_sys_irqctl.hook_id - 1;
  irq_vec = m_ptr->m_lsys_krn_sys_irqctl.vector;

  /* See what is requested and take needed actions. */
  switch(m_ptr->m_lsys_krn_sys_irqctl.request) {

  /* Enable or disable IRQs. This is straightforward. */
  case IRQ_ENABLE:           
  case IRQ_DISABLE: 
      if (irq_hook_id >= NR_IRQ_HOOKS || irq_hook_id < 0 ||
          irq_hooks[irq_hook_id].proc_nr_e == NONE) return(EINVAL);
      if (irq_hooks[irq_hook_id].proc_nr_e != caller->p_endpoint) return(EPERM);
      if (m_ptr->m_lsys_krn_sys_irqctl.request == IRQ_ENABLE) {
          enable_irq(&irq_hooks[irq_hook_id]);	
      }
      else 
          disable_irq(&irq_hooks[irq_hook_id]);	
      break;

  /* Control IRQ policies. Set a policy and needed details in the IRQ table.
   * This policy is used by a generic function to handle hardware interrupts. 
   */
  case IRQ_SETPOLICY:  

      /* Check if IRQ line is acceptable. */
      if (irq_vec < 0 || irq_vec >= NR_IRQ_VECTORS) return(EINVAL);

      privp= priv(caller);
      if (!privp)
      {
	printf("do_irqctl: no priv structure!\n");
	return EPERM;
      }
      if (privp->s_flags & CHECK_IRQ)
      {
	for (i= 0; i<privp->s_nr_irq; i++)
	{
		if (irq_vec == privp->s_irq_tab[i])
			break;
	}
	if (i >= privp->s_nr_irq)
	{
		printf(
		"do_irqctl: IRQ check failed for proc %d, IRQ %d\n",
			caller->p_endpoint, irq_vec);
		return EPERM;
	}
      }

      /* When setting a policy, the caller must provide an identifier that
       * is returned in the notification message if an interrupt occurs.
       */
      notify_id = m_ptr->m_lsys_krn_sys_irqctl.hook_id;
      if (notify_id > CHAR_BIT * sizeof(irq_id_t) - 1) return(EINVAL);

      /* Try to find an existing mapping to override. */
      hook_ptr = NULL;
      for (i=0; !hook_ptr && i<NR_IRQ_HOOKS; i++) {
          if (irq_hooks[i].proc_nr_e == caller->p_endpoint
              && irq_hooks[i].notify_id == notify_id) {
              irq_hook_id = i;
              hook_ptr = &irq_hooks[irq_hook_id];	/* existing hook */
              rm_irq_handler(&irq_hooks[irq_hook_id]);
          }
      }

      /* If there is nothing to override, find a free hook for this mapping. */
      for (i=0; !hook_ptr && i<NR_IRQ_HOOKS; i++) {
          if (irq_hooks[i].proc_nr_e == NONE) {
              irq_hook_id = i;
              hook_ptr = &irq_hooks[irq_hook_id];	/* free hook */
          }
      }
      if (hook_ptr == NULL) return(ENOSPC);

      /* Install the handler. */
      hook_ptr->proc_nr_e = caller->p_endpoint;	/* process to notify */
      hook_ptr->notify_id = notify_id;		/* identifier to pass */   	
      hook_ptr->policy = m_ptr->m_lsys_krn_sys_irqctl.policy;	/* policy for interrupts */
      put_irq_handler(hook_ptr, irq_vec, generic_handler);
      DEBUGBASIC(("IRQ %d handler registered by %s / %d\n",
			      irq_vec, caller->p_name, caller->p_endpoint));

      /* Return index of the IRQ hook in use. */
      m_ptr->m_krn_lsys_sys_irqctl.hook_id = irq_hook_id + 1;
      break;

  case IRQ_RMPOLICY:
      if (irq_hook_id < 0 || irq_hook_id >= NR_IRQ_HOOKS ||
               irq_hooks[irq_hook_id].proc_nr_e == NONE) {
           return(EINVAL);
      } else if (caller->p_endpoint != irq_hooks[irq_hook_id].proc_nr_e) {
           return(EPERM);
      }
      /* Remove the handler and return. */
      rm_irq_handler(&irq_hooks[irq_hook_id]);
      irq_hooks[irq_hook_id].proc_nr_e = NONE;
      break;

  default:
      r = EINVAL;				/* invalid IRQ REQUEST */
  }
  return(r);
}
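The IRQ_SETPOLICY branch above uses a find-or-allocate scan: first look for an existing hook owned by the caller with the same notify_id (and re-use it), then fall back to the first free slot. A condensed sketch of that scan (the helper name is illustrative, not kernel API):

static int find_or_alloc_hook(endpoint_t owner, int notify_id)
{
	int i;

	for (i = 0; i < NR_IRQ_HOOKS; i++)
		if (irq_hooks[i].proc_nr_e == owner &&
		    irq_hooks[i].notify_id == notify_id)
			return i;	/* existing mapping to override */
	for (i = 0; i < NR_IRQ_HOOKS; i++)
		if (irq_hooks[i].proc_nr_e == NONE)
			return i;	/* free hook */
	return -1;			/* table full; caller returns ENOSPC */
}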
Example No. 13
0
/*===========================================================================*
 *			kmain 	                             		*
 *===========================================================================*/
void kmain(kinfo_t *local_cbi)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register int i, j;

  /* save a global copy of the boot parameters */
  memcpy(&kinfo, local_cbi, sizeof(kinfo));
  memcpy(&kmess, kinfo.kmess, sizeof(kmess));

#ifdef __arm__
  /* We want to initialize serial before we do any output */
  omap3_ser_init();
#endif
  /* We can talk now */
  printf("MINIX booting\n");

  /* Kernel may use bits of main memory before VM is started */
  kernel_may_alloc = 1;

  assert(sizeof(kinfo.boot_procs) == sizeof(image));
  memcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs));

  cstart();

  BKL_LOCK();

  DEBUGEXTRA(("main()\n"));

  proc_init();

  if(NR_BOOT_MODULES != kinfo.mbi.mods_count)
  	panic("expecting %d boot processes/modules, found %d",
		NR_BOOT_MODULES, kinfo.mbi.mods_count);

  /* Set up proc table entries for processes in boot image. */
  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc;
	proc_nr_t proc_nr;
	int ipc_to_m, kcalls;
	sys_map_t map;

	ip = &image[i];				/* process' attributes */
	DEBUGEXTRA(("initializing %s... ", ip->proc_name));
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	make_zero64(rp->p_cpu_time_left);
	if(i < NR_TASKS)			/* name (tasks only) */
		strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));

	if(i >= NR_TASKS) {
		/* Remember this so it can be passed to VM */
		multiboot_module_t *mb_mod = &kinfo.module_list[i - NR_TASKS];
		ip->start_addr = mb_mod->mod_start;
		ip->len = mb_mod->mod_end - mb_mod->mod_start;
	}
	
	reset_proc_accounting(rp);

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run immediately.
	 * All the other system processes are inhibited from running by the
	 * RTS_NO_PRIV flag. They can only be scheduled once the root system
	 * process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr) ||
		proc_nr == VM_PROC_NR);
	if(schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
            (void) get_priv(rp, static_priv_id(proc_nr));

            /* Privileges for the VM process. */
	    if(proc_nr == VM_PROC_NR) {
                priv(rp)->s_flags = VM_F;
                priv(rp)->s_trap_mask = SRV_T;
		ipc_to_m = SRV_M;
		kcalls = SRV_KC;
                priv(rp)->s_sig_mgr = SELF;
                rp->p_priority = SRV_Q;
                rp->p_quantum_size_ms = SRV_QT;
	    }
	    else if(iskerneln(proc_nr)) {
                /* Privilege flags. */
                priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
                /* Allowed traps. */
                priv(rp)->s_trap_mask = (proc_nr == CLOCK 
                    || proc_nr == SYSTEM  ? CSK_T : TSK_T);
                ipc_to_m = TSK_M;                  /* allowed targets */
                kcalls = TSK_KC;                   /* allowed kernel calls */
            }
            /* Privileges for the root system process. */
            else {
	    	assert(isrootsysn(proc_nr));
                priv(rp)->s_flags= RSYS_F;        /* privilege flags */
                priv(rp)->s_trap_mask= SRV_T;     /* allowed traps */
                ipc_to_m = SRV_M;                 /* allowed targets */
                kcalls = SRV_KC;                  /* allowed kernel calls */
                priv(rp)->s_sig_mgr = SRV_SM;     /* signal manager */
                rp->p_priority = SRV_Q;	          /* priority queue */
                rp->p_quantum_size_ms = SRV_QT;   /* quantum size */
            }

            /* Fill in target mask. */
            memset(&map, 0, sizeof(map));

            if (ipc_to_m == ALL_M) {
                for(j = 0; j < NR_SYS_PROCS; j++)
                    set_sys_bit(map, j);
            }

            fill_sendto_mask(rp, &map);

            /* Fill in kernel call mask. */
            for(j = 0; j < SYS_CALL_MASK_SIZE; j++) {
                priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
            }
	}
	else {
	    /* Don't let the process run for now. */
            RTS_SET(rp, RTS_NO_PRIV | RTS_NO_QUANTUM);
	}

	/* Arch-specific state initialization. */
	arch_boot_proc(ip, rp);

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!get_cpulocal_var(proc_ptr))
		get_cpulocal_var(proc_ptr) = rp;

	/* Process isn't scheduled until VM has set up a pagetable for it. */
	if(rp->p_nr != VM_PROC_NR && rp->p_nr >= 0) {
		rp->p_rts_flags |= RTS_VMINHIBIT;
		rp->p_rts_flags |= RTS_BOOTINHIBIT;
	}

	rp->p_rts_flags |= RTS_PROC_STOP;
	rp->p_rts_flags &= ~RTS_SLOT_FREE;
	DEBUGEXTRA(("done\n"));
  }

  /* update boot procs info for VM */
  memcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs));

#define IPCNAME(n) { \
	assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
	assert(!ipc_call_names[n]);	\
	ipc_call_names[n] = #n; \
}

  arch_post_init();

  IPCNAME(SEND);
  IPCNAME(RECEIVE);
  IPCNAME(SENDREC);
  IPCNAME(NOTIFY);
  IPCNAME(SENDNB);
  IPCNAME(SENDA);

  /* System and processes initialization */
  memory_init();
  DEBUGEXTRA(("system_init()... "));
  system_init();
  DEBUGEXTRA(("done\n"));

  /* The bootstrap phase is over, so we can add the physical
   * memory used for it to the free list.
   */
  add_memmap(&kinfo, kinfo.bootstrap_start, kinfo.bootstrap_len);

#ifdef CONFIG_SMP
  if (config_no_apic) {
	  BOOT_VERBOSE(printf("APIC disabled, disables SMP, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else if (config_no_smp) {
	  BOOT_VERBOSE(printf("SMP disabled, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else {
	  smp_init();
	  /*
	   * if smp_init() returns, it means that it failed and we try to
	   * finish booting on a single CPU
	   */
	  bsp_finish_booting();
  }
#else
  /*
   * if configured for a single CPU, we are already on the kernel stack which
   * we are going to use every time we execute kernel code. We finish booting
   * and we never return here
   */
  bsp_finish_booting();
#endif

  NOT_REACHABLE;
}
WKCRect
HTMLAreaElement::getRect(RenderObject* object)
{
    return ((HTMLAreaElementPrivate&)priv()).getRect(object);
}
HTMLImageElement*
HTMLAreaElement::imageElement() const
{
    return ((HTMLAreaElementPrivate&)priv()).imageElement();
}
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_device_cmd *cmd;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;
	int index, cmd_index;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);

	/* calculate how many frames need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len, err;
		u16 sequence;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		if (WARN_ON(rxb == NULL)) {
			i = (i + 1) & RX_QUEUE_MASK;
			continue;
		}

		rxq->queue[i] = NULL;

		dma_unmap_page(bus(trans)->dev, rxb->page_dma,
			       PAGE_SIZE << hw_params(trans).rx_page_order,
			       DMA_FROM_DEVICE);
		pkt = rxb_addr(rxb);

		IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
			i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv(trans), pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->cmd[cmd_index];
		else
			cmd = NULL;

		/* warn if this is cmd response / notification and the uCode
		 * didn't set the SEQ_RX_FRAME for a frame that is
		 * uCode-originated
		 * If you saw this code after the second half of 2012, then
		 * please remove it
		 */
		WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
		     (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
		     "reclaim is false, SEQ_RX_FRAME unset: %s\n",
		     get_cmd_string(pkt->hdr.cmd));

		err = iwl_rx_dispatch(priv(trans), rxb, cmd);

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(trans, rxb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
				0, PAGE_SIZE <<
				    hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so the ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}
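The total_empty computation above is circular-distance arithmetic: the number of slots between the last position the driver wrote and the hardware's read pointer, with a negative difference wrapped by adding the queue size. As a standalone sketch:

/* Slots between write index w and read index r in an n-entry ring. */
static inline int ring_distance(int r, int w, int n)
{
	int d = r - w;
	return d < 0 ? d + n : d;
}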
static void iwl_dump_nic_error_log(struct iwl_trans *trans)
{
	u32 base;
	struct iwl_error_event_table table;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	base = priv->device_pointers.error_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		if (!base)
			base = priv->init_errlog_ptr;
	} else {
		if (!base)
			base = priv->inst_errlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(trans,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(priv->ucode_type == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			trans->shrd->status, table.valid);
	}

	trans_pcie->isr_stats.err_code = table.error_id;

	trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.line,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
	IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(trans, "0x%08X | data1\n", table.data1);
	IWL_ERR(trans, "0x%08X | data2\n", table.data2);
	IWL_ERR(trans, "0x%08X | line\n", table.line);
	IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
}
Example No. 18
0
/*===========================================================================*
 *				do_privctl				     *
 *===========================================================================*/
PUBLIC int do_privctl(struct proc * caller, message * m_ptr)
{
/* Handle sys_privctl(). Update a process' privileges. If the process is not
 * yet a system process, make sure it gets its own privilege structure.
 */
  struct proc *rp;
  proc_nr_t proc_nr;
  sys_id_t priv_id;
  int ipc_to_m, kcalls;
  int i, r;
  struct io_range io_range;
  struct mem_range mem_range;
  struct priv priv;
  int irq;

  /* Check whether caller is allowed to make this call. Privileged processes 
   * can only update the privileges of processes that are inhibited from 
   * running by the RTS_NO_PRIV flag. This flag is set when a privileged process
   * forks. 
   */
  if (! (priv(caller)->s_flags & SYS_PROC)) return(EPERM);
  if(m_ptr->CTL_ENDPT == SELF) proc_nr = _ENDPOINT_P(caller->p_endpoint);
  else if(!isokendpt(m_ptr->CTL_ENDPT, &proc_nr)) return(EINVAL);
  rp = proc_addr(proc_nr);

  switch(m_ptr->CTL_REQUEST)
  {
  case SYS_PRIV_ALLOW:
	/* Allow process to run. Make sure its privilege structure has already
	 * been set.
	 */
	if (!RTS_ISSET(rp, RTS_NO_PRIV) || priv(rp)->s_proc_nr == NONE) {
		return(EPERM);
	}
	RTS_UNSET(rp, RTS_NO_PRIV);
	return(OK);

  case SYS_PRIV_YIELD:
	/* Allow process to run and suspend the caller. */
	if (!RTS_ISSET(rp, RTS_NO_PRIV) || priv(rp)->s_proc_nr == NONE) {
		return(EPERM);
	}
	RTS_SET(caller, RTS_NO_PRIV);
	RTS_UNSET(rp, RTS_NO_PRIV);
	return(OK);

  case SYS_PRIV_DISALLOW:
	/* Disallow process from running. */
	if (RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);
	RTS_SET(rp, RTS_NO_PRIV);
	return(OK);

  case SYS_PRIV_SET_SYS:
	/* Set a privilege structure of a blocked system process. */
	if (! RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);

	/* Check whether a static or dynamic privilege id must be allocated. */
	priv_id = NULL_PRIV_ID;
	if (m_ptr->CTL_ARG_PTR)
	{
		/* Copy privilege structure from caller */
		if((r=data_copy(caller->p_endpoint, (vir_bytes) m_ptr->CTL_ARG_PTR,
			KERNEL, (vir_bytes) &priv, sizeof(priv))) != OK)
			return r;

		/* See if the caller wants to assign a static privilege id. */
		if(!(priv.s_flags & DYN_PRIV_ID)) {
			priv_id = priv.s_id;
		}
	}

	/* Make sure this process has its own privileges structure. This may
	 * fail, since there are only a limited number of system processes.
	 * Then copy privileges from the caller and restore some defaults.
	 */
	if ((i=get_priv(rp, priv_id)) != OK)
	{
		printf("do_privctl: unable to allocate priv_id %d: %d\n",
			priv_id, i);
		return(i);
	}
	priv_id = priv(rp)->s_id;		/* backup privilege id */
	*priv(rp) = *priv(caller);		/* copy from caller */
	priv(rp)->s_id = priv_id;		/* restore privilege id */
	priv(rp)->s_proc_nr = proc_nr;		/* reassociate process nr */

	for (i=0; i< NR_SYS_CHUNKS; i++)		/* remove pending: */
	      priv(rp)->s_notify_pending.chunk[i] = 0;	/* - notifications */
	priv(rp)->s_int_pending = 0;			/* - interrupts */
	(void) sigemptyset(&priv(rp)->s_sig_pending);	/* - signals */
	reset_timer(&priv(rp)->s_alarm_timer);		/* - alarm */
	priv(rp)->s_asyntab= -1;			/* - asynsends */
	priv(rp)->s_asynsize= 0;

	/* Set defaults for privilege bitmaps. */
	priv(rp)->s_flags= DSRV_F;           /* privilege flags */
	priv(rp)->s_trap_mask= DSRV_T;       /* allowed traps */
	ipc_to_m = DSRV_M;                   /* allowed targets */
	fill_sendto_mask(rp, ipc_to_m);
	kcalls = DSRV_KC;                    /* allowed kernel calls */
	for(i = 0; i < SYS_CALL_MASK_SIZE; i++) {
		priv(rp)->s_k_call_mask[i] = (kcalls == NO_C ? 0 : (~0));
	}

	/* Set the default signal managers. */
	priv(rp)->s_sig_mgr = DSRV_SM;
	priv(rp)->s_bak_sig_mgr = NONE;

	/* Set defaults for resources: no I/O resources, no memory resources,
	 * no IRQs, no grant table
	 */
	priv(rp)->s_nr_io_range= 0;
	priv(rp)->s_nr_mem_range= 0;
	priv(rp)->s_nr_irq= 0;
	priv(rp)->s_grant_table= 0;
	priv(rp)->s_grant_entries= 0;

	/* Override defaults if the caller has supplied a privilege structure. */
	if (m_ptr->CTL_ARG_PTR)
	{
		if((r = update_priv(rp, &priv)) != OK) {
			return r;
		} 
	}

	return(OK);

  case SYS_PRIV_SET_USER:
	/* Set a privilege structure of a blocked user process. */
	if (!RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);

	/* Link the process to the privilege structure of the root user
	 * process all the user processes share.
	 */
	priv(rp) = priv_addr(USER_PRIV_ID);

	return(OK);

  case SYS_PRIV_ADD_IO:
	if (RTS_ISSET(rp, RTS_NO_PRIV))
		return(EPERM);

	/* Only system processes get I/O resources? */
	if (!(priv(rp)->s_flags & SYS_PROC))
		return EPERM;

#if 0 /* XXX -- do we need a call for this? */
	if (strcmp(rp->p_name, "fxp") == 0 ||
		strcmp(rp->p_name, "rtl8139") == 0)
	{
		printf("setting ipc_stats_target to %d\n", rp->p_endpoint);
		ipc_stats_target= rp->p_endpoint;
	}
#endif

	/* Get the I/O range */
	data_copy(caller->p_endpoint, (vir_bytes) m_ptr->CTL_ARG_PTR,
		KERNEL, (vir_bytes) &io_range, sizeof(io_range));
	priv(rp)->s_flags |= CHECK_IO_PORT;	/* Check I/O accesses */
	i= priv(rp)->s_nr_io_range;
	if (i >= NR_IO_RANGE) {
		printf("do_privctl: %d already has %d i/o ranges.\n",
			rp->p_endpoint, i);
		return ENOMEM;
	}

	priv(rp)->s_io_tab[i].ior_base= io_range.ior_base;
	priv(rp)->s_io_tab[i].ior_limit= io_range.ior_limit;
	priv(rp)->s_nr_io_range++;

	return OK;

  case SYS_PRIV_ADD_MEM:
	if (RTS_ISSET(rp, RTS_NO_PRIV))
		return(EPERM);

	/* Only system processes get memory resources? */
	if (!(priv(rp)->s_flags & SYS_PROC))
		return EPERM;

	/* Get the memory range */
	if((r=data_copy(caller->p_endpoint, (vir_bytes) m_ptr->CTL_ARG_PTR,
		KERNEL, (vir_bytes) &mem_range, sizeof(mem_range))) != OK)
		return r;
	priv(rp)->s_flags |= CHECK_MEM;	/* Check memory mappings */
	i= priv(rp)->s_nr_mem_range;
	if (i >= NR_MEM_RANGE) {
		printf("do_privctl: %d already has %d mem ranges.\n",
			rp->p_endpoint, i);
		return ENOMEM;
	}

	priv(rp)->s_mem_tab[i].mr_base= mem_range.mr_base;
	priv(rp)->s_mem_tab[i].mr_limit= mem_range.mr_limit;
	priv(rp)->s_nr_mem_range++;

	return OK;

  case SYS_PRIV_ADD_IRQ:
	if (RTS_ISSET(rp, RTS_NO_PRIV))
		return(EPERM);

	/* Only system processes get IRQs? */
	if (!(priv(rp)->s_flags & SYS_PROC))
		return EPERM;

	data_copy(caller->p_endpoint, (vir_bytes) m_ptr->CTL_ARG_PTR,
		KERNEL, (vir_bytes) &irq, sizeof(irq));
	priv(rp)->s_flags |= CHECK_IRQ;	/* Check IRQs */

	i= priv(rp)->s_nr_irq;
	if (i >= NR_IRQ) {
		printf("do_privctl: %d already has %d irq's.\n",
			rp->p_endpoint, i);
		return ENOMEM;
	}
	priv(rp)->s_irq_tab[i]= irq;
	priv(rp)->s_nr_irq++;

	return OK;
  case SYS_PRIV_QUERY_MEM:
  {
	phys_bytes addr, limit;
  	struct priv *sp;
	/* See if a certain process is allowed to map in certain physical
	 * memory.
	 */
	addr = (phys_bytes) m_ptr->CTL_PHYSSTART;
	limit = addr + (phys_bytes) m_ptr->CTL_PHYSLEN - 1;
	if(limit < addr)
		return EPERM;
	if(!(sp = priv(rp)))
		return EPERM;
	if (!(sp->s_flags & SYS_PROC))
		return EPERM;
	for(i = 0; i < sp->s_nr_mem_range; i++) {
		if(addr >= sp->s_mem_tab[i].mr_base &&
		   limit <= sp->s_mem_tab[i].mr_limit)
			return OK;
	}
	return EPERM;
  }

  case SYS_PRIV_UPDATE_SYS:
	/* Update the privilege structure of a system process. */
	if(!m_ptr->CTL_ARG_PTR) return EINVAL;

	/* Copy privilege structure from caller */
	if((r=data_copy(caller->p_endpoint, (vir_bytes) m_ptr->CTL_ARG_PTR,
		KERNEL, (vir_bytes) &priv, sizeof(priv))) != OK)
		return r;

	/* Override settings in existing privilege structure. */
	if((r = update_priv(rp, &priv)) != OK) {
		return r;
	}

	return(OK);

  default:
	printf("do_privctl: bad request %d\n", m_ptr->CTL_REQUEST);
	return EINVAL;
  }
}
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			    char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	u32 logsize;
	int pos = 0;
	size_t bufsz = 0;
	struct iwl_priv *priv = priv(trans);

	base = priv->device_pointers.log_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		logsize = priv->init_evtlog_size;
		if (!base)
			base = priv->init_evtlog_ptr;
	} else {
		logsize = priv->inst_evtlog_size;
		if (!base)
			base = priv->inst_evtlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(trans,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base,
			(priv->ucode_type == IWL_UCODE_INIT)
					? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_read_targ_mem(bus(trans), base);
	mode = iwl_read_targ_mem(bus(trans), base + (1 * sizeof(u32)));
	num_wraps = iwl_read_targ_mem(bus(trans), base + (2 * sizeof(u32)));
	next_entry = iwl_read_targ_mem(bus(trans), base + (3 * sizeof(u32)));

	if (capacity > logsize) {
		IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
			"entries\n", capacity, logsize);
		capacity = logsize;
	}

	if (next_entry > logsize) {
		IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
			next_entry, logsize);
		next_entry = logsize;
	}

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl_print_event_log(trans, next_entry,
						capacity - next_entry, mode,
						pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl_print_event_log(trans, 0,
					  next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
						next_entry, size, mode,
						pos, buf, bufsz);
#else
	pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}
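The wrapped-log handling above is the standard way to dump a circular log oldest-first: once the write index has wrapped, the oldest record is the one the firmware would overwrite next (next_entry), so the dump runs from there to the end and then from the top. A minimal sketch of the traversal (illustrative, not driver API):

static void walk_log(u32 capacity, u32 next_entry, int wrapped,
		     void (*visit)(u32 idx))
{
	u32 i, count = wrapped ? capacity : next_entry;

	for (i = 0; i < count; i++)
		visit(wrapped ? (next_entry + i) % capacity : i);
}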
Example No. 20
0
/*===========================================================================*
 *				update_priv				     *
 *===========================================================================*/
PRIVATE int update_priv(struct proc *rp, struct priv *priv)
{
/* Update the privilege structure of a given process. */

  int ipc_to_m, i;

  /* Copy s_flags and signal managers. */
  priv(rp)->s_flags = priv->s_flags;
  priv(rp)->s_sig_mgr = priv->s_sig_mgr;
  priv(rp)->s_bak_sig_mgr = priv->s_bak_sig_mgr;

  /* Copy IRQs. */
  if(priv->s_flags & CHECK_IRQ) {
  	if (priv->s_nr_irq < 0 || priv->s_nr_irq > NR_IRQ)
  		return EINVAL;
  	priv(rp)->s_nr_irq= priv->s_nr_irq;
  	for (i= 0; i<priv->s_nr_irq; i++)
  	{
  		priv(rp)->s_irq_tab[i]= priv->s_irq_tab[i];
#if PRIV_DEBUG
  		printf("do_privctl: adding IRQ %d for %d\n",
  			priv(rp)->s_irq_tab[i], rp->p_endpoint);
#endif
  	}
  }

  /* Copy I/O ranges. */
  if(priv->s_flags & CHECK_IO_PORT) {
  	if (priv->s_nr_io_range < 0 || priv->s_nr_io_range > NR_IO_RANGE)
  		return EINVAL;
  	priv(rp)->s_nr_io_range= priv->s_nr_io_range;
  	for (i= 0; i<priv->s_nr_io_range; i++)
  	{
  		priv(rp)->s_io_tab[i]= priv->s_io_tab[i];
#if PRIV_DEBUG
  		printf("do_privctl: adding I/O range [%x..%x] for %d\n",
  			priv(rp)->s_io_tab[i].ior_base,
  			priv(rp)->s_io_tab[i].ior_limit,
  			rp->p_endpoint);
#endif
  	}
  }

  /* Copy memory ranges. */
  if(priv->s_flags & CHECK_MEM) {
  	if (priv->s_nr_mem_range < 0 || priv->s_nr_mem_range > NR_MEM_RANGE)
  		return EINVAL;
  	priv(rp)->s_nr_mem_range= priv->s_nr_mem_range;
  	for (i= 0; i<priv->s_nr_mem_range; i++)
  	{
  		priv(rp)->s_mem_tab[i]= priv->s_mem_tab[i];
#if PRIV_DEBUG
  		printf("do_privctl: adding mem range [%x..%x] for %d\n",
  			priv(rp)->s_mem_tab[i].mr_base,
  			priv(rp)->s_mem_tab[i].mr_limit,
  			rp->p_endpoint);
#endif
  	}
  }

  /* Copy trap mask. */
  priv(rp)->s_trap_mask = priv->s_trap_mask;

  /* Copy target mask. */
#if PRIV_DEBUG
  printf("do_privctl: Setting ipc target mask for %d:");
  for (i=0; i < NR_SYS_PROCS; i += BITCHUNK_BITS) {
  	printf(" %04x", get_sys_bits(priv->s_ipc_to, i));
  }
  printf("\n");
#endif

  memcpy(&ipc_to_m, &priv->s_ipc_to, sizeof(ipc_to_m));
  fill_sendto_mask(rp, ipc_to_m);

#if PRIV_DEBUG
  printf("do_privctl: Set ipc target mask for %d:");
  for (i=0; i < NR_SYS_PROCS; i += BITCHUNK_BITS) {
  	printf(" %04x", get_sys_bits(priv(rp)->s_ipc_to, i));
  }
  printf("\n");
#endif

  /* Copy kernel call mask. */
  memcpy(priv(rp)->s_k_call_mask, priv->s_k_call_mask,
  	sizeof(priv(rp)->s_k_call_mask));

  return OK;
}
Example No. 21
0
static int mcp_sa11x0_probe(struct platform_device *dev)
{
	struct mcp_plat_data *data = dev->dev.platform_data;
	struct resource *mem0, *mem1;
	struct mcp_sa11x0 *m;
	struct mcp *mcp;
	int ret;

	if (!data)
		return -ENODEV;

	mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
	mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!mem0 || !mem1)
		return -ENXIO;

	if (!request_mem_region(mem0->start, resource_size(mem0),
				DRIVER_NAME)) {
		ret = -EBUSY;
		goto err_mem0;
	}

	if (!request_mem_region(mem1->start, resource_size(mem1),
				DRIVER_NAME)) {
		ret = -EBUSY;
		goto err_mem1;
	}

	mcp = mcp_host_alloc(&dev->dev, sizeof(struct mcp_sa11x0));
	if (!mcp) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	mcp->owner		= THIS_MODULE;
	mcp->ops		= &mcp_sa11x0;
	mcp->sclk_rate		= data->sclk_rate;

	m = priv(mcp);
	m->mccr0 = data->mccr0 | 0x7f7f;
	m->mccr1 = data->mccr1;

	m->base0 = ioremap(mem0->start, resource_size(mem0));
	m->base1 = ioremap(mem1->start, resource_size(mem1));
	if (!m->base0 || !m->base1) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	platform_set_drvdata(dev, mcp);

	/*
	 * Initialise device.  Note that we initially
	 * set the sampling rate to minimum.
	 */
	writel_relaxed(-1, MCSR(m));
	writel_relaxed(m->mccr1, MCCR1(m));
	writel_relaxed(m->mccr0, MCCR0(m));

	/*
	 * Calculate the read/write timeout (us) from the bit clock
	 * rate.  This is the period for 3 64-bit frames.  Always
	 * round this time up.
	 */
	mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
			  mcp->sclk_rate;

	ret = mcp_host_add(mcp, data->codec_pdata);
	if (ret == 0)
		return 0;

	platform_set_drvdata(dev, NULL);

 err_ioremap:
	iounmap(m->base1);
	iounmap(m->base0);
	mcp_host_free(mcp);
 err_alloc:
	release_mem_region(mem1->start, resource_size(mem1));
 err_mem1:
	release_mem_region(mem0->start, resource_size(mem0));
 err_mem0:
	return ret;
}
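The rw_timeout computation above is the classic round-up division idiom (n + d - 1) / d; the kernel expresses the same thing with the DIV_ROUND_UP() macro from linux/kernel.h:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

so the timeout could equally be written as DIV_ROUND_UP(64 * 3 * 1000000, mcp->sclk_rate).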
Example No. 22
0
static int
ether1_init_for_open (struct net_device *dev)
{
	int i, status, addr, next, next2;
	int failures = 0;
	unsigned long timeout;

	writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);

	for (i = 0; i < 6; i++)
		init_sa.sa_addr[i] = dev->dev_addr[i];

	/* load data structures into ether1 RAM */
	ether1_writebuffer (dev, &init_scp,  SCP_ADDR,  SCP_SIZE);
	ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
	ether1_writebuffer (dev, &init_scb,  SCB_ADDR,  SCB_SIZE);
	ether1_writebuffer (dev, &init_cfg,  CFG_ADDR,  CFG_SIZE);
	ether1_writebuffer (dev, &init_sa,   SA_ADDR,   SA_SIZE);
	ether1_writebuffer (dev, &init_mc,   MC_ADDR,   MC_SIZE);
	ether1_writebuffer (dev, &init_tdr,  TDR_ADDR,  TDR_SIZE);
	ether1_writebuffer (dev, &init_nop,  NOP_ADDR,  NOP_SIZE);

	if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
		printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
			dev->name);
		return 1;
	}

	/*
	 * setup circularly linked list of { rfd, rbd, buffer }, with
	 * all rfds circularly linked, rbds circularly linked.
	 * First rfd is linked to scp, first rbd is linked to first
	 * rfd.  Last rbd has a suspend command.
	 */
	addr = RX_AREA_START;
	do {
		next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
		next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;

		if (next2 >= RX_AREA_END) {
			next = RX_AREA_START;
			init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
			priv(dev)->rx_tail = addr;
		} else
			init_rfd.rfd_command = 0;
		if (addr == RX_AREA_START)
			init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
		else
			init_rfd.rfd_rbdoffset = 0;
		init_rfd.rfd_link = next;
		init_rbd.rbd_link = next + RFD_SIZE;
		init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;

		ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
		ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
		addr = next;
	} while (next2 < RX_AREA_END);

	priv(dev)->tx_link = NOP_ADDR;
	priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
	priv(dev)->tx_tail = TDR_ADDR;
	priv(dev)->rx_head = RX_AREA_START;

	/* release reset & give 586 a prod */
	priv(dev)->resetting = 1;
	priv(dev)->initialising = 1;
	writeb(CTRL_RST, REG_CONTROL);
	writeb(0, REG_CONTROL);
	writeb(CTRL_CA, REG_CONTROL);

	/* 586 should now unset iscp.busy */
	timeout = jiffies + HZ/2;
	while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
		if (time_after(jiffies, timeout)) {
			printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name);
			return 1;
		}
	}

	/* check status of commands that we issued */
	timeout += HZ/10;
	while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ/10;
	while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ/10;
	while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ;
	while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
	} else {
		status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
		if (status & TDR_XCVRPROB)
			printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name);
		else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
#ifdef FANCY
			printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name,
				status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10,
				(status & TDR_TIME) % 10);
#else
			printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name,
				status & TDR_SHORT ? "short" : "open", (status & TDR_TIME));
#endif
		}
	}

	if (failures)
		ether1_reset (dev);
	return failures ? 1 : 0;
}
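The initialisation above polls each issued command against a jiffies deadline via time_after(). A self-contained sketch of that wrap-safe comparison idiom; my_time_after mirrors the usual kernel definition, stated here as an assumption rather than quoted from the source:

#include <stdio.h>

/* wrap-safe "a is after b" for free-running unsigned tick counters */
#define my_time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = 10, deadline = 5;
	printf("%d\n", my_time_after(now, deadline));	/* 1: deadline passed */

	/* still correct when the counter has wrapped past zero */
	now = 3; deadline = (unsigned long)-2;	/* deadline just before wrap */
	printf("%d\n", my_time_after(now, deadline));	/* 1 */
	return 0;
}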
Example No. 23
/*
 * Switch LED off...
 */
static void ether3_ledoff(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
}
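ether3_ledoff() uses the old-style timer callback signature, with the device pointer smuggled through an unsigned long. A hedged sketch of how such a timer would typically be armed with the pre-4.15 kernel timer API; the timer field name and the delay are assumptions, not from the source:

/* Sketch only:
 *
 *	setup_timer(&priv(dev)->timer, ether3_ledoff, (unsigned long)dev);
 *	mod_timer(&priv(dev)->timer, jiffies + HZ / 50);
 *
 * Modern kernels would use timer_setup() and from_timer() instead, with
 * the callback receiving a struct timer_list pointer rather than data.
 */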
Example No. 24
static int
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
	unsigned long flags;
	tx_t tx;
	tbd_t tbd;
	nop_t nop;

	if (priv(dev)->restart) {
		printk(KERN_WARNING "%s: resetting device\n", dev->name);

		ether1_reset(dev);

		if (ether1_init_for_open(dev))
			printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
		else
			priv(dev)->restart = 0;
	}

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
	}

	/*
	 * insert packet followed by a nop
	 */
	txaddr = ether1_txalloc (dev, TX_SIZE);
	tbdaddr = ether1_txalloc (dev, TBD_SIZE);
	dataddr = ether1_txalloc (dev, skb->len);
	nopaddr = ether1_txalloc (dev, NOP_SIZE);

	tx.tx_status = 0;
	tx.tx_command = CMD_TX | CMD_INTR;
	tx.tx_link = nopaddr;
	tx.tx_tbdoffset = tbdaddr;
	tbd.tbd_opts = TBD_EOL | skb->len;
	tbd.tbd_link = I82586_NULL;
	tbd.tbd_bufl = dataddr;
	tbd.tbd_bufh = 0;
	nop.nop_status = 0;
	nop.nop_command = CMD_NOP;
	nop.nop_link = nopaddr;

	local_irq_save(flags);
	ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
	ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
	ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
	tmp = priv(dev)->tx_link;
	priv(dev)->tx_link = nopaddr;

	/* now reset the previous nop pointer */
	ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);

	local_irq_restore(flags);

	/* handle transmit */

	/* check to see if we have room for a full sized ether frame */
	tmp = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = tmp;
	dev_kfree_skb (skb);

	if (tst == -1)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
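A transmit routine with this signature is exposed to the network core through the ndo_start_xmit hook. A minimal sketch of the wiring (assumed, not from the source; a real driver would also fill in .ndo_open, .ndo_stop, and friends):

#include <linux/netdevice.h>

static const struct net_device_ops ether1_netdev_ops = {
	.ndo_start_xmit	= ether1_sendpacket,
};

/* at probe time:  dev->netdev_ops = &ether1_netdev_ops; */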
Example No. 25
static void
ether3_init_for_open(struct net_device *dev)
{
	int i;

	/* Reset the chip */
	ether3_outw(CFG2_RESET, REG_CONFIG2);
	udelay(4);

	priv(dev)->regs.command = 0;
	ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
	while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
		barrier();

	ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
	for (i = 0; i < 6; i++)
		ether3_outb(dev->dev_addr[i], REG_BUFWIN);

	priv(dev)->tx_head	= 0;
	priv(dev)->tx_tail	= 0;
	priv(dev)->regs.config2 |= CFG2_CTRLO;
	priv(dev)->rx_head	= RX_START;

	ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
	ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
	ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
	ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
	ether3_outw(0, REG_TRANSMITPTR);
	ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
	ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);

	ether3_setbuffer(dev, buffer_write, 0);
	ether3_writelong(dev, 0);

	priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX;
	ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
Example No. 26
static void
ether1_xmit_done (struct net_device *dev)
{
	nop_t nop;
	int caddr, tst;

	caddr = priv(dev)->tx_tail;

again:
	ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);

	switch (nop.nop_command & CMD_MASK) {
	case CMD_TDR:
		/* special case */
		if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
				!= (unsigned short)I82586_NULL) {
			ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t,
				    scb_command, NORMALIRQS);
			writeb(CTRL_CA, REG_CONTROL);
		}
		priv(dev)->tx_tail = NOP_ADDR;
		return;

	case CMD_NOP:
		if (nop.nop_link == caddr) {
			if (priv(dev)->initialising == 0)
				printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name);
			else
				priv(dev)->initialising = 0;
			return;
		}
		caddr = nop.nop_link;
		goto again;

	case CMD_TX:
		if (nop.nop_status & STAT_COMPLETE)
			break;
		printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name);
		priv(dev)->restart = 1;
		return;

	default:
		printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)\n", dev->name,
			nop.nop_command & CMD_MASK, caddr);
		priv(dev)->restart = 1;
		return;
	}

	while (nop.nop_status & STAT_COMPLETE) {
		if (nop.nop_status & STAT_OK) {
			dev->stats.tx_packets++;
			dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
		} else {
			dev->stats.tx_errors++;

			if (nop.nop_status & STAT_COLLAFTERTX)
				dev->stats.collisions++;
			if (nop.nop_status & STAT_NOCARRIER)
				dev->stats.tx_carrier_errors++;
			if (nop.nop_status & STAT_TXLOSTCTS)
				printk (KERN_WARNING "%s: cts lost\n", dev->name);
			if (nop.nop_status & STAT_TXSLOWDMA)
				dev->stats.tx_fifo_errors++;
			if (nop.nop_status & STAT_COLLEXCESSIVE)
				dev->stats.collisions += 16;
		}

		if (nop.nop_link == caddr) {
			printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name);
			break;
		}

		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
			printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name);
			break;
		}

		if (caddr == nop.nop_link)
			break;

		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_TX) {
			printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name);
			break;
		}
	}
	priv(dev)->tx_tail = caddr;

	caddr = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = caddr;
	if (tst != -1)
		netif_wake_queue(dev);
}
Example No. 27
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
int do_fork(struct proc * caller, message * m_ptr)
{
/* Handle sys_fork().
 * m_lsys_krn_sys_fork.endpt has forked.
 * The child is m_lsys_krn_sys_fork.slot.
 */
#if defined(__i386__)
  char *old_fpu_save_area_p;
#endif
  register struct proc *rpc;		/* child process pointer */
  struct proc *rpp;			/* parent process pointer */
  int gen;
  int p_proc;
  int namelen;

  if(!isokendpt(m_ptr->m_lsys_krn_sys_fork.endpt, &p_proc))
	return EINVAL;

  rpp = proc_addr(p_proc);
  rpc = proc_addr(m_ptr->m_lsys_krn_sys_fork.slot);
  if (isemptyp(rpp) || !isemptyp(rpc)) return(EINVAL);

  assert(!(rpp->p_misc_flags & MF_DELIVERMSG));

  /* needs to be receiving so we know where the message buffer is */
  if(!RTS_ISSET(rpp, RTS_RECEIVING)) {
	printf("kernel: fork not done synchronously?\n");
	return EINVAL;
  }

  /* make sure that the FPU context is saved in parent before copy */
  save_fpu(rpp);
  /* Copy parent 'proc' struct to child. And reinitialize some fields. */
  gen = _ENDPOINT_G(rpc->p_endpoint);
#if defined(__i386__)
  old_fpu_save_area_p = rpc->p_seg.fpu_state;
#endif
  *rpc = *rpp;				/* copy 'proc' struct */
#if defined(__i386__)
  rpc->p_seg.fpu_state = old_fpu_save_area_p;
  if(proc_used_fpu(rpp))
	memcpy(rpc->p_seg.fpu_state, rpp->p_seg.fpu_state, FPU_XFP_SIZE);
#endif
  if(++gen >= _ENDPOINT_MAX_GENERATION)	/* increase generation */
	gen = 1;			/* generation number wraparound */
  rpc->p_nr = m_ptr->m_lsys_krn_sys_fork.slot;	/* this was obliterated by copy */
  rpc->p_endpoint = _ENDPOINT(gen, rpc->p_nr);	/* new endpoint of slot */

  rpc->p_reg.retreg = 0;	/* child sees pid = 0 to know it is child */
  rpc->p_user_time = 0;		/* set all the accounting times to 0 */
  rpc->p_sys_time = 0;

  rpc->p_misc_flags &=
	~(MF_VIRT_TIMER | MF_PROF_TIMER | MF_SC_TRACE | MF_SPROF_SEEN | MF_STEP);
  rpc->p_virt_left = 0;		/* disable, clear the process-virtual timers */
  rpc->p_prof_left = 0;

  /* Mark process name as being a forked copy */
  namelen = strlen(rpc->p_name);
#define FORKSTR "*F"
  if(namelen+strlen(FORKSTR) < sizeof(rpc->p_name))
	strcat(rpc->p_name, FORKSTR);

  /* the child process is not runnable until it's scheduled. */
  RTS_SET(rpc, RTS_NO_QUANTUM);
  reset_proc_accounting(rpc);

  rpc->p_cpu_time_left = 0;
  rpc->p_cycles = 0;
  rpc->p_kcall_cycles = 0;
  rpc->p_kipc_cycles = 0;
  rpc->p_signal_received = 0;

  /* If the parent is a privileged process, take away the privileges from the 
   * child process and inhibit it from running by setting the NO_PRIV flag.
   * The caller should explicitly set the new privileges before executing.
   */
  if (priv(rpp)->s_flags & SYS_PROC) {
      rpc->p_priv = priv_addr(USER_PRIV_ID);
      rpc->p_rts_flags |= RTS_NO_PRIV;
  }

  /* Calculate endpoint identifier, so caller knows what it is. */
  m_ptr->m_krn_lsys_sys_fork.endpt = rpc->p_endpoint;
  m_ptr->m_krn_lsys_sys_fork.msgaddr = rpp->p_delivermsg_vir;

  /* Don't schedule process in VM mode until it has a new pagetable. */
  if(m_ptr->m_lsys_krn_sys_fork.flags & PFF_VMINHIBIT) {
  	RTS_SET(rpc, RTS_VMINHIBIT);
  }

  /* 
   * Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
   */
  RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
  (void) sigemptyset(&rpc->p_pending);

#if defined(__i386__)
  rpc->p_seg.p_cr3 = 0;
  rpc->p_seg.p_cr3_v = NULL;
#elif defined(__arm__)
  rpc->p_seg.p_ttbr = 0;
  rpc->p_seg.p_ttbr_v = NULL;
#endif

  return OK;
}
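The generation counter above exists so that a recycled process slot produces a different endpoint, which lets stale IPC aimed at a dead process be detected. A self-contained sketch of such an encoding; the constants and macro names are illustrative assumptions (MINIX defines the real ones in <minix/endpoint.h>):

#include <stdio.h>

/* assumed illustrative generation size (number of addressable slots) */
#define GEN_SIZE	1024

#define MAKE_ENDPOINT(g, p)	((g) * GEN_SIZE + (p))
#define ENDPOINT_GEN(e)		((e) / GEN_SIZE)
#define ENDPOINT_SLOT(e)	((e) % GEN_SIZE)

int main(void)
{
	int e1 = MAKE_ENDPOINT(1, 42);	/* slot 42, first generation */
	int e2 = MAKE_ENDPOINT(2, 42);	/* same slot after reuse */
	printf("%d != %d, same slot %d\n", e1, e2, ENDPOINT_SLOT(e2));
	return 0;
}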
Example No. 28
static void
ether1_recv_done (struct net_device *dev)
{
	int status;
	int nexttail, rbdaddr;
	rbd_t rbd;

	do {
		status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS);
		if ((status & RFD_COMPLETE) == 0)
			break;

		rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS);
		ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE);

		if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) {
			int length = rbd.rbd_status & RBD_ACNT;
			struct sk_buff *skb;

			length = (length + 1) & ~1;
			skb = netdev_alloc_skb(dev, length + 2);

			if (skb) {
				skb_reserve (skb, 2);

				ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);

				skb->protocol = eth_type_trans (skb, dev);
				netif_rx (skb);
				dev->stats.rx_packets++;
			} else
				dev->stats.rx_dropped++;
		} else {
			printk(KERN_WARNING "%s: %s\n", dev->name,
				(rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
			dev->stats.rx_dropped++;
		}

		nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
		/* nexttail should be rx_head */
		if (nexttail != priv(dev)->rx_head)
			printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n",
				dev->name, nexttail, priv(dev)->rx_head);
		ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS);
	
		priv(dev)->rx_tail = nexttail;
		priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS);
	} while (1);
}
Example No. 29
/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, **rpp, *target;

  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
		assert(RTS_ISSET(p,RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. We can
		 * not simply send the first request on the list, because IPC
		 * filters may forbid VM from getting requests for particular
		 * sources. However, IPC filters are used only in rare cases.
		 */
		for (rpp = &vmrequest; *rpp != NULL;
		    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
			rp = *rpp;

			assert(RTS_ISSET(rp, RTS_VMREQUEST));

			okendpt(rp->p_vmrequest.target, &proc_nr);
			target = proc_addr(proc_nr);

			/* Check against IPC filters. */
			if (!allow_ipc_filtered_memreq(rp, target))
				continue;

			/* Reply with request fields. */
			if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
				panic("VMREQUEST wrong type");

			m_ptr->SVMCTL_MRG_TARGET	=
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR		=
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH	=
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG		=
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR	=
				(void *) rp->p_endpoint;

			rp->p_vmrequest.vmresult = VMSUSPEND;

			/* Remove from request chain. */
			*rpp = rp->p_vmrequest.nextrequestor;

			return rp->p_vmrequest.req_type;
		}

		return ENOENT;

	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
  		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * we will have to resume execution of the kernel call
			 * as soon the scheduler picks up this process again
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d",p->p_vmrequest.type);
		}

		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;

	case VMCTL_KERN_PHYSMAP:
	{
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * the process is certainly not runnable, no need to tell its
		 * cpu
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
							privp->s_asynsize);
		}
		/*
		 * We don't know whether the kernel has the changed mapping
		 * installed to access userspace memory, and if so, on which
		 * CPU.  Moreover, we don't know which mapping has changed or
		 * how, so we must invalidate all mappings we have anywhere.
		 * Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}
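The VMCTL_MEMREQ_GET branch above walks the vmrequest chain through a pointer-to-pointer (rpp) so that the matching entry can be unlinked without special-casing the list head. A self-contained sketch of the same idiom:

#include <stddef.h>
#include <stdio.h>

struct node { int key; struct node *next; };

/* unlink the first node matching key; works for head and middle alike */
static struct node *unlink_key(struct node **head, int key)
{
	struct node **pp;

	for (pp = head; *pp != NULL; pp = &(*pp)->next) {
		if ((*pp)->key == key) {
			struct node *found = *pp;
			*pp = found->next;
			return found;
		}
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	unlink_key(&head, 1);				/* unlinks the head itself */
	printf("new head key: %d\n", head->key);	/* 2 */
	return 0;
}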
Example No. 30
/*===========================================================================*
 *				do_update				     *
 *===========================================================================*/
int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Replace one process with another by swapping their
 * process slots.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i;

  /* Lookup slots for source and destination process. */
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
      return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  if(!(src_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
      return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  assert(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
      return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

  /* Let destination inherit the target mask from source. */
  for (i=0; i < NR_SYS_PROCS; i++) {
      if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
          set_sendto_bit(dst_rp, i);
      }
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Swap slots. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

#ifdef CONFIG_SMP
  bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
  bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
#endif

  return OK;
}
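The swap above saves complete copies of both proc and priv slots before cross-assigning them, then repairs slot-relative fields via adjust_proc_slot()/adjust_priv_slot(). A minimal sketch of the save-then-cross-assign step in isolation (the struct fields are illustrative, not from the source):

struct slot { int nr; char name[16]; };

/* swap two fixed-position slots by value, as do_update does for proc and
 * priv entries; the real code additionally repairs slot-relative fields
 * afterwards */
static void swap_slots(struct slot *a, struct slot *b)
{
	struct slot orig_a = *a;	/* save originals first */
	struct slot orig_b = *b;

	*a = orig_b;
	*b = orig_a;
}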