Example #1
0
/* fjes_free_resources - tear down buffer sharing with all peer EPs.
 * @adapter: board private structure
 *
 * Unregisters the shared buffer with every endpoint other than our own
 * and re-initializes each TX epbuf with the current MAC address and MTU.
 * If any unregister command failed (or a reset was previously forced),
 * the hardware is reset and its command registers are reprogrammed.
 */
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		/* hw_info.lock serializes device command submission */
		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		/* any failed unregister triggers the HW reset below */
		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		/* epbuf rx_status is protected by rx_status_lock */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		/* NOTE(review): open_guard presumably blocks a later
		 * open after a failed reset - confirm against callers.
		 */
		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		/* re-hand the still-allocated req/res/share buffers to
		 * the device by reprogramming its command registers
		 */
		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}
Example #2
0
/* fjes_hw_update_zone_task - workqueue handler that refreshes zoning info.
 * @work: embedded work item (container of fjes_hw.update_zone_task)
 *
 * Queries the device for current per-EP zoning information, then for each
 * peer EP decides - based on its partner status and whether it now shares
 * our zone - to register a buffer (share), unregister one (unshare), or
 * raise a TXRX-stop interrupt (irq).  Decisions are collected in local
 * bitmasks under hw_info.lock and acted on afterwards, outside the lock.
 */
static void fjes_hw_update_zone_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work,
			struct fjes_hw, update_zone_task);

	/* overlay for the raw per-EP info records in the response buffer */
	struct my_s {u8 es_status; u8 zone; } *info;
	union fjes_device_command_res *res_buf;
	enum ep_partner_status pstatus;

	struct fjes_adapter *adapter;
	struct net_device *netdev;

	/* per-EP action masks, filled while holding hw_info.lock */
	ulong unshare_bit = 0;
	ulong share_bit = 0;
	ulong irq_bit = 0;

	int epidx;
	int ret;

	adapter = (struct fjes_adapter *)hw->back;
	netdev = adapter->netdev;
	res_buf = hw->hw_info.res_buf;
	info = (struct my_s *)&res_buf->info.info;

	mutex_lock(&hw->hw_info.lock);

	ret = fjes_hw_request_info(hw);
	switch (ret) {
	case -ENOMSG:
	case -EBUSY:
	default:
		/* info request failed: schedule a forced close/reset
		 * unless one is already pending
		 */
		if (!work_pending(&adapter->force_close_task)) {
			adapter->force_reset = true;
			schedule_work(&adapter->force_close_task);
		}
		break;

	case 0:

		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid) {
				/* our own EP: just record status/zone */
				hw->ep_shm_info[epidx].es_status =
					info[epidx].es_status;
				hw->ep_shm_info[epidx].zone =
					info[epidx].zone;
				continue;
			}

			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
			switch (pstatus) {
			case EP_PARTNER_UNSHARE:
			default:
				/* not sharing yet: share if the peer is
				 * enabled and now in our zone
				 */
				if ((info[epidx].zone !=
					FJES_ZONING_ZONE_TYPE_NONE) &&
				    (info[epidx].es_status ==
					FJES_ZONING_STATUS_ENABLE) &&
				    (info[epidx].zone ==
					info[hw->my_epid].zone))
					set_bit(epidx, &share_bit);
				else
					set_bit(epidx, &unshare_bit);
				break;

			case EP_PARTNER_COMPLETE:
			case EP_PARTNER_WAITING:
				/* mid-handshake peer that left our zone:
				 * defer the unshare to the watch task
				 */
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone)) {
					set_bit(epidx,
						&adapter->unshare_watch_bitmask);
					set_bit(epidx,
						&hw->hw_info.buffer_unshare_reserve_bit);
				}
				break;

			case EP_PARTNER_SHARED:
				/* shared peer that left our zone: request
				 * TXRX stop via interrupt first
				 */
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone))
					set_bit(epidx, &irq_bit);
				break;
			}

			hw->ep_shm_info[epidx].es_status =
				info[epidx].es_status;
			hw->ep_shm_info[epidx].zone = info[epidx].zone;
		}
		break;
	}

	mutex_unlock(&hw->hw_info.lock);

	/* act on the collected decisions, re-taking the lock per command */
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (test_bit(epidx, &share_bit)) {
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);

			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_register_buff_addr(
				hw, epidx, &hw->ep_shm_info[epidx]);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
					  &adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
		}

		if (test_bit(epidx, &unshare_bit)) {
			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_unregister_buff_addr(hw, epidx);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
					  &adapter->force_close_task);
				}
				break;
			}

			mutex_unlock(&hw->hw_info.lock);

			/* only re-arm the TX epbuf after a clean unregister */
			if (ret == 0)
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
		}

		if (test_bit(epidx, &irq_bit)) {
			/* ask the peer to stop TX/RX; the watch task will
			 * complete the unshare once it acknowledges
			 */
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);

			set_bit(epidx, &hw->txrx_stop_req_bit);
			hw->ep_shm_info[epidx].tx.
				info->v1i.rx_status |=
					FJES_RX_STOP_REQ_REQUEST;
			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	/* kick the unshare watch task if there is anything left to finish */
	if (irq_bit || adapter->unshare_watch_bitmask) {
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	}
}
Example #3
0
/* fjes_hw_setup - allocate and initialize the hardware's shared buffers.
 * @hw: device hardware structure
 *
 * Allocates the per-EP shared-memory info array, the request/response
 * command buffers, the shared status region and one TX/RX epbuf pair per
 * peer EP, then programs the device command registers with the physical
 * addresses of those buffers.
 *
 * Returns 0 on success or a negative errno.  On failure every buffer
 * allocated so far is freed again (the original code leaked them all and
 * returned with half-initialized state).
 */
static int fjes_hw_setup(struct fjes_hw *hw)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	size_t mem_size;
	int result;
	int epidx;
	void *buf;

	hw->hw_info.max_epid = &hw->max_epid;
	hw->hw_info.my_epid = &hw->my_epid;

	/* zeroed by kcalloc, so the unwind path below may safely free
	 * epbuf entries that were never allocated (kfree(NULL) is a no-op)
	 */
	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hw->ep_shm_info = buf;

	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.req_buf)) {
		result = -ENOMEM;
		goto free_ep_info;
	}

	hw->hw_info.req_buf_size = mem_size;

	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.res_buf)) {
		result = -ENOMEM;
		goto free_req_buf;
	}

	hw->hw_info.res_buf_size = mem_size;

	result = fjes_hw_alloc_shared_status_region(hw);
	if (result)
		goto free_res_buf;

	hw->hw_info.buffer_share_bit = 0;
	hw->hw_info.buffer_unshare_reserve_bit = 0;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx != hw->my_epid) {
			buf_pair = &hw->ep_shm_info[epidx];

			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
			if (result)
				goto free_epbuf;

			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
			if (result)
				goto free_epbuf;

			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
					    fjes_support_mtu[0]);
			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
					    fjes_support_mtu[0]);
		}
	}

	memset(&param, 0, sizeof(param));

	/* hand the buffers' physical addresses to the device */
	param.req_len = hw->hw_info.req_buf_size;
	param.req_start = __pa(hw->hw_info.req_buf);
	param.res_len = hw->hw_info.res_buf_size;
	param.res_start = __pa(hw->hw_info.res_buf);

	param.share_start = __pa(hw->hw_info.share->ep_status);

	fjes_hw_init_command_registers(hw, &param);

	return 0;

free_epbuf:
	/* free whatever epbufs were allocated before the failure */
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}
	fjes_hw_free_shared_status_region(hw);
free_res_buf:
	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;
free_req_buf:
	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;
free_ep_info:
	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
	return result;
}
Example #4
0
/* fjes_change_mtu - net_device_ops .ndo_change_mtu handler.
 * @netdev: network interface device structure
 * @new_mtu: requested MTU
 *
 * Rounds @new_mtu up to the next entry of the fjes_support_mtu table.
 * If the interface is running, TX is quiesced and every peer EP's TX
 * epbuf is rebuilt for the new MTU before traffic is re-enabled.
 *
 * Returns 0 on success, -EINVAL if @new_mtu exceeds every supported MTU.
 */
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	/* table is zero-terminated; pick the first supported MTU that fits */
	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		/* announce "MTU change in progress" to the peers */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		/* let in-flight traffic drain before touching the epbufs */
		msleep(1000);

		/* stop again: cancelled/raced work may have re-woken queues */
		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			/* rebuild the TX epbuf for the new MTU and mark done */
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
Example #5
0
/* fjes_setup_resources - establish buffer sharing with same-zone peers.
 * @adapter: board private structure
 *
 * Fetches current zoning info from the device, notifies enabled peers of
 * a zone update, waits for them to process it, then registers our epbuf
 * pair with every peer that sits in the same zone.
 *
 * Returns 0 on success; on a failed device command, sets force_reset and
 * returns the command's negative errno.
 */
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		/* cache each EP's reported status and zone */
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	/* tell every enabled peer that zoning info changed */
	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	/* give the peers time to react, scaled by EP count */
	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		/* arm the TX epbuf with current MAC/MTU before sharing */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}
Example #6
0
/* fjes_watch_unshare_task - workqueue handler completing deferred unshares.
 * @work: embedded work item (container of fjes_adapter.unshare_watch_task)
 *
 * Polls (100 ms period, up to ~3 s) until each flagged peer EP either
 * acknowledges our TXRX-stop request or is no longer shared, then issues
 * the unregister command and re-arms the TX epbuf.  After the timeout,
 * any EP still marked in buffer_unshare_reserve_bit is unregistered
 * unconditionally.
 */
static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	/* take ownership of the pending-watch mask and clear it */
	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			/* skip this EP unless it is ready to unshare.
			 * Note (is_shared && (!is_shared || !stop_req_done))
			 * reduces to (is_shared && !stop_req_done): keep
			 * waiting while a stop request is outstanding and
			 * the still-shared peer has not acknowledged it.
			 */
			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				/* command failure: force a close/reset
				 * unless one is already pending
				 */
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

			/* re-arm the TX epbuf under rx_status_lock */
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	/* timeout fallback: force-unregister anything still reserved */
	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				switch (ret) {
				case 0:
					break;
				case -ENOMSG:
				case -EBUSY:
				default:
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
					break;
				}
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
			}

			/* EP was watched but never unregistered above:
			 * just drop the stale STOP_REQ_DONE status bit
			 */
			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
						~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}