Example #1
0
static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
{
	struct usb_device *usb = comedi_to_usb_dev(dev);
	struct usbduxsigma_private *devpriv = dev->private;
	struct urb *urb;
	int i;

	devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
	devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
	devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
	devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(urb), GFP_KERNEL);
	devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(urb), GFP_KERNEL);
	if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
	    !devpriv->ai_urbs || !devpriv->ao_urbs)
		return -ENOMEM;

	for (i = 0; i < devpriv->n_ai_urbs; i++) {
		/* one frame: 1ms */
		urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!urb)
			return -ENOMEM;
		devpriv->ai_urbs[i] = urb;
		urb->dev = usb;
		/* will be filled later with a pointer to the comedi-device */
		/* and ONLY then the urb should be submitted */
		urb->context = NULL;
		urb->pipe = usb_rcvisocpipe(usb, 6);
		urb->transfer_flags = URB_ISO_ASAP;
		urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
		if (!urb->transfer_buffer)
			return -ENOMEM;
		urb->complete = usbduxsigma_ai_urb_complete;
		urb->number_of_packets = 1;
		urb->transfer_buffer_length = SIZEINBUF;
		urb->iso_frame_desc[0].offset = 0;
		urb->iso_frame_desc[0].length = SIZEINBUF;
	}

	for (i = 0; i < devpriv->n_ao_urbs; i++) {
		/* one frame: 1ms */
		urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!urb)
			return -ENOMEM;
		devpriv->ao_urbs[i] = urb;
		urb->dev = usb;
		/* will be filled later with a pointer to the comedi-device */
		/* and ONLY then the urb should be submitted */
		urb->context = NULL;
		urb->pipe = usb_sndisocpipe(usb, 2);
		urb->transfer_flags = URB_ISO_ASAP;
		urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
		if (!urb->transfer_buffer)
			return -ENOMEM;
		urb->complete = usbduxsigma_ao_urb_complete;
		urb->number_of_packets = 1;
		urb->transfer_buffer_length = SIZEOUTBUF;
		urb->iso_frame_desc[0].offset = 0;
		urb->iso_frame_desc[0].length = SIZEOUTBUF;
		if (devpriv->high_speed)
			urb->interval = 8;	/* uframes */
		else
			urb->interval = 1;	/* frames */
	}

	if (devpriv->pwm_buf_sz) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			return -ENOMEM;
		devpriv->pwm_urb = urb;

		urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
					       GFP_KERNEL);
		if (!urb->transfer_buffer)
			return -ENOMEM;
	}

	return 0;
}
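This allocator returns -ENOMEM from the middle of a loop without unwinding; that is only safe because the comedi core runs the driver's detach path on a failed attach, where all partially allocated buffers are released. A minimal sketch of such a counterpart, assuming the field names above (the real driver's free routine may differ in shape):

static void usbduxsigma_free_usb_buffers(struct usbduxsigma_private *devpriv)
{
	struct urb *urb;
	int i;

	/* kfree(NULL) and usb_free_urb(NULL) are no-ops, so a partially
	 * completed allocation unwinds cleanly */
	if (devpriv->pwm_urb) {
		kfree(devpriv->pwm_urb->transfer_buffer);
		usb_free_urb(devpriv->pwm_urb);
	}
	if (devpriv->ao_urbs) {
		for (i = 0; i < devpriv->n_ao_urbs; i++) {
			urb = devpriv->ao_urbs[i];
			if (urb)
				kfree(urb->transfer_buffer);
			usb_free_urb(urb);
		}
		kfree(devpriv->ao_urbs);
	}
	if (devpriv->ai_urbs) {
		for (i = 0; i < devpriv->n_ai_urbs; i++) {
			urb = devpriv->ai_urbs[i];
			if (urb)
				kfree(urb->transfer_buffer);
			usb_free_urb(urb);
		}
		kfree(devpriv->ai_urbs);
	}
	kfree(devpriv->insn_buf);
	kfree(devpriv->in_buf);
	kfree(devpriv->dux_commands);
}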
/* FIXME: split and cleanup */
int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
			    struct wusb_ckhdid *ck)
{
	int result = -ENOMEM;
	struct usb_device *usb_dev = wusb_dev->usb_dev;
	struct device *dev = &usb_dev->dev;
	u32 tkid;
	__le32 tkid_le;
	struct usb_handshake *hs;
	struct aes_ccm_nonce ccm_n;
	u8 mic[8];
	struct wusb_keydvt_in keydvt_in;
	struct wusb_keydvt_out keydvt_out;

	hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
	if (hs == NULL) {
		dev_err(dev, "can't allocate handshake data\n");
		goto error_kzalloc;
	}

	/* We need to turn on encryption before beginning the 4-way
	 * handshake (WUSB1.0[.3.2.2]) */
	result = wusb_dev_set_encryption(usb_dev, 1);
	if (result < 0)
		goto error_dev_set_encryption;

	tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
	tkid_le = cpu_to_le32(tkid);

	hs[0].bMessageNumber = 1;
	hs[0].bStatus = 0;
	memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
	hs[0].bReserved = 0;
	memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
	get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
	memset(hs[0].MIC, 0, sizeof(hs[0].MIC));	/* Per WUSB1.0[T7-22] */

	result = usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_HANDSHAKE,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake1: request failed: %d\n", result);
		goto error_hs1;
	}

	/* Handshake 2, from the device -- need to verify fields */
	result = usb_control_msg(
		usb_dev, usb_rcvctrlpipe(usb_dev, 0),
		USB_REQ_GET_HANDSHAKE,
		USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake2: request failed: %d\n", result);
		goto error_hs2;
	}

	result = -EINVAL;
	if (hs[1].bMessageNumber != 2) {
		dev_err(dev, "Handshake2 failed: bad message number %u\n",
			hs[1].bMessageNumber);
		goto error_hs2;
	}
	if (hs[1].bStatus != 0) {
		dev_err(dev, "Handshake2 failed: bad status %u\n",
			hs[1].bStatus);
		goto error_hs2;
	}
	if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
		dev_err(dev, "Handshake2 failed: TKID mismatch "
			"(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
			hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
			hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
		goto error_hs2;
	}
	if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
		dev_err(dev, "Handshake2 failed: CDID mismatch\n");
		goto error_hs2;
	}

	/* Setup the CCM nonce */
	memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn));	/* Per WUSB1.0[6.5.2] */
	memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
	ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
	ccm_n.dest_addr.data[0] = wusb_dev->addr;
	ccm_n.dest_addr.data[1] = 0;

	/* Derive the KCK and PTK from CK, the CCM, H and D nonces */
	memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
	memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
	result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
	if (result < 0) {
		dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
			result);
		goto error_hs2;
	}

	/* Compute MIC and verify it */
	result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
	if (result < 0) {
		dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
			result);
		goto error_hs2;
	}

	if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
		dev_err(dev, "Handshake2 failed: MIC mismatch\n");
		goto error_hs2;
	}

	/* Send Handshake3 */
	hs[2].bMessageNumber = 3;
	hs[2].bStatus = 0;
	memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
	hs[2].bReserved = 0;
	memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
	memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
	result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
	if (result < 0) {
		dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
			result);
		goto error_hs2;
	}

	result = usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_HANDSHAKE,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake3: request failed: %d\n", result);
		goto error_hs3;
	}

	result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
				 keydvt_out.ptk, sizeof(keydvt_out.ptk));
	if (result < 0)
		goto error_wusbhc_set_ptk;

	result = wusb_dev_set_gtk(wusbhc, wusb_dev);
	if (result < 0) {
		dev_err(dev, "Set GTK for device: request failed: %d\n",
			result);
		goto error_wusbhc_set_gtk;
	}

	/* Update the device's address from unauth to auth */
	if (usb_dev->authenticated == 0) {
		result = wusb_dev_update_address(wusbhc, wusb_dev);
		if (result < 0)
			goto error_dev_update_address;
	}
	result = 0;
	dev_info(dev, "device authenticated\n");

error_dev_update_address:
error_wusbhc_set_gtk:
error_wusbhc_set_ptk:
error_hs3:
error_hs2:
error_hs1:
	memset(hs, 0, 3*sizeof(hs[0]));
	memset(&keydvt_out, 0, sizeof(keydvt_out));
	memset(&keydvt_in, 0, sizeof(keydvt_in));
	memset(&ccm_n, 0, sizeof(ccm_n));
	memset(mic, 0, sizeof(mic));
	if (result < 0)
		wusb_dev_set_encryption(usb_dev, 0);
error_dev_set_encryption:
	kfree(hs);
error_kzalloc:
	return result;
}
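The memset() block at the end scrubs key material, but a compiler may legally elide a memset whose target is not read afterwards. For secrets the kernel provides memzero_explicit() (in <linux/string.h>), which the optimizer is not allowed to drop; a hedged variant of the same scrubbing:

	/* sketch: equivalent scrubbing that cannot be optimized away */
	memzero_explicit(hs, 3 * sizeof(hs[0]));
	memzero_explicit(&keydvt_out, sizeof(keydvt_out));
	memzero_explicit(&keydvt_in, sizeof(keydvt_in));
	memzero_explicit(&ccm_n, sizeof(ccm_n));
	memzero_explicit(mic, sizeof(mic));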
Example #3
0
/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle property "snps,incr-burst-type-adjustment".
	 * Get the number of values in this property:
	 * result <= 0 means the property is not supported;
	 * result = 1 means INCRx burst mode is supported;
	 * result > 1 means undefined-length burst mode is supported.
	 */
	ntype = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", NULL, 0);
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals)
		return;

	/* Get the INCR burst type and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "failed to read property: %d\n", ret);
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}
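The two device_property_read_u32_array() calls above rely on a documented convention: with a NULL buffer and nval == 0 the function returns the number of elements rather than reading any. The count-then-read pattern in isolation, with a made-up property name:

#include <linux/property.h>
#include <linux/slab.h>

/* "vendor,example-array" is a hypothetical property, for illustration */
static void read_example_array(struct device *dev)
{
	int n = device_property_read_u32_array(dev, "vendor,example-array",
					       NULL, 0);
	u32 *buf;
	int i;

	if (n <= 0)
		return;		/* property absent or empty */

	buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return;

	if (!device_property_read_u32_array(dev, "vendor,example-array",
					    buf, n))
		for (i = 0; i < n; i++)
			pr_info("val[%d] = %u\n", i, buf[i]);

	kfree(buf);
}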
Example #4
0
static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
							u8 n)
{
	struct tcf_pedit_key_ex *keys_ex;
	struct tcf_pedit_key_ex *k;
	const struct nlattr *ka;
	int err = -EINVAL;
	int rem;

	if (!nla || !n)
		return NULL;

	keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
	if (!keys_ex)
		return ERR_PTR(-ENOMEM);

	k = keys_ex;

	nla_for_each_nested(ka, nla, rem) {
		struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1];

		if (!n) {
			err = -EINVAL;
			goto err_out;
		}
		n--;

		if (nla_type(ka) != TCA_PEDIT_KEY_EX) {
			err = -EINVAL;
			goto err_out;
		}

		err = nla_parse_nested(tb, TCA_PEDIT_KEY_EX_MAX, ka,
				       pedit_key_ex_policy);
		if (err)
			goto err_out;

		if (!tb[TCA_PEDIT_KEY_EX_HTYPE] ||
		    !tb[TCA_PEDIT_KEY_EX_CMD]) {
			err = -EINVAL;
			goto err_out;
		}

		k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]);
		k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]);

		if (k->htype > TCA_PEDIT_HDR_TYPE_MAX ||
		    k->cmd > TCA_PEDIT_CMD_MAX) {
			err = -EINVAL;
			goto err_out;
		}

		k++;
	}

	if (n)
		goto err_out;

	return keys_ex;

err_out:
	kfree(keys_ex);
	return ERR_PTR(err);
}
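The examples in this collection all allocate arrays with kcalloc(n, size, flags) rather than kmalloc(n * size, flags). The point of the helper is that it checks the multiplication for overflow and returns NULL, where the open-coded product would silently wrap and hand back a buffer that is too small. A minimal illustration:

#include <linux/slab.h>

/* with attacker-influenced n, the open-coded product can wrap around */
static void *unsafe_alloc(size_t n, size_t size)
{
	return kmalloc(n * size, GFP_KERNEL);	/* n * size may overflow */
}

/* kcalloc() returns NULL when n * size would overflow, and zeroes the
 * memory on success */
static void *safe_alloc(size_t n, size_t size)
{
	return kcalloc(n, size, GFP_KERNEL);
}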
s32 esparser_init(struct stream_buf_s *buf)
{
    s32 r;
    u32 pts_type;
    u32 parser_sub_start_ptr;
    u32 parser_sub_end_ptr;
    u32 parser_sub_rp;

    if (buf->type == BUF_TYPE_VIDEO) {
        pts_type = PTS_TYPE_VIDEO;
    } else if (buf->type & BUF_TYPE_AUDIO) {
        pts_type = PTS_TYPE_AUDIO;
    } else if (buf->type & BUF_TYPE_SUBTITLE) {
        pts_type = PTS_TYPE_MAX;
    } else {
        return -EINVAL;
    }

    parser_sub_start_ptr = READ_MPEG_REG(PARSER_SUB_START_PTR);
    parser_sub_end_ptr = READ_MPEG_REG(PARSER_SUB_END_PTR);
    parser_sub_rp = READ_MPEG_REG(PARSER_SUB_RP);

    buf->flag |= BUF_FLAG_PARSER;

    if (atomic_add_return(1, &esparser_use_count) == 1) {
        if (!fetchbuf) {
            pr_err("%s: no fetchbuf\n", __func__);
            return -ENOMEM;
        }

        if (search_pattern == NULL) {
            search_pattern = kcalloc(1, SEARCH_PATTERN_LEN, GFP_KERNEL);
            if (search_pattern == NULL) {
                pr_err("%s: no search_pattern\n", __func__);
                return -ENOMEM;
            }

            /* build a fake start code to get parser interrupt */
            search_pattern[0] = 0x00;
            search_pattern[1] = 0x00;
            search_pattern[2] = 0x01;
            search_pattern[3] = 0xff;

            search_pattern_map = dma_map_single(NULL, search_pattern,
                                                SEARCH_PATTERN_LEN, DMA_TO_DEVICE);
        }

        /* reset PARSER with first esparser_init() call */
        WRITE_MPEG_REG(RESET1_REGISTER, RESET_PARSER);

        /* TS data path */
#ifndef CONFIG_AM_DVB
        WRITE_MPEG_REG(FEC_INPUT_CONTROL, 0);
#else
        tsdemux_set_reset_flag();
#endif
        CLEAR_MPEG_REG_MASK(TS_HIU_CTL, 1 << USE_HI_BSF_INTERFACE);
        CLEAR_MPEG_REG_MASK(TS_HIU_CTL_2, 1 << USE_HI_BSF_INTERFACE);
        CLEAR_MPEG_REG_MASK(TS_HIU_CTL_3, 1 << USE_HI_BSF_INTERFACE);

        CLEAR_MPEG_REG_MASK(TS_FILE_CONFIG, (1 << TS_HIU_ENABLE));

        WRITE_MPEG_REG(PARSER_CONFIG,
                       (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
                       (1  << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
                       (16 << PS_CFG_MAX_FETCH_CYCLE_BIT));

        WRITE_MPEG_REG(PFIFO_RD_PTR, 0);
        WRITE_MPEG_REG(PFIFO_WR_PTR, 0);

        WRITE_MPEG_REG(PARSER_SEARCH_PATTERN, ES_START_CODE_PATTERN);
        WRITE_MPEG_REG(PARSER_SEARCH_MASK, ES_START_CODE_MASK);

        WRITE_MPEG_REG(PARSER_CONFIG,
                       (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
                       (1  << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
                       PS_CFG_STARTCODE_WID_24   |
                       PS_CFG_PFIFO_ACCESS_WID_8 | /* single byte pop */
                       (16 << PS_CFG_MAX_FETCH_CYCLE_BIT));

        WRITE_MPEG_REG(PARSER_CONTROL, PARSER_AUTOSEARCH);

        tasklet_init(&esparser_tasklet, parser_tasklet, 0);
    }

    /* hook stream buffer with PARSER */
    if (pts_type == PTS_TYPE_VIDEO) {
        WRITE_MPEG_REG(PARSER_VIDEO_START_PTR,
                       READ_MPEG_REG(VLD_MEM_VIFIFO_START_PTR));
        WRITE_MPEG_REG(PARSER_VIDEO_END_PTR,
                       READ_MPEG_REG(VLD_MEM_VIFIFO_END_PTR));
        CLEAR_MPEG_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR);

        WRITE_MPEG_REG(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
        CLEAR_MPEG_REG_MASK(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);

        video_data_parsed = 0;
    } else if (pts_type == PTS_TYPE_AUDIO) {
        WRITE_MPEG_REG(PARSER_AUDIO_START_PTR,
                       READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));
        WRITE_MPEG_REG(PARSER_AUDIO_END_PTR,
                       READ_MPEG_REG(AIU_MEM_AIFIFO_END_PTR));
        CLEAR_MPEG_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR);

        WRITE_MPEG_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
        CLEAR_MPEG_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);

        audio_data_parsed = 0;
    } else if (buf->type & BUF_TYPE_SUBTITLE) {
        WRITE_MPEG_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr);
        WRITE_MPEG_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr);
        WRITE_MPEG_REG(PARSER_SUB_RP, parser_sub_rp);
        SET_MPEG_REG_MASK(PARSER_ES_CONTROL, (7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR);
    }

    if (pts_type < PTS_TYPE_MAX) {
        r = pts_start(pts_type);
        if (r < 0) {
            printk("esparser_init: pts_start failed\n");
            goto Err_1;
        }
    }

#if 0
    if (buf->flag & BUF_FLAG_FIRST_TSTAMP) {
        if (buf->type == BUF_TYPE_VIDEO) {
            es_vpts_checkin(buf, buf->first_tstamp);
        } else if (buf->type == BUF_TYPE_AUDIO) {
            es_apts_checkin(buf, buf->first_tstamp);
        }

        buf->flag &= ~BUF_FLAG_FIRST_TSTAMP;
    }
#endif

    if (atomic_read(&esparser_use_count) == 1) {
        r = request_irq(INT_PARSER, parser_isr,
                        IRQF_SHARED, "esparser", (void *)esparser_id);
        if (r) {
            printk("esparser_init: irq register failed.\n");
            goto Err_2;
        }

        WRITE_MPEG_REG(PARSER_INT_STATUS, 0xffff);
        WRITE_MPEG_REG(PARSER_INT_ENABLE,
                       PARSER_INTSTAT_SC_FOUND << PARSER_INT_HOST_EN_BIT);
    }

    return 0;

Err_2:
    pts_stop(pts_type);

Err_1:
    buf->flag &= ~BUF_FLAG_PARSER;
    return r;
}
Example #6
0
/**
 * hdm_probe - probe function of USB device driver
 * @interface: Interface of the attached USB device
 * @id: Pointer to the USB ID table.
 *
 * This allocates and initializes the device instance, adds the new
 * entry to the internal list, scans the USB descriptors and registers
 * the interface with the core.
 * Additionally, the DCI objects are created and the hardware is sync'd.
 *
 * Return 0 on success. In case of an error a negative number is returned.
 */
static int
hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
	struct usb_device *usb_dev = interface_to_usbdev(interface);
	struct device *dev = &usb_dev->dev;
	struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	unsigned int i;
	unsigned int num_endpoints;
	struct most_channel_capability *tmp_cap;
	struct usb_endpoint_descriptor *ep_desc;
	int ret = 0;
	int err;

	if (!mdev)
		goto exit_ENOMEM;

	usb_set_intfdata(interface, mdev);
	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
	mutex_init(&mdev->io_mutex);
	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
	setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
		    (unsigned long)mdev);

	mdev->usb_device = usb_dev;
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);

	mdev->iface.mod = hdm_usb_fops.owner;
	mdev->iface.interface = ITYPE_USB;
	mdev->iface.configure = hdm_configure_channel;
	mdev->iface.request_netinfo = hdm_request_netinfo;
	mdev->iface.enqueue = hdm_enqueue;
	mdev->iface.poison_channel = hdm_poison_channel;
	mdev->iface.description = mdev->description;
	mdev->iface.num_channels = num_endpoints;

	snprintf(mdev->description, sizeof(mdev->description),
		 "usb_device %d-%s:%d.%d",
		 usb_dev->bus->busnum,
		 usb_dev->devpath,
		 usb_dev->config->desc.bConfigurationValue,
		 usb_iface_desc->desc.bInterfaceNumber);

	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
	if (!mdev->conf)
		goto exit_free;

	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
	if (!mdev->cap)
		goto exit_free1;

	mdev->iface.channel_vector = mdev->cap;
	mdev->iface.priv = NULL;

	mdev->ep_address =
		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
	if (!mdev->ep_address)
		goto exit_free2;

	mdev->busy_urbs =
		kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
	if (!mdev->busy_urbs)
		goto exit_free3;

	tmp_cap = mdev->cap;
	for (i = 0; i < num_endpoints; i++) {
		ep_desc = &usb_iface_desc->endpoint[i].desc;
		mdev->ep_address[i] = ep_desc->bEndpointAddress;
		mdev->padding_active[i] = false;
		mdev->is_channel_healthy[i] = true;

		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
			 mdev->ep_address[i]);

		tmp_cap->name_suffix = &mdev->suffix[i][0];
		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				     MOST_CH_ISOC | MOST_CH_SYNC;
		if (usb_endpoint_dir_in(ep_desc))
			tmp_cap->direction = MOST_CH_RX;
		else
			tmp_cap->direction = MOST_CH_TX;
		tmp_cap++;
		init_usb_anchor(&mdev->busy_urbs[i]);
		spin_lock_init(&mdev->channel_lock[i]);
		err = drci_wr_reg(usb_dev,
				  DRCI_REG_BASE + DRCI_COMMAND +
				  ep_desc->bEndpointAddress * 16,
				  1);
		if (err < 0)
			dev_warn(dev, "DCI Sync for EP %02x failed",
				 ep_desc->bEndpointAddress);
	}
	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
		   le16_to_cpu(usb_dev->descriptor.idVendor),
		   le16_to_cpu(usb_dev->descriptor.idProduct),
		   usb_dev->bus->busnum,
		   usb_dev->devnum);

	dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
		   usb_dev->bus->busnum,
		   usb_dev->devpath,
		   usb_dev->config->desc.bConfigurationValue,
		   usb_iface_desc->desc.bInterfaceNumber);

	mdev->parent = most_register_interface(&mdev->iface);
	if (IS_ERR(mdev->parent)) {
		ret = PTR_ERR(mdev->parent);
		goto exit_free4;
	}

	mutex_lock(&mdev->io_mutex);
	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
		/* this increments the reference count of the instance
		 * object of the core
		 */
		mdev->dci = create_most_dci_obj(mdev->parent);
		if (!mdev->dci) {
			mutex_unlock(&mdev->io_mutex);
			most_deregister_interface(&mdev->iface);
			ret = -ENOMEM;
			goto exit_free4;
		}

		kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
		mdev->dci->usb_device = mdev->usb_device;
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;

exit_free4:
	kfree(mdev->busy_urbs);
exit_free3:
	kfree(mdev->ep_address);
exit_free2:
	kfree(mdev->cap);
exit_free1:
	kfree(mdev->conf);
exit_free:
	kfree(mdev);
exit_ENOMEM:
	if (ret == 0 || ret == -ENOMEM) {
		ret = -ENOMEM;
		dev_err(dev, "out of memory\n");
	}
	return ret;
}
Example #7
0
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	if (IS_IWARP(dev)) {
		spin_lock_init(&dev->idr_lock);
		idr_init(&dev->qpidr);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
		if (!dev->iwarp_wq) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}
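The err3/err4 labels show the standard unwind for a loop that acquires several resources per iteration: the failing iteration releases only what it had obtained so far, then the completed iterations are torn down in reverse order. The skeleton of the idiom, with placeholder helpers (alloc_a/alloc_b/free_a/free_b are not real kernel APIs):

static int alloc_a(int i);
static int alloc_b(int i);
static void free_a(int i);
static void free_b(int i);

static int setup_all(int count)
{
	int i, rc;

	for (i = 0; i < count; i++) {
		rc = alloc_a(i);
		if (rc)
			goto err_partial;	/* iteration i holds nothing */
		rc = alloc_b(i);
		if (rc)
			goto err_a;		/* iteration i holds only A */
	}
	return 0;

err_a:
	free_a(i);			/* undo A of the failed iteration */
err_partial:
	while (--i >= 0) {		/* undo all complete iterations */
		free_b(i);
		free_a(i);
	}
	return rc;
}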
Example #8
0
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

 cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}
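The chain of if-clamps on bouncesz takes the minimum of four values; the kernel's min3() helper (<linux/kernel.h>) states that directly. An equivalent, hedged rewrite of just that computation, assuming all operands are unsigned int as above:

	bouncesz = MMC_QUEUE_BOUNCESZ;
	/* same result as the three chained ifs in the bounce block */
	bouncesz = min3(bouncesz, host->max_req_size, host->max_seg_size);
	bouncesz = min(bouncesz, host->max_blk_count * 512);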
/* called from igb_main.c */
int igb_sysfs_init(struct igb_adapter *adapter)
{
	struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
	unsigned int i;
	int n_attrs;
	int rc = 0;
#ifdef HAVE_I2C_SUPPORT
	struct i2c_client *client = NULL;
#endif /* HAVE_I2C_SUPPORT */

	/* If this method isn't defined we don't support thermals */
	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
		goto exit;

	/* Don't create thermal hwmon interface if no sensors present */
	rc = adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw);
	if (rc)
		goto exit;
#ifdef HAVE_I2C_SUPPORT
	/* init i2c_client */
	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
	if (client == NULL) {
		dev_info(&adapter->pdev->dev,
			 "Failed to create new i2c device\n");
		goto exit;
	}
	adapter->i2c_client = client;
#endif /* HAVE_I2C_SUPPORT */

	/* Allocate space for the max number of attributes:
	 * max num sensors * values (loc, temp, max, caution)
	 */
	n_attrs = E1000_MAX_SENSORS * 4;
	igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
					  GFP_KERNEL);
	if (!igb_hwmon->hwmon_list) {
		rc = -ENOMEM;
		goto err;
	}

	igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
	if (IS_ERR(igb_hwmon->device)) {
		rc = PTR_ERR(igb_hwmon->device);
		goto err;
	}

	for (i = 0; i < E1000_MAX_SENSORS; i++) {

		/* Only create hwmon sysfs entries for sensors that have
		 * meaningful data.
		 */
		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
			continue;

		/* Bail if any hwmon attr struct fails to initialize */
		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
		rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
		rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
		rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
		if (rc)
			goto err;
	}

	goto exit;

err:
	igb_sysfs_del_adapter(adapter);
exit:
	return rc;
}
Example #10
0
static int wpa_get_scan(PSDevice pDevice,
			struct viawget_wpa_param *param)
{
	struct viawget_scan_result *scan_buf;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	PWLAN_IE_SSID pItemSSID;
	PKnownBSS pBSS;
	PBYTE pBuf;
	int ret = 0;
	u16 count = 0;
	u16 ii, jj;
	long ldBm;

	/* bubble sort the BSS list by descending RSSI (mike) */
	PBYTE ptempBSS;

	ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
	if (ptempBSS == NULL) {
		pr_err("%s: bubble sort kmalloc failed\n", __func__);
		return -ENOMEM;
	}

	for (ii = 0; ii < MAX_BSS_NUM; ii++) {
		for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
			if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
			    ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI) &&
			     (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
				memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
				memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1], sizeof(KnownBSS));
				memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
			}
		}
	}

	kfree(ptempBSS);

 // printk("bubble sort result:\n");

	count = 0;
	pBSS = &(pMgmt->sBSSList[0]);
    for (ii = 0; ii < MAX_BSS_NUM; ii++) {
        pBSS = &(pMgmt->sBSSList[ii]);
        if (!pBSS->bActive)
            continue;
        count++;
    };

    pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC);

    if (pBuf == NULL) {
        ret = -ENOMEM;
        return ret;
    }
	scan_buf = (struct viawget_scan_result *)pBuf;
	for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
		pBSS = &(pMgmt->sBSSList[ii]);
		if (!pBSS->bActive)
			continue;
		if (jj >= count)
			break;
		memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
		pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
		memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
		scan_buf->ssid_len = pItemSSID->len;
		scan_buf->freq = frequency_list[pBSS->uChannel - 1];
		scan_buf->caps = pBSS->wCapInfo;	/* DavidWang for sharemode */

		RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
		if (-ldBm < 50)
			scan_buf->qual = 100;
		else if (-ldBm > 90)
			scan_buf->qual = 0;
		else
			scan_buf->qual = (40 - (-ldBm - 50)) * 100 / 40;

		scan_buf->noise = 0;
		scan_buf->level = ldBm;

		if (pBSS->wWPALen != 0) {
			scan_buf->wpa_ie_len = pBSS->wWPALen;
			memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
		}
		if (pBSS->wRSNLen != 0) {
			scan_buf->rsn_ie_len = pBSS->wRSNLen;
			memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
		}
		scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf +
				sizeof(struct viawget_scan_result));
		jj++;
	}

	if (jj < count)
		count = jj;

	if (copy_to_user(param->u.scan_results.buf, pBuf,
			 sizeof(struct viawget_scan_result) * count))
		ret = -EFAULT;
	param->u.scan_results.scan_count = count;
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.scan_results.scan_count = %d\n", count);

	kfree(pBuf);
	return ret;
}
Example #11
0
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * If the adapter isn't up and running then just set the
	 * new parameters and scurry for the exits.
	 */
	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	tx_ring = kcalloc(adapter->num_tx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!tx_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	rx_ring = kcalloc(adapter->num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring) {
		err = -ENOMEM;
		goto err_rx_setup;
	}

	ixgbevf_down(adapter);

	memcpy(tx_ring, adapter->tx_ring,
	       adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
	for (i = 0; i < adapter->num_tx_queues; i++) {
		tx_ring[i].count = new_tx_count;
		err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_tx_resources(adapter,
							  &tx_ring[i]);
			}
			goto err_tx_ring_setup;
		}
		tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
	}

	memcpy(rx_ring, adapter->rx_ring,
	       adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring[i].count = new_rx_count;
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter,
							  &rx_ring[i]);
			}
			goto err_rx_ring_setup;
		}
		rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
	}

	/*
	 * Only switch to new rings if all the prior allocations
	 * and ring setups have succeeded.
	 */
	kfree(adapter->tx_ring);
	adapter->tx_ring = tx_ring;
	adapter->tx_ring_count = new_tx_count;

	kfree(adapter->rx_ring);
	adapter->rx_ring = rx_ring;
	adapter->rx_ring_count = new_rx_count;

	/* success! */
	ixgbevf_up(adapter);

	goto clear_reset;

err_rx_ring_setup:
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_free_tx_resources(adapter, &tx_ring[i]);

err_tx_ring_setup:
	kfree(rx_ring);

err_rx_setup:
	kfree(tx_ring);

clear_reset:
	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}
Example #12
0
static int __init ion_dummy_init(void)
{
	int i, err;

	idev = ion_device_create(NULL);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
			GFP_KERNEL);
	if (!heaps)
		return -ENOMEM;


	/* Allocate a dummy carveout heap */
	carveout_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
				GFP_KERNEL);
	if (carveout_ptr)
		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
						virt_to_phys(carveout_ptr);
	else
		pr_err("ion_dummy: Could not allocate carveout\n");

	/* Allocate a dummy chunk heap */
	chunk_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
				GFP_KERNEL);
	if (chunk_ptr)
		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
	else
		pr_err("ion_dummy: Could not allocate chunk\n");

	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];

		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
							!heap_data->base)
			continue;

		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
			continue;

		heaps[i] = ion_heap_create(heap_data);
		if (IS_ERR_OR_NULL(heaps[i])) {
			err = heaps[i] ? PTR_ERR(heaps[i]) : -ENOMEM;
			goto err;
		}
		ion_device_add_heap(idev, heaps[i]);
	}
	return 0;
err:
	for (i = 0; i < dummy_ion_pdata.nr; ++i) {
		if (!IS_ERR_OR_NULL(heaps[i]))
			ion_heap_destroy(heaps[i]);
	}
	kfree(heaps);

	if (carveout_ptr) {
		free_pages_exact(carveout_ptr,
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
		carveout_ptr = NULL;
	}
	if (chunk_ptr) {
		free_pages_exact(chunk_ptr,
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
		chunk_ptr = NULL;
	}
	return err;
}
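The heaps[i] error handling needs care because PTR_ERR(NULL) evaluates to 0: a NULL return funneled straight through PTR_ERR() turns into a success code. A small sketch of the safe mapping:

#include <linux/err.h>

/* map an ERR_PTR-or-NULL result to a proper errno; PTR_ERR(NULL) would
 * be 0, i.e. "success", hence the explicit NULL branch */
static long checked_err(void *p)
{
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (!p)
		return -ENOMEM;
	return 0;
}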
Example #13
0
static long efi_runtime_query_capsulecaps(unsigned long arg)
{
    struct efi_querycapsulecapabilities __user *qcaps_user;
    struct efi_querycapsulecapabilities qcaps;
    efi_capsule_header_t *capsules;
    efi_status_t status;
    u64 max_size;
    int i, reset_type;
    int rv = 0;

    qcaps_user = (struct efi_querycapsulecapabilities __user *)arg;

    if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
        return -EFAULT;

    capsules = kcalloc(qcaps.capsule_count + 1,
                       sizeof(efi_capsule_header_t), GFP_KERNEL);
    if (!capsules)
        return -ENOMEM;

    for (i = 0; i < qcaps.capsule_count; i++) {
        efi_capsule_header_t *c;
        /*
         * We cannot dereference qcaps.capsule_header_array directly to
         * obtain the address of the capsule as it resides in user
         * space.
         */
        if (get_user(c, qcaps.capsule_header_array + i)) {
            rv = -EFAULT;
            goto out;
        }
        if (copy_from_user(&capsules[i], c,
                           sizeof(efi_capsule_header_t))) {
            rv = -EFAULT;
            goto out;
        }
    }

    qcaps.capsule_header_array = &capsules;

    status = efi.query_capsule_caps((efi_capsule_header_t **)
                                    qcaps.capsule_header_array,
                                    qcaps.capsule_count,
                                    &max_size, &reset_type);

    if (put_user(status, qcaps.status)) {
        rv = -EFAULT;
        goto out;
    }

    if (status != EFI_SUCCESS) {
        rv = -EINVAL;
        goto out;
    }

    if (put_user(max_size, qcaps.maximum_capsule_size)) {
        rv = -EFAULT;
        goto out;
    }

    if (put_user(reset_type, qcaps.reset_type))
        rv = -EFAULT;

out:
    kfree(capsules);
    return rv;
}
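The comment inside the copy loop is the crux: qcaps.capsule_header_array is a user-space array of user-space pointers, so each element takes two steps, get_user() for pointer i and copy_from_user() for the struct it points at. The shape in isolation (names invented for the sketch):

#include <linux/uaccess.h>

struct item {
	int v;
};

/* uarr is a user-space array of user-space pointers to struct item */
static int fetch_item(struct item __user *const __user *uarr, int i,
		      struct item *out)
{
	struct item __user *p;

	if (get_user(p, uarr + i))		/* step 1: the pointer */
		return -EFAULT;
	if (copy_from_user(out, p, sizeof(*out)))	/* step 2: the data */
		return -EFAULT;
	return 0;
}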
Example #14
0
static int dvb_create_media_entity(struct dvb_device *dvbdev,
				   int type, int demux_sink_pads)
{
	int i, ret, npads;

	switch (type) {
	case DVB_DEVICE_FRONTEND:
		npads = 2;
		break;
	case DVB_DEVICE_DVR:
		ret = dvb_create_tsout_entity(dvbdev, DVR_TSOUT,
					      demux_sink_pads);
		return ret;
	case DVB_DEVICE_DEMUX:
		npads = 1 + demux_sink_pads;
		ret = dvb_create_tsout_entity(dvbdev, DEMUX_TSOUT,
					      demux_sink_pads);
		if (ret < 0)
			return ret;
		break;
	case DVB_DEVICE_CA:
		npads = 2;
		break;
	case DVB_DEVICE_NET:
		/*
		 * We should be creating entities for the MPE/ULE
		 * decapsulation hardware (or software implementation).
		 *
		 * However, the number of pads for the MPE/ULE decaps may
		 * not be fixed. As we don't yet have dynamic support for
		 * PADs at the Media Controller, let's not create the decap
		 * entities yet.
		 */
		return 0;
	default:
		return 0;
	}

	dvbdev->entity = kzalloc(sizeof(*dvbdev->entity), GFP_KERNEL);
	if (!dvbdev->entity)
		return -ENOMEM;

	dvbdev->entity->name = dvbdev->name;

	if (npads) {
		dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
				       GFP_KERNEL);
		if (!dvbdev->pads)
			return -ENOMEM;
	}

	switch (type) {
	case DVB_DEVICE_FRONTEND:
		dvbdev->entity->function = MEDIA_ENT_F_DTV_DEMOD;
		dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
		dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
		break;
	case DVB_DEVICE_DEMUX:
		dvbdev->entity->function = MEDIA_ENT_F_TS_DEMUX;
		dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
		for (i = 1; i < npads; i++)
			dvbdev->pads[i].flags = MEDIA_PAD_FL_SOURCE;
		break;
	case DVB_DEVICE_CA:
		dvbdev->entity->function = MEDIA_ENT_F_DTV_CA;
		dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
		dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
		break;
	default:
		/* Should never happen, as the first switch prevents it */
		kfree(dvbdev->entity);
		kfree(dvbdev->pads);
		dvbdev->entity = NULL;
		dvbdev->pads = NULL;
		return 0;
	}

	if (npads) {
		ret = media_entity_pads_init(dvbdev->entity, npads, dvbdev->pads);
		if (ret)
			return ret;
	}
	ret = media_device_register_entity(dvbdev->adapter->mdev,
					   dvbdev->entity);
	if (ret)
		return ret;

	printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
		__func__, dvbdev->entity->name);

	return 0;
}
Example #15
0
/**
 * snd_ac97_pcm_assign - assign AC97 slots to given PCM streams
 * @bus: the ac97 bus instance
 * @pcms_count: count of PCMs to be assigned
 * @pcms: PCMs to be assigned
 *
 * It assigns available AC97 slots for given PCMs. If none or only
 * some slots are available, pcm->xxx.slots and pcm->xxx.rslots[] members
 * are reduced and might be zero.
 */
int snd_ac97_pcm_assign(struct snd_ac97_bus *bus,
			unsigned short pcms_count,
			const struct ac97_pcm *pcms)
{
	int i, j, k;
	const struct ac97_pcm *pcm;
	struct ac97_pcm *rpcms, *rpcm;
	unsigned short avail_slots[2][4];
	unsigned char rate_table[2][4];
	unsigned short tmp, slots;
	unsigned short spdif_slots[4];
	unsigned int rates;
	struct snd_ac97 *codec;

	rpcms = kcalloc(pcms_count, sizeof(struct ac97_pcm), GFP_KERNEL);
	if (rpcms == NULL)
		return -ENOMEM;
	memset(avail_slots, 0, sizeof(avail_slots));
	memset(rate_table, 0, sizeof(rate_table));
	memset(spdif_slots, 0, sizeof(spdif_slots));
	for (i = 0; i < 4; i++) {
		codec = bus->codec[i];
		if (!codec)
			continue;
		avail_slots[0][i] = get_pslots(codec, &rate_table[0][i], &spdif_slots[i]);
		avail_slots[1][i] = get_cslots(codec);
		if (!(codec->scaps & AC97_SCAP_INDEP_SDIN)) {
			for (j = 0; j < i; j++) {
				if (bus->codec[j])
					avail_slots[1][i] &= ~avail_slots[1][j];
			}
		}
	}
	/* first step - exclusive devices */
	for (i = 0; i < pcms_count; i++) {
		pcm = &pcms[i];
		rpcm = &rpcms[i];
		/* low-level driver thinks that it's more clever */
		if (pcm->copy_flag) {
			*rpcm = *pcm;
			continue;
		}
		rpcm->stream = pcm->stream;
		rpcm->exclusive = pcm->exclusive;
		rpcm->spdif = pcm->spdif;
		rpcm->private_value = pcm->private_value;
		rpcm->bus = bus;
		rpcm->rates = ~0;
		slots = pcm->r[0].slots;
		for (j = 0; j < 4 && slots; j++) {
			if (!bus->codec[j])
				continue;
			rates = ~0;
			if (pcm->spdif && pcm->stream == 0)
				tmp = spdif_slots[j];
			else
				tmp = avail_slots[pcm->stream][j];
			if (pcm->exclusive) {
				/* exclusive access */
				tmp &= slots;
				for (k = 0; k < i; k++) {
					if (rpcm->stream == rpcms[k].stream)
						tmp &= ~rpcms[k].r[0].rslots[j];
				}
			} else {
				/* non-exclusive access */
				tmp &= pcm->r[0].slots;
			}
			if (tmp) {
				rpcm->r[0].rslots[j] = tmp;
				rpcm->r[0].codec[j] = bus->codec[j];
				rpcm->r[0].rate_table[j] = rate_table[pcm->stream][j];
				if (bus->no_vra)
					rates = SNDRV_PCM_RATE_48000;
				else
					rates = get_rates(rpcm, j, tmp, 0);
				if (pcm->exclusive)
					avail_slots[pcm->stream][j] &= ~tmp;
			}
			slots &= ~tmp;
			rpcm->r[0].slots |= tmp;
			rpcm->rates &= rates;
		}
		/* for double rate, we check the first codec only */
		if (pcm->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    bus->codec[0] && (bus->codec[0]->flags & AC97_DOUBLE_RATE) &&
		    rate_table[pcm->stream][0] == 0) {
			tmp = (1<<AC97_SLOT_PCM_LEFT) | (1<<AC97_SLOT_PCM_RIGHT) |
			      (1<<AC97_SLOT_PCM_LEFT_0) | (1<<AC97_SLOT_PCM_RIGHT_0);
			if ((tmp & pcm->r[1].slots) == tmp) {
				rpcm->r[1].slots = tmp;
				rpcm->r[1].rslots[0] = tmp;
				rpcm->r[1].rate_table[0] = 0;
				rpcm->r[1].codec[0] = bus->codec[0];
				if (pcm->exclusive)
					avail_slots[pcm->stream][0] &= ~tmp;
				if (bus->no_vra)
					rates = SNDRV_PCM_RATE_96000;
				else
					rates = get_rates(rpcm, 0, tmp, 1);
				rpcm->rates |= rates;
			}
		}
		if (rpcm->rates == ~0)
			rpcm->rates = 0; /* not used */
	}
	bus->pcms_count = pcms_count;
	bus->pcms = rpcms;
	return 0;
}
Example #16
0
/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
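pci_enable_msix_range(pdev, entries, minvec, maxvec) wraps the retry loop older drivers open-coded: it returns the number of vectors actually granted, somewhere in [minvec, maxvec], or a negative errno if even minvec cannot be satisfied. A hedged sketch of the minimal calling pattern:

#include <linux/pci.h>
#include <linux/slab.h>

static int enable_msix(struct pci_dev *pdev, struct msix_entry **out,
		       int minvec, int maxvec)
{
	struct msix_entry *entries;
	int i, got;

	entries = kcalloc(maxvec, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < maxvec; i++)
		entries[i].entry = i;

	/* returns a count in [minvec, maxvec] or a negative errno */
	got = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (got < 0) {
		kfree(entries);
		return got;
	}

	*out = entries;
	return got;		/* caller must size its vectors to 'got' */
}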
Example #17
0
static int __devinit adp8870_led_probe(struct i2c_client *client)
{
    struct adp8870_backlight_platform_data *pdata =
            client->dev.platform_data;
    struct adp8870_bl *data = i2c_get_clientdata(client);
    struct adp8870_led *led, *led_dat;
    struct led_info *cur_led;
    int ret, i;


    led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
    if (led == NULL) {
        dev_err(&client->dev, "failed to alloc memory\n");
        return -ENOMEM;
    }

    ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
    if (ret)
        goto err_free;

    ret = adp8870_write(client, ADP8870_ISCT1,
                        (pdata->led_on_time & 0x3) << 6);
    if (ret)
        goto err_free;

    ret = adp8870_write(client, ADP8870_ISCF,
                        FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
    if (ret)
        goto err_free;

    for (i = 0; i < pdata->num_leds; ++i) {
        cur_led = &pdata->leds[i];
        led_dat = &led[i];

        led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;

        if (led_dat->id > 7 || led_dat->id < 1) {
            dev_err(&client->dev, "Invalid LED ID %d\n",
                    led_dat->id);
            ret = -EINVAL;
            goto err;
        }

        if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
            dev_err(&client->dev, "LED %d used by Backlight\n",
                    led_dat->id);
            ret = -EBUSY;
            goto err;
        }

        led_dat->cdev.name = cur_led->name;
        led_dat->cdev.default_trigger = cur_led->default_trigger;
        led_dat->cdev.brightness_set = adp8870_led_set;
        led_dat->cdev.brightness = LED_OFF;
        led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
        led_dat->client = client;
        led_dat->new_brightness = LED_OFF;
        INIT_WORK(&led_dat->work, adp8870_led_work);

        ret = led_classdev_register(&client->dev, &led_dat->cdev);
        if (ret) {
            dev_err(&client->dev, "failed to register LED %d\n",
                    led_dat->id);
            goto err;
        }

        ret = adp8870_led_setup(led_dat);
        if (ret) {
            dev_err(&client->dev, "failed to write\n");
            i++;
            goto err;
        }
    }

    data->led = led;

    return 0;

err:
    for (i = i - 1; i >= 0; --i) {
        led_classdev_unregister(&led[i].cdev);
        cancel_work_sync(&led[i].work);
    }

err_free:
    kfree(led);

    return ret;
}
Example #18
0
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN |
					  CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_next = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
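Two details worth noting above: the descriptors are linked into a ring purely by the modulo in (i + 1) % TX_BD_NUM, so the last entry's next pointer wraps to the first; and dma_zalloc_coherent() was later removed because dma_alloc_coherent() returns zeroed memory on modern kernels. A hedged sketch of the same allocation in the newer style:

	/* on newer kernels dma_alloc_coherent() already zeroes the buffer,
	 * so the _zalloc_ variant is no longer needed */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;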
Example #19
0
static int __devinit pm8xxx_led_probe(struct platform_device *pdev)
{
	const struct pm8xxx_led_platform_data *pdata = pdev->dev.platform_data;
	struct pm8xxx_led_configure *curr_led;
	struct pm8xxx_led_data *led, *led_dat;
	int i, j, ret = -ENOMEM;

	if (pdata == NULL) {
		LED_ERR("platform data not supplied\n");
		return -EINVAL;
	}

	led = kcalloc(pdata->num_leds + 1, sizeof(*led), GFP_KERNEL);
	if (led == NULL) {
		LED_ERR("failed to alloc memory\n");
		return -ENOMEM;
	}
	wake_lock_init(&pmic_led_wake_lock, WAKE_LOCK_SUSPEND, "pmic_led");
	g_led_work_queue = create_workqueue("pm8xxx-led");
	if (g_led_work_queue == NULL) {
		LED_ERR("failed to create workqueue\n");
		goto err_create_work_queue;
	}

	for (i = 0; i < pdata->num_leds; i++) {
		curr_led			= &pdata->leds[i];
		led_dat				= &led[i];
		led_dat->cdev.name		= curr_led->name;
		led_dat->id     		= curr_led->flags;
		led_dat->bank			= curr_led->flags;
		led_dat->function_flags		= curr_led->function_flags;
		led_dat->start_index		= curr_led->start_index;
		led_dat->duty_time_ms		= curr_led->duty_time_ms;
		led_dat->period_us		= curr_led->period_us;
		led_dat->duites_size		= curr_led->duites_size;
		led_dat->lut_flag		= curr_led->lut_flag;
		led_dat->out_current		= curr_led->out_current;
		led_dat->duties			= &(curr_led->duties[0]);
		led_dat->led_sync		= curr_led->led_sync;
		led_dat->pwm_led 		= pwm_request(led_dat->bank, led_dat->cdev.name);
		if (curr_led->duties[1]) {
			for (j = 0; j < 64; j++)
				dutys_array[j] = *(led_dat->duties + j);
		}

		if( curr_led->pwm_coefficient > 0 )
			led_dat->pwm_coefficient	= curr_led->pwm_coefficient;
		else
			led_dat->pwm_coefficient	= 100;
		switch (led_dat->id) {
		case PM8XXX_ID_GPIO24:
		case PM8XXX_ID_GPIO25:
		case PM8XXX_ID_GPIO26:
			led_dat->cdev.brightness_set = pm8xxx_led_gpio_set;
			if (curr_led->gpio_status_switch != NULL)
				led_dat->gpio_status_switch = curr_led->gpio_status_switch;
			break;
		case PM8XXX_ID_LED_0:
		case PM8XXX_ID_LED_1:
		case PM8XXX_ID_LED_2:
			led_dat->cdev.brightness_set    = pm8xxx_led_current_set;
			if (led_dat->function_flags & LED_PWM_FUNCTION) {
				led_dat->reg		= pm8xxxx_led_pwm_mode(led_dat->id);
				INIT_DELAYED_WORK(&led[i].fade_delayed_work, led_fade_do_work);
			} else
				led_dat->reg		= PM8XXX_LED_MODE_MANUAL;
			break;
		case PM8XXX_ID_LED_KB_LIGHT:
			break;
		}
		led_dat->cdev.brightness	= LED_OFF;
		led_dat->dev			= &pdev->dev;

		ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
		if (ret) {
			LED_ERR("unable to register led %d,ret=%d\n", led_dat->id, ret);
			goto err_register_led_cdev;
		}

		if (led_dat->id >= PM8XXX_ID_LED_2 && led_dat->id <= PM8XXX_ID_LED_0) {
			ret = device_create_file(led_dat->cdev.dev, &dev_attr_currents);
			if (ret < 0) {
				LED_ERR("%s: Failed to create %d attr currents\n", __func__, i);
				goto err_register_attr_currents;
			}
		}

		if (led_dat->id >= PM8XXX_ID_LED_2 && led_dat->id <= PM8XXX_ID_LED_0) {
			ret = device_create_file(led_dat->cdev.dev, &dev_attr_lut_coefficient);
			if (ret < 0) {
				LED_ERR("%s: Failed to create %d attr lut_coefficient\n", __func__, i);
				goto err_register_attr_lut_coefficient;
			}
		}

		if ((led_dat->id <= PM8XXX_ID_GPIO26) || (led_dat->id <= PM8XXX_ID_LED_2) ||
		    (led_dat->id <= PM8XXX_ID_LED_1)) {
			ret = device_create_file(led_dat->cdev.dev, &dev_attr_pwm_coefficient);
			if (ret < 0) {
				LED_ERR("%s: Failed to create %d attr pwm_coefficient\n", __func__, i);
				goto err_register_attr_pwm_coefficient;
			}
		}
		if (led_dat->function_flags & LED_BLINK_FUNCTION) {
			INIT_DELAYED_WORK(&led[i].blink_delayed_work, led_blink_do_work);
			ret = device_create_file(led_dat->cdev.dev, &dev_attr_blink);
			if (ret < 0) {
				LED_ERR("%s: Failed to create %d attr blink\n", __func__, i);
				goto err_register_attr_blink;
			}

			ret = device_create_file(led_dat->cdev.dev, &dev_attr_off_timer);
			if (ret < 0) {
				LED_ERR("%s: Failed to create %d attr off timer\n", __func__, i);
				goto err_register_attr_off_timer;
			}
			alarm_init(&led[i].led_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, led_alarm_handler);
			INIT_WORK(&led[i].led_work, led_work_func); 
		}

		if (!strcmp(led_dat->cdev.name, "button-backlight")) {
			for_key_led_data = led_dat;
		}
		if (!strcmp(led_dat->cdev.name, "green-back")) {
			LED_INFO("%s: green-back, 000 probe, led_dat = %x\n", __func__, (unsigned int)led_dat);
			green_back_led_data = led_dat;
		}
		if (!strcmp(led_dat->cdev.name, "amber-back")) {
			LED_INFO("%s: amber-back\n", __func__);
			amber_back_led_data = led_dat;
		}

	}

	pm8xxx_leds = led;

	platform_set_drvdata(pdev, led);

	return 0;

err_register_attr_off_timer:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			if (led[i].function_flags & LED_BLINK_FUNCTION)
				device_remove_file(led[i].cdev.dev, &dev_attr_off_timer);
		}
	}
	i = pdata->num_leds;
err_register_attr_blink:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			if (led[i].function_flags & LED_BLINK_FUNCTION)
				device_remove_file(led[i].cdev.dev, &dev_attr_blink);
		}
	}
	i = pdata->num_leds;
err_register_attr_pwm_coefficient:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			if (led[i].function_flags <= PM8XXX_ID_GPIO26)
				device_remove_file(led[i].cdev.dev, &dev_attr_pwm_coefficient);
		}
	}
	i = pdata->num_leds;
err_register_attr_lut_coefficient:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			if (led[i].function_flags >= PM8XXX_ID_LED_2 && led[i].function_flags <= PM8XXX_ID_LED_0)
				device_remove_file(led[i].cdev.dev, &dev_attr_lut_coefficient);
		}
	}
	i = pdata->num_leds;

err_register_attr_currents:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			if (led[i].function_flags >= PM8XXX_ID_LED_2 && led[i].function_flags <= PM8XXX_ID_LED_0)
				device_remove_file(led[i].cdev.dev, &dev_attr_currents);
		}
	}
	i = pdata->num_leds;
err_register_led_cdev:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			pwm_free(led[i].pwm_led);
			led_classdev_unregister(&led[i].cdev);
		}
	}
	destroy_workqueue(g_led_work_queue);
err_create_work_queue:
	kfree(led);
	wake_lock_destroy(&pmic_led_wake_lock);
	return ret;
}
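The cascade of unwind labels above is the kernel's standard reverse-order cleanup idiom, spelled out once per attribute. A minimal sketch of the pattern, with hypothetical step_*/undo_* helpers standing in for the real registration calls:

/* Hypothetical helpers; each step_x() acquires one resource. */
static int  step_a(void) { return 0; }
static int  step_b(void) { return 0; }
static int  step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int demo_probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = step_b();
	if (ret)
		goto err_a;

	ret = step_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	undo_b();	/* release in reverse order of acquisition */
err_a:
	undo_a();
	return ret;
}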
Example #20
0
static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;
			emeta->buf = vmalloc(lm->emeta_len[0]);
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;
			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
		}
		if (!emeta->buf) {
			kfree(emeta);
			goto fail_free_emeta;
		}

		emeta->nr_entries = lm->emeta_sec[0];
		l_mg->eline_meta[i] = emeta;
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}
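The size-based kmalloc()/vmalloc() split above, together with the recorded emeta_alloc_type, is exactly what kvmalloc()/kvfree() encapsulate on kernels that provide them. A sketch under that assumption (alloc_meta_buf/free_meta_buf are illustrative names):

#include <linux/mm.h>
#include <linux/slab.h>	/* kvmalloc(), kvfree() */

/* kvmalloc() tries kmalloc first and falls back to vmalloc for large
 * sizes; kvfree() releases either kind, so no alloc-type flag is
 * needed in the error path. */
static void *alloc_meta_buf(size_t len)
{
	return kvmalloc(len, GFP_KERNEL);
}

static void free_meta_buf(void *buf)
{
	kvfree(buf);
}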
Example #21
0
static int act8931_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	const struct of_device_id *match;
	struct act8931 *act8931;
	struct act8931_board *pdata = NULL;
	struct regulator_init_data *reg_data;
	struct regulator_config config = { };
	const char *rail_name = NULL;
	struct regulator_dev *rdev;
	u8 val;
	int i, ret;

	pr_info("%s,line=%d\n", __func__, __LINE__);

	if (i2c->dev.of_node) {
		match = of_match_device(act8931_of_match, &i2c->dev);
		if (!match) {
			pr_err("Failed to find matching dt id\n");
			return -EINVAL;
		}
	}

	act8931 = devm_kzalloc(&i2c->dev, sizeof(struct act8931), GFP_KERNEL);
	if (act8931 == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	act8931->i2c = i2c;
	act8931->dev = &i2c->dev;
	g_act8931 = act8931;

	mutex_init(&act8931->io_lock);

	ret = act8931_reg_read(act8931, 0x22);
	if ((ret < 0) || (ret == 0xff)) {
		pr_err("The device is not act8931\n");
		return -ENODEV;
	}

	if (act8931->dev->of_node)
		pdata = act8931_parse_dt(act8931);
	if (!pdata) {
		pr_err("act8931: no platform data\n");
		ret = -EINVAL;
		goto err;
	}

	ret = act8931_reg_read(act8931, 0x01);
	if (ret < 0)
		goto err;
	ret = act8931_set_bits(act8931, 0x01, (0x1<<5) | (0x1<<0), (0x1<<0));
	if (ret < 0) {
		pr_err("act8931 set 0x01 error!\n");
		goto err;
	}

	/* Initialize charge status */
	ret = act8931_reg_read(act8931, 0x78);
	if (ret < 0)
		goto err;
	val = ret;
	act8931_charge_det = (val & INDAT_MASK) ? 1 : 0;
	act8931_charge_ok = (val & CHGDAT_MASK) ? 1 : 0;
	DBG(act8931_charge_det ? "connect!" : "disconnect!");
	DBG(act8931_charge_ok ? "charge ok!\n" : "charging or discharge!\n");

	ret = act8931_set_bits(act8931, 0x78, INSTAT_MASK | CHGSTAT_MASK,
			       INSTAT_MASK | CHGSTAT_MASK);
	if (ret < 0) {
		pr_err("act8931 set 0x78 error!\n");
		goto err;
	}

	ret = act8931_set_bits(act8931, 0x79, INCON_MASK | CHGEOCIN_MASK
			       | INDIS_MASK | CHGEOCOUT_MASK, INCON_MASK
			       | CHGEOCIN_MASK | INDIS_MASK | CHGEOCOUT_MASK);
	if (ret < 0) {
		pr_err("act8931 set 0x79 error!\n");
		goto err;
	}

	if (pdata) {
		act8931->num_regulators = ACT8931_NUM_REGULATORS;
		act8931->rdev = kcalloc(ACT8931_NUM_REGULATORS,
					sizeof(struct regulator_dev *),
					GFP_KERNEL);
		if (!act8931->rdev)
			return -ENOMEM;

		/* Instantiate the regulators */
		for (i = 0; i < ACT8931_NUM_REGULATORS; i++) {
			reg_data = pdata->act8931_init_data[i];
			if (!reg_data)
				continue;
			if (reg_data->constraints.name)
				rail_name = reg_data->constraints.name;
			else
				rail_name = regulators[i].name;
			reg_data->supply_regulator = rail_name;

			config.dev = act8931->dev;
			config.driver_data = act8931;
			if (act8931->dev->of_node)
				config.of_node = pdata->of_node[i];
			config.init_data = reg_data;

			rdev = regulator_register(&regulators[i], &config);
			if (IS_ERR(rdev)) {
				pr_err("failed to register %d regulator\n", i);
				continue;
			}
			act8931->rdev[i] = rdev;
		}
	}

	if (pdata->pm_off && !pm_power_off)
		pm_power_off = act8931_device_shutdown;

	act8931->pwr_hold_gpio = pdata->pwr_hold_gpio;
	if (act8931->pwr_hold_gpio) {
		ret = gpio_request(act8931->pwr_hold_gpio, "act8931 pmic_hold");
		if (ret < 0) {
			pr_err("Failed to request gpio %d with ret %d\n",
			       act8931->pwr_hold_gpio, ret);
			goto err;
		}
		gpio_direction_output(act8931->pwr_hold_gpio, 1);
		ret = gpio_get_value(act8931->pwr_hold_gpio);
		pr_info("%s: act8931_pmic_hold=%x\n", __func__, ret);
	}

	ret = gpio_request(pdata->irq_gpio, "act8931 irq");
	if (ret) {
		pr_err("act8931 irq_gpio request fail\n");
		goto err;
	}
	gpio_direction_input(pdata->irq_gpio);

	act8931->irq = gpio_to_irq(pdata->irq_gpio);
	ret = request_threaded_irq(act8931->irq, NULL, act8931_irq_thread,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   i2c->dev.driver->name, act8931);
	if (ret < 0) {
		pr_err("request act8931 irq fail\n");
		goto err;
	}
	enable_irq_wake(act8931->irq);

	i2c_set_clientdata(i2c, act8931);

	return 0;

err:
	return ret;
}
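Note that the single err: label above releases none of the GPIOs or the IRQ acquired earlier, so any late failure leaks them. The managed devm_* variants tie such resources to the device and remove the manual unwind entirely. A hedged sketch, with illustrative names and an assumed "irq-gpio" DT property:

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	/* handle the PMIC interrupt here */
	return IRQ_HANDLED;
}

static int demo_pmic_probe(struct i2c_client *i2c)
{
	struct device *dev = &i2c->dev;
	int irq_gpio, ret;

	irq_gpio = of_get_named_gpio(dev->of_node, "irq-gpio", 0);
	if (irq_gpio < 0)
		return irq_gpio;

	/* devm_* resources are released automatically on probe failure
	 * or unbind, so no goto chain is required. */
	ret = devm_gpio_request_one(dev, irq_gpio, GPIOF_IN, "pmic irq");
	if (ret)
		return ret;

	return devm_request_threaded_irq(dev, gpio_to_irq(irq_gpio), NULL,
					 demo_irq_thread,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 dev_name(dev), NULL);
}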
Example #22
0
static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->nr_luns - i;
	lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
							geo->sec_per_blk);
	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		ret = -EINVAL;
		goto fail;
	}

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}
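The bitmap sizing in pblk_lines_init() (BITS_TO_LONGS(bits) * sizeof(long)) and the bb_template seeding loop follow the standard kernel bitmap recipe. A small self-contained sketch of the same shape, with hypothetical sizes:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *alloc_seeded_bitmap(unsigned int nbits,
					  unsigned int stride,
					  unsigned int span)
{
	/* BITS_TO_LONGS() rounds nbits up to whole longs, matching the
	 * sec/blk/lun bitmap sizing above. */
	unsigned long *map = kzalloc(BITS_TO_LONGS(nbits) * sizeof(long),
				     GFP_KERNEL);
	unsigned int i;

	if (!map)
		return NULL;

	/* Same shape as the bb_template loop: mark 'span' bits every
	 * 'stride' bits. */
	for (i = 0; i + span <= nbits; i += stride)
		bitmap_set(map, i, span);

	return map;
}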
Example #23
0
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
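The #if BITS_PER_LONG == 32 block above is a hand-rolled overflow check for sizeof(*r) + yes * sizeof(struct crush_rule_step). On kernels that provide struct_size() (linux/overflow.h), the same check works on all architectures. A sketch under that assumption, with an illustrative flexible-array struct mirroring crush_rule:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_step {
	u32 op, arg1, arg2;
};

struct demo_rule {
	u32 len;
	struct demo_step steps[];	/* flexible array, as in crush_rule */
};

static struct demo_rule *demo_alloc_rule(u32 nsteps)
{
	struct demo_rule *r;

	/* struct_size() saturates to SIZE_MAX on overflow, so kmalloc()
	 * fails cleanly instead of returning a truncated buffer. */
	r = kmalloc(struct_size(r, steps, nsteps), GFP_NOFS);
	if (r)
		r->len = nsteps;
	return r;
}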
Example #24
0
/**
 * rpc_alloc_iostats - allocate an rpc_iostats structure
 * @clnt: RPC program, version, and xprt
 *
 */
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
	return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
}
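rpc_alloc_iostats() is the kcalloc() pattern in its purest form: a zeroed array whose element count comes from elsewhere. The point of kcalloc(n, size, flags) over kzalloc(n * size, flags) is the overflow-checked multiplication. A small sketch with an illustrative stats struct:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_iostats {
	u64 ops;
	u64 bytes;
};

static struct demo_iostats *demo_alloc_stats(unsigned int nprocs)
{
	/* If nprocs * sizeof(struct demo_iostats) would overflow,
	 * kcalloc() returns NULL rather than a too-small allocation. */
	return kcalloc(nprocs, sizeof(struct demo_iostats), GFP_KERNEL);
}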
Example #25
0
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;

	if (!cs->num_chunks)
		return 0;

	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user *chunk_ptr;
		struct drm_radeon_cs_chunk user_chunk;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
		} else {
			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
				kfree(p->chunks[i].kpage[0]);
				kfree(p->chunks[i].kpage[1]);
				return -ENOMEM;
			}
			p->chunks[i].kpage_idx[0] = -1;
			p->chunks[i].kpage_idx[1] = -1;
			p->chunks[i].last_copied_page = -1;
			p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
		}
	}
	if (p->chunk_ib_idx != -1 &&
	    p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
		DRM_ERROR("cs IB too big: %d\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}
	return 0;
}
Example #26
0
static int crtc_crc_open(struct inode *inode, struct file *filep)
{
	struct drm_crtc *crtc = inode->i_private;
	struct drm_crtc_crc *crc = &crtc->crc;
	struct drm_crtc_crc_entry *entries = NULL;
	size_t values_cnt;
	int ret = 0;

	if (drm_drv_uses_atomic_modeset(crtc->dev)) {
		ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
		if (ret)
			return ret;

		if (!crtc->state->active)
			ret = -EIO;
		drm_modeset_unlock(&crtc->mutex);

		if (ret)
			return ret;
	}

	spin_lock_irq(&crc->lock);
	if (!crc->opened)
		crc->opened = true;
	else
		ret = -EBUSY;
	spin_unlock_irq(&crc->lock);

	if (ret)
		return ret;

	ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
	if (ret)
		goto err;

	if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
		ret = -EINVAL;
		goto err_disable;
	}

	if (WARN_ON(values_cnt == 0)) {
		ret = -EINVAL;
		goto err_disable;
	}

	entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		ret = -ENOMEM;
		goto err_disable;
	}

	spin_lock_irq(&crc->lock);
	crc->entries = entries;
	crc->values_cnt = values_cnt;

	/*
	 * Only return once we got a first frame, so userspace doesn't have to
	 * guess when this particular piece of HW will be ready to start
	 * generating CRCs.
	 */
	ret = wait_event_interruptible_lock_irq(crc->wq,
						crtc_crc_data_count(crc),
						crc->lock);
	spin_unlock_irq(&crc->lock);

	if (ret)
		goto err_disable;

	return 0;

err_disable:
	crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
err:
	spin_lock_irq(&crc->lock);
	crtc_crc_cleanup(crc);
	spin_unlock_irq(&crc->lock);
	return ret;
}
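crtc_crc_open() deliberately blocks until the first CRC entry arrives, so a userspace reader never races the hardware. A hedged userspace sketch against the documented debugfs layout (the card/CRTC indices are illustrative, and a source must first be written to crc/control):

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f;

	/* Opening "data" arms CRC generation; each line is
	 * "frame crc1 crc2 ..." for one frame. */
	f = fopen("/sys/kernel/debug/dri/0/crtc-0/crc/data", "r");
	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f))
		printf("%s", line);

	fclose(f);
	return 0;
}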
Example #27
0
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter*/

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}
Example #28
0
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}
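The ll/sc loop built at lbl_incready is an atomic increment of ready_count followed by a comparison against the online-VPE count, bracketed by ordering barriers. In plain kernel C the same step would look like this (a sketch; the names are illustrative):

#include <linux/atomic.h>
#include <linux/types.h>

/* Returns true when the caller is the last VPE to become ready,
 * mirroring the beq against r_online in the generated code.
 * atomic_inc_return() is fully ordered, standing in for the
 * uasm_i_sync() barriers around the ll/sc sequence. */
static bool demo_last_vpe_ready(atomic_t *ready_count, int online)
{
	return atomic_inc_return(ready_count) == online;
}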
Example #29
0
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
			 const struct fw_img *fw,
			 struct iwl_context_info_dram *ctxt_dram)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	if (WARN(dram->paging,
		 "paging shouldn't already be initialized (%d pages)\n",
		 dram->paging_cnt))
		iwl_pcie_ctxt_info_free_paging(trans);

	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
	/* add 1 due to separator */
	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);

	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;
	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
	if (!dram->paging)
		return -ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						   &fw->sec[dram->fw_cnt + 1],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
						   &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] =
			cpu_to_le64(dram->paging[i].physical);
		dram->paging_cnt++;
	}

	return 0;
}
Example #30
0
static int __init init_msp_flash(void)
{
	int i, j, ret = -ENOMEM;
	int offset, coff;
	char *env;
	int pcnt;
	char flash_name[] = "flash0";
	char part_name[] = "flash0_0";
	unsigned addr, size;

	
	if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
	    (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
		printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
		return -ENXIO;
	}

	
	for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
		flash_name[5] = '0' + fcnt + 1;

	if (fcnt < 1)
		return -ENXIO;

	printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);

	msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
	if (!msp_flash)
		return -ENOMEM;

	msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
	if (!msp_parts)
		goto free_msp_flash;

	msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
	if (!msp_maps)
		goto free_msp_parts;

	
	for (i = 0; i < fcnt; i++) {
		
		part_name[5] = '0' + i;
		part_name[7] = '0';
		for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
			part_name[7] = '0' + pcnt + 1;

		if (pcnt == 0) {
			printk(KERN_NOTICE "Skipping flash device %d "
				"(no partitions defined)\n", i);
			continue;
		}

		msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
				       GFP_KERNEL);
		if (!msp_parts[i])
			goto cleanup_loop;

		
		flash_name[5] = '0' + i;
		env = prom_getenv(flash_name);

		if (sscanf(env, "%x:%x", &addr, &size) < 2) {
			ret = -ENXIO;
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}
		addr = CPHYSADDR(addr);

		printk(KERN_NOTICE
			"MSP flash device \"%s\": 0x%08x at 0x%08x\n",
			flash_name, size, addr);
		
		msp_maps[i].size = size;
		msp_maps[i].phys = addr;

		
		if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
			size = CONFIG_MSP_FLASH_MAP_LIMIT;

		msp_maps[i].virt = ioremap(addr, size);
		if (msp_maps[i].virt == NULL) {
			ret = -ENXIO;
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}

		msp_maps[i].bankwidth = 1;
		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
		if (!msp_maps[i].name) {
			iounmap(msp_maps[i].virt);
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}

		strncpy(msp_maps[i].name, flash_name, 7);

		for (j = 0; j < pcnt; j++) {
			part_name[5] = '0' + i;
			part_name[7] = '0' + j;

			env = prom_getenv(part_name);

			if (sscanf(env, "%x:%x:%n", &offset, &size,
						&coff) < 2) {
				ret = -ENXIO;
				kfree(msp_maps[i].name);
				iounmap(msp_maps[i].virt);
				kfree(msp_parts[i]);
				goto cleanup_loop;
			}

			msp_parts[i][j].size = size;
			msp_parts[i][j].offset = offset;
			msp_parts[i][j].name = env + coff;
		}

		
		simple_map_init(&msp_maps[i]);
		msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
		if (msp_flash[i]) {
			msp_flash[i]->owner = THIS_MODULE;
			add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
		} else {
			printk(KERN_ERR "map probe failed for flash\n");
			ret = -ENXIO;
			kfree(msp_maps[i].name);
			iounmap(msp_maps[i].virt);
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}
	}

	return 0;

cleanup_loop:
	while (i--) {
		del_mtd_partitions(msp_flash[i]);
		map_destroy(msp_flash[i]);
		kfree(msp_maps[i].name);
		iounmap(msp_maps[i].virt);
		kfree(msp_parts[i]);
	}
	kfree(msp_maps);
free_msp_parts:
	kfree(msp_parts);
free_msp_flash:
	kfree(msp_flash);
	return ret;
}