Example #1
// constructor -- see "Constructor" sub-section
static int
snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	vortex_t *chip;
	int err;

	// (1)
	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}
	// (2)
	err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
			   0, &card);
	if (err < 0)
		return err;

	// (3)
	if ((err = snd_vortex_create(card, pci, &chip)) < 0) {
		snd_card_free(card);
		return err;
	}
	snd_vortex_workaround(pci, pcifix[dev]);

	// Card details needed in snd_vortex_midi
	strcpy(card->driver, CARD_NAME_SHORT);
	sprintf(card->shortname, "Aureal Vortex %s", CARD_NAME_SHORT);
	sprintf(card->longname, "%s at 0x%lx irq %i",
		card->shortname, chip->io, chip->irq);

	// (4) Alloc components.
	err = snd_vortex_mixer(chip);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}
	// ADB pcm.
	err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_PCM);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}
#ifndef CHIP_AU8820
	// ADB SPDIF
	if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_SPDIF, 1)) < 0) {
		snd_card_free(card);
		return err;
	}
	// A3D
	if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_A3D, NR_A3D)) < 0) {
		snd_card_free(card);
		return err;
	}
#endif
	/*
	   // ADB I2S
	   if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_I2S, 1)) < 0) {
	   snd_card_free(card);
	   return err;
	   }
	 */
#ifndef CHIP_AU8810
	// WT pcm.
	if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_WT, NR_WT)) < 0) {
		snd_card_free(card);
		return err;
	}
#endif
	if ((err = snd_vortex_midi(chip)) < 0) {
		snd_card_free(card);
		return err;
	}

	vortex_gameport_register(chip);

#if 0
	if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_VORTEX_SYNTH,
			       sizeof(snd_vortex_synth_arg_t), &wave) < 0
	    || wave == NULL) {
		snd_printk(KERN_ERR "Can't initialize Aureal wavetable synth\n");
	} else {
		snd_vortex_synth_arg_t *arg;

		arg = SNDRV_SEQ_DEVICE_ARGPTR(wave);
		strcpy(wave->name, "Aureal Synth");
		arg->hwptr = vortex;
		arg->index = 1;
		arg->seq_ports = seq_ports[dev];
		arg->max_voices = max_synth_voices[dev];
	}
#endif

	// (5)
	if ((err = pci_read_config_word(pci, PCI_DEVICE_ID,
				  &(chip->device))) < 0) {
		snd_card_free(card);
		return err;
	}	
	if ((err = pci_read_config_word(pci, PCI_VENDOR_ID,
				  &(chip->vendor))) < 0) {
		snd_card_free(card);
		return err;
	}
	chip->rev = pci->revision;
#ifdef CHIP_AU8830
	if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {
		pr_alert(
		       "vortex: The revision (%x) of your card has not been seen before.\n",
		       chip->rev);
		pr_alert(
		       "vortex: Please email the results of 'lspci -vv' to [email protected].\n");
		snd_card_free(card);
		err = -ENODEV;
		return err;
	}
#endif

	// (6)
	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	// (7)
	pci_set_drvdata(pci, card);
	dev++;
	vortex_connect_default(chip, 1);
	vortex_enable_int(chip);
	return 0;
}
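
A probe routine like this only runs once it is wired into a struct pci_driver and registered with the PCI core. A minimal sketch of that boilerplate, assuming the usual ALSA PCI layout; the id table contents and the remove callback here are illustrative, not taken from the driver above:

static const struct pci_device_id snd_vortex_ids[] = {
	{ PCI_DEVICE(0x12eb, 0x0001), 0, },	/* assumed vendor/device id */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_vortex_ids);

static void snd_vortex_remove(struct pci_dev *pci)
{
	/* snd_card_free() tears down every component attached to the card */
	snd_card_free(pci_get_drvdata(pci));
}

static struct pci_driver vortex_driver = {
	.name = KBUILD_MODNAME,
	.id_table = snd_vortex_ids,
	.probe = snd_vortex_probe,
	.remove = snd_vortex_remove,
};
module_pci_driver(vortex_driver);
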
Example #2
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;
	void *temp_buf;

	DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__,
			current->comm, current->parent->comm, current->tgid);

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			diag_max_reg += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				diag_max_reg -= pkt_params->count -
							 count_entries;
				pr_alert("diag: Insufficient memory for reg.");
				mutex_unlock(&driver->diagchar_mutex);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (driver->logging_mode == UART_MODE)
			driver->logging_mode = MEMORY_DEVICE_MODE;
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_qdsp_1 = 1;
			driver->in_busy_qdsp_2 = 1;
			driver->in_busy_wcnss = 1;
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE)
			diagfwd_disconnect();
		else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE)
			diagfwd_connect();
		else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			DIAG_INFO("diag: USB disconnected\n");
			diagfwd_disconnect();
			DIAG_INFO("sdlogging enable\n");
			driver->qxdm2sd_drop = 0;
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
		} else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
								== USB_MODE) {
			DIAG_INFO("sdlogging disable\n");
			diagfwd_connect();
			driver->qxdm2sd_drop = 1;
		}
#endif /* DIAG over USB */
		success = 1;
	}

	return success;
}
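
From user space these commands are issued with a plain ioctl() on the diag character device. A rough sketch of switching to SD (memory device) logging, assuming the node is /dev/diag and that DIAG_IOCTL_SWITCH_LOGGING and MEMORY_DEVICE_MODE come from the matching UAPI header; the path and error handling are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int switch_to_sd_logging(void)
{
	int fd, ret;

	fd = open("/dev/diag", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open /dev/diag");
		return -1;
	}
	/* the handler above casts the ioctl argument itself to the new mode */
	ret = ioctl(fd, DIAG_IOCTL_SWITCH_LOGGING, MEMORY_DEVICE_MODE);
	if (ret < 0)
		perror("DIAG_IOCTL_SWITCH_LOGGING");
	close(fd);
	return ret;
}
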
Example #3
static int diagchar_open(struct inode *inode, struct file *file)
{
    int i = 0;
    void *temp;

    if (driver) {
        mutex_lock(&driver->diagchar_mutex);

        for (i = 0; i < driver->num_clients; i++)
            if (driver->client_map[i].pid == 0)
                break;

        if (i < driver->num_clients) {
            diag_add_client(i, file);
        } else {
            if (i < threshold_client_limit) {
                driver->num_clients++;
                temp = krealloc(driver->client_map
                                , (driver->num_clients) * sizeof(struct
                                        diag_client_map), GFP_KERNEL);
                if (!temp)
                    goto fail;
                else
                    driver->client_map = temp;
                temp = krealloc(driver->data_ready
                                , (driver->num_clients) * sizeof(int),
                                GFP_KERNEL);
                if (!temp)
                    goto fail;
                else
                    driver->data_ready = temp;
                diag_add_client(i, file);
            } else {
                mutex_unlock(&driver->diagchar_mutex);
                pr_alert("Max client limit for DIAG reached\n");
                pr_info("Cannot open handle %s"
                        " %d", current->comm, current->tgid);
                for (i = 0; i < driver->num_clients; i++)
                    pr_debug("%d) %s PID=%d", i, driver->
                             client_map[i].name,
                             driver->client_map[i].pid);
                return -ENOMEM;
            }
        }
        driver->data_ready[i] = 0x0;
        driver->data_ready[i] |= MSG_MASKS_TYPE;
        driver->data_ready[i] |= EVENT_MASKS_TYPE;
        driver->data_ready[i] |= LOG_MASKS_TYPE;

        if (driver->ref_count == 0)
            diagmem_init(driver);
        driver->ref_count++;
        mutex_unlock(&driver->diagchar_mutex);
        return 0;
    }
    return -ENOMEM;

fail:
    mutex_unlock(&driver->diagchar_mutex);
    driver->num_clients--;
    pr_alert("diag: Insufficient memory for new client");
    return -ENOMEM;
}
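
An open handler like this takes effect only once it is plugged into the driver's file_operations table. A minimal sketch, assuming the companion handlers exist under the names used in the other examples here (diagchar_read is assumed; it is not shown on this page):

static const struct file_operations diagchar_fops = {
	.owner = THIS_MODULE,
	.open = diagchar_open,
	.release = diagchar_close,
	.read = diagchar_read,			/* assumed counterpart */
	.write = diagchar_write,
	.unlocked_ioctl = diagchar_ioctl,
};
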
Example #4
static void pm_callback_power_off(struct kbase_device *kbdev)
{
	unsigned int uiCurrentFreqCount;

	volatile int polling_count = 100000;
	volatile int i = 0;

	struct mtk_config *config;

	if (!kbdev) {
		pr_alert("MALI: input parameter is NULL\n");
		return;
	}

	config = (struct mtk_config *)kbdev->mtk_config;
	if (!config)
		pr_alert("MALI: mtk_config is NULL\n");

	/// 1. Delay 0.01ms before power off
	for (i = 0; i < DELAY_LOOP_COUNT; i++)
		;
	if (i != DELAY_LOOP_COUNT)
		pr_warn("[MALI] power off delay error!\n");

	/// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);

	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);

	if (polling_count <= 0)
		pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);

#if HARD_RESET_AT_POWER_OFF
	/* Cause a GPU hard reset to test whether we have actually idled the GPU
	 * and that we properly reconfigure the GPU on power up.
	 * Usually this would be dangerous, but if the GPU is working correctly it should
	 * be completely safe as the GPU should not be active at this point.
	 * However this is disabled normally because it will most likely interfere with
	 * bus logging etc.
	 */
	//KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
	kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
	///  Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);

	/* restart the poll budget for the second idle check */
	polling_count = 100000;

	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);

	if (polling_count <= 0)
		pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);

	g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index(); // record current freq. index.
	//printk("MALI:  GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx );
#if 1
	uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num();       // get freq. table size
	mt_gpufreq_target(uiCurrentFreqCount-1);                    // set gpu to lowest freq.
#endif

	/* MTK clock modified */
#ifdef CONFIG_MTK_CLKMGR
	disable_clock(MT_CG_MFG_BG3D, "GPU");
	disable_clock(MT_CG_DISP0_SMI_COMMON, "GPU");
#endif

	if (mt6325_upmu_get_swcid() >= PMIC6325_E3_CID_CODE)
		mt_gpufreq_voltage_enable_set(0);

#ifdef ENABLE_COMMON_DVFS
	ged_dvfs_gpu_clock_switch_notify(0);
#endif
	mtk_set_vgpu_power_on_flag(MTK_VGPU_POWER_OFF); // the power status is "power off".
#endif
}
Example #5
static int diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err, ret = 0, pkt_type;
	int length = 0, i;
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	void *buf_copy = NULL;
	unsigned int payload_size;
#ifdef CONFIG_DIAG_OVER_USB
	if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
				(driver->logging_mode == NO_LOGGING_MODE)) {
		/*Drop the diag payload */
		return -EIO;
	}
#endif /* DIAG over USB */
	/* First 4 bytes carry the packet type - F3/log/event/Pkt response */
	if (count < 4) {
		pr_err("diag: Client sending short data\n");
		return -EBADMSG;
	}
	err = copy_from_user(&pkt_type, buf, 4);
	if (err)
		return -EFAULT;
	payload_size = count - 4;
	if (payload_size > USER_SPACE_DATA) {
		pr_err("diag: Dropping packet, packet payload size crosses 8KB limit. Current payload size %d\n",
				payload_size);
		driver->dropped_count++;
		return -EBADMSG;
	}

	if (pkt_type == DCI_DATA_TYPE) {
		err = copy_from_user(driver->user_space_data, buf + 4,
							 payload_size);
		if (err) {
			pr_alert("diag: copy failed for DCI data\n");
			return DIAG_DCI_SEND_DATA_FAIL;
		}
		err = diag_process_dci_client(driver->user_space_data,
							payload_size);
		return err;
	}
	if (pkt_type == USER_SPACE_LOG_TYPE) {
		err = copy_from_user(driver->user_space_data, buf + 4,
							 payload_size);
		if (err) {
			pr_alert("diag: copy failed for user space data\n");
			return -EFAULT;
		}
		/* Check masks for On-Device logging */
		if (driver->mask_check) {
			if (!mask_request_validate(driver->user_space_data)) {
				pr_alert("diag: mask request Invalid\n");
				return -EFAULT;
			}
		}
		buf = buf + 4;

		diag_printk(1,"diag:%s user space data %d\n",__func__, payload_size);
		for (i = 0; i < payload_size; i++)
			diag_printk(1,"\t %x", *((driver->user_space_data)+i));
#ifdef CONFIG_DIAG_SDIO_PIPE
		/* send masks to 9k too */
		if (driver->sdio_ch) {
			wait_event_interruptible(driver->wait_q,
				 (sdio_write_avail(driver->sdio_ch) >=
					 payload_size));
			if (driver->sdio_ch && (payload_size > 0)) {
				sdio_write(driver->sdio_ch, (void *)
				   (driver->user_space_data), payload_size);
			}
		}
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
		/* send masks to 9k too */
		if (driver->hsic_ch && (payload_size > 0)) {
			/* wait sending mask updates if HSIC ch not ready */
			if (driver->in_busy_hsic_write)
				wait_event_interruptible(driver->wait_q,
					(driver->in_busy_hsic_write != 1));
			driver->in_busy_hsic_write = 1;
			driver->in_busy_hsic_read_on_device = 0;
			err = diag_bridge_write(driver->user_space_data,
							 payload_size);
			if (err) {
				pr_err("diag: err sending mask to MDM: %d\n",
									 err);
				/*
				* If the error is recoverable, then clear
				* the write flag, so we will resubmit a
				* write on the next frame.  Otherwise, don't
				* resubmit a write on the next frame.
				*/
				if ((-ESHUTDOWN) != err)
					driver->in_busy_hsic_write = 0;
			}
		}
#endif
		/* send masks to 8k now */
		diag_process_hdlc((void *)(driver->user_space_data),
							 payload_size);
		return 0;
	}

	if (payload_size > itemsize) {
		pr_err("diag: Dropping packet, packet payload size crosses"
				"4KB limit. Current payload size %d\n",
				payload_size);
		driver->dropped_count++;
		return -EBADMSG;
	}

	buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
	if (!buf_copy) {
		driver->dropped_count++;
		return -ENOMEM;
	}

	err = copy_from_user(buf_copy, buf + 4, payload_size);
	if (err) {
		printk(KERN_INFO "diagchar : copy_from_user failed\n");
		ret = -EFAULT;
		goto fail_free_copy;
	}
#ifdef DIAG_DEBUG
	printk(KERN_DEBUG "data is -->\n");
	for (i = 0; i < payload_size; i++)
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_copy)+i));
#endif
	send.state = DIAG_STATE_START;
	send.pkt = buf_copy;
	send.last = (void *)(buf_copy + payload_size - 1);
	send.terminate = 1;

	diag_printk(1,"diag:%s Already used bytes in buffer %d, and"
	" incoming payload size is %d\n",__func__, driver->used, payload_size);
	diag_printk(1, "hdlc encoded data is -->\n");
	for (i = 0; i < payload_size + 8; i++) {
		diag_printk(1, "\t %x \t", *(((unsigned char *)buf_hdlc)+i));
		if (*(((unsigned char *)buf_hdlc)+i) != 0x7e)
			length++;
	}

	mutex_lock(&driver->diagchar_mutex);
	if (!buf_hdlc)
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
						 POOL_TYPE_HDLC);
	if (!buf_hdlc) {
		ret = -ENOMEM;
		goto fail_free_hdlc;
	}
	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
	}

	enc.dest = buf_hdlc + driver->used;
	enc.dest_last = (void *)(buf_hdlc + driver->used + 2*payload_size + 3);
	diag_hdlc_encode(&send, &enc);

	/* dump the encoded bytes only once buf_hdlc is known to be valid */
	diag_printk(1, "hdlc encoded data is -->\n");
	for (i = 0; i < payload_size + 8; i++) {
		diag_printk(1, "\t %x \t", *(((unsigned char *)buf_hdlc)+i));
		if (*(((unsigned char *)buf_hdlc)+i) != 0x7e)
			length++;
	}

	/* This is to check if after HDLC encoding, we are still within the
	 limits of aggregation buffer. If not, we write out the current buffer
	and start aggregation in a newly allocated buffer */
	if ((unsigned int) enc.dest >=
		 (unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
		enc.dest = buf_hdlc + driver->used;
		enc.dest_last = (void *)(buf_hdlc + driver->used +
							 (2*payload_size) + 3);
		diag_hdlc_encode(&send, &enc);
	}

	driver->used = (uint32_t) enc.dest - (uint32_t) buf_hdlc;
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
	}

	mutex_unlock(&driver->diagchar_mutex);
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	if (!timer_in_progress)	{
		timer_in_progress = 1;
		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
	}
	return 0;

fail_free_hdlc:
	buf_hdlc = NULL;
	driver->used = 0;
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	mutex_unlock(&driver->diagchar_mutex);
	return ret;

fail_free_copy:
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	return ret;
}
Example #6
static int fdev_init(void)
{
	int result = 0;
	char *name = "firstdev";
	unsigned int firstminor = 0;
	int i, major, devno;
	int err;

	pr_alert("DEVICE:%s\n", name);
	pr_alert("The process is \"%s\" (pid %i)\n",
	       current->comm, current->pid);
	pr_alert("UTS_RELEASE:%s", UTS_RELEASE);
	pr_alert("KERNEL_VERSION:%d", KERNEL_VERSION(2, 6, 10));

	err = alloc_chrdev_region(&dev, firstminor, count, name);
	if (!err) {
		pr_alert("alloc_chrdev_region successful.");
		pr_alert("dev_t:%d,Major=%d,Minor=%d",
			dev, MAJOR(dev), MINOR(dev));
	} else {
		pr_alert("alloc_chrdev_region failed.");
		return err;
	}

	fdev_p = kmalloc_array(count, sizeof(struct fdev), GFP_KERNEL);
	if (!fdev_p) {
		result = -ENOMEM;
		pr_alert("kmalloc fdev_p failed.");
		goto fail;
	} else {
		pr_alert("kmalloc fdev_p successful.");
	}

	memset(fdev_p, 0, count * sizeof(struct fdev));

	major = MAJOR(dev);
	for (i = 0; i < count; ++i) {
		struct fdev *devp = &fdev_p[i];

		sema_init(&devp->sem, 1);
		devno = MKDEV(major, i);
		devp->major = major;
		devp->minor = i;

		devp->quantum_count = QUANTUM_DEFAULT;
		devp->qset_count = QSET_DEFAULT;

		cdev_init(&devp->cdev, &fops);
		devp->cdev.owner = THIS_MODULE;
		devp->cdev.ops = &fops;
		err = cdev_add(&devp->cdev, devno, 1);
		if (err)
			pr_alert("Error %d adding firstdev %d", err, i);
		else
			pr_alert("Successful adding firstdev %d", i);
	}
	return 0;
fail:
	fdev_exit();
	return result;
}
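
For completeness, an init routine like this is normally paired with module_init()/module_exit() and module metadata. A sketch of the counterpart, assuming fdev_exit() (referenced in the fail path above) has to delete the cdevs and release the region; treat it as illustrative, since the real cleanup must match how far init actually got:

static void fdev_exit(void)
{
	int i;

	if (fdev_p) {
		for (i = 0; i < count; i++)
			cdev_del(&fdev_p[i].cdev);
		kfree(fdev_p);
		fdev_p = NULL;
	}
	unregister_chrdev_region(dev, count);
}

module_init(fdev_init);
module_exit(fdev_exit);
MODULE_LICENSE("GPL");
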
Example #7
static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atmsvc_msg *msg;
	struct atm_vcc *session_vcc;
	struct sock *sk;

	msg = (struct atmsvc_msg *) skb->data;
	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	vcc = *(struct atm_vcc **) &msg->vcc;
	pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
	sk = sk_atm(vcc);

	switch (msg->type) {
	case as_okay:
		sk->sk_err = -msg->reply;
		clear_bit(ATM_VF_WAITING, &vcc->flags);
		if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
			vcc->local.sas_family = AF_ATMSVC;
			memcpy(vcc->local.sas_addr.prv,
			       msg->local.sas_addr.prv, ATM_ESA_LEN);
			memcpy(vcc->local.sas_addr.pub,
			       msg->local.sas_addr.pub, ATM_E164_LEN + 1);
		}
		session_vcc = vcc->session ? vcc->session : vcc;
		if (session_vcc->vpi || session_vcc->vci)
			break;
		session_vcc->itf = msg->pvc.sap_addr.itf;
		session_vcc->vpi = msg->pvc.sap_addr.vpi;
		session_vcc->vci = msg->pvc.sap_addr.vci;
		if (session_vcc->vpi || session_vcc->vci)
			session_vcc->qos = msg->qos;
		break;
	case as_error:
		clear_bit(ATM_VF_REGIS, &vcc->flags);
		clear_bit(ATM_VF_READY, &vcc->flags);
		sk->sk_err = -msg->reply;
		clear_bit(ATM_VF_WAITING, &vcc->flags);
		break;
	case as_indicate:
		vcc = *(struct atm_vcc **)&msg->listen_vcc;
		sk = sk_atm(vcc);
		pr_debug("as_indicate!!!\n");
		lock_sock(sk);
		if (sk_acceptq_is_full(sk)) {
			sigd_enq(NULL, as_reject, vcc, NULL, NULL);
			dev_kfree_skb(skb);
			goto as_indicate_complete;
		}
		sk->sk_ack_backlog++;
		skb_queue_tail(&sk->sk_receive_queue, skb);
		pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
		sk->sk_state_change(sk);
as_indicate_complete:
		release_sock(sk);
		return 0;
	case as_close:
		set_bit(ATM_VF_RELEASED, &vcc->flags);
		vcc_release_async(vcc, msg->reply);
		goto out;
	case as_modify:
		modify_qos(vcc, msg);
		break;
	case as_addparty:
	case as_dropparty:
		sk->sk_err_soft = msg->reply;
					/* < 0 failure, otherwise ep_ref */
		clear_bit(ATM_VF_WAITING, &vcc->flags);
		break;
	default:
		pr_alert("bad message type %d\n", (int)msg->type);
		return -EINVAL;
	}
	sk->sk_state_change(sk);
out:
	dev_kfree_skb(skb);
	return 0;
}
Example #8
static void ion_test_exit(void)
{
	misc_deregister(&ion_test_dev);
	pr_alert("%s\n", __func__);
}
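
The deregistration above mirrors a registration done at module init. A minimal sketch of that counterpart, assuming ion_test_dev is an ordinary struct miscdevice; the node name and the empty fops are illustrative:

static const struct file_operations ion_test_fops = {
	.owner = THIS_MODULE,
	/* .open, .unlocked_ioctl, .release would go here */
};

static struct miscdevice ion_test_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ion-test",		/* assumed node name */
	.fops = &ion_test_fops,
};

static int __init ion_test_init(void)
{
	/* creates /dev/ion-test with a dynamically assigned minor */
	return misc_register(&ion_test_dev);
}
module_init(ion_test_init);
module_exit(ion_test_exit);
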
Example #9
static long
qfp_fuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qfp_fuse_req req;
	u32 fuse_buf[QFP_FUSE_BUF_SIZE];
	u32 *buf = fuse_buf;
	u32 *ptr = NULL;
	int i;

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QFP_FUSE_IOC_MAGIC)
		return -ENOTTY;

	switch (cmd) {
	case QFP_FUSE_IOC_READ:
		if (arg == 0) {
			pr_err("user space arg not supplied\n");
			err = -EFAULT;
			break;
		}

		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
			pr_err("Error copying req from user space\n");
			err = -EFAULT;
			break;
		}

		/* Check for limits */
		if (is_usr_req_valid(&req) == false) {
			pr_err("Invalid request\n");
			err = -EINVAL;
			break;
		}

		if (req.size > QFP_FUSE_BUF_SIZE) {
			/* Allocate memory for buffer */
			ptr = kzalloc(req.size * 4, GFP_KERNEL);
			if (ptr == NULL) {
				pr_alert("No memory for data\n");
				err = -ENOMEM;
				break;
			}
			buf = ptr;
		}

		if (mutex_lock_interruptible(&qfp_priv->lock)) {
			err = -ERESTARTSYS;
			break;
		}

		/* Read data */
		for (i = 0; i < req.size; i++)
			buf[i] = readl_relaxed(
				((u32 *) (qfp_priv->base + req.offset)) + i);

		if (copy_to_user((void __user *)req.data, buf, 4*(req.size))) {
			pr_err("Error copying to user space\n");
			err = -EFAULT;
		}

		mutex_unlock(&qfp_priv->lock);
		break;

	case QFP_FUSE_IOC_WRITE:
		if (arg == 0) {
			pr_err("user space arg not supplied\n");
			err = -EFAULT;
			break;
		}

		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
			pr_err("Error copying req from user space\n");
			err = -EFAULT;
			break;
		}
		/* Check for limits */
		if (is_usr_req_valid(&req) == false) {
			pr_err("Invalid request\n");
			err = -EINVAL;
			break;
		}

		if (req.size > QFP_FUSE_BUF_SIZE) {
			/* Allocate memory for buffer */
			ptr = kzalloc(req.size * 4, GFP_KERNEL);
			if (ptr == NULL) {
				pr_alert("No memory for data\n");
				err = -ENOMEM;
				break;
			}
			buf = ptr;
		}

		/* Copy user data to local buffer */
		if (copy_from_user(buf, (void __user *)req.data,
				4 * (req.size))) {
			pr_err("Error copying data from user space\n");
			err = -EFAULT;
			break;
		}

		if (mutex_lock_interruptible(&qfp_priv->lock)) {
			err = -ERESTARTSYS;
			break;
		}

		/* Write data word at a time */
		for (i = 0; i < req.size && !err; i++) {
			err = qfp_fuse_write_word(((u32 *) (
				qfp_priv->base + req.offset) + i), buf[i]);
		}

		mutex_unlock(&qfp_priv->lock);
		break;
	default:
		pr_err("Invalid ioctl command.\n");
		return -ENOTTY;
	}
	kfree(ptr);
	return err;
}
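
A user-space caller drives this handler through the same struct qfp_fuse_req. A rough usage sketch for the read path, assuming a /dev/qfp_fuse node and that the request struct carries offset, size (in 32-bit words, per the read loop above) and a data pointer; the node path is an assumption:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

int read_fuses(uint32_t offset, uint32_t *out, uint32_t words)
{
	struct qfp_fuse_req req = {
		.offset = offset,
		.size = words,			/* number of 32-bit words */
		.data = out,
	};
	int fd, ret;

	fd = open("/dev/qfp_fuse", O_RDONLY);	/* assumed device node */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, QFP_FUSE_IOC_READ, &req);
	close(fd);
	return ret;
}
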
Example #10
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	int fault;
	unsigned int flags = 0;

	cause >>= 2;

	/* Restart the instruction */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
				"cause %ld\n", current->comm, SIGSEGV, address, cause);
			show_regs(regs);
		}
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		flush_tlb_one(address);
		return;
	}
}
Example #11
/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

#ifdef __tilegx__
	/*
	 * We don't need early do_page_fault_ics() support, since unlike
	 * Pro we don't need to worry about unlocking the atomic locks.
	 * There is only one current case in GX where we touch any memory
	 * under ICS other than our own kernel stack, and we handle that
	 * here.  (If we crash due to trying to touch our own stack,
	 * we're in too much trouble for C code to help out anyway.)
	 */
	if (write & ~1) {
		unsigned long pc = write & ~1;
		if (pc >= (unsigned long) __start_unalign_asm_code &&
		    pc < (unsigned long) __end_unalign_asm_code) {
			struct thread_info *ti = current_thread_info();
			/*
			 * Our EX_CONTEXT is still what it was from the
			 * initial unalign exception, but now we've faulted
			 * on the JIT page.  We would like to complete the
			 * page fault however is appropriate, and then retry
			 * the instruction that caused the unalign exception.
			 * Our state has been "corrupted" by setting the low
			 * bit in "sp", and stashing r0..r3 in the
			 * thread_info area, so we revert all of that, then
			 * continue as if this were a normal page fault.
			 */
			regs->sp &= ~1UL;
			regs->regs[0] = ti->unalign_jit_tmp[0];
			regs->regs[1] = ti->unalign_jit_tmp[1];
			regs->regs[2] = ti->unalign_jit_tmp[2];
			regs->regs[3] = ti->unalign_jit_tmp[3];
			write &= 1;
		} else {
			pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
				 current->comm, current->pid, pc, address);
			show_regs(regs);
			do_group_exit(SIGKILL);
			return;
		}
	}
#else
	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);
#endif

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA()
	if (!user_mode(regs)) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
Example #12
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, temp, success = -1;
	unsigned int count_entries = 0, interim_count = 0;
	void *temp_buf;
	uint16_t support_list = 0;
	struct dci_notification_tbl notify_params;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process pkt_params;
		struct bindpkt_params *params;
		struct bindpkt_params *head_params;
		if (copy_from_user(&pkt_params, (void *)ioarg,
				sizeof(struct bindpkt_params_per_process))) {
			return -EFAULT;
		}
		if ((UINT32_MAX/sizeof(struct bindpkt_params)) <
					pkt_params.count) {
			pr_alert("diag: integer overflow while multiply\n");
			return -EFAULT;
		}
		params = kzalloc(pkt_params.count*sizeof(
				struct bindpkt_params), GFP_KERNEL);
		if (!params) {
			pr_alert("diag: unable to alloc memory\n");
			return -ENOMEM;
		} else
			head_params = params;

		if (copy_from_user(params, pkt_params.params,
			pkt_params.count*sizeof(struct bindpkt_params))) {
			kfree(head_params);
			return -EFAULT;
		}
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, params, &success,
							&count_entries);
				if (pkt_params.count > count_entries) {
					params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			if (pkt_params.count >= count_entries) {
				interim_count = pkt_params.count -
							count_entries;
			} else {
				pr_alert("diag: error in params count\n");
				kfree(head_params);
				mutex_unlock(&driver->diagchar_mutex);
				return -EFAULT;
			}
			if (UINT32_MAX - diag_max_reg >=
						interim_count) {
				diag_max_reg += interim_count;
			} else {
				pr_alert("diag: Integer overflow\n");
				kfree(head_params);
				mutex_unlock(&driver->diagchar_mutex);
				return -EFAULT;
			}
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			if (UINT32_MAX/sizeof(struct diag_master_table) <
								diag_max_reg) {
				pr_alert("diag: integer overflow\n");
				kfree(head_params);
				mutex_unlock(&driver->diagchar_mutex);
				return -EFAULT;
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				pr_alert("diag: Insufficient memory for reg.\n");
				if (pkt_params.count >= count_entries) {
					interim_count = pkt_params.count -
								count_entries;
				} else {
					pr_alert("diag: params count error\n");
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return -EFAULT;
				}
				if (diag_max_reg >= interim_count) {
					diag_max_reg -= interim_count;
				} else {
					pr_alert("diag: Integer underflow\n");
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return -EFAULT;
				}
				mutex_unlock(&driver->diagchar_mutex);
				kfree(head_params);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, params, &success,
							&count_entries);
				if (pkt_params.count > count_entries) {
					params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return success;
				}
			}
			kfree(head_params);
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			kfree(head_params);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params delay_params;
		uint16_t interim_rsp_id;
		int interim_size;
		if (copy_from_user(&delay_params, (void *)ioarg,
					sizeof(struct diagpkt_delay_params)))
			return -EFAULT;
		if ((delay_params.rsp_ptr) &&
			(delay_params.size == sizeof(delayed_rsp_id)) &&
				(delay_params.num_bytes_ptr)) {
			interim_rsp_id = DIAGPKT_NEXT_DELAYED_RSP_ID(
						delayed_rsp_id);
			if (copy_to_user((void *)delay_params.rsp_ptr,
					&interim_rsp_id, sizeof(uint16_t)))
				return -EFAULT;
			interim_size = sizeof(delayed_rsp_id);
			if (copy_to_user((void *)delay_params.num_bytes_ptr,
						&interim_size, sizeof(int)))
				return -EFAULT;
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_DCI_REG) {
		if (driver->dci_state == DIAG_DCI_NO_REG)
			return DIAG_DCI_NO_REG;
		if (driver->num_dci_client >= MAX_DCI_CLIENT)
			return DIAG_DCI_NO_REG;
		if (copy_from_user(&notify_params, (void *)ioarg,
				sizeof(struct dci_notification_tbl)))
			return -EFAULT;
		mutex_lock(&driver->dci_mutex);
		driver->num_dci_client++;
		pr_debug("diag: id = %d\n", driver->dci_client_id);
		driver->dci_client_id++;
		for (i = 0; i < MAX_DCI_CLIENT; i++) {
			if (driver->dci_notify_tbl[i].client == NULL) {
				driver->dci_notify_tbl[i].client = current;
				driver->dci_notify_tbl[i].list =
							 notify_params.list;
				driver->dci_notify_tbl[i].signal_type =
					 notify_params.signal_type;
				break;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		return driver->dci_client_id;
	} else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
		success = -1;
		/* Delete this process from DCI table */
		mutex_lock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++) {
			if (driver->dci_tbl[i].pid == current->tgid) {
				pr_debug("diag: delete %d\n", current->tgid);
				driver->dci_tbl[i].pid = 0;
				success = i;
			}
		}
		for (i = 0; i < MAX_DCI_CLIENT; i++) {
			if (driver->dci_notify_tbl[i].client == current) {
				driver->dci_notify_tbl[i].client = NULL;
				break;
			}
		}
		/* if any registrations were deleted successfully OR a valid
		   client_id was sent in the DEINIT call, then it's a DCI client */
		if (success >= 0 || ioarg)
			driver->num_dci_client--;
		mutex_unlock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++)
			if (driver->dci_tbl[i].pid != 0)
				pr_debug("diag: PID = %d, UID = %d, tag = %d\n",
	driver->dci_tbl[i].pid, driver->dci_tbl[i].uid, driver->dci_tbl[i].tag);
		pr_debug("diag: complete deleting registrations\n");
		return success;
	} else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
		if (driver->ch_dci)
			support_list = support_list | DIAG_CON_MPSS;
		if (copy_to_user((void *)ioarg, &support_list,
							sizeof(uint16_t)))
			return -EFAULT;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (driver->logging_mode == MEMORY_DEVICE_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 1;
		}
		if (driver->logging_mode == UART_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_qdsp_1 = 1;
			driver->in_busy_qdsp_2 = 1;
			driver->in_busy_wcnss_1 = 1;
			driver->in_busy_wcnss_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
			diag_clear_hsic_tbl();
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE) {
			diagfwd_disconnect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;

			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == MEMORY_DEVICE_MODE &&
				 driver->logging_mode == USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diag_clear_hsic_tbl();
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		}
#endif /* DIAG over USB */
		success = 1;
	}

	return success;
}
Example #13
/**
 * pretimeout_noop - No operation on watchdog pretimeout event
 * @wdd: watchdog_device
 *
 * This function prints a message about pretimeout to kernel log.
 */
static void pretimeout_noop(struct watchdog_device *wdd)
{
	pr_alert("watchdog%d: pretimeout event\n", wdd->id);
}
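
In the mainline watchdog core this callback is exposed as a pretimeout governor. A sketch of how such a handler is registered, following the pattern of drivers/watchdog/pretimeout_noop.c; treat the details as indicative rather than a verbatim copy:

static struct watchdog_governor watchdog_gov_noop = {
	.name		= "noop",
	.pretimeout	= pretimeout_noop,
};

static int __init watchdog_gov_noop_register(void)
{
	return watchdog_register_governor(&watchdog_gov_noop);
}

static void __exit watchdog_gov_noop_unregister(void)
{
	watchdog_unregister_governor(&watchdog_gov_noop);
}
module_init(watchdog_gov_noop_register);
module_exit(watchdog_gov_noop_unregister);
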
Example #14
static void __cpuinit check_tempk(struct work_struct *work)
{
	unsigned int new_freq;
	struct tsens_device tsens_dev;
	long temp = 0;
	int ret = 0;
	
	tsens_dev.sensor_num = kmsm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
		goto reschedule;
	}
	//pr_alert("CHECK TEMP %lu-%d-%d\n", temp, kmsm_thermal_info.temp_limit_degC_start, kmsm_thermal_info.temp_limit_degC_stop);
	kmsm_thermal_info.current_temp = temp;
	
	if (temp >= kmsm_thermal_info.temp_limit_degC_start)
	{
		unsigned int i;
		if (!kmsm_thermal_info.isthrottling)
		{
			//prev_freq = cpufreq_get(0);
			thermal_get_freq_table();
			pr_alert("START KTHROTTLING - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
		}
		kmsm_thermal_info.isthrottling = 1;
		//policy = cpufreq_cpu_get(0);
		//__cpufreq_driver_target(policy, 1296000, CPUFREQ_RELATION_H);
		limit_idx -= kmsm_thermal_info.freq_steps_while_throttling;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		for (i = 0; i < num_online_cpus(); i++)
		{
			//pr_alert("KTHROTTLING LOOP - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
			if (cpu_online(i) && cpufreq_get(i) != table[limit_idx].frequency)
			{
				//pr_alert("KTHROTTLING LOOP IN IF - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
				//policy = NULL;
				//policy = cpufreq_cpu_get(i);
				//if (policy != NULL)
				//	__cpufreq_driver_target(policy, 1296000, CPUFREQ_RELATION_H);
				new_freq = table[limit_idx].frequency;
				do_kthermal(i, new_freq);
			}
		}
	}
	else if (kmsm_thermal_info.isthrottling && temp > kmsm_thermal_info.temp_limit_degC_stop && temp < kmsm_thermal_info.temp_limit_degC_start)
	{
		unsigned int i;
		for (i = 0; i < num_online_cpus(); i++)
		{
			if (cpu_online(i) && cpufreq_get(i) != table[limit_idx].frequency)
			{
				new_freq = table[limit_idx].frequency;
				do_kthermal(i, new_freq);
			}
		}
	}
	else if (kmsm_thermal_info.isthrottling && temp <= kmsm_thermal_info.temp_limit_degC_stop)
	{
		unsigned int i;
		bool stopThrottle = false;
		//policy = cpufreq_cpu_get(0);
		//if (prev_freq > 0)
		//	__cpufreq_driver_target(policy, prev_freq, CPUFREQ_RELATION_H);
		limit_idx += kmsm_thermal_info.freq_steps_while_throttling;
		if (limit_idx >= limit_idx_high)
		{
			limit_idx = limit_idx_high;
			kmsm_thermal_info.isthrottling = 0;
			stopThrottle = true;
			pr_alert("STOP KTHROTTLING - current temp = %lu\n", temp);
		}
		for (i = 0; i < num_online_cpus(); i++)
		{
			if (cpu_online(i))
			{
				//policy = NULL;
				//policy = cpufreq_cpu_get(i);
				//if (prev_freq > 0 && policy != NULL)
				//	__cpufreq_driver_target(policy, prev_freq, CPUFREQ_RELATION_H);
				//do_thermal(i, prev_freq);
				new_freq = table[limit_idx].frequency;
				do_kthermal(i, new_freq);
			}
		}
		if (stopThrottle)
			do_kthermal(0, 0);
	}

reschedule:
	schedule_delayed_work_on(0, &check_temp_workk,
			msecs_to_jiffies(kmsm_thermal_info.poll_speed));
}
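
The handler re-queues itself through check_temp_workk, so something has to initialize and prime that delayed work once. A minimal sketch of the setup, assuming the standard workqueue pattern; the init function name is illustrative:

static int __init kmsm_thermal_init(void)
{
	/* check_tempk() reschedules itself every poll_speed milliseconds */
	INIT_DELAYED_WORK(&check_temp_workk, check_tempk);
	schedule_delayed_work_on(0, &check_temp_workk,
			msecs_to_jiffies(kmsm_thermal_info.poll_speed));
	return 0;
}
module_init(kmsm_thermal_init);
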
Example #15
static void an30259a_set_led_blink(enum an30259a_led_enum led,
					unsigned int delay_on_time,
					unsigned int delay_off_time,
					u8 brightness)
{
	struct i2c_client *client;
	client = b_client;

	if (brightness == LED_OFF) {
		leds_on(led, false, false, brightness);
		return;
	}

	if (brightness > LED_MAX_CURRENT)
		brightness = LED_MAX_CURRENT;

	if (led == LED_R)
		LED_DYNAMIC_CURRENT = LED_R_CURRENT;
	else if (led == LED_G)
		LED_DYNAMIC_CURRENT = LED_G_CURRENT;
	else if (led == LED_B)
		LED_DYNAMIC_CURRENT = LED_B_CURRENT;

	/* In user case, LED current is restricted */
	if (led_intensity == 0 || led_intensity == 40) {
		/* stock intensity is used (see LED_x_CURRENT = 0x28) */
		brightness = (brightness * LED_DYNAMIC_CURRENT) / LED_MAX_CURRENT;
	} else {
		/* adapt current to led_intensity */
		brightness = (brightness * led_intensity) / LED_MAX_CURRENT;
	}

	if (led_enable_fade_charging == 1)
	{
		if (led_time_on)
			delay_on_time = led_time_on;
		if (led_time_off)
			delay_off_time = led_time_off;
	}
	
	if (delay_on_time > SLPTT_MAX_VALUE)
		delay_on_time = SLPTT_MAX_VALUE;

	if (delay_off_time > SLPTT_MAX_VALUE)
		delay_off_time = SLPTT_MAX_VALUE;

	if (delay_off_time == LED_OFF) {
		leds_on(led, true, false, brightness);
		if (brightness == LED_OFF)
			leds_on(led, false, false, brightness);
		return;
	} else
		leds_on(led, true, true, brightness);

	if (led_time_on)
	{
		pr_alert("LED OVER-RIDE - DELAY_ON_Orig=%d, DELAY_OFF_Orig=%d, DELAY_ON_New=%d, DELAY_OFF_New=%d", delay_on_time, delay_off_time, led_time_on, led_time_off);
		delay_on_time = led_time_on;
	}
	if (led_time_off)
	{
		pr_alert("LED OVER-RIDE - DELAY_ON_Orig=%d, DELAY_OFF_Orig=%d, DELAY_ON_New=%d, DELAY_OFF_New=%d", delay_on_time, delay_off_time, led_time_on, led_time_off);
		delay_off_time = led_time_off;
	}

	if (led_enable_fade == 1) {
		leds_set_slope_mode(client, led, 0, 30, 15, 0,
			(delay_on_time + AN30259A_TIME_UNIT - 1) /
			AN30259A_TIME_UNIT,
			(delay_off_time + AN30259A_TIME_UNIT - 1) /
			AN30259A_TIME_UNIT,
			led_step_speed1, led_step_speed2, led_step_speed3, led_step_speed4);
	}
	else {
		leds_set_slope_mode(client, led, 0, 15, 15, 0,
			(delay_on_time + AN30259A_TIME_UNIT - 1) /
			AN30259A_TIME_UNIT,
			(delay_off_time + AN30259A_TIME_UNIT - 1) /
			AN30259A_TIME_UNIT,
			0, 0, 0, 0);
	}
}
Example #16
static int diagchar_close(struct inode *inode, struct file *file)
{
	int i = 0;
	struct diagchar_priv *diagpriv_data = file->private_data;

	pr_debug("diag: process exit %s\n", current->comm);
	if (!(file->private_data)) {
		pr_alert("diag: Invalid file pointer");
		return -ENOMEM;
	}
	/* clean up any DCI registrations, if this is a DCI client
	* This will specially help in case of ungraceful exit of any DCI client
	* This call will remove any pending registrations of such client
	*/
	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
		if (driver->dci_client_tbl[i].client &&
			driver->dci_client_tbl[i].client->tgid ==
							 current->tgid) {
			diagchar_ioctl(NULL, DIAG_IOCTL_DCI_DEINIT, 0);
			break;
		}
	}
	/* If the exiting process is the socket process */
	if (driver->socket_process &&
		(driver->socket_process->tgid == current->tgid)) {
		driver->socket_process = NULL;
	}
	if (driver->callback_process &&
		(driver->callback_process->tgid == current->tgid)) {
		driver->callback_process = NULL;
	}

#ifdef CONFIG_DIAG_OVER_USB
	/* If the SD logging process exits, change logging to USB mode */
	if (driver->logging_process_id == current->tgid) {
		driver->logging_mode = USB_MODE;
		diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
		diag_clear_hsic_tbl();
		diagfwd_cancel_hsic();
		diagfwd_connect_bridge(0);
#endif
	}
#endif /* DIAG over USB */
	/* Delete the pkt response table entry for the exiting process */
	for (i = 0; i < diag_max_reg; i++)
		if (driver->table[i].process_id == current->tgid)
			driver->table[i].process_id = 0;

	if (driver) {
		mutex_lock(&driver->diagchar_mutex);
		driver->ref_count--;
		/* On Client exit, try to destroy all 3 pools */
		diagmem_exit(driver, POOL_TYPE_COPY);
		diagmem_exit(driver, POOL_TYPE_HDLC);
		diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT);
		for (i = 0; i < driver->num_clients; i++) {
			if (NULL != diagpriv_data && diagpriv_data->pid ==
				 driver->client_map[i].pid) {
				driver->client_map[i].pid = 0;
				kfree(diagpriv_data);
				diagpriv_data = NULL;
				break;
			}
		}
		mutex_unlock(&driver->diagchar_mutex);
		return 0;
	}
	return -ENOMEM;
}
Example #17
long fdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	pr_alert("fdev_ioctl");
	return 0;
}
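
To be reachable from user space, this stub has to be hooked in as the unlocked_ioctl callback of the device's file_operations. A sketch, assuming the fops object that fdev_init() above passes to cdev_init(); the remaining callbacks are omitted:

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = fdev_ioctl,
	/* .open, .read, .write, .release omitted in this sketch */
};
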
Example #18
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;
	void *temp_buf;
	uint16_t support_list = 0;
	struct diag_dci_client_tbl *params =
		kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
	struct diag_dci_health_stats stats;
	int status;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			diag_max_reg += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				diag_max_reg -= pkt_params->count -
							 count_entries;
				pr_alert("diag: Insufficient memory for reg.");
				mutex_unlock(&driver->diagchar_mutex);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_DCI_REG) {
		if (driver->dci_state == DIAG_DCI_NO_REG)
			return DIAG_DCI_NO_REG;
		if (driver->num_dci_client >= MAX_DCI_CLIENTS)
			return DIAG_DCI_NO_REG;
		if (copy_from_user(params, (void *)ioarg,
				 sizeof(struct diag_dci_client_tbl)))
			return -EFAULT;
		mutex_lock(&driver->dci_mutex);
		if (!(driver->num_dci_client))
			driver->in_busy_dci = 0;
		driver->num_dci_client++;
		pr_debug("diag: id = %d\n", driver->dci_client_id);
		driver->dci_client_id++;
		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
			if (driver->dci_client_tbl[i].client == NULL) {
				driver->dci_client_tbl[i].client = current;
				driver->dci_client_tbl[i].list =
							 params->list;
				driver->dci_client_tbl[i].signal_type =
					 params->signal_type;
				create_dci_log_mask_tbl(driver->
					dci_client_tbl[i].dci_log_mask);
				create_dci_event_mask_tbl(driver->
					dci_client_tbl[i].dci_event_mask);
				driver->dci_client_tbl[i].data_len = 0;
				driver->dci_client_tbl[i].dci_data =
					 kzalloc(IN_BUF_SIZE, GFP_KERNEL);
				driver->dci_client_tbl[i].total_capacity =
								 IN_BUF_SIZE;
				driver->dci_client_tbl[i].dropped_logs = 0;
				driver->dci_client_tbl[i].dropped_events = 0;
				driver->dci_client_tbl[i].received_logs = 0;
				driver->dci_client_tbl[i].received_events = 0;
				break;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		return driver->dci_client_id;
	} else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
		success = -1;
		/* Delete this process from DCI table */
		mutex_lock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++)
			if (driver->req_tracking_tbl[i].pid == current->tgid)
				driver->req_tracking_tbl[i].pid = 0;
		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
			if (driver->dci_client_tbl[i].client &&
			driver->dci_client_tbl[i].client->tgid ==
							 current->tgid) {
				driver->dci_client_tbl[i].client = NULL;
				success = i;
				break;
			}
		}
		if (success >= 0)
			driver->num_dci_client--;
		mutex_unlock(&driver->dci_mutex);
		return success;
	} else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
		if (driver->ch_dci)
			support_list = support_list | DIAG_CON_MPSS;
		*(uint16_t *)ioarg = support_list;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_DCI_HEALTH_STATS) {
		if (copy_from_user(&stats, (void *)ioarg,
				 sizeof(struct diag_dci_health_stats)))
			return -EFAULT;
		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
			params = &(driver->dci_client_tbl[i]);
			if (params->client &&
				params->client->tgid == current->tgid) {
				stats.dropped_logs = params->dropped_logs;
				stats.dropped_events = params->dropped_events;
				stats.received_logs = params->received_logs;
				stats.received_events = params->received_events;
				if (stats.reset_status) {
					params->dropped_logs = 0;
					params->dropped_events = 0;
					params->received_logs = 0;
					params->received_events = 0;
				}
				break;
			}
		}
		if (copy_to_user((void *)ioarg, &stats,
				   sizeof(struct diag_dci_health_stats)))
			return -EFAULT;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (temp == driver->logging_mode) {
			mutex_unlock(&driver->diagchar_mutex);
			pr_alert("diag: forbidden logging change requested\n");
			return 0;
		}
		if (driver->logging_mode == MEMORY_DEVICE_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 1;
			if (driver->socket_process) {
				/*
				 * Notify the socket logging process that we
				 * are switching to MEMORY_DEVICE_MODE
				 */
				status = send_sig(SIGCONT,
					 driver->socket_process, 0);
				if (status) {
					pr_err("diag: %s, Error notifying ",
						__func__);
					pr_err("socket process, status: %d\n",
						status);
				}
			}
		}
		if (driver->logging_mode == SOCKET_MODE)
			driver->socket_process = current;
		if (driver->logging_mode == CALLBACK_MODE)
			driver->callback_process = current;
		if (driver->logging_mode == UART_MODE ||
			driver->logging_mode == SOCKET_MODE ||
			driver->logging_mode == CALLBACK_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_lpass_1 = 1;
			driver->in_busy_lpass_2 = 1;
			driver->in_busy_wcnss_1 = 1;
			driver->in_busy_wcnss_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
			diag_clear_hsic_tbl();
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_lpass_1 = 0;
			driver->in_busy_lpass_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chlpass)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_lpass_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE) {
			diagfwd_disconnect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_lpass_1 = 0;
			driver->in_busy_lpass_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;

			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chlpass)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_lpass_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == MEMORY_DEVICE_MODE &&
				 driver->logging_mode == USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diag_clear_hsic_tbl();
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		}
#endif /* DIAG over USB */
		success = 1;
	} else if (iocmd == DIAG_IOCTL_REMOTE_DEV) {
		uint16_t remote_dev = diag_get_remote_device_mask();

		if (copy_to_user((void *)ioarg, &remote_dev, sizeof(uint16_t)))
			success = -EFAULT;
		else
			success = 1;
	}

	return success;
}
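
Several branches above treat ioarg as a directly dereferenceable pointer (DIAG_IOCTL_COMMAND_REG, DIAG_IOCTL_GET_DELAYED_RSP_ID, DIAG_IOCTL_DCI_SUPPORT), while the DCI registration and health-stats branches correctly go through copy_from_user()/copy_to_user(). A minimal sketch of the safer copy-in/copy-out pattern, using a made-up demo_params type rather than the driver's structures:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Illustrative only: a made-up ioctl argument struct. */
struct demo_params {
	int list;
	int signal_type;
};

/*
 * Sketch of the copy-in/copy-out pattern used by the DCI branches
 * above; demo_params is hypothetical, not a driver structure.
 */
static long demo_ioctl_arg(unsigned long ioarg)
{
	struct demo_params params;

	/* ioarg is a user pointer; never dereference it directly. */
	if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
		return -EFAULT;

	params.signal_type |= 0x1;	/* operate on the kernel copy */

	if (copy_to_user((void __user *)ioarg, &params, sizeof(params)))
		return -EFAULT;
	return 0;
}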
Example #19
loff_t fdev_llseek(struct file *filp, loff_t off, int whence)
{
	pr_alert("fdev_llseek");

	return 0;
}
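
As with Example #17, fdev_llseek is a stub that always reports offset 0. A minimal non-stub version might delegate to fixed_size_llseek(), assuming a kernel recent enough to provide it (v3.11+) and a fixed-size backing store; the FDEV_MAX_SIZE constant is an assumption:

#include <linux/fs.h>

#define FDEV_MAX_SIZE 4096	/* assumed size of the backing store */

/* A minimal non-stub llseek for a fixed-size device. */
loff_t fdev_llseek_impl(struct file *filp, loff_t off, int whence)
{
	/* fixed_size_llseek() handles SEEK_SET/SEEK_CUR/SEEK_END and bounds. */
	return fixed_size_llseek(filp, off, whence, FDEV_MAX_SIZE);
}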
void __init mt_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
{
/* FIXME: need porting */
#if 1
	struct tag *cmdline_tag = NULL;
	struct tag *reserved_mem_bank_tag = NULL;
	unsigned long node = (unsigned long)tags;
	unsigned long max_limit_size = CONFIG_MAX_DRAM_SIZE_SUPPORT - RESERVED_MEM_MODEM;
	unsigned long avail_dram = 0;
	unsigned long bl_mem_sz = 0;
	/* for modem fixup */
	unsigned int nr_modem = 0, i = 0;
	unsigned int max_avail_addr = 0;
	unsigned int modem_start_addr = 0;
	unsigned int hole_start_addr = 0;
	unsigned int hole_size = 0;
	unsigned int *modem_size_list = 0;

	node = of_scan_flat_dt(early_init_dt_get_chosen, NULL);

	cmdline_tag = of_get_flat_dt_prop(node, "atag,cmdline", NULL);
#if defined(CONFIG_MTK_FB)
	if (cmdline_tag)
		cmdline_filter(cmdline_tag, (char *)&temp_command_line);
#endif

	tags = of_get_flat_dt_prop(node, "atag,boot", NULL);
	if (tags)
		g_boot_mode = tags->u.boot.bootmode;

	tags = of_get_flat_dt_prop(node, "atag,meta", NULL);
	if (tags) {
		g_meta_com_type = tags->u.meta_com.meta_com_type;
		g_meta_com_id = tags->u.meta_com.meta_com_id;
	}

	tags = of_get_flat_dt_prop(node, "atag,videolfb", NULL);
	if (tags)
		parse_tag_videofb_fixup(tags);

	tags = of_get_flat_dt_prop(node, "atag,devinfo", NULL);
	if (tags)
		parse_tag_devinfo_data_fixup(tags);

	tags = of_get_flat_dt_prop(node, "atag,mem", NULL);
	if (tags) {
		for (; tags->hdr.tag == ATAG_MEM; tags = tag_next(tags)) {
			bl_mem_sz += tags->u.mem.size;
			/*
			 * Modify the memory tag to limit available memory to
			 * CONFIG_MAX_DRAM_SIZE_SUPPORT
			 */
			if (max_limit_size > 0) {
				if (max_limit_size >= tags->u.mem.size) {
					max_limit_size -= tags->u.mem.size;
					avail_dram += tags->u.mem.size;
				} else {
					tags->u.mem.size = max_limit_size;
					avail_dram += max_limit_size;
					max_limit_size = 0;
				}
				/* By Keene: */
				/* remove this check to avoid calculating pmem size before we know all dram size */
				/* Assuming the minimum size of memory bank is 256MB */
				/* if (tags->u.mem.size >= (TOTAL_RESERVED_MEM_SIZE)) { */
				reserved_mem_bank_tag = tags;
				/* } */
			} else {
				tags->u.mem.size = 0;
			}
		}
	}

	kernel_mem_sz = avail_dram;	/* keep the DRAM size (limited by CONFIG_MAX_DRAM_SIZE_SUPPORT) */
	/*
	 * If the maximum memory size configured in kernel
	 * is smaller than the actual size (passed from BL)
	 * Still limit the maximum memory size but use the FB
	 * initialized by BL
	 */
	if (bl_mem_sz >= (CONFIG_MAX_DRAM_SIZE_SUPPORT - RESERVED_MEM_MODEM))
		use_bl_fb++;

	/*
	 * Setup PMEM size
	 */
	/*
	   if (avail_dram < 0x10000000)
	   RESERVED_MEM_SIZE_FOR_PMEM = 0x1700000;
	   else */
	RESERVED_MEM_SIZE_FOR_PMEM = 0;

	/* Reserve memory in the last bank */
	if (reserved_mem_bank_tag) {
		reserved_mem_bank_tag->u.mem.size -= ((__u32) TOTAL_RESERVED_MEM_SIZE);
		mi->bank[mi->nr_banks - 1].size -= ((__u32) TOTAL_RESERVED_MEM_SIZE);
		pmem_start = reserved_mem_bank_tag->u.mem.start + reserved_mem_bank_tag->u.mem.size;
	} else			/* we should always have reserved memory */
		BUG();

	pr_alert("[PHY layout]avaiable DRAM size (lk) = 0x%08lx\n[PHY layout]avaiable DRAM size = 0x%08lx\n[PHY layout]FB       :   0x%08lx - 0x%08lx  (0x%08x)\n",
		 bl_mem_sz, kernel_mem_sz, FB_START, FB_START + FB_SIZE, FB_SIZE);
	if (PMEM_MM_SIZE) {
		pr_alert("[PHY layout]PMEM     :   0x%08lx - 0x%08lx  (0x%08x)\n",
			 PMEM_MM_START, PMEM_MM_START + PMEM_MM_SIZE, PMEM_MM_SIZE);
	}
	/*
	 * fixup memory tags for dual modem model
	 * assumptions:
	 * 1) modem start addresses should be 32MiB aligned
	 */
	nr_modem = get_nr_modem();
	modem_size_list = get_modem_size_list();

	for (i = 0; i < nr_modem; i++) {
		/* sanity test */
		if (modem_size_list[i]) {
			pr_alert("fixup for modem [%d], size = 0x%08x\n", i, modem_size_list[i]);
		} else {
			pr_alert("[Error]skip empty modem [%d]\n", i);
			continue;
		}
		pr_alert("reserved_mem_bank_tag start = 0x%08x, reserved_mem_bank_tag size = 0x%08x, TOTAL_RESERVED_MEM_SIZE = 0x%08x\n",
			 reserved_mem_bank_tag->u.mem.start,
			 reserved_mem_bank_tag->u.mem.size, TOTAL_RESERVED_MEM_SIZE);
		/* find out start address for modem */
		max_avail_addr = reserved_mem_bank_tag->u.mem.start +
		    reserved_mem_bank_tag->u.mem.size;
		modem_start_addr = round_down((max_avail_addr - modem_size_list[i]), 0x2000000);
		/* sanity test */
		if (modem_size_list[i] > reserved_mem_bank_tag->u.mem.size) {
			pr_alert("[Error]skip modem [%d] fixup: size too large: 0x%08x, reserved_mem_bank_tag->u.mem.size: 0x%08x\n",
				i, modem_size_list[i], reserved_mem_bank_tag->u.mem.size);
			continue;
		}
		if (modem_start_addr < reserved_mem_bank_tag->u.mem.start) {
			pr_alert("[Error]skip modem [%d] fixup: modem crosses memory bank boundary: 0x%08x, reserved_mem_bank_tag->u.mem.start: 0x%08x\n",
				 i, modem_start_addr, reserved_mem_bank_tag->u.mem.start);
			continue;
		}
		pr_alert("modem fixup sanity test pass\n");
		modem_start_addr_list[i] = modem_start_addr;
		hole_start_addr = modem_start_addr + modem_size_list[i];
		hole_size = max_avail_addr - hole_start_addr;
		pr_alert("max_avail_addr = 0x%08x, modem_start_addr_list[%d] = 0x%08x, hole_start_addr = 0x%08x, hole_size = 0x%08x\n",
			 max_avail_addr, i, modem_start_addr, hole_start_addr, hole_size);
		pr_alert("[PHY layout]MD       :   0x%08x - 0x%08x  (0x%08x)\n",
			 modem_start_addr, modem_start_addr + modem_size_list[i],
			 modem_size_list[i]);
		/* shrink reserved_mem_bank */
		reserved_mem_bank_tag->u.mem.size -= (max_avail_addr - modem_start_addr);
		pr_alert("reserved_mem_bank: start = 0x%08x, size = 0x%08x\n",
			 reserved_mem_bank_tag->u.mem.start, reserved_mem_bank_tag->u.mem.size);
#if 0
		/* setup a new memory tag */
		tags->hdr.tag = ATAG_MEM;
		tags->hdr.size = tag_size(tag_mem32);
		tags->u.mem.start = hole_start_addr;
		tags->u.mem.size = hole_size;
		/* do next tag */
		tags = tag_next(tags);
#endif
	}
/* tags->hdr.tag = ATAG_NONE; // mark the end of the tag list */
/* tags->hdr.size = 0; */


	if (cmdline_tag != NULL) {
#ifdef CONFIG_FIQ_DEBUGGER
		char *console_ptr;
		int uart_port;
#endif
		char *br_ptr;
		/* This function may modify ttyMT3 to ttyMT0 if needed */
		adjust_kernel_cmd_line_setting_for_console(cmdline_tag->u.cmdline.cmdline,
							   *cmdline);
#ifdef CONFIG_FIQ_DEBUGGER
		console_ptr = strstr(*cmdline, "ttyMT");
		if ((console_ptr) != 0) {
			uart_port = console_ptr[5] - '0';
			if (uart_port > 3)
				uart_port = -1;

			fiq_uart_fixup(uart_port);
		}
#endif

		/*FIXME mark for porting */
		cmdline_filter(cmdline_tag, *cmdline);
		br_ptr = strstr(*cmdline, "boot_reason=");
		if ((br_ptr) != 0) {
			/* get boot reason */
			g_boot_reason = br_ptr[12] - '0';
		}

		/* Use the default cmdline */
		/* memcpy((void*)cmdline_tag, */
		/* (void*)tag_next(cmdline_tag), */
		/* ATAG_NONE actual size */
		/* (uint32_t)(none_tag) - (uint32_t)(tag_next(cmdline_tag)) + 8); */
	}
#endif
}
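
The modem placement above relies on round_down() to pick the highest 32 MiB-aligned start address that still fits below the top of the reserved bank. A small stand-alone sketch of that arithmetic, with made-up bank and modem sizes:

#include <stdio.h>

/* Same rounding as the kernel's round_down() for power-of-two sizes. */
#define ROUND_DOWN(x, a)	((x) & ~((a) - 1UL))

int main(void)
{
	unsigned long bank_start = 0x80000000UL;
	unsigned long bank_size  = 0x20000000UL;	/* 512 MiB */
	unsigned long modem_size = 0x03000000UL;	/* 48 MiB */

	unsigned long max_avail   = bank_start + bank_size;
	/* Place the modem at the highest 32 MiB-aligned address that fits. */
	unsigned long modem_start = ROUND_DOWN(max_avail - modem_size,
					       0x2000000UL);

	printf("modem: 0x%08lx - 0x%08lx\n", modem_start,
	       modem_start + modem_size);
	/* The gap above the modem is the "hole" the code reports. */
	printf("hole : 0x%08lx - 0x%08lx\n", modem_start + modem_size,
	       max_avail);
	return 0;
}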
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
		       stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
		       tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;  /* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here
	 * unless we are doing atomic access to user space with
	 * interrupts disabled.
	 */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;  /* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;  /* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
			       " non-executable page - exploit attempt?"
			       " (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}
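
The good_area checks condense to a small permission predicate: instruction fetches need VM_EXEC, writes need VM_WRITE, and ordinary reads need both a real page fault and VM_READ (on TILE, protection faults on reads are rejected). A stand-alone restatement of that logic, with illustrative flag values:

#include <stdbool.h>

/* Illustrative flag bits mirroring VM_READ/VM_WRITE/VM_EXEC. */
#define DEMO_VM_READ	0x1
#define DEMO_VM_WRITE	0x2
#define DEMO_VM_EXEC	0x4

/*
 * Condensed restatement of the good_area checks above. Returns true
 * when the VMA's flags permit the faulting access.
 */
static bool demo_access_ok(unsigned long vm_flags, bool is_exec,
			   bool write, bool is_page_fault)
{
	if (is_exec)
		return vm_flags & DEMO_VM_EXEC;
	if (write)
		return vm_flags & DEMO_VM_WRITE;
	/* Protection faults on reads (is_page_fault == 0) are rejected. */
	return is_page_fault && (vm_flags & DEMO_VM_READ);
}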
Example #22
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	depot_fetch_stack(handle, &trace);
	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
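
print_page_owner() builds its report with the common snprintf-accumulate idiom: each call appends at offset ret, and ret >= count means the output would have been truncated, so the function bails out to err. A minimal user-space model of the pattern:

#include <stdio.h>

/*
 * Minimal model of the kbuf accumulation above: each snprintf()
 * appends at offset ret; ret >= count signals truncation and the
 * caller should abort, like the "goto err" paths.
 */
static int demo_fill(char *kbuf, size_t count)
{
	int ret;

	ret = snprintf(kbuf, count, "header: order %u\n", 3u);
	if (ret >= (int)count)
		return -1;		/* truncated */

	ret += snprintf(kbuf + ret, count - ret, "detail: pfn %lu\n",
			123456UL);
	if (ret >= (int)count)
		return -1;

	return ret;			/* bytes actually written */
}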

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is not page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	depot_fetch_stack(handle, &trace);
	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}
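
read_page_owner() stores its scan position in the file offset, so each read() call resumes at the PFN after the last one reported. A toy sketch of that resume-cursor pattern (the skip condition is a stand-in for the buddy/page_ext checks):

#include <stdio.h>

#define DEMO_MIN_PFN	100UL
#define DEMO_MAX_PFN	110UL

/* Model of the "*ppos as resume cursor" trick used above. */
static long demo_read(unsigned long *ppos)
{
	unsigned long pfn;

	for (pfn = DEMO_MIN_PFN + *ppos; pfn < DEMO_MAX_PFN; pfn++) {
		if (pfn % 3)		/* stand-in for "page not interesting" */
			continue;
		*ppos = (pfn - DEMO_MIN_PFN) + 1;	/* resume after pfn */
		return (long)pfn;	/* "report this page" */
	}
	return 0;			/* EOF */
}

int main(void)
{
	unsigned long pos = 0;
	long pfn;

	while ((pfn = demo_read(&pos)) > 0)
		printf("emitted pfn %ld, next pos %lu\n", pfn, pos);
	return 0;
}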

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = page_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle, 0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}
/* Process the data read from the smd control channel */
int diag_process_smd_cntl_read_data(struct diag_smd_info *smd_info, void *buf,
								int total_recd)
{
	int data_len = 0, type = -1, count_bytes = 0, j, flag = 0;
	struct bindpkt_params_per_process *pkt_params =
		kzalloc(sizeof(struct bindpkt_params_per_process), GFP_KERNEL);
	struct diag_ctrl_msg *msg;
	struct cmd_code_range *range;
	struct bindpkt_params *temp;

	if (pkt_params == NULL) {
		pr_alert("diag: In %s, Memory allocation failure\n",
			__func__);
		return 0;
	}

	if (!smd_info) {
		pr_err("diag: In %s, No smd info. Not able to read.\n",
			__func__);
		kfree(pkt_params);
		return 0;
	}

	while (count_bytes + HDR_SIZ <= total_recd) {
		type = *(uint32_t *)(buf);
		data_len = *(uint32_t *)(buf + 4);
		if (type < DIAG_CTRL_MSG_REG ||
				 type > DIAG_CTRL_MSG_LAST) {
			pr_alert("diag: In %s, Invalid Msg type %d proc %d",
				 __func__, type, smd_info->peripheral);
			break;
		}
		if (data_len < 0 || data_len > total_recd) {
			pr_alert("diag: In %s, Invalid data len %d, total_recd: %d, proc %d",
				 __func__, data_len, total_recd,
				 smd_info->peripheral);
			break;
		}
		count_bytes = count_bytes+HDR_SIZ+data_len;
		if (type == DIAG_CTRL_MSG_REG && total_recd >= count_bytes) {
			msg = buf+HDR_SIZ;
			range = buf+HDR_SIZ+
					sizeof(struct diag_ctrl_msg);
			if (msg->count_entries == 0) {
				pr_debug("diag: In %s, received reg tbl with no entries\n",
								__func__);
				buf = buf + HDR_SIZ + data_len;
				continue;
			}
			pkt_params->count = msg->count_entries;
			pkt_params->params = kzalloc(pkt_params->count *
				sizeof(struct bindpkt_params), GFP_KERNEL);
			if (!pkt_params->params) {
				pr_alert("diag: In %s, Memory alloc fail for cmd_code: %d, subsys: %d\n",
						__func__, msg->cmd_code,
						msg->subsysid);
				buf = buf + HDR_SIZ + data_len;
				continue;
			}
			temp = pkt_params->params;
			for (j = 0; j < pkt_params->count; j++) {
				temp->cmd_code = msg->cmd_code;
				temp->subsys_id = msg->subsysid;
				temp->client_id = smd_info->peripheral;
				temp->proc_id = NON_APPS_PROC;
				temp->cmd_code_lo = range->cmd_code_lo;
				temp->cmd_code_hi = range->cmd_code_hi;
				range++;
				temp++;
			}
			flag = 1;
			/* peripheral undergoing SSR should not
			 * record new registration
			 */
			if (!(reg_dirty & smd_info->peripheral_mask))
				diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
						(unsigned long)pkt_params);
			else
				pr_err("diag: drop reg proc %d\n",
						smd_info->peripheral);
			kfree(pkt_params->params);
		} else if (type == DIAG_CTRL_MSG_FEATURE &&
				total_recd >= count_bytes) {
			uint8_t feature_mask = 0;
			int feature_mask_len = *(int *)(buf+8);
			if (feature_mask_len > 0) {
				int periph = smd_info->peripheral;
				driver->rcvd_feature_mask[smd_info->peripheral]
									= 1;
				feature_mask = *(uint8_t *)(buf+12);
				if (periph == MODEM_DATA)
					driver->log_on_demand_support =
						feature_mask &
					F_DIAG_LOG_ON_DEMAND_RSP_ON_MASTER;
				/*
				 * If apps supports separate cmd/rsp channels
				 * and the peripheral supports separate cmd/rsp
				 * channels
				 */
				if (driver->supports_separate_cmdrsp &&
					(feature_mask & F_DIAG_REQ_RSP_CHANNEL))
					driver->separate_cmdrsp[periph] =
							ENABLE_SEPARATE_CMDRSP;
				else
					driver->separate_cmdrsp[periph] =
							DISABLE_SEPARATE_CMDRSP;
				/*
				 * Check if apps supports hdlc encoding and the
				 * peripheral supports apps hdlc encoding
				 */
				process_hdlc_encoding_feature(smd_info,
								feature_mask);
				if (feature_mask_len > 1) {
					feature_mask = *(uint8_t *)(buf+13);
					process_stm_feature(smd_info,
								feature_mask);
				}
			}
			flag = 1;
		} else if (type != DIAG_CTRL_MSG_REG) {
			flag = 1;
		}
		buf = buf + HDR_SIZ + data_len;
	}
	kfree(pkt_params);

	return flag;
}
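
The control-channel parser above is a type-length-value walk with bounds checks: never read a header past total_recd, validate the length field, then advance by header plus payload. A stand-alone skeleton of the loop; note the length check here is slightly stricter than the original's data_len > total_recd test, since it also accounts for bytes already consumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_HDR_SIZ 8		/* 4-byte type + 4-byte length */

/*
 * Bounds-checked TLV walk: refuse to read a header that would run
 * past total_recd, validate the length, then advance past header
 * and payload.
 */
static void demo_walk(const uint8_t *buf, int total_recd)
{
	int count_bytes = 0;

	while (count_bytes + DEMO_HDR_SIZ <= total_recd) {
		uint32_t type, data_len;

		memcpy(&type, buf, 4);
		memcpy(&data_len, buf + 4, 4);
		if ((int)data_len < 0 ||
		    (int)data_len > total_recd - count_bytes - DEMO_HDR_SIZ)
			break;		/* malformed length: stop parsing */
		printf("msg type %u, payload %u bytes\n", type, data_len);
		count_bytes += DEMO_HDR_SIZ + data_len;
		buf += DEMO_HDR_SIZ + data_len;
	}
}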
static void diag_update_msg_mask(int start, int end, uint8_t *buf)
{
	int found = 0, first, last, actual_last;
	uint8_t *actual_last_ptr;
	uint8_t *ptr = driver->msg_masks;
	uint8_t *ptr_buffer_start = &(*(driver->msg_masks));
	uint8_t *ptr_buffer_end = &(*(driver->msg_masks)) + MSG_MASK_SIZE;
	uint32_t copy_len = (end - start + 1) * sizeof(int);

	mutex_lock(&driver->diagchar_mutex);
	/* First SSID can be zero : So check that last is non-zero */
	while (*(uint32_t *)(ptr + 4)) {
		first = *(uint32_t *)ptr;
		ptr += 4;
		last = *(uint32_t *)ptr;
		ptr += 4;
		actual_last = *(uint32_t *)ptr;
		actual_last_ptr = ptr;
		ptr += 4;
		if (start >= first && start <= actual_last) {
			ptr += (start - first)*4;
			if (end > actual_last) {
				pr_info("diag: ssid range mismatch\n");
				actual_last = end;
				*(uint32_t *)(actual_last_ptr) = end;
			}
			if (actual_last-first >= MAX_SSID_PER_RANGE) {
				pr_err("diag: In %s, truncating ssid range, %d-%d to max allowed: %d",
						__func__, first, actual_last,
						MAX_SSID_PER_RANGE);
				copy_len = MAX_SSID_PER_RANGE;
				actual_last = first + MAX_SSID_PER_RANGE;
				*(uint32_t *)actual_last_ptr = actual_last;
			}
			if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end,
								copy_len)) {
				pr_debug("diag: update ssid start %d, end %d\n",
								 start, end);
				memcpy(ptr, buf, copy_len);
			} else
				pr_alert("diag: Not enough space MSG_MASK\n");
			found = 1;
			break;
		} else {
			ptr += MAX_SSID_PER_RANGE*4;
		}
	}
	/* Entry was not found - add new table */
	if (!found) {
		if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end,
				  8 + ((end - start) + 1)*4)) {
			memcpy(ptr, &(start) , 4);
			ptr += 4;
			memcpy(ptr, &(end), 4);
			ptr += 4;
			memcpy(ptr, &(end), 4); /* create actual_last entry */
			ptr += 4;
			pr_debug("diag: adding NEW ssid start %d, end %d\n",
								 start, end);
			memcpy(ptr, buf , ((end - start) + 1)*4);
		} else
			pr_alert("diag: Not enough buffer space for MSG_MASK\n");
	}
	driver->msg_status = DIAG_CTRL_MASK_VALID;
	mutex_unlock(&driver->diagchar_mutex);
	diag_print_mask_table();
}
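
Both branches above gate their memcpy() behind CHK_OVERFLOW(), which is defined elsewhere in the driver. For reference, a plausible shape of that guard is a simple range check; this is an assumption about the macro, not its actual definition:

#include <stdbool.h>
#include <stddef.h>

/*
 * Assumed shape of CHK_OVERFLOW(): a copy of copy_len bytes at ptr
 * is allowed only if it stays inside [start, end).
 */
static bool demo_chk_overflow(const char *start, const char *ptr,
			      const char *end, size_t copy_len)
{
	return ptr >= start && ptr + copy_len <= end;
}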
static void diag_smd_cntl_send_req(int proc_num)
{
	int data_len = 0, type = -1, count_bytes = 0, j, r, flag = 0;
	struct bindpkt_params_per_process *pkt_params =
		 kzalloc(sizeof(struct bindpkt_params_per_process), GFP_KERNEL);
	struct diag_ctrl_msg *msg;
	struct cmd_code_range *range;
	struct bindpkt_params *temp;
	void *buf = NULL;
	smd_channel_t *smd_ch = NULL;

	if (pkt_params == NULL) {
		pr_alert("diag: Memory allocation failure\n");
		return;
	}

	if (proc_num == MODEM_PROC) {
		buf = driver->buf_in_cntl;
		smd_ch = driver->ch_cntl;
	} else if (proc_num == QDSP_PROC) {
		buf = driver->buf_in_qdsp_cntl;
		smd_ch = driver->chqdsp_cntl;
	} else if (proc_num == WCNSS_PROC) {
		buf = driver->buf_in_wcnss_cntl;
		smd_ch = driver->ch_wcnss_cntl;
	}

	if (!smd_ch || !buf) {
		kfree(pkt_params);
		return;
	}

	r = smd_read_avail(smd_ch);
	if (r > IN_BUF_SIZE) {
		if (r < MAX_IN_BUF_SIZE) {
			pr_err("diag: SMD CNTL sending pkt upto %d bytes", r);
			buf = krealloc(buf, r, GFP_KERNEL);
		} else {
			pr_err("diag: CNTL pkt > %d bytes", MAX_IN_BUF_SIZE);
			kfree(pkt_params);
			return;
		}
	}
	if (buf && r > 0) {
		smd_read(smd_ch, buf, r);
		while (count_bytes + HDR_SIZ <= r) {
			type = *(uint32_t *)(buf);
			data_len = *(uint32_t *)(buf + 4);
			if (type < DIAG_CTRL_MSG_REG ||
					 type > DIAG_CTRL_MSG_F3_MASK_V2) {
				pr_alert("diag: Invalid Msg type %d proc %d",
					 type, proc_num);
				break;
			}
			if (data_len < 0 || data_len > r) {
				pr_alert("diag: Invalid data len %d proc %d",
					 data_len, proc_num);
				break;
			}
			count_bytes = count_bytes+HDR_SIZ+data_len;
			if (type == DIAG_CTRL_MSG_REG && r >= count_bytes) {
				msg = buf+HDR_SIZ;
				range = buf+HDR_SIZ+
						sizeof(struct diag_ctrl_msg);
				pkt_params->count = msg->count_entries;
				temp = kzalloc(pkt_params->count * sizeof(struct
						 bindpkt_params), GFP_KERNEL);
				if (temp == NULL) {
					pr_alert("diag: Memory alloc fail\n");
					kfree(pkt_params);
					return;
				}
				for (j = 0; j < pkt_params->count; j++) {
					temp->cmd_code = msg->cmd_code;
					temp->subsys_id = msg->subsysid;
					temp->client_id = proc_num;
					temp->proc_id = proc_num;
					temp->cmd_code_lo = range->cmd_code_lo;
					temp->cmd_code_hi = range->cmd_code_hi;
					range++;
					temp++;
				}
				temp -= pkt_params->count;
				pkt_params->params = temp;
				flag = 1;
				diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
						 (unsigned long)pkt_params);
				kfree(temp);
			}
			buf = buf + HDR_SIZ + data_len;
		}
	}
	kfree(pkt_params);
	if (flag) {
		/* Poll SMD CNTL channels to check for data */
		if (proc_num == MODEM_PROC)
			diag_smd_cntl_notify(NULL, SMD_EVENT_DATA);
		else if (proc_num == QDSP_PROC)
			diag_smd_qdsp_cntl_notify(NULL, SMD_EVENT_DATA);
		else if (proc_num == WCNSS_PROC)
			diag_smd_wcnss_cntl_notify(NULL, SMD_EVENT_DATA);
	}
}
Example #26
static int diagchar_close(struct inode *inode, struct file *file)
{
	int i = 0;
	struct diagchar_priv *diagpriv_data = file->private_data;

	if (!(file->private_data)) {
		pr_alert("diag: Invalid file pointer");
		return -ENOMEM;
	}

	/*
	 * Clean up any DCI registrations for this client.
	 * This especially helps in case of an ungraceful exit of a DCI
	 * client, as this call removes any pending registrations of that
	 * client.
	 */
	diagchar_ioctl(NULL, DIAG_IOCTL_DCI_DEINIT, 0);
#ifdef CONFIG_DIAG_OVER_USB
	/* If the SD logging process exits, change logging to USB mode */
	if (driver->logging_process_id == current->tgid) {
		driver->logging_mode = USB_MODE;
		diagfwd_connect();
#ifdef CONFIG_DIAG_HSIC_PIPE
		driver->num_hsic_buf_tbl_entries = 0;
		for (i = 0; i < driver->poolsize_hsic_write; i++) {
			if (driver->hsic_buf_tbl[i].buf) {
				/* Return the buffer to the pool */
				diagmem_free(driver, (unsigned char *)
					(driver->hsic_buf_tbl[i].buf),
					POOL_TYPE_HSIC);
				driver->hsic_buf_tbl[i].buf = 0;
				driver->hsic_buf_tbl[i].length = 0;
			}
		}
		diagfwd_cancel_hsic();
		diagfwd_connect_hsic(0);
#endif
	}
#endif /* DIAG over USB */
	/* Delete the pkt response table entry for the exiting process */
	for (i = 0; i < diag_max_reg; i++)
		if (driver->table[i].process_id == current->tgid)
			driver->table[i].process_id = 0;

	if (driver) {
		mutex_lock(&driver->diagchar_mutex);
		driver->ref_count--;
		/* On Client exit, try to destroy all 3 pools */
		diagmem_exit(driver, POOL_TYPE_COPY);
		diagmem_exit(driver, POOL_TYPE_HDLC);
		diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT);
		for (i = 0; i < driver->num_clients; i++) {
			if (NULL != diagpriv_data && diagpriv_data->pid ==
				 driver->client_map[i].pid) {
				driver->client_map[i].pid = 0;
				kfree(diagpriv_data);
				diagpriv_data = NULL;
				break;
			}
		}
		mutex_unlock(&driver->diagchar_mutex);
		return 0;
	}
	return -ENOMEM;
}
Example #27
static int diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err, ret = 0, pkt_type;
#ifdef DIAG_DEBUG
	int length = 0, i;
#endif
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	void *buf_copy = NULL;
	int payload_size;
#ifdef CONFIG_DIAG_OVER_USB
	if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
				(driver->logging_mode == NO_LOGGING_MODE)) {
		/*Drop the diag payload */
		return -EIO;
	}
#endif /* DIAG over USB */
	/* Get the packet type F3/log/event/Pkt response */
	err = copy_from_user((&pkt_type), buf, 4);
	/* First 4 bytes indicate the type of payload - ignore these */
	payload_size = count - 4;

	if (pkt_type == USER_SPACE_LOG_TYPE) {
		err = copy_from_user(driver->user_space_data, buf + 4,
							 payload_size);
		/* Check masks for On-Device logging; pkt_type was already
		   verified above, so validate the request directly */
		if (!mask_request_validate((unsigned char *)buf)) {
			pr_alert("diag: mask request Invalid\n");
			return -EFAULT;
		}
		buf = buf + 4;
#ifdef DIAG_DEBUG
		pr_debug("diag: user space data %d\n", payload_size);
		for (i = 0; i < payload_size; i++)
			printk(KERN_DEBUG "\t %x", *(((unsigned char *)buf)+i));
#endif
		diag_process_hdlc((void *)(driver->user_space_data),
							 payload_size);
		return 0;
	} else if (pkt_type == USERMODE_DIAGFWD) {
		if (diag7k_debug_mask)
			pr_info("%s#%d recv %d bytes\n", __func__, __LINE__, payload_size);
		buf += 4;
		diag_process_hdlc((void *)buf, payload_size);
		return count;
	}

	if (payload_size > itemsize) {
		pr_err("diag: Dropping packet, packet payload size crosses"
				"4KB limit. Current payload size %d\n",
				payload_size);
		driver->dropped_count++;
		return -EBADMSG;
	}

	buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
	if (!buf_copy) {
		driver->dropped_count++;
		return -ENOMEM;
	}

	err = copy_from_user(buf_copy, buf + 4, payload_size);
	if (err) {
		printk(KERN_INFO "diagchar : copy_from_user failed\n");
		ret = -EFAULT;
		goto fail_free_copy;
	}
#ifdef DIAG_DEBUG
	printk(KERN_DEBUG "data is -->\n");
	for (i = 0; i < payload_size; i++)
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_copy)+i));
#endif
	send.state = DIAG_STATE_START;
	send.pkt = buf_copy;
	send.last = (void *)(buf_copy + payload_size - 1);
	send.terminate = 1;
#ifdef DIAG_DEBUG
	pr_debug("diag: Already used bytes in buffer %d, and"
	" incoming payload size is %d\n", driver->used, payload_size);
	printk(KERN_DEBUG "hdlc encoded data is -->\n");
	for (i = 0; i < payload_size + 8; i++) {
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_hdlc)+i));
		if (*(((unsigned char *)buf_hdlc)+i) != 0x7e)
			length++;
	}
#endif
	mutex_lock(&driver->diagchar_mutex);
	if (!buf_hdlc)
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
						 POOL_TYPE_HDLC);
	if (!buf_hdlc) {
		ret = -ENOMEM;
		goto fail_free_hdlc;
	}
	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		if (diag7k_debug_mask)
			printk(KERN_INFO "\n size written is %d\n", driver->used);
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
	}

	enc.dest = buf_hdlc + driver->used;
	enc.dest_last = (void *)(buf_hdlc + driver->used + 2*payload_size + 3);
	diag_hdlc_encode(&send, &enc);

	/*
	 * This is to check if, after HDLC encoding, we are still within
	 * the limits of the aggregation buffer. If not, we write out the
	 * current buffer and start aggregation in a newly allocated buffer.
	 */
	if ((unsigned int) enc.dest >=
		 (unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		if (diag7k_debug_mask)
			printk(KERN_INFO "\n size written is %d\n", driver->used);
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
		enc.dest = buf_hdlc + driver->used;
		enc.dest_last = (void *)(buf_hdlc + driver->used +
							 (2*payload_size) + 3);
		diag_hdlc_encode(&send, &enc);
	}

	driver->used = (uint32_t) enc.dest - (uint32_t) buf_hdlc;
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		if (diag7k_debug_mask)
			printk(KERN_INFO "\n size written is %d\n", driver->used);
		driver->used = 0;
	}

	mutex_unlock(&driver->diagchar_mutex);
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	if (!timer_in_progress)	{
		timer_in_progress = 1;
		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
	}
	return 0;

fail_free_hdlc:
	buf_hdlc = NULL;
	driver->used = 0;
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	mutex_unlock(&driver->diagchar_mutex);
	return ret;

fail_free_copy:
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	return ret;
}
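
diagchar_write() budgets 2*payload_size + 3 bytes of output for diag_hdlc_encode() because HDLC byte stuffing can, in the worst case, double every payload byte, plus trailing framing. A minimal stuffing sketch (CRC omitted; the 0x7d/0x7e escape and control values follow the usual HDLC convention):

#include <stdint.h>
#include <stddef.h>

#define ESC_CHAR	0x7d
#define CTRL_CHAR	0x7e
#define ESC_MASK	0x20

/*
 * Minimal HDLC-style byte stuffing: any payload byte that collides
 * with the framing characters expands to an escape pair, so output
 * can grow to roughly twice the input, plus the terminator.
 */
static size_t demo_hdlc_stuff(const uint8_t *src, size_t len,
			      uint8_t *dst)
{
	size_t out = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		if (src[i] == CTRL_CHAR || src[i] == ESC_CHAR) {
			dst[out++] = ESC_CHAR;
			dst[out++] = src[i] ^ ESC_MASK;
		} else {
			dst[out++] = src[i];
		}
	}
	dst[out++] = CTRL_CHAR;		/* frame terminator */
	return out;
}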
Example #28
int ram_console_pstore_reserve_memory(struct reserved_mem *rmem, unsigned long node, const char *uname)
{
	pr_alert("[memblock]%s: 0x%llx - 0x%llx (0x%llx)\n", uname, (unsigned long long)rmem->base,
		 (unsigned long long)rmem->base + (unsigned long long)rmem->size, (unsigned long long)rmem->size);
	return 0;
}
Example #29
long diagchar_ioctl(struct file *filp,
                    unsigned int iocmd, unsigned long ioarg)
{
    int i, j, count_entries = 0, temp;
    int success = -1;
    void *temp_buf;
    uint16_t support_list = 0;
    struct dci_notification_tbl *notify_params;
    int status;

    if (iocmd == DIAG_IOCTL_COMMAND_REG) {
        struct bindpkt_params_per_process *pkt_params =
            (struct bindpkt_params_per_process *) ioarg;
        mutex_lock(&driver->diagchar_mutex);
        for (i = 0; i < diag_max_reg; i++) {
            if (driver->table[i].process_id == 0) {
                diag_add_reg(i, pkt_params->params,
                             &success, &count_entries);
                if (pkt_params->count > count_entries) {
                    pkt_params->params++;
                } else {
                    mutex_unlock(&driver->diagchar_mutex);
                    return success;
                }
            }
        }
        if (i < diag_threshold_reg) {
            /* Increase table size by amount required */
            diag_max_reg += pkt_params->count -
                            count_entries;
            /* Make sure size doesn't go beyond threshold */
            if (diag_max_reg > diag_threshold_reg) {
                diag_max_reg = diag_threshold_reg;
                pr_info("diag: best case memory allocation\n");
            }
            temp_buf = krealloc(driver->table,
                                diag_max_reg*sizeof(struct
                                                    diag_master_table), GFP_KERNEL);
            if (!temp_buf) {
                diag_max_reg -= pkt_params->count -
                                count_entries;
                pr_alert("diag: Insufficient memory for reg.");
                mutex_unlock(&driver->diagchar_mutex);
                return 0;
            } else {
                driver->table = temp_buf;
            }
            for (j = i; j < diag_max_reg; j++) {
                diag_add_reg(j, pkt_params->params,
                             &success, &count_entries);
                if (pkt_params->count > count_entries) {
                    pkt_params->params++;
                } else {
                    mutex_unlock(&driver->diagchar_mutex);
                    return success;
                }
            }
            mutex_unlock(&driver->diagchar_mutex);
        } else {
            mutex_unlock(&driver->diagchar_mutex);
            pr_err("Max size reached, Pkt Registration failed for"
                   " Process %d", current->tgid);
        }
        success = 0;
    } else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
        struct diagpkt_delay_params *delay_params =
            (struct diagpkt_delay_params *) ioarg;

        if ((delay_params->rsp_ptr) &&
                (delay_params->size == sizeof(delayed_rsp_id)) &&
                (delay_params->num_bytes_ptr)) {
            *((uint16_t *)delay_params->rsp_ptr) =
                DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
            *(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
            success = 0;
        }
    } else if (iocmd == DIAG_IOCTL_DCI_REG) {
        if (driver->dci_state == DIAG_DCI_NO_REG)
            return DIAG_DCI_NO_REG;
        if (driver->num_dci_client >= MAX_DCI_CLIENT)
            return DIAG_DCI_NO_REG;
        notify_params = (struct dci_notification_tbl *) ioarg;
        mutex_lock(&driver->dci_mutex);
        driver->num_dci_client++;
        pr_debug("diag: id = %d\n", driver->dci_client_id);
        driver->dci_client_id++;
        for (i = 0; i < MAX_DCI_CLIENT; i++) {
            if (driver->dci_notify_tbl[i].client == NULL) {
                driver->dci_notify_tbl[i].client = current;
                driver->dci_notify_tbl[i].list =
                    notify_params->list;
                driver->dci_notify_tbl[i].signal_type =
                    notify_params->signal_type;
                break;
            }
        }
        mutex_unlock(&driver->dci_mutex);
        return driver->dci_client_id;
    } else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
        success = -1;
        /* Delete this process from DCI table */
        mutex_lock(&driver->dci_mutex);
        for (i = 0; i < dci_max_reg; i++) {
            if (driver->dci_tbl[i].pid == current->tgid) {
                pr_debug("diag: delete %d\n", current->tgid);
                driver->dci_tbl[i].pid = 0;
                success = i;
            }
        }
        for (i = 0; i < MAX_DCI_CLIENT; i++) {
            if (driver->dci_notify_tbl[i].client == current) {
                driver->dci_notify_tbl[i].client = NULL;
                break;
            }
        }
        /* If any registrations were deleted successfully OR a valid
           client_id was sent in the DEINIT call, then it's a DCI client */
        if (success >= 0 || ioarg)
            driver->num_dci_client--;
        mutex_unlock(&driver->dci_mutex);
        for (i = 0; i < dci_max_reg; i++)
            if (driver->dci_tbl[i].pid != 0)
                pr_debug("diag: PID = %d, UID = %d, tag = %d\n",
                         driver->dci_tbl[i].pid, driver->dci_tbl[i].uid, driver->dci_tbl[i].tag);
        pr_debug("diag: complete deleting registrations\n");
        return success;
    } else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
        if (driver->ch_dci)
            support_list = support_list | DIAG_CON_MPSS;
        *(uint16_t *)ioarg = support_list;
        return DIAG_DCI_NO_ERROR;
    } else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
        for (i = 0; i < driver->num_clients; i++)
            if (driver->client_map[i].pid == current->tgid)
                break;
        if (i == driver->num_clients)
            return -EINVAL;
        driver->data_ready[i] |= DEINIT_TYPE;
        wake_up_interruptible(&driver->wait_q);
        success = 1;
    } else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
        mutex_lock(&driver->diagchar_mutex);
        temp = driver->logging_mode;
        driver->logging_mode = (int)ioarg;
        if (driver->logging_mode == MEMORY_DEVICE_MODE) {
            diag_clear_hsic_tbl();
            driver->mask_check = 1;
            if (driver->socket_process) {
                /*
                 * Notify the socket logging process that we
                 * are switching to MEMORY_DEVICE_MODE
                 */
                status = send_sig(SIGCONT,
                                  driver->socket_process, 0);
                if (status) {
                    pr_err("diag: %s, Error notifying ",
                           __func__);
                    pr_err("socket process, status: %d\n",
                           status);
                }
            }
        }
        if (driver->logging_mode == UART_MODE) {
            diag_clear_hsic_tbl();
            driver->mask_check = 0;
            driver->logging_mode = MEMORY_DEVICE_MODE;
        }
        if (driver->logging_mode == SOCKET_MODE) {
            diag_clear_hsic_tbl();
            driver->socket_process = current;
            driver->mask_check = 0;
            driver->logging_mode = MEMORY_DEVICE_MODE;
        }
        driver->logging_process_id = current->tgid;
        mutex_unlock(&driver->diagchar_mutex);
        if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
                == NO_LOGGING_MODE) {
            driver->in_busy_1 = 1;
            driver->in_busy_2 = 1;
            driver->in_busy_qdsp_1 = 1;
            driver->in_busy_qdsp_2 = 1;
            driver->in_busy_wcnss_1 = 1;
            driver->in_busy_wcnss_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
            driver->in_busy_sdio = 1;
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
            diagfwd_disconnect_bridge(0);
            diag_clear_hsic_tbl();
#endif
        } else if (temp == NO_LOGGING_MODE && driver->logging_mode
                   == MEMORY_DEVICE_MODE) {
            driver->in_busy_1 = 0;
            driver->in_busy_2 = 0;
            driver->in_busy_qdsp_1 = 0;
            driver->in_busy_qdsp_2 = 0;
            driver->in_busy_wcnss_1 = 0;
            driver->in_busy_wcnss_2 = 0;
            /* Poll SMD channels to check for data*/
            if (driver->ch)
                queue_work(driver->diag_wq,
                           &(driver->diag_read_smd_work));
            if (driver->chqdsp)
                queue_work(driver->diag_wq,
                           &(driver->diag_read_smd_qdsp_work));
            if (driver->ch_wcnss)
                queue_work(driver->diag_wq,
                           &(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
            driver->in_busy_sdio = 0;
            /* Poll SDIO channel to check for data */
            if (driver->sdio_ch)
                queue_work(driver->diag_sdio_wq,
                           &(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
            diagfwd_connect_bridge(0);
#endif
        }
#ifdef CONFIG_DIAG_OVER_USB
        else if (temp == USB_MODE && driver->logging_mode
                 == NO_LOGGING_MODE) {
            diagfwd_disconnect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
            diagfwd_disconnect_bridge(0);
#endif
        } else if (temp == NO_LOGGING_MODE && driver->logging_mode
                   == USB_MODE) {
            diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
            diagfwd_connect_bridge(0);
#endif
        } else if (temp == USB_MODE && driver->logging_mode
                   == MEMORY_DEVICE_MODE) {
            diagfwd_disconnect();
            driver->in_busy_1 = 0;
            driver->in_busy_2 = 0;
            driver->in_busy_qdsp_1 = 0;
            driver->in_busy_qdsp_2 = 0;
            driver->in_busy_wcnss_1 = 0;
            driver->in_busy_wcnss_2 = 0;

            /* Poll SMD channels to check for data*/
            if (driver->ch)
                queue_work(driver->diag_wq,
                           &(driver->diag_read_smd_work));
            if (driver->chqdsp)
                queue_work(driver->diag_wq,
                           &(driver->diag_read_smd_qdsp_work));
            if (driver->ch_wcnss)
                queue_work(driver->diag_wq,
                           &(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
            driver->in_busy_sdio = 0;
            /* Poll SDIO channel to check for data */
            if (driver->sdio_ch)
                queue_work(driver->diag_sdio_wq,
                           &(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
            diagfwd_cancel_hsic();
            diagfwd_connect_bridge(0);
#endif
        } else if (temp == MEMORY_DEVICE_MODE &&
                   driver->logging_mode == USB_MODE) {
            diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
            diag_clear_hsic_tbl();
            diagfwd_cancel_hsic();
            diagfwd_connect_bridge(0);
#endif
        }
#endif /* DIAG over USB */
        success = 1;
    }

    return success;
}
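
/*
 * check_touch_off(): screen-off wake gesture handler.  Implements
 * sweep2wake (a horizontal swipe that must pass intermediate
 * checkpoints before reaching the opposite edge) and doubletap2wake
 * (two taps within 2 seconds), both gated by the proximity sensor.
 */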
void check_touch_off(int x, int y, unsigned char state, unsigned char touch_count)
{
    unsigned char prox = 0;

    /* Gate all wake gestures on the proximity sensor: if the latest raw
     * sample shows the sensor is covered (e.g. device in a pocket), bail
     * out before any gesture state is touched. */
    if (main_prox_data != NULL)
        prox = main_prox_data->buf[PROXIMITY_RAW].prox[0];
    if (prox > screen_wake_options_prox_max)
        return;

    //sweep2wake: options 1/5 always, options 2/6 only while charging.
    //While state is clear, record which screen edge a sweep starts from.
    if ((screen_wake_options == 1 || (screen_wake_options == 2 && is_charging) || screen_wake_options == 5 || (screen_wake_options == 6 && is_charging)) && !state)
    {
        if (screen_wake_options_debug)
            pr_alert("WAKE_START TOUCH %d-%d-%d\n", x, x_lo, x_hi);
        //Left to right
        if (x < x_lo)
            wake_start = 1;
        //Right to left
        if (x > x_hi)
            wake_start = 4;
    }
    if ((screen_wake_options == 1 || (screen_wake_options == 2 && is_charging) || screen_wake_options == 5 || (screen_wake_options == 6 && is_charging)) && state)
    {
        /* The sweep must pass checkpoints at one third and two thirds
         * of the screen width (+/- 30 px) before reaching the far edge. */
        //Left to right: left edge -> 1/3 -> 2/3 -> right edge
        if (wake_start == 1 && x >= (x_onethird - 30) && x <= (x_onethird + 30))
        {
            wake_start = 2;
            if (screen_wake_options_debug)
                pr_alert("WAKE_START ON2 %d-%d\n", x, x_onethird);
        }
        if (wake_start == 2 && x >= (x_twothird - 30) && x <= (x_twothird + 30))
        {
            wake_start = 3;
            if (screen_wake_options_debug)
                pr_alert("WAKE_START ON3 %d-%d\n", x, x_twothird);
        }
        if (wake_start == 3 && x > x_hi) {
            /* Full left-to-right sweep completed: fire the power trigger. */
            pwr_trig_fscreen();
            if (screen_wake_options_debug)
                pr_alert("WAKE_START OFF-1 %d-%d\n", x, x_hi);
        }
        //Right to left: right edge -> 2/3 -> 1/3 -> left edge
        if (wake_start == 4 && x >= (x_twothird - 30) && x <= (x_twothird + 30))
        {
            wake_start = 5;
            if (screen_wake_options_debug)
                pr_alert("WAKE_START ON3 %d-%d\n", x, x_twothird);
        }
        if (wake_start == 5 && x >= (x_onethird - 30) && x <= (x_onethird + 30))
        {
            wake_start = 6;
            if (screen_wake_options_debug)
                pr_alert("WAKE_START ON2 %d-%d\n", x, x_onethird);
        }
        if (wake_start == 6 && x < x_lo) {
            /* Full right-to-left sweep completed: fire the power trigger. */
            pwr_trig_fscreen();
            if (screen_wake_options_debug)
                pr_alert("WAKE_START OFF-1 %d-%d\n", x, x_lo);
        }
    }
    //doubletap2wake: options 3/5 always, options 4/6 only while charging.
    if ((screen_wake_options == 3 || (screen_wake_options == 4 && is_charging) || screen_wake_options == 5 || (screen_wake_options == 6 && is_charging)) && !state)
    {
        bool block_store = false;
        if (last_touch_time)
        {
            if (screen_wake_options_debug)
                pr_alert("DOUBLE TAP WAKE TOUCH %d-%d-%ld-%ld-%d\n",
                         x, y, jiffies, last_touch_time, touch_count);
            /* A second tap (touch_count must be 0) within 2 seconds of
             * the first fires the power trigger; a late or multi-finger
             * tap just discards the stored timestamp.  Either way the
             * timestamp is consumed and this event is not recorded as a
             * new first tap. */
            if (!touch_count && jiffies_to_msecs(jiffies - last_touch_time) < 2000)
            {
                if (screen_wake_options_debug)
                    pr_alert("DOUBLE TAP WAKE POWER BTN CALLED %d-%d\n", x, y);
                pwr_trig_fscreen();
            }
            else
            {
                if (screen_wake_options_debug)
                    pr_alert("DOUBLE TAP WAKE DELETE %d-%d-%ld-%ld\n",
                             x, y, jiffies, last_touch_time);
            }
            last_touch_time = 0;
            block_store = true;
        }
        if (!last_touch_time && !block_store)
            last_touch_time = jiffies;
    }
}