/*
 * mx_pmic_irq_cb - PMIC interrupt callback that decodes audio-jack events.
 *
 * @cb_data: opaque callback context, actually a struct snd_intelmad *
 * @intsts:  PMIC interrupt status byte; bit 1 (0x2) flags a jack event
 *
 * Decodes headset/headphone insert/remove and button press/release by
 * comparing the jack state latched on the previous interrupt against the
 * state read now, then forwards any event via sst_mad_send_jack_report().
 *
 * State encoding (from mx_get_jack_status()):
 *   0xc0 = nothing inserted, 0x40 = headset-ish, 0x00 = headphone-ish.
 * NOTE(review): these bit meanings are inferred from the transitions
 * handled below — confirm against the MAXIM PMIC datasheet.
 */
static void mx_pmic_irq_cb(void *cb_data, u8 intsts)
{
	u8 jack_cur_status, jack_prev_state = 0;
	struct mad_jack *mjack = NULL;
	unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
	time_t timediff;
	struct snd_intelmad *intelmaddata = cb_data;

	mjack = &intelmaddata->jack[0];
	if (intsts & 0x2) {
		jack_cur_status = mx_get_jack_status();
		jack_prev_state = intelmaddata->jack_prev_state;
		if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x40)) {
			/*headset insert detected. */
			pr_debug("MAD headset inserted\n");
			present = 1;
			jack_event_flag = 1;
			mjack->jack_status = 1;
			mjack->jack.type = SND_JACK_HEADSET;
		}
		if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x00)) {
			/* headphone insert detected. */
			pr_debug("MAD headphone inserted\n");
			present = 1;
			jack_event_flag = 1;
			mjack->jack.type = SND_JACK_HEADPHONE;
		}
		if ((jack_prev_state == 0x40) && (jack_cur_status == 0xc0)) {
			/* headset remove detected. */
			pr_debug("MAD headset removed\n");
			present = 0;
			jack_event_flag = 1;
			mjack->jack_status = 0;
			mjack->jack.type = SND_JACK_HEADSET;
		}
		if ((jack_prev_state == 0x00) && (jack_cur_status == 0xc0)) {
			/* headphone remove detected. */
			pr_debug("MAD headphone removed\n");
			present = 0;
			jack_event_flag = 1;
			mjack->jack.type = SND_JACK_HEADPHONE;
		}
		if ((jack_prev_state == 0x40) && (jack_cur_status == 0x00)) {
			/* button pressed */
			/* only the press timestamp is latched here; the
			 * event itself is reported on release below */
			do_gettimeofday(&mjack->buttonpressed);
			pr_debug("MAD button press detected\n");
		}
		if ((jack_prev_state == 0x00) && (jack_cur_status == 0x40)) {
			if (mjack->jack_status) {
				/*button pressed */
				do_gettimeofday(
					&mjack->buttonreleased);
				/*button pressed */
				pr_debug("MAD Button Released detected\n");
				/* press duration in whole seconds decides
				 * long vs. short press */
				timediff = mjack->buttonreleased.tv_sec -
						mjack->buttonpressed.tv_sec;
				buttonpressflag = 1;
				if (timediff > 1) {
					pr_debug("MAD long press dtd\n");
					/* send headphone detect/undetect */
					present = 1;
					jack_event_flag = 1;
					mjack->jack.type = MID_JACK_HS_LONG_PRESS;
				} else {
					pr_debug("MAD short press dtd\n");
					/* send headphone detect/undetect */
					present = 1;
					jack_event_flag = 1;
					mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
				}
			} else {
				/***workaround for maxim hw issue,0x00 t 0x40 is
				not a valid transiton for Headset insertion */
				/*headset insert detected. */
				pr_debug("MAD headset inserted\n");
				present = 1;
				jack_event_flag = 1;
				mjack->jack_status = 1;
				mjack->jack.type = SND_JACK_HEADSET;
			}
		}
		/* latch current state for the next interrupt */
		intelmaddata->jack_prev_state = jack_cur_status;
		pr_debug("mx_pmic_irq_cb prv_state= 0x%x\n",
				intelmaddata->jack_prev_state);
	}
	if (jack_event_flag)
		sst_mad_send_jack_report(&mjack->jack,
				buttonpressflag, present);
}
static int try_to_freeze_tasks(bool sig_only) { struct task_struct *g, *p; unsigned long end_time; unsigned int todo; struct timeval start, end; u64 elapsed_csecs64; unsigned int elapsed_csecs; unsigned int wakeup = 0; do_gettimeofday(&start); end_time = jiffies + TIMEOUT; do { todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { if (frozen(p) || !freezeable(p)) continue; if (!freeze_task(p, sig_only)) continue; /* * Now that we've done set_freeze_flag, don't * perturb a task in TASK_STOPPED or TASK_TRACED. * It is "frozen enough". If the task does wake * up, it will immediately call try_to_freeze. */ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); yield(); /* Yield is okay here */ if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) { wakeup = 1; break; } if (time_after(jiffies, end_time)) break; } while (todo); do_gettimeofday(&end); elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); do_div(elapsed_csecs64, NSEC_PER_SEC / 100); elapsed_csecs = elapsed_csecs64; if (todo) { /* This does not unfreeze processes that are already frozen * (we have slightly ugly calling convention in that respect, * and caller must call thaw_processes() if something fails), * but it cleans up leftover PF_FREEZE requests. */ if(wakeup) { printk("\n"); printk(KERN_ERR "Freezing of %s aborted\n", sig_only ? "user space " : "tasks "); } else { printk("\n"); printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " "(%d tasks refusing to freeze):\n", elapsed_csecs / 100, elapsed_csecs % 100, todo); show_state(); } read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); if (freezing(p) && !freezer_should_skip(p)) printk(KERN_ERR " %s\n", p->comm); cancel_freezing(p); task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } else {
/*
 * cypress_touchkey_work - bottom-half handler for a touchkey interrupt.
 *
 * Decodes info->keybuf (latched by the ISR) into a keycode index and a
 * press/release state, filters out invalid or conflicting events, applies
 * a 100 ms debounce to key index 0 (menu key), and reports the result to
 * the input subsystem.  Always re-enables the IRQ on exit.
 */
static void cypress_touchkey_work(struct work_struct *work)
{
	struct cypress_touchkey_info *info =
		container_of(work, struct cypress_touchkey_info, key_work);
	struct timeval diff;
	int code;
	int press;

	/* 0xFF means the controller returned garbage - drop the event */
	if (info->keybuf == 0xFF) {
		dev_err(&info->client->dev, "keybuf: 0x%2X\n",
					info->keybuf);
		goto out;
	}

	/* press bit is active-low; keycode field is 1-based in hardware */
	press = !(info->keybuf & PRESS_BIT_MASK);
	code = (int)(info->keybuf & KEYCODE_BIT_MASK) - 1;

	if (code < 0) {
		dev_err(&info->client->dev, "not proper interrupt 0x%2X.\n",
					info->keybuf);
		if (info->press == 0)
			goto out;
		/* a key was down - synthesize a release so it doesn't stick */
		dev_err(&info->client->dev, "forced key released.\n");
		code = info->code;
		press = 0;
	}

	/* ignore invalid keycode, keypress value */
	if (code > 1 || press > 1) {
		dev_err(&info->client->dev,
				"invalid keycode or keypress 0x%2X.\n",
				info->keybuf);
		goto out;
	}

#if !defined(CONFIG_SAMSUNG_PRODUCT_SHIP)
	TOUCHKEY_LOG(info->keycode[code], press);
#endif

	/* suppress touchkey presses while the touchscreen is in use */
	if (touch_is_pressed && press) {
#if !defined(CONFIG_SAMSUNG_PRODUCT_SHIP)
#ifdef CONFIG_DEBUG_PRINTK
		printk(KERN_DEBUG "[TouchKey]touchkey pressed but don't send event because touch is pressed.\n");
#else
		;
#endif
#endif
		goto out;
	}

	/* debounce key 0 (menu): ignore repeats within 100 ms */
	if (code == 0 && press == 1) {
		do_gettimeofday(&info->end);
		diff.tv_sec = info->end.tv_sec - info->start.tv_sec;
		diff.tv_usec = info->end.tv_usec - info->start.tv_usec;
		if (diff.tv_sec >= 0) {
			if (diff.tv_usec < 0) {
				/* borrow one second into the usec field */
				(diff.tv_sec)--;
				diff.tv_usec = (info->end.tv_usec +
					1000000L) - info->start.tv_usec;
			}
			/* If the interval of pressed menu-key is below 100msec */
			if (diff.tv_sec == 0 && diff.tv_usec < 100000) {
				dev_err(&info->client->dev,
					"Interval below 100msec:%ldusec\n",
					diff.tv_usec);
				info->start.tv_sec = info->end.tv_sec;
				info->start.tv_usec = info->end.tv_usec;
				goto out;
			}
		}
		/* refresh timeval */
		info->start.tv_sec = info->end.tv_sec;
		info->start.tv_usec = info->end.tv_usec;
	}

	/* remember last reported event for the forced-release path above */
	info->code = code;
	info->press = press;
	input_report_key(info->input_dev, info->keycode[code], press);
	input_sync(info->input_dev);

out:
	enable_irq(info->irq);
	return;
}
/*****************************************************************************
 * FUNCTION
 *  hal_tx_dma_irq_handler
 * DESCRIPTION
 *  lower level tx interrupt handler
 * PARAMETERS
 *  p_dma_info   [IN] pointer to BTIF dma channel's information
 * RETURNS
 *  0 means success, negative means fail
 *****************************************************************************/
int hal_tx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info)
{
#define MAX_CONTINIOUS_TIMES 512
	unsigned int i_ret = -1;
	unsigned int valid_size = 0;
	unsigned int vff_len = 0;
	unsigned int left_len = 0;
	unsigned int base = p_dma_info->base;
	/* counts back-to-back "flush" interrupts; a long run means the
	 * BTIF fifo is stuck and we should stop taking tx interrupts */
	static int flush_irq_counter;
	static struct timeval start_timer;	/* first flush IRQ in the burst */
	static struct timeval end_timer;	/* when the burst limit was hit */
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
#if MTK_BTIF_ENABLE_CLK_CTL
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC
		    ("%s: clock is off before irq status clear done!!!\n",
		     __FILE__);
		return i_ret;
	}
#endif
	/*check if Tx VFF Left Size equal to VFIFO size or not */
	vff_len = BTIF_READ32(TX_DMA_VFF_LEN(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
	left_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	if (0 == flush_irq_counter) {
		/* remember when this burst of flush interrupts started */
		do_gettimeofday(&start_timer);
	}
	if ((0 < valid_size) && (8 > valid_size)) {
		i_ret = _tx_dma_flush(p_dma_info);
		flush_irq_counter++;
		if (MAX_CONTINIOUS_TIMES <= flush_irq_counter) {
			do_gettimeofday(&end_timer);
			/*when btif tx fifo cannot accept any data and
			   counts of bytes left in tx vfifo < 8 for a while
			   we assume that btif cannot send data for a long
			   time in order not to generate interrupt
			   continiously, which may effect system's
			   performance. we clear tx flag and disable btif
			   tx interrupt */
			/*clear interrupt flag */
			BTIF_CLR_BIT(TX_DMA_INT_FLAG(base),
				     TX_DMA_INT_FLAG_MASK);
			/*vFIFO data has been read by DMA controller,
			   just disable tx dma's irq */
			i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
			BTIF_ERR_FUNC
			    ("**********************ERROR, ERROR, ERROR**************************\n");
			/* FIX: original printed end_timer.tv_usec twice and
			 * never end_timer.tv_sec; also cast the long timeval
			 * members to match the %d conversion specifiers */
			BTIF_ERR_FUNC
			    ("BTIF Tx IRQ happened %d times (continiously), between %d.%d and %d.%d\n",
			     MAX_CONTINIOUS_TIMES,
			     (int)start_timer.tv_sec, (int)start_timer.tv_usec,
			     (int)end_timer.tv_sec, (int)end_timer.tv_usec);
		}
	} else if (vff_len == left_len) {
		/* vfifo fully drained: burst is over, reset the counter */
		flush_irq_counter = 0;
		/*clear interrupt flag */
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);
		/*vFIFO data has been read by DMA controller,
		   just disable tx dma's irq */
		i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
	} else {
#if 0
		BTIF_ERR_FUNC
		    ("**********************WARNING**************************\n");
		BTIF_ERR_FUNC("invalid irq condition, dump register\n");
		hal_dma_dump_reg(p_dma_info, REG_TX_DMA_ALL);
#endif
		BTIF_DBG_FUNC
		    ("superious IRQ occurs, vff_len(%d), valid_size(%d), left_len(%d)\n",
		     vff_len, valid_size, left_len);
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
static long CAM_CAL_Ioctl( struct file *file, unsigned int a_u4Command, unsigned long a_u4Param ) #endif { int i4RetValue = 0; u8 * pBuff = NULL; u8 * pWorkingBuff = NULL; stCAM_CAL_INFO_STRUCT *ptempbuf; #ifdef CAM_CALGETDLT_DEBUG struct timeval ktv1, ktv2; unsigned long TimeIntervalUS; #endif if(_IOC_NONE == _IOC_DIR(a_u4Command)) { } else { pBuff = (u8 *)kmalloc(sizeof(stCAM_CAL_INFO_STRUCT),GFP_KERNEL); if(NULL == pBuff) { CAM_CALDB("[S24CAM_CAL] ioctl allocate mem failed\n"); return -ENOMEM; } if(_IOC_WRITE & _IOC_DIR(a_u4Command)) { if(copy_from_user((u8 *) pBuff , (u8 *) a_u4Param, sizeof(stCAM_CAL_INFO_STRUCT))) { //get input structure address kfree(pBuff); CAM_CALDB("[S24CAM_CAL] ioctl copy from user failed\n"); return -EFAULT; } } } ptempbuf = (stCAM_CAL_INFO_STRUCT *)pBuff; pWorkingBuff = (u8*)kmalloc(ptempbuf->u4Length,GFP_KERNEL); if(NULL == pWorkingBuff) { kfree(pBuff); CAM_CALDB("[S24CAM_CAL] ioctl allocate mem failed\n"); return -ENOMEM; } CAM_CALDB("[S24CAM_CAL] init Working buffer address 0x%8x command is 0x%8x\n", (u32)pWorkingBuff, (u32)a_u4Command); if(copy_from_user((u8*)pWorkingBuff , (u8*)ptempbuf->pu1Params, ptempbuf->u4Length)) { kfree(pBuff); kfree(pWorkingBuff); CAM_CALDB("[S24CAM_CAL] ioctl copy from user failed\n"); return -EFAULT; } switch(a_u4Command) { case CAM_CALIOC_S_WRITE: CAM_CALDB("[S24CAM_CAL] Write CMD \n"); #ifdef CAM_CALGETDLT_DEBUG do_gettimeofday(&ktv1); #endif i4RetValue = iWriteData((u16)ptempbuf->u4Offset, ptempbuf->u4Length, pWorkingBuff); #ifdef CAM_CALGETDLT_DEBUG do_gettimeofday(&ktv2); if(ktv2.tv_sec > ktv1.tv_sec) { TimeIntervalUS = ktv1.tv_usec + 1000000 - ktv2.tv_usec; } else { TimeIntervalUS = ktv2.tv_usec - ktv1.tv_usec; } printk("Write data %d bytes take %lu us\n",ptempbuf->u4Length, TimeIntervalUS); #endif break; case CAM_CALIOC_G_READ: CAM_CALDB("[S24CAM_CAL] Read CMD \n"); #ifdef CAM_CALGETDLT_DEBUG do_gettimeofday(&ktv1); #endif CAM_CALDB("[CAM_CAL] offset %d \n", ptempbuf->u4Offset); 
CAM_CALDB("[CAM_CAL] length %d \n", ptempbuf->u4Length); CAM_CALDB("[CAM_CAL] Before read Working buffer address 0x%8x \n", (u32)pWorkingBuff); Enb_OTP_Read(1); //Enable OTP Read i4RetValue = iReadData((u16)(ptempbuf->u4Offset+OTP_START_ADDR), ptempbuf->u4Length, pWorkingBuff); Enb_OTP_Read(0); //Disable OTP Read Clear_OTP_Buff(); //Clean OTP buff CAM_CALDB("[S24CAM_CAL] After read Working buffer data 0x%4x \n", *pWorkingBuff); #ifdef CAM_CALGETDLT_DEBUG do_gettimeofday(&ktv2); if(ktv2.tv_sec > ktv1.tv_sec) { TimeIntervalUS = ktv1.tv_usec + 1000000 - ktv2.tv_usec; } else { TimeIntervalUS = ktv2.tv_usec - ktv1.tv_usec; } printk("Read data %d bytes take %lu us\n",ptempbuf->u4Length, TimeIntervalUS); #endif break; default : CAM_CALDB("[S24CAM_CAL] No CMD \n"); i4RetValue = -EPERM; break; } if(_IOC_READ & _IOC_DIR(a_u4Command)) { //copy data to user space buffer, keep other input paremeter unchange. CAM_CALDB("[S24CAM_CAL] to user length %d \n", ptempbuf->u4Length); CAM_CALDB("[S24CAM_CAL] to user Working buffer address 0x%8x \n", (u32)pWorkingBuff); if(copy_to_user((u8 __user *) ptempbuf->pu1Params , (u8 *)pWorkingBuff , ptempbuf->u4Length)) { kfree(pBuff); kfree(pWorkingBuff); CAM_CALDB("[S24CAM_CAL] ioctl copy to user failed\n"); return -EFAULT; } } kfree(pBuff); kfree(pWorkingBuff); return i4RetValue; }
/*******************************************************************************
**
**  sysHyPhy20gGetTimeOfDay
**  ___________________________________________________________________________
**
**  DESCRIPTION: Get current day time.  Thin wrapper around the kernel's
**               do_gettimeofday() so that portable HyPhy20g code does not
**               depend on the Linux API directly.
**
**  INPUTS:      pTime - pointer to a TIMEVAL to be filled with the
**                       current wall-clock time
**
**  OUTPUTS:     *pTime is updated in place.
**
**  RETURNS:     None
**
*******************************************************************************/
void sysHyPhy20gGetTimeOfDay(TIMEVAL *pTime)
{
	do_gettimeofday(pTime);
}
/*
 * ipt_ulog_packet - queue one logged packet for delivery to userspace.
 *
 * Builds a ulog_packet_msg_t inside the per-group netlink skb buffer,
 * batching up to loginfo->qthreshold messages per skb before sending.
 * Runs under ulog_lock; NLMSG_PUT contains a hidden "goto nlmsg_failure"
 * on overflow, so the error labels at the bottom are reachable from the
 * middle of the function.
 */
static void ipt_ulog_packet(unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if ((loginfo->copy_range == 0) ||
	    (loginfo->copy_range > skb->len)) {
		copy_len = skb->len;
	} else {
		copy_len = loginfo->copy_range;
	}

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];

	LOCK_BH(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen,
		loginfo->qthreshold);

	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
			sizeof(*pm)+copy_len);
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* We might not have a timestamp, get one */
	if (skb->stamp.tv_sec == 0)
		do_gettimeofday((struct timeval *)&skb->stamp);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	pm->timestamp_sec = skb->stamp.tv_sec;
	pm->timestamp_usec = skb->stamp.tv_usec;
	pm->mark = skb->nfmark;
	pm->hook = hooknum;
	/* prefix priority: explicit arg, then rule prefix, else empty */
	if (prefix != NULL)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix));
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0
	    && skb->mac.raw != (void *) skb->nh.iph
	    && in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb->mac.raw, in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	/* copy_len <= skb->len, so can't fail. */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();

	/* check if we are building multi-part messages */
	if (ub->qlen > 1) {
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
	}

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(groupnum);
	}

	UNLOCK_BH(&ulog_lock);

	return;

nlmsg_failure:
	PRINTR("ipt_ULOG: error during NLMSG_PUT\n");

alloc_failure:
	PRINTR("ipt_ULOG: Error building netlink message\n");

	UNLOCK_BH(&ulog_lock);
}
/*
 * wlan_log_to_user - append one driver log line to the multicast log queue.
 *
 * @log_level:  severity of the message (used only for console mirroring)
 * @to_be_sent: NUL-terminated log text
 * @length:     length of @to_be_sent in bytes
 *
 * When no logger app is registered the message goes straight to kmsg.
 * Otherwise the line is prefixed with "[comm][hh:mm:ss.usec] ", copied
 * into the current log node under gwlan_logging.spin_lock, and the logger
 * thread is woken when the node fills up.  Oversized messages are
 * truncated to fit one node.  Returns 0 on success, -EIO if the logging
 * service is not initialized.
 */
int wlan_log_to_user(VOS_TRACE_LEVEL log_level, char *to_be_sent, int length)
{
	/* Add the current time stamp */
	char *ptr;
	char tbuf[50];
	int tlen;
	int total_log_len;
	unsigned int *pfilled_length;
	bool wake_up_thread = false;
	unsigned long flags;
	struct timeval tv;
	struct rtc_time tm;
	unsigned long local_time;

	if (!vos_is_multicast_logging()) {
		/*
		 * This is to make sure that we print the logs to kmsg console
		 * when no logger app is running. This is also needed to
		 * log the initial messages during loading of driver where even
		 * if app is running it will not be able to
		 * register with driver immediately and start logging all the
		 * messages.
		 */
		pr_info("%s\n", to_be_sent);
	} else {
		/* Format the Log time [hr:min:sec.microsec] */
		do_gettimeofday(&tv);
		/* Convert rtc to local time */
		local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60));
		rtc_time_to_tm(local_time, &tm);
		tlen = snprintf(tbuf, sizeof(tbuf),
				"[%s][%02d:%02d:%02d.%06lu] ",
				current->comm, tm.tm_hour,
				tm.tm_min, tm.tm_sec, tv.tv_usec);
		/* 1+1 indicate '\n'+'\0' */
		total_log_len = length + tlen + 1 + 1;

		spin_lock_irqsave(&gwlan_logging.spin_lock, flags);

		/* wlan logging svc resources are not yet initialized */
		if (!gwlan_logging.pcur_node) {
			spin_unlock_irqrestore(&gwlan_logging.spin_lock,
						flags);
			return -EIO;
		}

		pfilled_length = &gwlan_logging.pcur_node->filled_length;

		/* Check if we can accomodate more log into current
		 * node/buffer */
		if ((MAX_LOGMSG_LENGTH <=
			(*pfilled_length + sizeof(tAniNlHdr))) ||
			((MAX_LOGMSG_LENGTH -
				(*pfilled_length + sizeof(tAniNlHdr)))
				< total_log_len)) {
			/* current node is full - rotate to a fresh one and
			 * remember to wake the logger thread below */
			wake_up_thread = true;
			wlan_queue_logmsg_for_app();
			pfilled_length =
				&gwlan_logging.pcur_node->filled_length;
		}

		/* payload starts after the tAniHdr at the head of the node */
		ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)];

		/* Assumption here is that we receive logs which is always
		 * less than MAX_LOGMSG_LENGTH, where we can accomodate the
		 * tAniNlHdr + [context][timestamp] + log
		 * VOS_ASSERT if we cannot accomodate the the complete log into
		 * the available buffer.
		 *
		 * Continue and copy logs to the available length and
		 * discard the rest.
		 */
		if (MAX_LOGMSG_LENGTH < (sizeof(tAniNlHdr) + total_log_len)) {
			VOS_ASSERT(0);
			total_log_len = MAX_LOGMSG_LENGTH -
						sizeof(tAniNlHdr) - 2;
		}

		memcpy(&ptr[*pfilled_length], tbuf, tlen);
		memcpy(&ptr[*pfilled_length + tlen], to_be_sent,
			min(length, (total_log_len - tlen)));
		*pfilled_length += tlen + min(length, total_log_len - tlen);
		ptr[*pfilled_length] = '\n';
		*pfilled_length += 1;

		spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);

		/* Wakeup logger thread */
		if ((true == wake_up_thread)) {
			/* If there is logger app registered wakeup the logging
			 * thread (or) if always multicasting of host messages
			 * is enabled, wake up the logging thread
			 */
			set_bit(HOST_LOG_DRIVER_MSG,
					&gwlan_logging.eventFlag);
			wake_up_interruptible(&gwlan_logging.wait_queue);
		}

		/* mirror FATAL/ERROR messages to the console if configured */
		if (gwlan_logging.log_fe_to_console
			&& ((VOS_TRACE_LEVEL_FATAL == log_level)
			|| (VOS_TRACE_LEVEL_ERROR == log_level))) {
			pr_info("%s\n", to_be_sent);
		}
	}

	return 0;
}
static INT32 wmt_dev_tm_temp_query(void) { #define HISTORY_NUM 5 #define TEMP_THRESHOLD 65 #define REFRESH_TIME 300 //sec static INT32 temp_table[HISTORY_NUM] = {99}; //not query yet. static INT32 idx_temp_table = 0; static struct timeval query_time, now_time; INT8 query_cond = 0; INT32 current_temp = 0; INT32 index = 0; //Query condition 1: // If we have the high temperature records on the past, we continue to query/monitor // the real temperature until cooling for(index = 0; index < HISTORY_NUM ; index++) { if(temp_table[index] >= TEMP_THRESHOLD) { query_cond = 1; WMT_INFO_FUNC("high temperature (current temp = %d), we must keep querying temp temperature..\n", temp_table[index]); } } do_gettimeofday(&now_time); // Query condition 2: // Moniter the hif_sdio activity to decide if we have the need to query temperature. if(!query_cond) { if( wmt_dev_tra_sdio_poll()==0) { query_cond = 1; WMT_INFO_FUNC("sdio traffic , we must query temperature..\n"); } else { WMT_DBG_FUNC("sdio idle traffic ....\n"); } //only WIFI tx power might make temperature varies largely #if 0 if(!query_cond) { last_access_time = wmt_dev_tra_uart_poll(); if( jiffies_to_msecs(last_access_time) < TIME_THRESHOLD_TO_TEMP_QUERY) { query_cond = 1; WMT_DBG_FUNC("uart busy traffic , we must query temperature..\n"); } else { WMT_DBG_FUNC("uart still idle traffic , we don't query temp temperature..\n"); } } #endif } // Query condition 3: // If the query time exceeds the a certain of period, refresh temp table. // if(!query_cond) { if( (now_time.tv_sec < query_time.tv_sec) || //time overflow, we refresh temp table again for simplicity! 
((now_time.tv_sec > query_time.tv_sec) && (now_time.tv_sec - query_time.tv_sec) > REFRESH_TIME)) { query_cond = 1; WMT_INFO_FUNC("It is long time (> %d sec) not to query, we must query temp temperature..\n", REFRESH_TIME); for (index = 0; index < HISTORY_NUM ; index++) { temp_table[index] = 99; } } } if(query_cond) { // update the temperature record mtk_wcn_wmt_therm_ctrl(WMTTHERM_ENABLE); current_temp = mtk_wcn_wmt_therm_ctrl(WMTTHERM_READ); mtk_wcn_wmt_therm_ctrl(WMTTHERM_DISABLE); wmt_lib_notify_stp_sleep(); idx_temp_table = (idx_temp_table + 1) % HISTORY_NUM; temp_table[idx_temp_table] = current_temp; do_gettimeofday(&query_time); WMT_INFO_FUNC("[Thermal] current_temp = 0x%x \n", (current_temp & 0xFF)); } else { current_temp = temp_table[idx_temp_table]; idx_temp_table = (idx_temp_table + 1) % HISTORY_NUM; temp_table[idx_temp_table] = current_temp; } // // Dump information // WMT_DBG_FUNC("[Thermal] idx_temp_table = %d \n", idx_temp_table); WMT_DBG_FUNC("[Thermal] now.time = %d, query.time = %d, REFRESH_TIME = %d\n", now_time.tv_sec, query_time.tv_sec, REFRESH_TIME); WMT_DBG_FUNC("[0] = %d, [1] = %d, [2] = %d, [3] = %d, [4] = %d \n----\n", temp_table[0], temp_table[1], temp_table[2], temp_table[3], temp_table[4]); return current_temp; }
static int try_to_freeze_tasks(bool user_only) { struct task_struct *g, *p; unsigned long end_time; unsigned int todo; bool wq_busy = false; struct timeval start, end; u64 elapsed_msecs64; unsigned int elapsed_msecs; bool wakeup = false; int sleep_usecs = USEC_PER_MSEC; do_gettimeofday(&start); end_time = jiffies + TIMEOUT; if (!user_only) freeze_workqueues_begin(); while (true) { todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { if (p == current || !freeze_task(p)) continue; /* * Now that we've done set_freeze_flag, don't * perturb a task in TASK_STOPPED or TASK_TRACED. * It is "frozen enough". If the task does wake * up, it will immediately call try_to_freeze. * * Because freeze_task() goes through p's scheduler lock, it's * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING * transition can't race with task state testing here. */ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); if (!user_only) { wq_busy = freeze_workqueues_busy(); todo += wq_busy; } if (!todo || time_after(jiffies, end_time)) break; if (pm_wakeup_pending()) { wakeup = true; break; } /* * We need to retry, but first give the freezing tasks some * time to enter the refrigerator. Start with an initial * 1 ms sleep followed by exponential backoff until 8 ms. */ usleep_range(sleep_usecs / 2, sleep_usecs); if (sleep_usecs < 8 * USEC_PER_MSEC) sleep_usecs *= 2; } do_gettimeofday(&end); elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); do_div(elapsed_msecs64, NSEC_PER_MSEC); elapsed_msecs = elapsed_msecs64; if (todo) { /* This does not unfreeze processes that are already frozen * (we have slightly ugly calling convention in that respect, * and caller must call thaw_processes() if something fails), * but it cleans up leftover PF_FREEZE requests. */ if(wakeup) { printk("\n"); printk(KERN_ERR "Freezing of %s aborted\n", user_only ? 
"user space " : "tasks "); } else { printk("\n"); printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds " "(%d tasks refusing to freeze, wq_busy=%d):\n", wakeup ? "aborted" : "failed", elapsed_msecs / 1000, elapsed_msecs % 1000, todo - wq_busy, wq_busy); } if (!wakeup) { read_lock(&tasklist_lock); do_each_thread(g, p) { if (p != current && !freezer_should_skip(p) && freezing(p) && !frozen(p) && elapsed_msecs > 1000) sched_show_task(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } } else {
/*
 * vpif_channel_isr: It changes status of the displayed buffer, takes next
 * buffer from the queue and sets its address in VPIF registers
 *
 * @irq:    interrupt number (unused)
 * @dev_id: pointer to the channel id of the interrupting channel
 *
 * For each started object on the channel: in progressive mode the current
 * frame is completed and the next one programmed on every interrupt after
 * the first; in interlaced mode the driver tracks the hardware field id
 * and resynchronizes when it drifts.  Always returns IRQ_HANDLED.
 */
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
	struct vpif_device *dev = &vpif_obj;
	struct channel_obj *ch;
	struct common_obj *common;
	enum v4l2_field field;
	int fid = -1, i;
	int channel_id = 0;

	channel_id = *(int *)(dev_id);
	ch = dev->dev[channel_id];
	field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
	for (i = 0; i < VPIF_NUMOBJECTS; i++) {
		common = &ch->common[i];
		/* If streaming is started in this channel */
		if (0 == common->started)
			continue;

		if (1 == ch->vpifparams.std_info.frm_fmt) {
			if (list_empty(&common->dma_queue))
				continue;

			/* Progressive mode */
			if (!channel_first_int[i][channel_id]) {
				/* Mark status of the cur_frm to
				 * done and unlock semaphore on it */
				do_gettimeofday(&common->cur_frm->ts);
				common->cur_frm->state = VIDEOBUF_DONE;
				wake_up_interruptible(&common->cur_frm->done);
				/* Make cur_frm pointing to next_frm */
				common->cur_frm = common->next_frm;
			}
			channel_first_int[i][channel_id] = 0;
			process_progressive_mode(common);
		} else {
			/* Interlaced mode */
			/* If it is first interrupt, ignore it */
			if (channel_first_int[i][channel_id]) {
				channel_first_int[i][channel_id] = 0;
				continue;
			}
			if (0 == i) {
				/* the driver's field id toggles once per
				 * interrupt for the video object only */
				ch->field_id ^= 1;
				/* Get field id from VPIF registers */
				fid = vpif_channel_getfid(ch->channel_id + 2);
				/* If fid does not match with stored field id */
				if (fid != ch->field_id) {
					/* Make them in sync */
					if (0 == fid)
						ch->field_id = fid;
					return IRQ_HANDLED;
				}
			}
			process_interlaced_mode(fid, common);
		}
	}
	return IRQ_HANDLED;
}
/*
 * nfsd_svc - set the number of running nfsd threads to @nrservs.
 *
 * Creates the shared svc_serv (and its UDP/TCP sockets) on first use,
 * then spawns threads to grow the pool or signals existing threads with
 * SIG_NOCLEAN to shrink it.  Note the deliberate fallthrough into the
 * "failure:" label on success: every caller holds one reference on
 * nfsd_serv which svc_destroy() releases, and the server is torn down
 * only when the last thread reference is gone.  Runs under the BKL.
 * Returns 0 or the last error encountered.
 */
int nfsd_svc(unsigned short port, int nrservs)
{
	int error;
	int none_left;
	struct list_head *victim;

	lock_kernel();
	dprintk("nfsd: creating service\n");
	error = -EINVAL;
	/* clamp the requested thread count to [0, NFSD_MAXSERVS] */
	if (nrservs <= 0)
		nrservs = 0;
	if (nrservs > NFSD_MAXSERVS)
		nrservs = NFSD_MAXSERVS;

	/* Readahead param cache - will no-op if it already exists */
	error = nfsd_racache_init(2*nrservs);
	if (error<0)
		goto out;
	error = nfs4_state_init();
	if (error<0)
		goto out;
	if (!nfsd_serv) {
		/* first caller: create the service and its sockets */
		atomic_set(&nfsd_busy, 0);
		error = -ENOMEM;
		nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE);
		if (nfsd_serv == NULL)
			goto out;
		error = svc_makesock(nfsd_serv, IPPROTO_UDP, port);
		if (error < 0)
			goto failure;

#ifdef CONFIG_NFSD_TCP
		error = svc_makesock(nfsd_serv, IPPROTO_TCP, port);
		if (error < 0)
			goto failure;
#endif
		do_gettimeofday(&nfssvc_boot);		/* record boot time */
	} else
		nfsd_serv->sv_nrthreads++;
	/* delta between desired and current thread count */
	nrservs -= (nfsd_serv->sv_nrthreads-1);
	while (nrservs > 0) {
		nrservs--;
		__module_get(THIS_MODULE);
		error = svc_create_thread(nfsd, nfsd_serv);
		if (error < 0) {
			module_put(THIS_MODULE);
			break;
		}
	}
	/* too many threads: ask the excess ones to exit */
	victim = nfsd_list.next;
	while (nrservs < 0 && victim != &nfsd_list) {
		struct nfsd_list *nl =
			list_entry(victim,struct nfsd_list, list);
		victim = victim->next;
		send_sig(SIG_NOCLEAN, nl->task, 1);
		nrservs++;
	}
 failure:
	/* drop this caller's reference; destroy server if we were last */
	none_left = (nfsd_serv->sv_nrthreads == 1);
	svc_destroy(nfsd_serv);		/* Release server */
	if (none_left) {
		nfsd_serv = NULL;
		nfsd_racache_shutdown();
		nfs4_state_shutdown();
	}
 out:
	unlock_kernel();
	return error;
}
/*
 * Server's incoming request callback
 *
 * Fired by LNET for every PUT into a request buffer descriptor (rqbd) and
 * for the final UNLINK of the buffer.  Initializes a ptlrpc_request for
 * the incoming message - using the request embedded in the rqbd when the
 * buffer is done (ev->unlinked), otherwise an atomic allocation - records
 * arrival time, adds it to the service partition's incoming queue and
 * wakes a service thread.
 */
void request_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *service = svcpt->scp_service;
	struct ptlrpc_request *req;

	LASSERT (ev->type == LNET_EVENT_PUT ||
		 ev->type == LNET_EVENT_UNLINK);
	LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
	LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
		 rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, service %s\n",
	       ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context. */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof (*req));
	} else {
		LASSERT (ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) {
			/* We moaned above already... */
			return;
		}
		OBD_ALLOC_GFP(req, sizeof(*req), ALLOC_ATOMIC_TRY);
		if (req == NULL) {
			CERROR("Can't allocate incoming request descriptor: "
			       "Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_id2str(ev->initiator));
			return;
		}
	}

	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero.  We only set the message
	 * size to non-zero if this was a successful receive. */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	do_gettimeofday(&req->rq_arrival_time);
	req->rq_peer = ev->initiator;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	spin_lock_init(&req->rq_lock);
	INIT_LIST_HEAD(&req->rq_timed_list);
	INIT_LIST_HEAD(&req->rq_exp_list);
	atomic_set(&req->rq_refcount, 1);
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

	spin_lock(&svcpt->scp_lock);

	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		/* buffer is retired: the req inherits the network's ref */
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}
static DEVICE_ATTR_READER(driftinfo_show, dev, buf) { xbus_t *xbus; struct xpp_drift *di; struct xpp_ticker *ticker; struct timeval now; int len = 0; int hours; int minutes; int seconds; int speed_range; int uframes_inaccuracy; int i; xbus = dev_to_xbus(dev); di = &xbus->drift; ticker = &xbus->ticker; /* * Calculate lost ticks time */ do_gettimeofday(&now); seconds = now.tv_sec - di->last_lost_tick.tv.tv_sec; minutes = seconds / 60; seconds = seconds % 60; hours = minutes / 60; minutes = minutes % 60; len += snprintf(buf + len, PAGE_SIZE - len, "%-15s: %8d (was %d:%02d:%02d ago)\n", "lost_ticks", di->lost_ticks, hours, minutes, seconds); speed_range = abs(di->max_speed - di->min_speed); uframes_inaccuracy = di->sync_inaccuracy / 125; len += snprintf(buf + len, PAGE_SIZE - len, "%-15s: %8d ", "instability", speed_range + uframes_inaccuracy); if (xbus->sync_mode == SYNC_MODE_AB) { buf[len++] = '-'; } else { for (i = 0; len < PAGE_SIZE - 1 && i < speed_range + uframes_inaccuracy; i++) buf[len++] = '#'; } buf[len++] = '\n'; len += snprintf(buf + len, PAGE_SIZE - len, "%-15s: %8d (uframes)\n", "inaccuracy", uframes_inaccuracy); len += snprintf(buf + len, PAGE_SIZE - len, "%-15s: %8d\n", "speed_range", speed_range); #define SHOW(ptr, item) \ do { \ len += snprintf(buf + len, PAGE_SIZE - len, \ "%-15s: %8d\n", #item, (ptr)->item); \ } while (0) SHOW(xbus, sync_adjustment); len += snprintf(buf + len, PAGE_SIZE - len, "%-15s: %8d\n", "offset (usec)", di->offset_prev); SHOW(di, offset_range); len += snprintf(buf + len, PAGE_SIZE - len, "%-15s: %8d\n", "best_speed", (di->max_speed + di->min_speed) / 2); SHOW(di, min_speed); SHOW(di, max_speed); SHOW(ticker, cycle); SHOW(ticker, tick_period); SHOW(ticker, count); #undef SHOW return len; }
/* Modem_response command */ static int lge_dm_tty_modem_response(struct dm_tty *lge_dm_tty_drv, short modem_number, const unsigned char *buf, int count) { int num_push = 0; int left = 0; int total_push; struct timeval time; int start_flag_length; int end_flag_length; if (count == 0) return 0; if (lge_dm_tty_drv-> is_modem_open[modem_number] == FALSE) return 0; /* make start flag */ memcpy(dm_modem_response, &dm_rx_start_flag, sizeof(dm_rx_start_flag)); start_flag_length = sizeof(dm_rx_start_flag); /* make header */ dm_modem_response_header->dm_router_size = dm_modem_response_header_length + dm_modem_response_body_length + count; memcpy(dm_modem_response + start_flag_length, dm_modem_response_header, dm_modem_response_header_length); /* make body */ dm_modem_response_body->modem_chip = modem_number; do_gettimeofday(&time); memcpy(&(dm_modem_response_body->local_time), &time, sizeof(struct timeval)); memcpy(dm_modem_response + start_flag_length + dm_modem_response_header_length, dm_modem_response_body, dm_modem_response_body_length); memcpy(dm_modem_response + start_flag_length + dm_modem_response_header_length + dm_modem_response_body_length, buf, count); dm_modem_response_length = dm_modem_response_header->dm_router_size + start_flag_length; /* make end flag */ memcpy(dm_modem_response + dm_modem_response_length, &dm_rx_end_flag, sizeof(dm_rx_end_flag)); end_flag_length = sizeof(dm_rx_end_flag); dm_modem_response_length = dm_modem_response_length + end_flag_length; /* send modem_response packet to DM router */ total_push = 0; left = dm_modem_response_length; do { num_push = tty_insert_flip_string(lge_dm_tty_drv->tty_str, dm_modem_response + total_push, left); total_push += num_push; left -= num_push; tty_flip_buffer_push(lge_dm_tty_drv->tty_str); } while (left != 0); return total_push; }
//----------------------------------------------------------------------------- void _gtpusp_print_hex_octets(unsigned char* data_pP, unsigned short sizeP) { //----------------------------------------------------------------------------- unsigned long octet_index = 0; unsigned long buffer_marker = 0; unsigned char aindex; struct timeval tv; char timeofday[64]; unsigned int h,m,s; if (data_pP == NULL) { return; } if (sizeP > 2000) { return; } do_gettimeofday(&tv); h = (tv.tv_sec/3600) % 24; m = (tv.tv_sec / 60) % 60; s = tv.tv_sec % 60; snprintf(timeofday, 64, "%02d:%02d:%02d.%06ld", h,m,s,tv.tv_usec); buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker,"%s------+-------------------------------------------------+\n",timeofday); buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker,"%s | 0 1 2 3 4 5 6 7 8 9 a b c d e f |\n",timeofday); buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker,"%s------+-------------------------------------------------+\n",timeofday); pr_info("%s",_gtpusp_print_buffer); buffer_marker = 0; for (octet_index = 0; octet_index < sizeP; octet_index++) { if ((octet_index % 16) == 0) { if (octet_index != 0) { buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker, " |\n"); pr_info("%s",_gtpusp_print_buffer); buffer_marker = 0; } buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker, "%s %04ld |",timeofday, octet_index); } /* * Print every single octet in hexadecimal form */ buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker, " %02x", data_pP[octet_index]); /* * Align newline and pipes according to the octets in groups of 2 */ } /* * Append enough spaces and put final pipe */ for (aindex = octet_index; aindex < 16; ++aindex) 
buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker, " "); //SGI_IF_DEBUG(" "); buffer_marker+=snprintf(&_gtpusp_print_buffer[buffer_marker], GTPUSP_2_PRINT_BUFFER_LEN - buffer_marker, " |\n"); pr_info("%s",_gtpusp_print_buffer); }
static long int get_current_time_us(void) { struct timeval t; do_gettimeofday(&t); return (t.tv_sec & 0xFFF) * 1000000 + t.tv_usec; }
/* adjtimex mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. used by xntpd.
 *
 * Validates txc, then — under xtime_lock — applies any requested mode
 * changes (status bits, frequency, error bounds, time constant, phase
 * offset, tick length) and fills txc with the current timekeeping state.
 * Returns the clock state (TIME_OK/TIME_ERROR/...) or a negative errno.
 */
int do_adjtimex(struct timex *txc)
{
	long mtemp, save_adjust, rem;
	s64 freq_adj, temp64;
	int result;

	/* In order to modify anything, you gotta be super-user! */
	if (txc->modes && !capable(CAP_SYS_TIME))
		return -EPERM;

	/* Now we validate the data before disabling interrupts */
	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
		/* singleshot must not be used with any other mode bits */
		if (txc->modes != ADJ_OFFSET_SINGLESHOT)
			return -EINVAL;
	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
		/* adjustment Offset limited to +- .512 seconds */
		if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
			return -EINVAL;
	/* if the quartz is off by more than 10% something is VERY wrong ! */
	if (txc->modes & ADJ_TICK)
		if (txc->tick < 900000/USER_HZ || txc->tick > 1100000/USER_HZ)
			return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	result = time_state;	/* mostly `TIME_OK' */

	/* Save for later - semantics of adjtime is to return old value */
	save_adjust = time_adjust;

#if 0	/* STA_CLOCKERR is never set yet */
	time_status &= ~STA_CLOCKERR;	/* reset STA_CLOCKERR */
#endif
	/* If there are input parameters, then process them */
	if (txc->modes) {
		if (txc->modes & ADJ_STATUS)	/* only set allowed bits */
			time_status = (txc->status & ~STA_RONLY) |
				      (time_status & STA_RONLY);

		if (txc->modes & ADJ_FREQUENCY) {	/* p. 22 */
			if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
				result = -EINVAL;
				goto leave;
			}
			/* convert userspace ppm (scaled by SHIFT_USEC) to the
			 * kernel's nanosecond-based SHIFT_NSEC scaling */
			time_freq = ((s64)txc->freq * NSEC_PER_USEC)
					>> (SHIFT_USEC - SHIFT_NSEC);
		}

		if (txc->modes & ADJ_MAXERROR) {
			if (txc->maxerror < 0 ||
			    txc->maxerror >= NTP_PHASE_LIMIT) {
				result = -EINVAL;
				goto leave;
			}
			time_maxerror = txc->maxerror;
		}

		if (txc->modes & ADJ_ESTERROR) {
			if (txc->esterror < 0 ||
			    txc->esterror >= NTP_PHASE_LIMIT) {
				result = -EINVAL;
				goto leave;
			}
			time_esterror = txc->esterror;
		}

		if (txc->modes & ADJ_TIMECONST) {	/* p. 24 */
			if (txc->constant < 0) {	/* NTP v4 uses values > 6 */
				result = -EINVAL;
				goto leave;
			}
			time_constant = min(txc->constant + 4, (long)MAXTC);
		}

		if (txc->modes & ADJ_OFFSET) {	/* values checked earlier */
			if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
				/* adjtime() is independent from ntp_adjtime() */
				time_adjust = txc->offset;
			} else if (time_status & STA_PLL) {
				time_offset = txc->offset * NSEC_PER_USEC;

				/*
				 * Scale the phase adjustment and
				 * clamp to the operating range.
				 */
				time_offset = min(time_offset,
						  (s64)MAXPHASE * NSEC_PER_USEC);
				time_offset = max(time_offset,
						  (s64)-MAXPHASE * NSEC_PER_USEC);

				/*
				 * Select whether the frequency is to be
				 * controlled and in which mode (PLL or FLL).
				 * Clamp to the operating range. Ugly
				 * multiply/divide should be replaced someday.
				 */
				if (time_status & STA_FREQHOLD ||
				    time_reftime == 0)
					time_reftime = xtime.tv_sec;
				/* seconds since the last reference update */
				mtemp = xtime.tv_sec - time_reftime;
				time_reftime = xtime.tv_sec;

				/* PLL contribution */
				freq_adj = time_offset * mtemp;
				freq_adj = shift_right(freq_adj, time_constant * 2 +
						       (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
				/* FLL contribution for long intervals */
				if (mtemp >= MINSEC &&
				    (time_status & STA_FLL || mtemp > MAXSEC)) {
					u64 utemp64;
					temp64 = time_offset <<
						 (SHIFT_NSEC - SHIFT_FLL);
					/* do_div() needs an unsigned dividend,
					 * so handle the sign manually */
					if (time_offset < 0) {
						utemp64 = -temp64;
						do_div(utemp64, mtemp);
						freq_adj -= utemp64;
					} else {
						utemp64 = temp64;
						do_div(utemp64, mtemp);
						freq_adj += utemp64;
					}
				}
				freq_adj += time_freq;
				freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
				time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
				/* spread the offset over one NTP interval */
				time_offset = div_long_long_rem_signed(time_offset,
								       NTP_INTERVAL_FREQ,
								       &rem);
				time_offset <<= SHIFT_UPDATE;
			} /* STA_PLL */
		} /* txc->modes & ADJ_OFFSET */
		if (txc->modes & ADJ_TICK)
			tick_usec = txc->tick;

		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
			ntp_update_frequency();
	} /* txc->modes */
leave:
	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
		result = TIME_ERROR;

	/* adjtime() semantics: report the *previous* adjustment */
	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
		txc->offset = save_adjust;
	else
		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
				NTP_INTERVAL_FREQ / 1000;
	txc->freq = (time_freq / NSEC_PER_USEC) <<
				(SHIFT_USEC - SHIFT_NSEC);
	txc->maxerror = time_maxerror;
	txc->esterror = time_esterror;
	txc->status = time_status;
	txc->constant = time_constant;
	txc->precision = 1;
	txc->tolerance = MAXFREQ;
	txc->tick = tick_usec;

	/* PPS is not implemented, so these are zero */
	txc->ppsfreq = 0;
	txc->jitter = 0;
	txc->shift = 0;
	txc->stabil = 0;
	txc->jitcnt = 0;
	txc->calcnt = 0;
	txc->errcnt = 0;
	txc->stbcnt = 0;
	write_sequnlock_irq(&xtime_lock);
	do_gettimeofday(&txc->time);
	notify_arch_cmos_timer();
	return(result);
}
/* Modem_response command */
/*
 * Push a modem response to the DM router tty.  Two logging modes:
 *  - DM_APP_SDM: frame the payload (start flag | header | body | payload |
 *    end flag) in the shared dm_modem_response buffer before pushing;
 *  - DM_APP_ODM: push the raw payload as-is.
 * Returns the number of bytes pushed (0 for empty/NULL input or an
 * unrecognized logging mode).
 *
 * NOTE(review): buf == NULL already returns early above, so the
 * `else printk(...)` branch below is dead code — kept byte-identical here.
 * NOTE(review): the do/while loops spin until the tty accepts every byte;
 * confirm the flip buffer always drains in the callers' context.
 */
static int lge_dm_tty_modem_response(struct dm_tty *lge_dm_tty_drv,
	short modem_number, const unsigned char *buf, int count)
{
	int num_push = 0;
	int left = 0;
	int total_push = 0;
	struct timeval time;
	int start_flag_length;
	int end_flag_length;

	/* Nothing to forward. */
	if ((count == 0) || (buf == NULL))
		return 0;

	if(lge_dm_tty_drv->logging_mode == DM_APP_SDM) {
		/* make start flag */
		memcpy(dm_modem_response, &dm_rx_start_flag,
			sizeof(dm_rx_start_flag));
		start_flag_length = sizeof(dm_rx_start_flag);

		/* make header: total routed size = header + body + payload */
		dm_modem_response_header->dm_router_size =
			dm_modem_response_header_length +
			dm_modem_response_body_length + count;
		memcpy(dm_modem_response + start_flag_length,
			dm_modem_response_header,
			dm_modem_response_header_length);

		/* make body: originating chip and arrival timestamp */
		dm_modem_response_body->modem_chip = modem_number;
		do_gettimeofday(&time);
		memcpy(&(dm_modem_response_body->local_time), &time,
			sizeof(struct timeval));
		memcpy(dm_modem_response + start_flag_length +
			dm_modem_response_header_length,
			dm_modem_response_body, dm_modem_response_body_length);

		if(buf != NULL){
			memcpy(dm_modem_response + start_flag_length +
				dm_modem_response_header_length +
				dm_modem_response_body_length, buf, count);
		}else{
			printk("[DM_APP]buf is null!\n");
		}

		dm_modem_response_length =
			dm_modem_response_header->dm_router_size +
			start_flag_length;

		/* make end flag */
		memcpy(dm_modem_response + dm_modem_response_length,
			&dm_rx_end_flag, sizeof(dm_rx_end_flag));
		end_flag_length = sizeof(dm_rx_end_flag);
		dm_modem_response_length =
			dm_modem_response_length + end_flag_length;

		/* send modem_response packet to DM router */
		total_push = 0;
		left = dm_modem_response_length;

		do {
#ifndef CONFIG_TTY_PORT
			num_push = tty_insert_flip_string(
				lge_dm_tty_drv->tty_str,
				dm_modem_response + total_push, left);
#else
			num_push = tty_insert_flip_string(&dm_tty_port,
				dm_modem_response + total_push, left);
#endif /* CONFIG_TTY_PORT */
			total_push += num_push;
			left -= num_push;
#ifndef CONFIG_TTY_PORT
			tty_flip_buffer_push(lge_dm_tty_drv->tty_str);
#else
			tty_flip_buffer_push(&dm_tty_port);
#endif /* CONFIG_TTY_PORT */
		} while (left != 0);
	} else if(lge_dm_tty_drv->logging_mode == DM_APP_ODM) {
		/* ODM mode: forward the raw payload without framing */
		total_push = 0;
		left = count;
		do {
			if (buf == NULL)
				return total_push;
#ifndef CONFIG_TTY_PORT
			num_push = tty_insert_flip_string(
				lge_dm_tty_drv->tty_str,
				buf + total_push, left);
#else
			num_push = tty_insert_flip_string(&dm_tty_port,
				buf + total_push, left);
#endif /* CONFIG_TTY_PORT */
			total_push += num_push;
			left -= num_push;
#ifndef CONFIG_TTY_PORT
			tty_flip_buffer_push(lge_dm_tty_drv->tty_str);
#else
			tty_flip_buffer_push(&dm_tty_port);
#endif /* CONFIG_TTY_PORT */
		} while (left != 0);
	}

	return total_push;
}
static void pm8xxx_vib_enable(struct timed_output_dev *dev, int value) { struct pm8xxx_vib *vib = container_of(dev, struct pm8xxx_vib, timed_dev); unsigned long flags; /* */ int origin_value; #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER struct timeval current_tv; struct timeval interval_tv; #endif #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE int over_ms = vib->overdrive_ms; #endif #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT spin_lock_irqsave(&vib->lock, flags); if (value == 0 && vib->pre_value <= vib->min_timeout_ms) { spin_unlock_irqrestore(&vib->lock, flags); return; } spin_unlock_irqrestore(&vib->lock, flags); #endif /* */ if(unlikely(debug_mask)) printk(KERN_INFO "pm8xxx_vib_enable value:%d\n",value); retry: spin_lock_irqsave(&vib->lock, flags); if (hrtimer_try_to_cancel(&vib->vib_timer) < 0) { spin_unlock_irqrestore(&vib->lock, flags); cpu_relax(); goto retry; } #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE if (hrtimer_try_to_cancel(&vib->vib_overdrive_timer) < 0) { spin_unlock_irqrestore(&vib->lock, flags); cpu_relax(); goto retry; } #endif /* */ origin_value = value; if (value == 0) vib->state = 0; else { /* Set Min Timeout for normal fuction */ #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT value = (value < vib->min_timeout_ms ? vib->min_timeout_ms : value); #endif value = (value > vib->pdata->max_timeout_ms ? vib->pdata->max_timeout_ms : value); vib->state = 1; #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT vib->pre_value = value; #endif #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE if(vib->overdrive_ms > 0 && value <= vib->overdrive_range_ms) { vib->remain_vib_ms = value - over_ms; vib->level = vib->max_level_mv / 100; vib->active_level = vib->request_level; if(unlikely(debug_mask)) printk(KERN_INFO "start overdrive over_level:%d over_ms:%d \n",vib->level,over_ms); #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER do_gettimeofday(¤t_tv); if(vib->vib_state) { // vibrator is working now. 
struct timeval min_timeout_tv; min_timeout_tv.tv_sec = vib->min_timeout_ms / 1000; min_timeout_tv.tv_usec = (vib->min_timeout_ms % 1000) * 1000; get_timeval_interval(¤t_tv, &(vib->start_tv), &interval_tv); if(unlikely(debug_mask)) { printk(KERN_INFO "vib_state is true, cur:%ld.%06ld, sta:%ld.%06ld, itv:%ld.%06ld\n", current_tv.tv_sec, current_tv.tv_usec, vib->start_tv.tv_sec, vib->start_tv.tv_usec, interval_tv.tv_sec, interval_tv.tv_usec ); } // if greater than min_timeout, no need over drive and min time. if(compare_timeval_interval(&interval_tv, &min_timeout_tv)==1) { value = origin_value; if(unlikely(debug_mask)) printk(KERN_INFO "interval greater than min_timeout, start normal vib %dms\n",value); goto NORMAL_VIB_START; } // if less than min_timeout, need corrected value else { int interval_ms; interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000000); if(over_ms > interval_ms) { over_ms = over_ms - interval_ms; vib->remain_vib_ms = origin_value; if(unlikely(debug_mask)) printk(KERN_INFO "interval less than min_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms); goto OVERDRIVE_VIB_START; } else { value = value - interval_ms; if(unlikely(debug_mask)) printk(KERN_INFO "interval less than min_timeout, start normal vib %dms\n",value); goto NORMAL_VIB_START; } } } else { // vibrator is not working now. struct timeval min_stop_tv; min_stop_tv.tv_sec = vib->min_stop_ms / 1000; min_stop_tv.tv_usec = (vib->min_stop_ms % 1000) * 1000; get_timeval_interval(¤t_tv, &(vib->stop_tv), &interval_tv); if(unlikely(debug_mask)) { printk(KERN_INFO "vib_state is false, cur:%ld.%06ld, sto:%ld.%06ld, itv:%ld.%06ld\n", current_tv.tv_sec, current_tv.tv_usec, vib->stop_tv.tv_sec, vib->stop_tv.tv_usec, interval_tv.tv_sec, interval_tv.tv_usec ); } // if greater than min_stop_tv, start vibration over drive and value. 
if(compare_timeval_interval(&interval_tv, &min_stop_tv)==1) { if(unlikely(debug_mask)) printk(KERN_INFO "greater than min_stop_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms); goto OVERDRIVE_VIB_START; } // if less than min_stop_tv, reduce over drive time. else { int interval_ms; interval_ms = (interval_tv.tv_sec * 1000) + (interval_tv.tv_usec / 1000); over_ms = interval_ms / (vib->min_stop_ms / vib->overdrive_ms) / 2; vib->remain_vib_ms = (value - over_ms) / 2; if(unlikely(debug_mask)) printk(KERN_INFO "less than min_stop_timeout, start overdrive %dms, remain %dms\n", over_ms, vib->remain_vib_ms); goto OVERDRIVE_VIB_START; } } #else goto OVERDRIVE_VIB_START; #endif } else #endif { goto NORMAL_VIB_START; } } NORMAL_VIB_START: #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_VOL vib->level = vib->request_level; #else vib->level = vib->default_level; #endif hrtimer_start(&vib->vib_timer, ktime_set(value / 1000, (value % 1000) * 1000000), HRTIMER_MODE_REL); goto FINISH_VIB_ENABLE; #ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE OVERDRIVE_VIB_START: hrtimer_start(&vib->vib_overdrive_timer, ktime_set(over_ms / 1000, (over_ms % 1000) * 1000000), HRTIMER_MODE_REL); #endif FINISH_VIB_ENABLE: spin_unlock_irqrestore(&vib->lock, flags); schedule_work(&vib->work); }
/******************************************************************************
 *
 *  cpia2_usb_complete
 *
 *  callback when incoming packet is received
 *
 *  Collates isochronous packets into JPEG frames: verifies the per-packet
 *  checksum, detects SOI (FFD8) / EOI (FFD9) markers, copies payload into
 *  the current work buffer, hands completed frames to process_frame(), and
 *  resubmits the URB while streaming.
 *
 *  NOTE(review): 'frame_ready' is function-static, i.e. shared across all
 *  cameras using this driver — looks safe only if a single device streams
 *  at a time; confirm.
 *****************************************************************************/
static void cpia2_usb_complete(struct urb *urb)
{
	int i;
	unsigned char *cdata;
	static int frame_ready = false;
	struct camera_data *cam = (struct camera_data *) urb->context;

	if (urb->status!=0) {
		/* Shutdown-type statuses are expected; anything else is
		 * worth logging. */
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DBG("urb->status = %d!\n", urb->status);
		}
		DBG("Stopping streaming\n");
		return;
	}

	if (!cam->streaming || !cam->present || cam->open_count == 0) {
		LOG("Will now stop the streaming: streaming = %d, "
		    "present=%d, open_count=%d\n",
		    cam->streaming, cam->present, cam->open_count);
		return;
	}

	/***
	 * Packet collater
	 ***/
	//DBG("Collating %d packets\n", urb->number_of_packets);
	for (i = 0; i < urb->number_of_packets; i++) {
		u16 checksum, iso_checksum;
		int j;
		int n = urb->iso_frame_desc[i].actual_length;
		int st = urb->iso_frame_desc[i].status;

		if(cam->workbuff->status == FRAME_READY) {
			struct framebuf *ptr;
			/* Try to find an available buffer */
			DBG("workbuff full, searching\n");
			for (ptr = cam->workbuff->next;
			     ptr != cam->workbuff;
			     ptr = ptr->next) {
				if (ptr->status == FRAME_EMPTY) {
					ptr->status = FRAME_READING;
					ptr->length = 0;
					break;
				}
			}
			/* Ring walked full circle: no READING or EMPTY
			 * buffers left — stop collating this URB. */
			if (ptr == cam->workbuff)
				break;
			cam->workbuff = ptr;
		}

		if (cam->workbuff->status == FRAME_EMPTY ||
		    cam->workbuff->status == FRAME_ERROR) {
			cam->workbuff->status = FRAME_READING;
			cam->workbuff->length = 0;
		}

		//DBG("  Packet %d length = %d, status = %d\n", i, n, st);
		cdata = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		if (st) {
			LOG("cpia2 data error: [%d] len=%d, status = %d\n",
			    i, n, st);
			if(!ALLOW_CORRUPT)
				cam->workbuff->status = FRAME_ERROR;
			continue;
		}

		/* Packets carry a 2-byte trailing checksum; anything shorter
		 * has no payload. */
		if(n<=2)
			continue;

		/* Sum of the payload bytes must match the little-endian
		 * 16-bit checksum in the last two bytes. */
		checksum = 0;
		for(j=0; j<n-2; ++j)
			checksum += cdata[j];
		iso_checksum = cdata[j] + cdata[j+1]*256;
		if(checksum != iso_checksum) {
			LOG("checksum mismatch: [%d] len=%d, calculated = %x, checksum = %x\n",
			    i, n, (int)checksum, (int)iso_checksum);
			if(!ALLOW_CORRUPT) {
				cam->workbuff->status = FRAME_ERROR;
				continue;
			}
		}
		n -= 2;

		if(cam->workbuff->status != FRAME_READING) {
			if((0xFF == cdata[0] && 0xD8 == cdata[1]) ||
			   (0xD8 == cdata[0] && 0xFF == cdata[1] &&
			    0 != cdata[2])) {
				/* frame is skipped, but increment total
				 * frame count anyway */
				cam->frame_count++;
			}
			DBG("workbuff not reading, status=%d\n",
			    cam->workbuff->status);
			continue;
		}

		if (cam->frame_size < cam->workbuff->length + n) {
			ERR("buffer overflow! length: %d, n: %d\n",
			    cam->workbuff->length, n);
			cam->workbuff->status = FRAME_ERROR;
			if(cam->workbuff->length > cam->workbuff->max_length)
				cam->workbuff->max_length =
					cam->workbuff->length;
			continue;
		}

		if (cam->workbuff->length == 0) {
			/* Start of a new frame: look for the JPEG SOI marker
			 * (possibly offset by one byte). */
			int data_offset;
			if ((0xD8 == cdata[0]) && (0xFF == cdata[1])) {
				data_offset = 1;
			} else if((0xFF == cdata[0]) && (0xD8 == cdata[1])
				  && (0xFF == cdata[2])) {
				data_offset = 2;
			} else {
				DBG("Ignoring packet, not beginning!\n");
				continue;
			}
			DBG("Start of frame pattern found\n");
			do_gettimeofday(&cam->workbuff->timestamp);
			cam->workbuff->seq = cam->frame_count++;
			/* Re-emit a canonical SOI, then append headers and
			 * the remaining payload. */
			cam->workbuff->data[0] = 0xFF;
			cam->workbuff->data[1] = 0xD8;
			cam->workbuff->length = 2;
			add_APPn(cam);
			add_COM(cam);
			memcpy(cam->workbuff->data+cam->workbuff->length,
			       cdata+data_offset, n-data_offset);
			cam->workbuff->length += n-data_offset;
		} else if (cam->workbuff->length > 0) {
			/* Continuation packet: append raw payload. */
			memcpy(cam->workbuff->data + cam->workbuff->length,
			       cdata, n);
			cam->workbuff->length += n;
		}

		/* End-of-frame detection: EOI (FFD9), optionally followed by
		 * a stray 0xFF that is stripped. */
		if ((cam->workbuff->length >= 3) &&
		    (cam->workbuff->data[cam->workbuff->length - 3] == 0xFF) &&
		    (cam->workbuff->data[cam->workbuff->length - 2] == 0xD9) &&
		    (cam->workbuff->data[cam->workbuff->length - 1] == 0xFF)) {
			frame_ready = true;
			cam->workbuff->data[cam->workbuff->length - 1] = 0;
			cam->workbuff->length -= 1;
		} else if ((cam->workbuff->length >= 2) &&
			   (cam->workbuff->data[cam->workbuff->length - 2] == 0xFF) &&
			   (cam->workbuff->data[cam->workbuff->length - 1] == 0xD9)) {
			frame_ready = true;
		}

		if (frame_ready) {
			DBG("Workbuff image size = %d\n",
			    cam->workbuff->length);
			process_frame(cam);

			frame_ready = false;

			if (waitqueue_active(&cam->wq_stream))
				wake_up_interruptible(&cam->wq_stream);
		}
	}

	if(cam->streaming) {
		/* resubmit */
		urb->dev = cam->dev;
		if ((i = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
			ERR("%s: usb_submit_urb ret %d!\n", __func__, i);
	}
}
/*
 * Workqueue handler that pulls a large MCU dump from the sensor hub in
 * DATA_PACKET_SIZE chunks over SPI and, when bMcuDumpMode is set, appends
 * each chunk to a timestamped file under DUMP_FILE_PATH.  Holds a wakelock
 * for the duration and frees the ssp_big work item before returning.
 *
 * NOTE(review): the kzalloc() results ('buffer', and 'msg' in the loop) are
 * used without NULL checks — confirm this is acceptable or add checks.
 * NOTE(review): 'msg' is not freed here; presumably ssp_spi_sync() takes
 * ownership — verify against its implementation.
 */
void ssp_dump_task(struct work_struct *work)
{
#ifdef CONFIG_SENSORS_SSP_BBD
	pr_err("[SSPBBD]:TODO:%s()\n", __func__);
#else
	struct ssp_big *big;
	struct file *dump_file;
	struct ssp_msg *msg;
	char *buffer;
	char strFilePath[60];
	struct timeval cur_time;
	int iTimeTemp;
	mm_segment_t fs;
	int buf_len, packet_len, residue, iRet = 0, index = 0, iRetTrans = 0, iRetWrite = 0;

	big = container_of(work, struct ssp_big, work);
	pr_err("[SSP]: %s - start ssp dumping (%d)(%d)\n", __func__,
		big->data->bMcuDumpMode, big->data->uDumpCnt);

	big->data->uDumpCnt++;
	wake_lock(&big->data->ssp_wake_lock);

	/* Allow vfs_write() with kernel buffers. */
	fs = get_fs();
	set_fs(get_ds());

	if(big->data->bMcuDumpMode == true)
	{
		/* Dump file name is the capture time in seconds. */
		do_gettimeofday(&cur_time);
		iTimeTemp = (int) cur_time.tv_sec;
		sprintf(strFilePath, "%s%d.txt", DUMP_FILE_PATH, iTimeTemp);

		dump_file = filp_open(strFilePath,
				O_RDWR | O_CREAT | O_APPEND, 0666);
		if (IS_ERR(dump_file)) {
			pr_err("[SSP]: %s - Can't open dump file\n", __func__);
			set_fs(fs);
			iRet = PTR_ERR(dump_file);
			wake_unlock(&big->data->ssp_wake_lock);
			kfree(big);
			return;
		}
	}
	else
		dump_file = NULL;

	buf_len = big->length > DATA_PACKET_SIZE ? DATA_PACKET_SIZE : big->length;
	buffer = kzalloc(buf_len, GFP_KERNEL);

	residue = big->length;

	/* Fetch the dump chunk by chunk; 'index' tags each request so the
	 * hub returns consecutive segments. */
	while (residue > 0) {
		packet_len = residue > DATA_PACKET_SIZE ? DATA_PACKET_SIZE : residue;

		msg = kzalloc(sizeof(*msg), GFP_KERNEL);
		msg->cmd = MSG2SSP_AP_GET_BIG_DATA;
		msg->length = packet_len;
		msg->options = AP2HUB_READ | (index++ << SSP_INDEX);
		msg->data = big->addr;
		msg->buffer = buffer;
		msg->free_buffer = 0;

		iRetTrans = ssp_spi_sync(big->data, msg, 1000);
		if (iRetTrans != SUCCESS) {
			pr_err("[SSP]: %s - Fail to receive data %d (%d)\n",
				__func__, iRetTrans, residue);
			break;
		}

		if(big->data->bMcuDumpMode == true)
		{
			iRetWrite = vfs_write(dump_file, (char __user *) buffer,
					packet_len, &dump_file->f_pos);
			if (iRetWrite < 0) {
				pr_err("[SSP]: %s - Can't write dump to file\n",
					__func__);
				break;
			}
		}
		residue -= packet_len;
	}

	/* Record a failure marker in the dump file itself. */
	if(big->data->bMcuDumpMode == true &&
		(iRetTrans != SUCCESS || iRetWrite < 0))
	{
		char FAILSTRING[100];
		sprintf(FAILSTRING, "FAIL OCCURED(%d)(%d)(%d)",
			iRetTrans, iRetWrite, big->length);
		vfs_write(dump_file, (char __user *) FAILSTRING,
			strlen(FAILSTRING), &dump_file->f_pos);
	}

	big->data->bDumping = false;
	if(big->data->bMcuDumpMode == true)
		filp_close(dump_file, current->files);

	set_fs(fs);
	wake_unlock(&big->data->ssp_wake_lock);
	kfree(buffer);
	kfree(big);

	pr_err("[SSP]: %s done\n", __func__);
#endif
}
/*
 * Read one received character from the UART, apply the BF533/BF561 break
 * anomaly workaround, account errors, and push the character to the tty
 * layer.  Called with the port lock held from the RX interrupt.
 */
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = NULL;
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	ch = UART_GET_CHAR(uart);
	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdb_connected && kgdboc_port_line == uart->port.line)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.info || !uart->port.info->port.tty)
		return;
#endif
	tty = uart->port.info->port.tty;

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single"
		 * break. We have to basically ignore this flood until the
		 * "next" valid character comes across.  Due to the nature of
		 * the flood, it is not possible to reliably catch bytes that
		 * are sent too quickly after this break.  So application code
		 * talking to the Blackfin which sends a break signal must
		 * allow at least 1.5 character times after the end of the
		 * break for things to stabilize.  This timeout was picked as
		 * it must absolutely be larger than 1 character time +/- some
		 * percent.  So 1.5 sounds good.  All other Blackfin families
		 * operate properly.  Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			/* Any byte that is not 0x00/0x80/0xc0/... (i.e. has a
			 * zero bit below its lowest set bit) cannot be break
			 * flood residue. */
			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;
			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			/* Still inside the flood window: drop the byte. */
			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

 known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	/* Map line-status bits to the tty flag for this character. */
	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

 ignore_char:
	tty_flip_buffer_push(tty);
}

/*
 * PIO transmit: drain the circular buffer into the UART FIFO while THRE
 * is set.  Disables the TX interrupt when there is nothing left to send.
 */
static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	/* High-priority flow-control character goes out first. */
	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
		SSYNC();
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}

/* RX interrupt: drain every pending character under the port lock. */
static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}

/* TX interrupt: refill the FIFO; with hard CTS/RTS, also report a CTS
 * drop to the serial core. */
static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
		uart->scts = 0;
		uart_handle_cts_change(&uart->port, uart->scts);
	}
#endif
	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_SERIAL_BFIN_DMA
/*
 * DMA transmit: flush the cache over the contiguous region of the circular
 * buffer (up to the wrap point) and start a linear 8-bit DMA into the UART.
 */
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	uart->tx_done = 0;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	/* One DMA descriptor can only cover the contiguous part of the
	 * ring, i.e. up to the buffer wrap. */
	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
		(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel,
		(unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}

/*
 * Push the characters accumulated in the RX DMA ring (tail..head) to the
 * tty layer, applying the line-status flag from the most recent LSR read
 * to the whole batch.
 */
static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = uart->port.info->port.tty;
	int i, flg, status;

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	/* Walk the ring from tail to head, wrapping at the buffer size. */
	for (i = uart->rx_dma_buf.tail; ; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (i == uart->rx_dma_buf.head)
			break;
		if (!uart_handle_sysrq_char(&uart->port,
			uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

 dma_ignore_char:
	tty_flip_buffer_push(tty);
}

/*
 * Periodic timer: compute how far the RX DMA engine has advanced in the
 * 2D (rows x columns) ring, and if new data arrived, flush it to the tty
 * and re-arm the timer.
 */
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&uart->port.lock, flags);

	/* Current DMA position: counts count *down*, so convert to an
	 * offset from the start of the ring. */
	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
	if (pos != uart->rx_dma_buf.tail) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_irqrestore(&uart->port.lock, flags);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
/*
 * Bring the KingSun/DonShine IrDA USB dongle online: allocate the RX skb
 * and both URBs, open the IrLAP layer, submit the first interrupt-IN URB
 * and start the TX queue.  On any failure, everything acquired so far is
 * released via the goto-cleanup chain and a negative errno is returned.
 */
static int kingsun_net_open(struct net_device *netdev)
{
	struct kingsun_cb *kingsun = netdev_priv(netdev);
	int err = -ENOMEM;	/* default for the allocation-failure paths */
	char hwname[16];

	kingsun->receiving = 0;

	/* Prime the SIR receive state machine and its backing skb. */
	kingsun->rx_buff.in_frame = FALSE;
	kingsun->rx_buff.state = OUTSIDE_FRAME;
	kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!kingsun->rx_buff.skb)
		goto free_mem;
	/* One reserved byte — presumably for header alignment; see RX path. */
	skb_reserve(kingsun->rx_buff.skb, 1);
	kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
	do_gettimeofday(&kingsun->rx_time);

	kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->rx_urb)
		goto free_mem;
	kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->tx_urb)
		goto free_mem;

	/* hwname is 16 bytes; "usb#" + devnum always fits. */
	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
	if (!kingsun->irlap) {
		/* err() here is the legacy printk macro, not the variable. */
		err("kingsun-sir: irlap_open failed");
		goto free_mem;
	}

	/* Start listening: interrupt-IN URB at 1ms polling interval. */
	usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
		usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
		kingsun->in_buf, kingsun->max_rx,
		kingsun_rcv_irq, kingsun, 1);
	kingsun->rx_urb->status = 0;
	err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
	if (err) {
		err("kingsun-sir: first urb-submit failed: %d", err);
		goto close_irlap;
	}

	netif_start_queue(netdev);
	return 0;

close_irlap:
	irlap_close(kingsun->irlap);
free_mem:
	if (kingsun->tx_urb) {
		usb_free_urb(kingsun->tx_urb);
		kingsun->tx_urb = NULL;
	}
	if (kingsun->rx_urb) {
		usb_free_urb(kingsun->rx_urb);
		kingsun->rx_urb = NULL;
	}
	if (kingsun->rx_buff.skb) {
		kfree_skb(kingsun->rx_buff.skb);
		kingsun->rx_buff.skb = NULL;
		kingsun->rx_buff.head = NULL;
	}
	return err;
}
/* format_corename will inspect the pattern parameter, and output a * name into corename, which must have space for at least * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. */ static int format_corename(struct core_name *cn, struct coredump_params *cprm) { const struct cred *cred = current_cred(); const char *pat_ptr = core_pattern; int ispipe = (*pat_ptr == '|'); int pid_in_pattern = 0; int err = 0; cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); cn->corename = kmalloc(cn->size, GFP_KERNEL); cn->used = 0; if (!cn->corename) return -ENOMEM; /* Repeat as long as we have more pattern to process and more output space */ while (*pat_ptr) { if (*pat_ptr != '%') { if (*pat_ptr == 0) goto out; err = cn_printf(cn, "%c", *pat_ptr++); } else { switch (*++pat_ptr) { /* single % at the end, drop that */ case 0: goto out; /* Double percent, output one percent */ case '%': err = cn_printf(cn, "%c", '%'); break; /* pid */ case 'p': pid_in_pattern = 1; err = cn_printf(cn, "%d", task_tgid_vnr(current)); break; /* uid */ case 'u': err = cn_printf(cn, "%d", cred->uid); break; /* gid */ case 'g': err = cn_printf(cn, "%d", cred->gid); break; case 'd': err = cn_printf(cn, "%d", __get_dumpable(cprm->mm_flags)); break; /* signal that caused the coredump */ case 's': err = cn_printf(cn, "%ld", cprm->siginfo->si_signo); break; /* UNIX time of coredump */ case 't': { struct timeval tv; do_gettimeofday(&tv); err = cn_printf(cn, "%lu", tv.tv_sec); break; } /* hostname */ case 'h': { char *namestart = cn->corename + cn->used; down_read(&uts_sem); err = cn_printf(cn, "%s", utsname()->nodename); up_read(&uts_sem); cn_escape(namestart); break; } /* executable */ case 'e': { char *commstart = cn->corename + cn->used; err = cn_printf(cn, "%s", current->comm); cn_escape(commstart); break; } case 'E': err = cn_print_exe_file(cn); break; /* core limit size */ case 'c': err = cn_printf(cn, "%lu", rlimit(RLIMIT_CORE)); break; default: break; } ++pat_ptr; } if (err) return err; } 
/* Backward compatibility with core_uses_pid: * * If core_pattern does not include a %p (as is the default) * and core_uses_pid is set, then .%pid will be appended to * the filename. Do not do this for piped commands. */ if (!ispipe && !pid_in_pattern && core_uses_pid) { err = cn_printf(cn, ".%d", task_tgid_vnr(current)); if (err) return err; } out: return ispipe; }
/* * RTCC thread entry */ static int rtcc_thread(void * nothing) { unsigned long nr_to_reclaim, nr_reclaimed, nr_swapped; #if RTCC_DBG unsigned long dt; struct timeval tv1, tv2; #endif set_freezable(); for ( ; ; ) { try_to_freeze(); if (kthread_should_stop()) break; if (likely(atomic_read(&krtccd_running) == 1)) { #if RTCC_DBG do_gettimeofday(&tv1); #endif swap_toplimit = get_swapped_pages() + get_anon_pages() / 2; swap_toplimit = min(swap_toplimit, total_swap_pages); nr_to_reclaim = get_reclaim_count(); nr_swapped = 0; nr_reclaimed = rtcc_reclaim_pages(nr_to_reclaim, 200, &nr_swapped); nr_krtccd_swapped += nr_swapped; printk("reclaimed %ld (swapped %ld) pages.\n", nr_reclaimed, nr_swapped); if (likely(rtcc_boost_mode == 0)) { if (get_rtcc_grade() <= 0) { // If free memory is enough, cancel reclaim atomic_set(&need_to_reclaim, 0); } else if ((swap_toplimit - get_swapped_pages()) <= rtcc_grade[RTCC_GRADE_NUM-2]) { // If swap space is more than anon, also cancel reclaim atomic_set(&need_to_reclaim, 0); } } else if (get_anon_pages() < swap_toplimit / 4) { rtcc_boost_mode = 0; printk("swapped %ldMB enough, exit boost mode.\n", get_swapped_pages()/256); } else if (time_after(jiffies, boost_end_jiffy)) { rtcc_boost_mode = 0; printk("time out, swapped %ldMB, exit boost mode.\n", get_swapped_pages()/256); } atomic_set(&krtccd_running, 0); #if RTCC_DBG do_gettimeofday(&tv2); dt = tv2.tv_sec*1000000 + tv2.tv_usec - tv1.tv_sec*1000000 - tv1.tv_usec; printk("cost %ldms, %ldus one page, ", dt/1000, dt/nr_reclaimed); #endif } set_current_state(TASK_INTERRUPTIBLE); schedule(); } return 0; }
int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) { int l; unsigned char * iph; unsigned char * optptr; int optlen; unsigned char * pp_ptr = NULL; struct rtable *rt = skb ? (struct rtable*)skb->dst : NULL; if (!opt) { opt = &(IPCB(skb)->opt); memset(opt, 0, sizeof(struct ip_options)); iph = skb->nh.raw; opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); optptr = iph + sizeof(struct iphdr); opt->is_data = 0; } else { optptr = opt->is_data ? opt->__data : (unsigned char*)&(skb->nh.iph[1]); iph = optptr - sizeof(struct iphdr); } for (l = opt->optlen; l > 0; ) { switch (*optptr) { case IPOPT_END: for (optptr++, l--; l>0; l--) { if (*optptr != IPOPT_END) { *optptr = IPOPT_END; opt->is_changed = 1; } } goto eol; case IPOPT_NOOP: l--; optptr++; continue; } if (l < 2) { pp_ptr = optptr; goto error; } optlen = optptr[1]; if (optlen<2 || optlen>l) { pp_ptr = optptr; goto error; } switch (*optptr) { case IPOPT_SSRR: case IPOPT_LSRR: if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } /* NB: cf RFC-1812 5.2.4.1 */ if (opt->srr) { pp_ptr = optptr; goto error; } if (!skb) { if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) { pp_ptr = optptr + 1; goto error; } memcpy(&opt->faddr, &optptr[3], 4); if (optlen > 7) memmove(&optptr[3], &optptr[7], optlen-7); } opt->is_strictroute = (optptr[0] == IPOPT_SSRR); opt->srr = optptr - iph; break; case IPOPT_RR: if (opt->rr) { pp_ptr = optptr; goto error; } if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { if (optptr[2]+3 > optlen) { pp_ptr = optptr + 2; goto error; } if (skb) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); opt->is_changed = 1; } optptr[2] += 4; opt->rr_needaddr = 1; } opt->rr = optptr - iph; break; case IPOPT_TIMESTAMP: if (opt->ts) { pp_ptr = optptr; goto error; } if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 5) { 
pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { __u32 * timeptr = NULL; if (optptr[2]+3 > optptr[1]) { pp_ptr = optptr + 2; goto error; } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: opt->ts = optptr - iph; if (skb) timeptr = (__u32*)&optptr[optptr[2]-1]; opt->ts_needtime = 1; optptr[2] += 4; break; case IPOPT_TS_TSANDADDR: if (optptr[2]+7 > optptr[1]) { pp_ptr = optptr + 2; goto error; } opt->ts = optptr - iph; if (skb) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); timeptr = (__u32*)&optptr[optptr[2]+3]; } opt->ts_needaddr = 1; opt->ts_needtime = 1; optptr[2] += 8; break; case IPOPT_TS_PRESPEC: if (optptr[2]+7 > optptr[1]) { pp_ptr = optptr + 2; goto error; } opt->ts = optptr - iph; { u32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); if (inet_addr_type(addr) == RTN_UNICAST) break; if (skb) timeptr = (__u32*)&optptr[optptr[2]+3]; } opt->ts_needtime = 1; optptr[2] += 8; break; default: if (!skb && !capable(CAP_NET_RAW)) { pp_ptr = optptr + 3; goto error; } break; } if (timeptr) { struct timeval tv; __u32 midtime; do_gettimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); memcpy(timeptr, &midtime, sizeof(__u32)); opt->is_changed = 1; } } else { unsigned overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; } opt->ts = optptr - iph; if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } break; case IPOPT_RA: if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] == 0 && optptr[3] == 0) opt->router_alert = optptr - iph; break; case IPOPT_SEC: case IPOPT_CIPSO: case IPOPT_SID: if (security_ip_decode_options(skb, optptr, &pp_ptr)) goto error; break; default: if (!skb && !capable(CAP_NET_RAW)) { pp_ptr = optptr; goto error; } break; } l -= optlen; optptr += optlen; } eol: if (!pp_ptr) return 0; error: if (skb) { icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24)); } return -EINVAL; }
/*
 * Freeze all freezable tasks (and, unless user_only, the workqueues),
 * polling with exponential backoff until everything is frozen, the
 * timeout expires, or a wakeup source aborts the suspend.
 * NOTE: this function continues beyond the end of this excerpt.
 */
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/* Tasks marked freezer_should_skip (e.g. blocked in
			 * vfork) are not counted as stragglers. */
			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			/* A wakeup source fired: abort the freeze and log why. */
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
			log_suspend_abort_reason(suspend_abort);
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (wakeup) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
		       elapsed_msecs / 1000, elapsed_msecs % 1000);
	} else if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		/* Dump the tasks that refused to freeze for debugging. */
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p != current && !freezer_should_skip(p)
			    && freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
/*
static int cypress_touchkey_led_on(struct cypress_touchkey_info *info)
{
	u8 buf = CYPRESS_LED_ON;
	int ret;
	ret = i2c_smbus_write_byte_data(info->client, CYPRESS_GEN, buf);
	return ret;
}

static int cypress_touchkey_led_off(struct cypress_touchkey_info *info)
{
	u8 buf = CYPRESS_LED_OFF;
	int ret;
	ret = i2c_smbus_write_byte_data(info->client, CYPRESS_GEN, buf);
	return ret;
}
*/

/*
 * Probe the Cypress touchkey controller: allocate the driver context and
 * input device, register the input device, read the firmware/module
 * version over I2C (optionally triggering a FW update), set up
 * early-suspend hooks, workqueues, the LED class device and finally the
 * threaded IRQ.  Returns 0 on success or a negative errno.
 */
static int __devinit cypress_touchkey_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct input_dev *input_dev;
	int ret = 0;
	int i;
	u8 buf[2] = {0,};
#ifdef TOUCHKEY_UPDATE_ONBOOT
	u8 mod_ver;
	u8 fw_ver;
#endif

	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
		return -EIO;

	info = kzalloc(sizeof(struct cypress_touchkey_info), GFP_KERNEL);
	if (!info) {
		dev_err(&client->dev, "fail to memory allocation.\n");
		/* BUGFIX: previously ret stayed 0 here, so probe reported
		 * success on allocation failure. */
		ret = -ENOMEM;
		goto err_mem_alloc;
	}

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&client->dev, "fail to allocate input device.\n");
		/* BUGFIX: same as above — report the failure. */
		ret = -ENOMEM;
		goto err_input_dev_alloc;
	}

	info->client = client;
	info->input_dev = input_dev;
	info->pdata = client->dev.platform_data;
	info->irq = client->irq;
	/* NOTE(review): copies ARRAY_SIZE() *bytes*, not
	 * sizeof(cypress_touchkey_keycode); only correct if the keycode
	 * elements are single bytes — confirm the array's element type. */
	memcpy(info->keycode, cypress_touchkey_keycode,
		ARRAY_SIZE(cypress_touchkey_keycode));

	snprintf(info->phys, sizeof(info->phys), "%s/input0",
		dev_name(&client->dev));
	input_dev->name = "sec_touchkey";
	input_dev->phys = info->phys;
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = &client->dev;

	set_bit(EV_SYN, input_dev->evbit);
	set_bit(EV_KEY, input_dev->evbit);
	set_bit(EV_LED, input_dev->evbit);
	set_bit(LED_MISC, input_dev->ledbit);
	for (i = 0; i < ARRAY_SIZE(info->keycode); i++) {
		set_bit(info->keycode[i], input_dev->keybit);
	}

	input_set_drvdata(input_dev, info);

	ret = input_register_device(input_dev);
	if (ret) {
		dev_err(&client->dev, "failed to register input dev (%d).\n",
			ret);
		goto err_reg_input_dev;
	}

	do_gettimeofday(&info->start);
	i2c_set_clientdata(client, info);
	cypress_touchkey_con_hw(info, true);

#ifdef TOUCHKEY_UPDATE_ONBOOT
	if (system_rev >= JANICE_R0_3) {
		mod_ver = JANICE_TOUCHKEY_HW03_MOD_VER;
		fw_ver = JANICE_TOUCHKEY_M_04_FW_VER;
	} else {
		mod_ver = JANICE_TOUCHKEY_HW02_MOD_VER;
		fw_ver = JANICE_TOUCHKEY_M_03_FW_VER;
	}
#endif

	/* Read firmware version (buf[0]) and module version (buf[1]). */
	ret = i2c_smbus_read_i2c_block_data(info->client,
			CYPRESS_FW_VER, ARRAY_SIZE(buf), buf);
	if (ret != ARRAY_SIZE(buf))
		dev_err(&client->dev, "failed to check FW ver.\n");
	else {
		info->fw_ver = buf[0];
		info->mod_ver = buf[1];
#if !defined(CONFIG_SAMSUNG_PRODUCT_SHIP)
#ifdef CONFIG_DEBUG_PRINTK
		printk(KERN_DEBUG "[TouchKey] %s : Mod Ver 0x%02x\n",
			__func__, info->mod_ver);
#else
		;
#endif
#ifdef CONFIG_DEBUG_PRINTK
		printk(KERN_DEBUG "[TouchKey] FW mod 0x%02x\n", info->fw_ver);
#else
		;
#endif
#endif
#ifdef TOUCHKEY_UPDATE_ONBOOT
		if ((info->mod_ver == mod_ver) && (info->fw_ver < fw_ver))
			touch_FW_update();
#endif
	}

	cypress_thd_change(vbus_state);
	cypress_touchkey_auto_cal(info);

#ifdef CONFIG_HAS_EARLYSUSPEND
	info->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
	info->early_suspend.suspend = cypress_touchkey_early_suspend;
	info->early_suspend.resume = cypress_touchkey_late_resume;
	register_early_suspend(&info->early_suspend);
#endif /* CONFIG_HAS_EARLYSUSPEND */

	/* NOTE(review): workqueue creation results are not checked here;
	 * a NULL return would oops later in queue_work. */
	info->key_wq = create_singlethread_workqueue("cypress_key_wq");
	INIT_WORK(&info->key_work, cypress_touchkey_work);
	touchkey_wq = create_singlethread_workqueue("cypress_tsk_update_wq");

#ifdef CONFIG_LEDS_CLASS
	mutex_init(&info->touchkey_mutex);
	info->led_wq = create_singlethread_workqueue("cypress_touchkey");
	INIT_WORK(&info->led_work, cypress_touchkey_led_work);

	info->leds.name = TOUCHKEY_BACKLIGHT;
	info->leds.brightness = LED_FULL;
	info->leds.max_brightness = LED_FULL;
	info->leds.brightness_set = cypress_touchkey_brightness_set;
	info->current_status = 1;

	ret = led_classdev_register(&client->dev, &info->leds);
	if (ret) {
		goto err_req_irq;
	}
#endif

	ret = request_threaded_irq(client->irq, NULL,
			cypress_touchkey_interrupt,
			IRQF_TRIGGER_RISING,
			client->dev.driver->name, info);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to request IRQ %d (err: %d).\n",
			client->irq, ret);
		goto err_req_irq;
	}

	FUNC_CALLED;
	return 0;

err_req_irq:
#ifdef CONFIG_LEDS_CLASS
	destroy_workqueue(info->led_wq);
#endif
	destroy_workqueue(info->key_wq);
	destroy_workqueue(touchkey_wq);
	input_unregister_device(input_dev);
	/* unregister already dropped the device; NULL prevents the
	 * fall-through input_free_device from double-freeing. */
	input_dev = NULL;
err_reg_input_dev:
err_input_dev_alloc:
	input_free_device(input_dev);
	kfree(info);
err_mem_alloc:
	return ret;
}
/* Capture the end-of-measurement timestamp into the global 'finish'. */
static inline void stop_timing(void)
{
	do_gettimeofday(&finish);
}