static void psi_update_work(struct work_struct *work) { struct delayed_work *dwork; struct psi_group *group; bool nonidle; dwork = to_delayed_work(work); group = container_of(dwork, struct psi_group, clock_work); /* * If there is task activity, periodically fold the per-cpu * times and feed samples into the running averages. If things * are idle and there is no data to process, stop the clock. * Once restarted, we'll catch up the running averages in one * go - see calc_avgs() and missed_periods. */ nonidle = update_stats(group); if (nonidle) { unsigned long delay = 0; u64 now; now = sched_clock(); if (group->next_update > now) delay = nsecs_to_jiffies(group->next_update - now) + 1; schedule_delayed_work(dwork, delay); } }
static ssize_t ak09911c_get_selftest(struct device *dev, struct device_attribute *attr, char *buf) { int ret = 0, dac_ret, adc_ret; int sf_ret, sf[3] = {0,}; struct ak09911c_v mag; struct ak09911c_p *data = dev_get_drvdata(dev); if (atomic_read(&data->enable) == 1) { ak09911c_ecs_set_mode(data, AK09911C_MODE_POWERDOWN); cancel_delayed_work_sync(&data->work); } sf_ret = ak09911c_selftest(data, &dac_ret, sf); adc_ret = ak09911c_read_mag_xyz(data, &mag); if (atomic_read(&data->enable) == 1) { ak09911c_ecs_set_mode(data, AK09911C_MODE_SNG_MEASURE); schedule_delayed_work(&data->work, nsecs_to_jiffies(atomic_read(&data->delay))); } ret = sf_ret + dac_ret + adc_ret; return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", ret, sf_ret, sf[0], sf[1], sf[2], dac_ret, adc_ret, mag.x, mag.y, mag.z); }
static void cm3323_work_func_light(struct work_struct *work) { struct cm3323_p *data = container_of((struct delayed_work *)work, struct cm3323_p, work); unsigned long delay = nsecs_to_jiffies(atomic_read(&data->delay)); cm3323_i2c_read_word(data, REG_RED, &data->color[0]); cm3323_i2c_read_word(data, REG_GREEN, &data->color[1]); cm3323_i2c_read_word(data, REG_BLUE, &data->color[2]); cm3323_i2c_read_word(data, REG_WHITE, &data->color[3]); input_report_rel(data->input, REL_RED, data->color[0] + 1); input_report_rel(data->input, REL_GREEN, data->color[1] + 1); input_report_rel(data->input, REL_BLUE, data->color[2] + 1); input_report_rel(data->input, REL_WHITE, data->color[3] + 1); input_sync(data->input); if (((int64_t)atomic_read(&data->delay) * (int64_t)data->time_count) >= ((int64_t)LIGHT_LOG_TIME * NSEC_PER_SEC)) { pr_info("[SENSOR]: %s - r = %u g = %u b = %u w = %u\n", __func__, data->color[0], data->color[1], data->color[2], data->color[3]); data->time_count = 0; } else { data->time_count++; } schedule_delayed_work(&data->work, delay); }
/*
 * Set the timer
 *
 * Fold the call's soft deadlines (resend, ack, ping) and hard expiry
 * into a single timer shot.  Deadlines that have already passed raise
 * their event bit and get parked at expire_at; the nearest remaining
 * deadline arms the timer.  If anything needs immediate attention the
 * call is queued instead.
 */
void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
		       ktime_t now)
{
	unsigned long t_j, now_j = jiffies;
	ktime_t t;
	bool queue = false;

	if (call->state < RXRPC_CALL_COMPLETE) {
		/* Start from the hard expiry; nearer soft deadlines below
		 * pull the shot in.
		 */
		t = call->expire_at;
		if (!ktime_after(t, now)) {
			/* Already expired - just get the call looked at. */
			trace_rxrpc_timer(call, why, now, now_j);
			queue = true;
			goto out;
		}

		/* Resend deadline: if passed, raise the event (queue only if
		 * it wasn't already pending) and park it at expire_at;
		 * otherwise let it pull the timer nearer.
		 */
		if (!ktime_after(call->resend_at, now)) {
			call->resend_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
				queue = true;
		} else if (ktime_before(call->resend_at, t)) {
			t = call->resend_at;
		}

		/* ACK deadline: same handling as resend. */
		if (!ktime_after(call->ack_at, now)) {
			call->ack_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
				queue = true;
		} else if (ktime_before(call->ack_at, t)) {
			t = call->ack_at;
		}

		/* Ping deadline: same handling as resend. */
		if (!ktime_after(call->ping_at, now)) {
			call->ping_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
				queue = true;
		} else if (ktime_before(call->ping_at, t)) {
			t = call->ping_at;
		}

		/* Convert the nearest deadline to absolute jiffies. */
		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
		t_j += jiffies;

		/* We have to make sure that the calculated jiffies value falls
		 * at or after the nsec value, or we may loop ceaselessly
		 * because the timer times out, but we haven't reached the nsec
		 * timeout yet.
		 */
		t_j++;

		/* Avoid touching the timer if it's already set correctly. */
		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
			mod_timer(&call->timer, t_j);
			trace_rxrpc_timer(call, why, now, now_j);
		}
	}

out:
	if (queue)
		rxrpc_queue_call(call);
}
/*
 * PM resume hook: if the sensor was enabled before suspend, put it
 * back into single-measurement mode and restart the polling work at
 * the configured delay (stored in nanoseconds).
 */
static int ak09911c_resume(struct device *dev)
{
	struct ak09911c_p *data = dev_get_drvdata(dev);

	if (atomic_read(&data->enable) != 1)
		return 0;

	ak09911c_ecs_set_mode(data, AK09911C_MODE_SNG_MEASURE);
	schedule_delayed_work(&data->work,
			      nsecs_to_jiffies(atomic_read(&data->delay)));

	return 0;
}
static ssize_t ak09911c_get_selftest(struct device *dev, struct device_attribute *attr, char *buf) { int status, dac_ret = -1, adc_ret = -1; int sf_ret, sf[3] = {0,}, retries; struct ak09911c_v mag; struct ak09911c_p *data = dev_get_drvdata(dev); /* STATUS */ if ((data->asa[0] == 0) | (data->asa[0] == 0xff) | (data->asa[1] == 0) | (data->asa[1] == 0xff) | (data->asa[2] == 0) | (data->asa[2] == 0xff)) status = -1; else status = 0; if (atomic_read(&data->enable) == 1) { ak09911c_ecs_set_mode(data, AK09911C_MODE_POWERDOWN); cancel_delayed_work_sync(&data->work); } sf_ret = ak09911c_selftest(data, &dac_ret, sf); for (retries = 0; retries < 5; retries++) { if (ak09911c_read_mag_xyz(data, &mag) == 0) { if ((mag.x < 1600) && (mag.x > -1600) && (mag.y < 1600) && (mag.y > -1600) && (mag.z < 1600) && (mag.z > -1600)) adc_ret = 0; else pr_err("[SENSOR]: %s adc specout %d, %d, %d\n", __func__, mag.x, mag.y, mag.z); break; } msleep(20); pr_err("%s adc retries %d", __func__, retries); } if (atomic_read(&data->enable) == 1) { ak09911c_ecs_set_mode(data, AK09911C_MODE_SNG_MEASURE); schedule_delayed_work(&data->work, nsecs_to_jiffies(atomic_read(&data->delay))); } return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", status, sf_ret, sf[0], sf[1], sf[2], dac_ret, adc_ret, mag.x, mag.y, mag.z); }
/*
 * Delayed-work handler: poll the CM3323 RGBW channels, forward the
 * readings as input events, optionally run the ESD-defence re-init,
 * and re-arm the work at the configured poll delay (nanoseconds).
 */
static void cm3323_work_func_light(struct work_struct *work)
{
	struct cm3323_p *data = container_of((struct delayed_work *)work,
			struct cm3323_p, work);
	/* data->delay is in nanoseconds; convert for the workqueue. */
	unsigned long delay = nsecs_to_jiffies(atomic_read(&data->delay));

	cm3323_i2c_read_word(data, REG_RED, &data->color[0]);
	cm3323_i2c_read_word(data, REG_GREEN, &data->color[1]);
	cm3323_i2c_read_word(data, REG_BLUE, &data->color[2]);
	cm3323_i2c_read_word(data, REG_WHITE, &data->color[3]);

	/* +1 so a raw reading of zero is still delivered as a REL event. */
	input_report_rel(data->input, REL_RED, data->color[0] + 1);
	input_report_rel(data->input, REL_GREEN, data->color[1] + 1);
	input_report_rel(data->input, REL_BLUE, data->color[2] + 1);
	input_report_rel(data->input, REL_WHITE, data->color[3] + 1);
	input_sync(data->input);

#ifdef CONFIG_SENSORS_ESD_DEFENCE
	/* Count consecutive all-zero samples; a long run suggests the
	 * part has latched up (ESD) and needs a re-init.  Counting
	 * stops once 20 resets have been attempted (reset_cnt).
	 */
	if ((data->color[0] == 0) && (data->color[1] == 0)
		&& (data->color[3] == 0) && (data->color[2] == 0)
		&& (data->reset_cnt < 20))
		data->zero_cnt++;
	else
		data->zero_cnt = 0;

	if (data->zero_cnt >= 25) {
		pr_info("[SENSOR]: %s - ESD Defence Reset!\n", __func__);
		/* Power-cycle the ALS block via its config register. */
		cm3323_i2c_write(data, REG_CS_CONF1, als_reg_setting[1][1]);
		usleep_range(1000, 10000);
		cm3323_i2c_write(data, REG_CS_CONF1, als_reg_setting[0][1]);
		data->zero_cnt = 0;
		data->reset_cnt++;
	}
#endif

	/* Rate-limited logging: roughly once per LIGHT_LOG_TIME seconds. */
	if (((int64_t)atomic_read(&data->delay) * (int64_t)data->time_count)
		>= ((int64_t)LIGHT_LOG_TIME * NSEC_PER_SEC)) {
		pr_info("[SENSOR]: %s - r = %u g = %u b = %u w = %u\n",
			__func__, data->color[0], data->color[1],
			data->color[2], data->color[3]);
		data->time_count = 0;
	} else {
		data->time_count++;
	}

	schedule_delayed_work(&data->work, delay);
}
static void ak09911c_set_enable(struct ak09911c_p *data, int enable) { int pre_enable = atomic_read(&data->enable); if (enable) { if (pre_enable == 0) { ak09911c_ecs_set_mode(data, AK09911C_MODE_SNG_MEASURE); schedule_delayed_work(&data->work, nsecs_to_jiffies(atomic_read(&data->delay))); atomic_set(&data->enable, 1); } } else { if (pre_enable == 1) { ak09911c_ecs_set_mode(data, AK09911C_MODE_POWERDOWN); cancel_delayed_work_sync(&data->work); atomic_set(&data->enable, 0); } } }
static void ak09911c_work_func(struct work_struct *work) { struct ak09911c_v mag; struct ak09911c_p *data = container_of((struct delayed_work *)work, struct ak09911c_p, work); unsigned long delay = nsecs_to_jiffies(atomic_read(&data->delay)); int ret; ret = ak09911c_read_mag_xyz(data, &mag); if (ret >= 0) { input_report_rel(data->input, REL_X, mag.x); input_report_rel(data->input, REL_Y, mag.y); input_report_rel(data->input, REL_Z, mag.z); input_sync(data->input); data->magdata = mag; } schedule_delayed_work(&data->work, delay); }
/* called by the modify qp verb
 *
 * Applies each attribute selected in @mask from @attr to @qp.  Most
 * attributes are simple copies into qp->attr; the RD-atomic limits,
 * address vectors, timeout and QP state additionally update the
 * requester/responder machinery.  Returns 0 or a negative errno from
 * resource allocation.
 */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		/* Rounded up to a power of two for the counter logic. */
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		/* Re-size the responder-side resource array. */
		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		/* Resolve the SGID and fill in the primary address vector.
		 * NOTE(review): the cached-gid lookup hard-codes port 1 -
		 * presumably rxe is single-port; confirm.
		 */
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		/* Same resolution for the alternate path. */
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			/* 0 means "infinite" - no retransmit timer. */
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			/* Never let a non-zero timeout round down to 0. */
			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
/*
 * Fold the recent per-cpu stall times into the group-wide cumulative
 * totals and decaying averages.
 *
 * Returns the accumulated non-idle weight for this pass; non-zero
 * means there was task activity and the periodic clock should keep
 * ticking (see psi_update_work()).
 */
static bool update_stats(struct psi_group *group)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long missed_periods = 0;
	unsigned long nonidle_total = 0;
	u64 now, expires, period;
	int cpu;
	int s;

	mutex_lock(&group->stat_lock);

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;

		get_recent_times(group, cpu, times);

		/* Weight in jiffies keeps the u64 products below small. */
		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

	/* avgX= */
	now = sched_clock();
	expires = group->next_update;
	if (now < expires)
		goto out;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	group->next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->last_update + (missed_periods * psi_period));
	group->last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[s] - group->total_prev[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->total_prev[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}
out:
	mutex_unlock(&group->stat_lock);

	return nonidle_total;
}
/*
 * UART interrupt handler: turn DCD level transitions on the modem
 * status line into IR pulse/space durations and feed them to rc-core.
 * Keeps draining the UART's interrupt status until nothing is pending.
 */
static irqreturn_t serial_ir_irq_handler(int i, void *blah)
{
	ktime_t kt;
	int counter, dcd;
	u8 status;
	ktime_t delkt;
	unsigned int data;
	/* last observed DCD level, kept across interrupts; -1 = unknown */
	static int last_dcd = -1;

	if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
		/* not our interrupt */
		return IRQ_NONE;
	}

	counter = 0;
	do {
		counter++;
		status = sinp(UART_MSR);
		if (counter > RS_ISR_PASS_LIMIT) {
			/* Don't let a misbehaving UART wedge the CPU. */
			dev_err(&serial_ir.pdev->dev, "Trapped in interrupt");
			break;
		}
		if ((status & hardware[type].signal_pin_change)
		    && sense != -1) {
			/* get current time */
			kt = ktime_get();

			/*
			 * The driver needs to know if your receiver is
			 * active high or active low, or the space/pulse
			 * sense could be inverted.
			 */

			/* calc time since last interrupt in nanoseconds */
			dcd = (status & hardware[type].signal_pin) ? 1 : 0;

			if (dcd == last_dcd) {
				/* No level change: treat as a glitch. */
				dev_err(&serial_ir.pdev->dev,
					"ignoring spike: %d %d %lldns %lldns\n",
					dcd, sense, ktime_to_ns(kt),
					ktime_to_ns(serial_ir.lastkt));
				continue;
			}

			delkt = ktime_sub(kt, serial_ir.lastkt);
			if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
				/* Gap longer than 15s: clamp the duration. */
				data = IR_MAX_DURATION; /* really long time */
				if (!(dcd ^ sense)) {
					/* sanity check */
					dev_err(&serial_ir.pdev->dev,
						"dcd unexpected: %d %d %lldns %lldns\n",
						dcd, sense, ktime_to_ns(kt),
						ktime_to_ns(serial_ir.lastkt));
					/*
					 * detecting pulse while this
					 * MUST be a space!
					 */
					sense = sense ? 0 : 1;
				}
			} else {
				data = ktime_to_ns(delkt);
			}
			frbwrite(data, !(dcd ^ sense));
			serial_ir.lastkt = kt;
			last_dcd = dcd;
		}
	} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */

	/* Re-arm the inter-frame timeout relative to now. */
	mod_timer(&serial_ir.timeout_timer,
		  jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));

	ir_raw_event_handle(serial_ir.rcdev);

	return IRQ_HANDLED;
}
/*
 * Power up the CM3323 light sensor and start the periodic read work.
 * The poll delay is stored in nanoseconds.
 */
static void cm3323_light_enable(struct cm3323_p *data)
{
	unsigned long poll_jiffies;

	/* Program the ALS configuration register to enable sampling. */
	cm3323_i2c_write(data, REG_CS_CONF1, als_reg_setting[0][1]);

	poll_jiffies = nsecs_to_jiffies(atomic_read(&data->delay));
	schedule_delayed_work(&data->work, poll_jiffies);
}