static int
parse_delay(char *str, uint64_t *delay, uint64_t *nlanes)
{
	unsigned long scan_delay;
	unsigned long scan_nlanes;

	if (sscanf(str, "%lu:%lu", &scan_delay, &scan_nlanes) != 2)
		return (1);

	/*
	 * We explicitly disallow a delay of zero here, because we key
	 * off this value being non-zero in translate_device(), to
	 * determine if the fault is a ZINJECT_DELAY_IO fault or not.
	 */
	if (scan_delay == 0)
		return (1);

	/*
	 * The units for the CLI delay parameter are milliseconds, but
	 * the data passed to the kernel is interpreted as nanoseconds.
	 * Thus we scale the milliseconds to nanoseconds here, and this
	 * nanosecond value is used to pass the delay to the kernel.
	 */
	*delay = MSEC2NSEC(scan_delay);
	*nlanes = scan_nlanes;

	return (0);
}
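/*
 * NOTE: illustrative sketch only, not part of the original source.  Every
 * snippet in this collection relies on MSEC2NSEC() to scale millisecond
 * values into the nanosecond units used by gethrtime().  The macro below is
 * an assumption mirroring the usual <sys/time.h> definition, and
 * demo_parse_delay()/main() are hypothetical drivers showing how a
 * "delay_ms:lanes" string such as "500:2" becomes a nanosecond delay.
 */
#include <stdio.h>
#include <stdint.h>

#define	NANOSEC		1000000000ULL
#define	MILLISEC	1000ULL
#define	MSEC2NSEC(m)	((uint64_t)(m) * (NANOSEC / MILLISEC))

static int
demo_parse_delay(const char *str, uint64_t *delay, uint64_t *nlanes)
{
	unsigned long scan_delay, scan_nlanes;

	if (sscanf(str, "%lu:%lu", &scan_delay, &scan_nlanes) != 2 ||
	    scan_delay == 0)
		return (1);

	*delay = MSEC2NSEC(scan_delay);		/* 500 ms -> 500000000 ns */
	*nlanes = scan_nlanes;
	return (0);
}

int
main(void)
{
	uint64_t delay, nlanes;

	if (demo_parse_delay("500:2", &delay, &nlanes) == 0)
		printf("delay = %llu ns, lanes = %llu\n",
		    (unsigned long long)delay, (unsigned long long)nlanes);
	return (0);
}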
static void
insert_timer(iu_tq_t *tq, iu_timer_node_t *node, uint64_t msec)
{
	iu_timer_node_t *after = NULL;

	/*
	 * find the node to insert this new node "after".  we do this
	 * instead of the more intuitive "insert before" because with
	 * the insert before approach, a null `before' node pointer
	 * is overloaded in meaning (it could be null because there
	 * are no items in the list, or it could be null because this
	 * is the last item on the list, which are very different cases).
	 */
	node->iutn_abs_timeout = gethrtime() + MSEC2NSEC(msec);

	if (tq->iutq_head != NULL &&
	    tq->iutq_head->iutn_abs_timeout < node->iutn_abs_timeout)
		for (after = tq->iutq_head; after->iutn_next != NULL;
		    after = after->iutn_next)
			if (after->iutn_next->iutn_abs_timeout >
			    node->iutn_abs_timeout)
				break;

	node->iutn_next = after ? after->iutn_next : tq->iutq_head;
	node->iutn_prev = after;
	if (after == NULL)
		tq->iutq_head = node;
	else
		after->iutn_next = node;

	if (node->iutn_next != NULL)
		node->iutn_next->iutn_prev = node;
}
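/*
 * NOTE: illustrative sketch only, not taken from the libinetutil headers.
 * insert_timer() keeps the queue as a doubly linked list sorted by absolute
 * hrtime expiry, so the head is always the next timer to fire.  The layout
 * below shows only the fields the function touches; the real iu_timer_node_t
 * and iu_tq_t carry additional members (callback, argument, timer id, ...)
 * that are omitted here.
 */
typedef struct iu_timer_node {
	hrtime_t		iutn_abs_timeout; /* gethrtime() + MSEC2NSEC(msec) */
	struct iu_timer_node	*iutn_next;	/* later-expiring neighbor */
	struct iu_timer_node	*iutn_prev;	/* earlier-expiring neighbor */
} iu_timer_node_t;

typedef struct iu_tq {
	iu_timer_node_t		*iutq_head;	/* earliest-expiring timer */
} iu_tq_t;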
/*
 * Non-blocking connect call function
 */
static int
nsldapi_os_connect_with_to(LBER_SOCKET sockfd, struct sockaddr *saptr,
    int salen, LDAP *ld)
{
#ifndef _WINDOWS
	int		flags;
#endif /* _WINDOWS */
	int		n, error;
	int		len;
	fd_set		rset, wset;
	struct timeval	tval;
#ifdef _WINDOWS
	int		nonblock = 1;
	int		block = 0;
	fd_set		eset;
#endif /* _WINDOWS */
	int		msec = ld->ld_connect_timeout;	/* milliseconds */
	int		continue_on_intr = 0;
#ifdef _SOLARIS_SDK
	hrtime_t	start_time = 0, tmp_time, tv_time; /* nanoseconds */
#else
	long		start_time = 0, tmp_time;	/* seconds */
#endif

	LDAPDebug( LDAP_DEBUG_TRACE,
	    "nsldapi_connect_nonblock timeout: %d (msec)\n", msec, 0, 0);

#ifdef _WINDOWS
	ioctlsocket(sockfd, FIONBIO, &nonblock);
#else
	flags = fcntl(sockfd, F_GETFL, 0);
	fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
#endif /* _WINDOWS */

	error = 0;
	if ((n = connect(sockfd, saptr, salen)) < 0)
#ifdef _WINDOWS
		if ((n != SOCKET_ERROR) &&
		    (WSAGetLastError() != WSAEWOULDBLOCK)) {
#else
		if (errno != EINPROGRESS) {
#endif /* _WINDOWS */
#ifdef LDAP_DEBUG
			if ( ldap_debug & LDAP_DEBUG_TRACE ) {
				perror("connect");
			}
#endif
			return (-1);
		}

	/* success */
	if (n == 0)
		goto done;

	FD_ZERO(&rset);
	FD_SET(sockfd, &rset);
	wset = rset;
#ifdef _WINDOWS
	eset = rset;
#endif /* _WINDOWS */

	if (msec < 0 && msec != LDAP_X_IO_TIMEOUT_NO_TIMEOUT) {
		LDAPDebug( LDAP_DEBUG_TRACE, "Invalid timeout value detected.."
		    "resetting connect timeout to default value "
		    "(LDAP_X_IO_TIMEOUT_NO_TIMEOUT\n", 0, 0, 0);
		msec = LDAP_X_IO_TIMEOUT_NO_TIMEOUT;
	} else {
		if (msec != 0) {
			tval.tv_sec = msec / MILLISEC;
			tval.tv_usec = (MICROSEC / MILLISEC) *
			    (msec % MILLISEC);
#ifdef _SOLARIS_SDK
			start_time = gethrtime();
			tv_time = MSEC2NSEC(msec);
#else
			start_time = (long)time(NULL);
#endif
		} else {
			tval.tv_sec = 0;
			tval.tv_usec = 0;
		}
	}

	/* if timeval structure == NULL, select will block indefinitely */
	/*                      != NULL, and value == 0, select will    */
	/*                      not block                               */
	/* Windows is a bit quirky on how it behaves w.r.t nonblocking  */
	/* connects.  If the connect fails, the exception fd, eset, is  */
	/* set to show the failure.  The first argument in select is    */
	/* ignored                                                      */

#ifdef _WINDOWS
	if ((n = select(sockfd +1, &rset, &wset, &eset,
	    (msec != LDAP_X_IO_TIMEOUT_NO_TIMEOUT) ? &tval : NULL)) == 0) {
		errno = WSAETIMEDOUT;
		return (-1);
	}
	/* if wset is set, the connect worked */
	if (FD_ISSET(sockfd, &wset) || FD_ISSET(sockfd, &rset)) {
		len = sizeof(error);
		if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR,
		    (char *)&error, &len) < 0)
			return (-1);
		goto done;
	}
	/* if eset is set, the connect failed */
	if (FD_ISSET(sockfd, &eset)) {
		return (-1);
	}
	/* failure on select call */
	if (n == SOCKET_ERROR) {
		perror("select error: SOCKET_ERROR returned");
		return (-1);
	}
#else
	/*
	 * if LDAP_BITOPT_RESTART and select() is interrupted
	 * try again.
	 */
	do {
		continue_on_intr = 0;
		if ((n = select(sockfd +1, &rset, &wset, NULL,
		    (msec != LDAP_X_IO_TIMEOUT_NO_TIMEOUT) ?
		    &tval : NULL)) == 0) {
			errno = ETIMEDOUT;
			return (-1);
		}
		if (n < 0) {
			if ((ld->ld_options & LDAP_BITOPT_RESTART) &&
			    (errno == EINTR)) {
				continue_on_intr = 1;
				errno = 0;
				FD_ZERO(&rset);
				FD_SET(sockfd, &rset);
				wset = rset;
				/* honour the timeout */
				if ((msec != LDAP_X_IO_TIMEOUT_NO_TIMEOUT) &&
				    (msec != 0)) {
#ifdef _SOLARIS_SDK
					tmp_time = gethrtime();
					if ((tv_time -=
					    (tmp_time - start_time)) <= 0) {
#else
					tmp_time = (long)time(NULL);
					if ((tval.tv_sec -=
					    (tmp_time - start_time)) <= 0) {
#endif
						/* timeout */
						errno = ETIMEDOUT;
						return (-1);
					}
#ifdef _SOLARIS_SDK
					tval.tv_sec = tv_time / NANOSEC;
					tval.tv_usec = (tv_time % NANOSEC) /
					    (NANOSEC / MICROSEC);
#endif
					start_time = tmp_time;
				}
			} else {
#ifdef LDAP_DEBUG
				perror("select error: ");
#endif
				return (-1);
			}
		}
	} while (continue_on_intr == 1);

	if (FD_ISSET(sockfd, &rset) || FD_ISSET(sockfd, &wset)) {
		len = sizeof(error);
		if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR,
		    (char *)&error, &len) < 0)
			return (-1);
#ifdef LDAP_DEBUG
	} else if ( ldap_debug & LDAP_DEBUG_TRACE ) {
		perror("select error: sockfd not set");
#endif
	}
#endif /* _WINDOWS */

done:
#ifdef _WINDOWS
	ioctlsocket(sockfd, FIONBIO, &block);
#else
	fcntl(sockfd, F_SETFL, flags);
#endif /* _WINDOWS */

	if (error) {
		errno = error;
		return (-1);
	}

	return (0);
}

static int
nsldapi_os_ioctl( LBER_SOCKET s, int option, int *statusp )
{
	int		err;
#ifdef _WINDOWS
	u_long		iostatus;
#endif

	if ( FIONBIO != option ) {
		return( -1 );
	}

#ifdef _WINDOWS
	iostatus = *(u_long *)statusp;
	err = ioctlsocket( s, FIONBIO, &iostatus );
#else
	err = ioctl( s, FIONBIO, (caddr_t)statusp );
#endif

	return( err );
}

int
nsldapi_connect_to_host( LDAP *ld, Sockbuf *sb, const char *hostlist,
    int defport, int secure, char **krbinstancep )
/*
 * "defport" must be in host byte order
 * zero is returned upon success, -1 if fatal error, -2 EINPROGRESS
 * if -1 is returned, ld_errno is set
 */
{
	int		s;

	LDAPDebug( LDAP_DEBUG_TRACE, "nsldapi_connect_to_host: %s, port: %d\n",
	    NULL == hostlist ? "NULL" : hostlist, defport, 0 );

	/*
	 * If an extended I/O connect callback has been defined, just use it.
	 */
	if ( NULL != ld->ld_extconnect_fn ) {
		unsigned long connect_opts = 0;

		if ( ld->ld_options & LDAP_BITOPT_ASYNC) {
			connect_opts |= LDAP_X_EXTIOF_OPT_NONBLOCKING;
		}
		if ( secure ) {
			connect_opts |= LDAP_X_EXTIOF_OPT_SECURE;
		}
		s = ld->ld_extconnect_fn( hostlist, defport,
		    ld->ld_connect_timeout, connect_opts,
		    ld->ld_ext_session_arg,
		    &sb->sb_ext_io_fns.lbextiofn_socket_arg
#ifdef _SOLARIS_SDK
		    , NULL );
#else
		    );
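/*
 * NOTE: illustrative sketch only.  On the Solaris (_SOLARIS_SDK) path above,
 * an interrupted select() re-derives the remaining connect timeout by
 * subtracting elapsed gethrtime() nanoseconds from tv_time and folding the
 * result back into a struct timeval.  ns_to_timeval() below is a hypothetical
 * user-space restatement of that fold, with NANOSEC and MICROSEC assumed to
 * have their conventional values.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define	NANOSEC		1000000000LL
#define	MICROSEC	1000000LL

static void
ns_to_timeval(int64_t remaining_ns, struct timeval *tv)
{
	tv->tv_sec = remaining_ns / NANOSEC;
	tv->tv_usec = (remaining_ns % NANOSEC) / (NANOSEC / MICROSEC);
}

int
main(void)
{
	struct timeval tv;

	/* e.g. 2.75 s of the connect timeout left after an EINTR restart */
	ns_to_timeval(2750000000LL, &tv);
	printf("%ld s, %ld us remaining\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return (0);
}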
TUNABLE_QUAD("vfs.zfs.write_limit_min", &zfs_write_limit_min);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, write_limit_min, CTLFLAG_RDTUN,
    &zfs_write_limit_min, 0, "Minimum write limit");
TUNABLE_QUAD("vfs.zfs.write_limit_max", &zfs_write_limit_max);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, write_limit_max, CTLFLAG_RDTUN,
    &zfs_write_limit_max, 0, "Maximum data payload per txg");
TUNABLE_QUAD("vfs.zfs.write_limit_inflated", &zfs_write_limit_inflated);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, write_limit_inflated, CTLFLAG_RDTUN,
    &zfs_write_limit_inflated, 0, "Maximum size of the dynamic write limit");
TUNABLE_QUAD("vfs.zfs.write_limit_override", &zfs_write_limit_override);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, write_limit_override, CTLFLAG_RDTUN,
    &zfs_write_limit_override, 0,
    "Force a txg if dirty buffers exceed this value (bytes)");

hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}
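/*
 * NOTE: illustrative expansion only; "example_throttle_delay" is a
 * hypothetical name.  The zfs_throttle_delay and zfs_throttle_resolution
 * tunables above are stored directly in nanoseconds, so with the
 * conventional constants:
 *
 *	MSEC2NSEC(10) == 10 * (NANOSEC / MILLISEC)
 *	              == 10 * (1000000000 / 1000)
 *	              == 10000000 ns
 *
 * i.e. both the throttle delay and its resolution default to 10 ms.
 */
hrtime_t example_throttle_delay = 10 * (1000000000LL / 1000LL);	/* 10 ms */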
static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t last_spa_suspended = spa_suspended(spa);
	boolean_t last_spa_multihost = spa_multihost(spa);
	callb_cpr_t cpr;
	hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));

	mmp_thread_enter(mmp, &cpr);

	/*
	 * The mmp_write_done() function calculates mmp_delay based on the
	 * prior value of mmp_delay and the elapsed time since the last write.
	 * For the first mmp write, there is no "last write", so we start
	 * with fake, but reasonable, default non-zero values.
	 */
	mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
	    MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
	mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;

	while (!mmp->mmp_thread_exiting) {
		uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
		uint64_t mmp_interval = MSEC2NSEC(
		    MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
		boolean_t suspended = spa_suspended(spa);
		boolean_t multihost = spa_multihost(spa);
		hrtime_t start, next_time;

		start = gethrtime();
		if (multihost) {
			next_time = start + mmp_interval /
			    MAX(vdev_count_leaves(spa), 1);
		} else {
			next_time = start + MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		}

		/*
		 * When MMP goes off => on, or spa goes suspended =>
		 * !suspended, we know no writes occurred recently.  We
		 * update mmp_last_write to give us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mutex_exit(&mmp->mmp_io_lock);
		} else if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}
		last_spa_multihost = multihost;
		last_spa_suspended = suspended;

		/*
		 * Smooth max_fail_ns when its factors are decreased, because
		 * making (max_fail_ns < mmp_interval) results in the pool
		 * being immediately suspended before writes can occur at the
		 * new higher frequency.
		 */
		if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
			max_fail_ns = ((31 * max_fail_ns) +
			    (mmp_interval * mmp_fail_intervals)) / 32;
		} else {
			max_fail_ns = mmp_interval * mmp_fail_intervals;
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (!suspended && mmp_fail_intervals && multihost &&
		    (start - mmp->mmp_last_write) > max_fail_ns) {
			zio_suspend(spa, NULL);
		}

		if (multihost)
			mmp_write_uberblock(spa);

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_sig(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, ddi_get_lbolt() +
		    ((next_time - gethrtime()) / (NANOSEC / hz)));
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	if (mmp->mmp_zio_root)
		zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}
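/*
 * NOTE: illustrative sketch only.  mmp_thread() converts the remaining
 * hrtime window into lbolt clock ticks for cv_timedwait_sig() by dividing
 * by (NANOSEC / hz).  ns_to_ticks() below restates that conversion in user
 * space; the 100 Hz tick rate is an assumption made for the example, and
 * the kernel's own hz variable is authoritative.
 */
#include <stdio.h>
#include <stdint.h>

#define	NANOSEC	1000000000LL

static const int64_t hz = 100;		/* assumed tick rate */

static int64_t
ns_to_ticks(int64_t remaining_ns)
{
	return (remaining_ns / (NANOSEC / hz));
}

int
main(void)
{
	/* e.g. a 1000 ms MMP interval spread over 4 leaf vdevs = 250 ms */
	int64_t remaining_ns = 250LL * 1000000LL;

	printf("%lld ns -> %lld ticks at %lld Hz\n",
	    (long long)remaining_ns, (long long)ns_to_ticks(remaining_ns),
	    (long long)hz);
	return (0);
}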
	ds1287_detach,			/* detach */
	nodev,				/* reset */
	&ds1287_cbops,			/* cb_ops */
	(struct bus_ops *)NULL,		/* bus_ops */
	NULL,				/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

static void *ds1287_state;
static int instance = -1;

/* Driver Tunables */
static int ds1287_interrupt_priority = 15;
static int ds1287_softint_priority = 2;
static hrtime_t power_button_debounce = MSEC2NSEC(10);
static hrtime_t power_button_abort_interval = 1.5 * NANOSEC;
static int power_button_abort_presses = 3;
static int power_button_abort_enable = 1;
static int power_button_enable = 1;

static int power_button_pressed = 0;
static int power_button_cancel = 0;
static int power_button_timeouts = 0;
static int timeout_cancel = 0;
static int additional_presses = 0;

static ddi_iblock_cookie_t ds1287_lo_iblock;
static ddi_iblock_cookie_t ds1287_hi_iblock;
static ddi_softintr_t ds1287_softintr_id;
static kmutex_t ds1287_reg_mutex;	/* Protects ds1287 Registers */