/*
 * For now, we don't support locale specific character classes.  This is
 * a capability that needs to be added (locales should be able to define
 * their own character classes.)
 */
wctype_t
wctype_l(const char *property, locale_t loc)
{
	/* Name-to-mask table; the NULL sentinel doubles as the default. */
	static const struct {
		const char *name;
		wctype_t mask;
	} props[] = {
		{ "alnum",	_CTYPE_A|_CTYPE_D },
		{ "alpha",	_CTYPE_A },
		{ "blank",	_CTYPE_B },
		{ "cntrl",	_CTYPE_C },
		{ "digit",	_CTYPE_D },
		{ "graph",	_CTYPE_G },
		{ "lower",	_CTYPE_L },
		{ "print",	_CTYPE_R },
		{ "punct",	_CTYPE_P },
		{ "space",	_CTYPE_S },
		{ "upper",	_CTYPE_U },
		{ "xdigit",	_CTYPE_X },
		{ "ideogram",	_CTYPE_I },	/* BSD extension */
		{ "special",	_CTYPE_T },	/* BSD extension */
		{ "phonogram",	_CTYPE_Q },	/* BSD extension */
		{ "rune",	-1 },		/* BSD extension */
		{ NULL,		0 },		/* Default */
	};
	int idx;

	_NOTE(ARGUNUSED(loc));

	/*
	 * Linear scan of the table; falling off the end stops on the
	 * sentinel entry, whose mask of 0 is the "unknown class" result.
	 */
	for (idx = 0; props[idx].name != NULL; idx++) {
		if (strcmp(props[idx].name, property) == 0)
			break;
	}

	return (props[idx].mask);
}
/*
 * ghd_target_free:
 *	Tear down one per-instance target structure (gtgtp) and, when it
 *	was the last instance attached to its device, the per-device
 *	structure (gdevp) as well.
 *
 *	hba_dip/tgt_dip are unused; cccp is the controller state whose
 *	mutexes protect the queue structures.
 */
/*ARGSUSED*/
void
ghd_target_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, ccc_t *cccp,
    gtgt_t *gtgtp)
{
	_NOTE(ARGUNUSED(hba_dip,tgt_dip))

	gdev_t	*gdevp = gtgtp->gt_gdevp;

	GDBG_WAITQ(("ghd_target_free(%d,%d) gdevp-0x%p gtgtp 0x%p\n",
	    gtgtp->gt_target, gtgtp->gt_lun, (void *)gdevp, (void *)gtgtp));

	/*
	 * grab both mutexes so the queue structures
	 * stay stable while deleting this instance
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	ASSERT(gdevp->gd_ninstances > 0);

	/*
	 * remove this per-instance structure from the device list and
	 * free the memory
	 *
	 * Note: gt_size is passed as a call argument, so it is read
	 * before kmem_free() releases gtgtp.
	 */
	GTGT_DEATTACH(gtgtp, gdevp);
	kmem_free((caddr_t)gtgtp, gtgtp->gt_size);

	/*
	 * gd_ninstances is checked after the detach above; the branches
	 * below therefore see the post-removal instance count.
	 */
	if (gdevp->gd_ninstances == 1) {
		GDBG_WAITQ(("ghd_target_free: N=1 gdevp 0x%p\n",
		    (void *)gdevp));
		/*
		 * If there's now just one instance left attached to this
		 * device then reset the queue's max active value
		 * from that instance's saved value.
		 */
		gtgtp = GDEVP2GTGTP(gdevp);
		GDEV_MAXACTIVE(gdevp) = gtgtp->gt_maxactive;

	} else if (gdevp->gd_ninstances == 0) {
		/* else no instances left */
		GDBG_WAITQ(("ghd_target_free: N=0 gdevp 0x%p\n",
		    (void *)gdevp));

		/* detach this per-dev-structure from the HBA's dev list */
		GDEV_QDETACH(gdevp, cccp);
		kmem_free(gdevp, sizeof (*gdevp));

	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		/* leave maxactive set to 1 */
		GDBG_WAITQ(("ghd_target_free: N>1 gdevp 0x%p\n",
		    (void *)gdevp));
	}
#endif

	/* drops both mutexes after processing the wait queue */
	ghd_waitq_process_and_mutex_exit(cccp);
}
/*
 * audio_strclose:
 *	STREAMS close entry point for an audio client.  Drains pending
 *	output (when interruptible), deactivates and destroys the client,
 *	and releases the hold taken on the device.
 *
 *	Returns 0, ENXIO if the queue has no client, or the result of
 *	auclnt_drain() when a drain was performed.
 */
static int
audio_strclose(queue_t *rq, int flag, cred_t *credp)
{
	audio_client_t	*c;
	audio_dev_t	*d;
	int		rv = 0;	/* fix: was uninitialized when no drain ran */

	_NOTE(ARGUNUSED(flag));
	_NOTE(ARGUNUSED(credp));

	if ((c = rq->q_ptr) == NULL) {
		return (ENXIO);
	}

	/*
	 * Only drain when the caller can take a signal, or when this is
	 * a kernel context (pid 0).  Otherwise rv stays 0.
	 */
	if (ddi_can_receive_sig() || (ddi_get_pid() == 0)) {
		rv = auclnt_drain(c);
	}

	/* make sure we won't get any upcalls */
	auimpl_client_deactivate(c);

	/*
	 * Pick up any data sitting around in input buffers.  This
	 * avoids leaving record data stuck in queues.
	 */
	if (c->c_istream.s_engine != NULL)
		audio_engine_produce(c->c_istream.s_engine);

	/* get a local hold on the device */
	d = c->c_dev;
	auimpl_dev_hold(c->c_dev);

	/* Turn off queue processing... */
	qprocsoff(rq);

	/* Call personality specific close handler */
	c->c_close(c);

	auimpl_client_destroy(c);

	/* notify peers that a change has occurred */
	atomic_inc_uint(&d->d_serial);

	/* now we can drop the release we had on the device */
	auimpl_dev_release(d);

	return (rv);
}
/*
 * Prepare a response with V3/V4 referral format.
 *
 * For more details, see comments for smb_dfs_encode_refv2() or see
 * MS-DFSC specification.
 */
static uint32_t
smb_dfs_encode_refv3x(smb_request_t *sr, mbuf_chain_t *mbc,
    dfs_info_t *referrals, uint16_t ver)
{
	_NOTE(ARGUNUSED(sr))
	uint16_t entsize, rep_bufsize, hdrsize;
	uint16_t server_type;
	uint16_t flags = 0;
	uint16_t path_offs, altpath_offs, netpath_offs;
	uint16_t targetsz, total_targetsz = 0;
	uint16_t dfs_pathsz;
	uint16_t r;

	/* V3 and V4 referral entries differ only in header size. */
	hdrsize = (ver == DFS_REFERRAL_V3) ? DFS_REFV3_ENTSZ :
	    DFS_REFV4_ENTSZ;
	rep_bufsize = MBC_MAXBYTES(mbc);
	/* +2 for the UTF-16 NUL terminator of the UNC path. */
	dfs_pathsz = smb_wcequiv_strlen(referrals->i_uncpath) + 2;
	entsize = hdrsize + dfs_pathsz + dfs_pathsz +
	    smb_dfs_referrals_unclen(referrals, 0);

	if (entsize > rep_bufsize) {
		/* need room for at least one referral */
		return (NT_STATUS_BUFFER_OVERFLOW);
	}

	server_type = (referrals->i_type == DFS_OBJECT_ROOT) ?
	    DFS_SRVTYPE_ROOT : DFS_SRVTYPE_NONROOT;

	rep_bufsize -= entsize;

	/*
	 * Emit one referral entry per target.  The string offsets are
	 * relative to the start of each entry: the string area begins
	 * after the remaining entry headers, and each entry's network
	 * path lands after the strings emitted so far (total_targetsz).
	 */
	for (r = 0; r < referrals->i_ntargets; r++) {
		path_offs = (referrals->i_ntargets - r) * hdrsize;
		altpath_offs = path_offs + dfs_pathsz;
		netpath_offs = altpath_offs + dfs_pathsz + total_targetsz;
		targetsz = smb_dfs_referrals_unclen(referrals, r);

		if (r != 0) {
			/*
			 * Entries after the first only add a header and
			 * their own target string.
			 */
			entsize = hdrsize + targetsz;
			if (entsize > rep_bufsize)
				/* silently drop targets that do not fit */
				break;
			rep_bufsize -= entsize;
			flags = 0;
		} else if (ver == DFS_REFERRAL_V4) {
			/* V4 marks only the first target entry. */
			flags = DFS_ENTFLG_T;
		}

		(void) smb_mbc_encodef(mbc, "wwwwlwww16.",
		    ver, hdrsize, server_type, flags,
		    referrals->i_timeout, path_offs, altpath_offs,
		    netpath_offs);

		total_targetsz += targetsz;
	}

	/* Append the string area (paths and target names). */
	smb_dfs_encode_targets(mbc, referrals);

	return (NT_STATUS_SUCCESS);
}
/*
 * audioixp_sync()
 *
 * Description:
 *	Framework callback to synchronize the port's sample buffer
 *	DMA caches (direction given by port->sync_dir).
 *
 * Arguments:
 *	void	*arg		The DMA engine to sync
 *	unsigned nframes	Unused frame count
 */
static void
audioixp_sync(void *arg, unsigned nframes)
{
	audioixp_port_t *pp = arg;

	_NOTE(ARGUNUSED(nframes));

	/* Length 0 syncs the whole handle. */
	(void) ddi_dma_sync(pp->samp_dmah, 0, 0, pp->sync_dir);
}
/*
 * Type-walk callback: record each type's id and formatted name in the
 * global idnames table, indexed by type id.
 */
static int
ctfsrc_collect_types_cb(ctf_id_t id, boolean_t root, void *arg)
{
	_NOTE(ARGUNUSED(root, arg));

	idnames[id].ci_id = id;
	(void) ctf_type_name(g_fp, id, idnames[id].ci_name,
	    sizeof (idnames[id].ci_name));

	return (0);
}
/*
 * rge_intr:
 *	Interrupt handler.  arg1 is the per-device soft state; arg2 is
 *	unused.  Returns DDI_INTR_CLAIMED when the interrupt was ours,
 *	DDI_INTR_UNCLAIMED when the device is suspended or the status
 *	bits don't match our interrupt mask.
 */
uint_t
rge_intr(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep = (rge_t *)arg1;
	uint16_t int_status;

	_NOTE(ARGUNUSED(arg2))
	mutex_enter(rgep->genlock);

	/* Don't touch the hardware while suspended. */
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Was this interrupt caused by our device...
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
				/* indicate it wasn't our interrupt */
	}
	rgep->stats.intr++;

	/*
	 * Clear interrupt
	 * For PCIE chipset, we need disable interrupt first.
	 * (Re-enabled after receive processing, below.)
	 */
	if (rgep->chipid.is_pcie)
		rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Cable link change interrupt
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}
	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt -- handled after dropping genlock.
	 */
	if (int_status & RGE_RX_INT)
		rge_receive(rgep);

	/*
	 * Re-enable interrupt for PCIE chipset
	 */
	if (rgep->chipid.is_pcie)
		rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}
/*
 * Siena monitor reconfiguration: nothing to do beyond sanity-checking
 * that this really is a Siena-family controller.
 */
	__checkReturn	int
siena_mon_reconfigure(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	return (0);
}
/*
 * FileAttributeTagInformation
 *
 * The response carries two dwords: the attributes and, when
 * FILE_ATTRIBUTE_REPARSE_POINT is set, the reparse tag.  Reparse
 * points are unsupported here, so both are encoded as zero.
 */
static uint32_t
smb2_qif_tags(smb_request_t *sr, smb_queryinfo_t *qi)
{
	_NOTE(ARGUNUSED(qi))

	(void) smb_mbc_encodef(&sr->raw_data, "ll", 0, 0);

	return (0);
}
/*
 * FMA error callback.
 * Registered with our parent; simply dispatches the error to our
 * children's handlers and returns their combined status.
 */
static int
acpinex_err_callback(dev_info_t *dip, ddi_fm_error_t *derr,
    const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));

	return (ndi_fm_handler_dispatch(dip, NULL, derr));
}
/*
 * Destructor for a wq mdesc: release its DMA handle, if any, and
 * clear the stale pointer.
 */
static void
oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	_NOTE(ARGUNUSED(wq));

	if (wqmd->dma_handle == NULL)
		return;

	(void) ddi_dma_free_handle(&(wqmd->dma_handle));
	wqmd->dma_handle = NULL;
} /* oce_wqm_dtor */
/*
 * HAL printf shim: forward the formatted message to the kernel's
 * continuation-level cmn_err.  The HAL handle is unused.
 */
void
ath_hal_printf(struct ath_hal *ah, const char *fmt, ...)
{
	va_list args;

	_NOTE(ARGUNUSED(ah))

	va_start(args, fmt);
	vcmn_err(CE_CONT, fmt, args);
	va_end(args);
}
/* * igb_tx_copy * * Copy the mblk fragment to the pre-allocated tx buffer */ static int igb_tx_copy(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp, uint32_t len, boolean_t copy_done) { dma_buffer_t *tx_buf; uint32_t desc_num; _NOTE(ARGUNUSED(tx_ring)); tx_buf = &tcb->tx_buf; /* * Copy the packet data of the mblk fragment into the * pre-allocated tx buffer, which is maintained by the * tx control block. * * Several mblk fragments can be copied into one tx buffer. * The destination address of the current copied fragment in * the tx buffer is next to the end of the previous copied * fragment. */ if (len > 0) { bcopy(mp->b_rptr, tx_buf->address + tx_buf->len, len); tx_buf->len += len; tcb->frag_num++; } desc_num = 0; /* * If it is the last fragment copied to the current tx buffer, * in other words, if there's no remaining fragment or the remaining * fragment requires a new tx control block to process, we need to * complete the current copy processing by syncing up the current * DMA buffer and saving the descriptor data. */ if (copy_done) { /* * Sync the DMA buffer of the packet data */ DMA_SYNC(tx_buf, DDI_DMA_SYNC_FORDEV); tcb->tx_type = USE_COPY; /* * Save the address and length to the private data structure * of the tx control block, which will be used to fill the * tx descriptor ring after all the fragments are processed. */ igb_save_desc(tcb, tx_buf->dma_address, tx_buf->len); desc_num++; } return (desc_num); }
/*
 * Cache destructor for transfer structures: free the DMA handle if
 * one was allocated, and null the stale pointer.
 */
static void
bd_xfer_dtor(void *buf, void *arg)
{
	bd_xfer_impl_t *xip = buf;

	_NOTE(ARGUNUSED(arg));

	if (xip->i_dmah != NULL)
		ddi_dma_free_handle(&xip->i_dmah);
	xip->i_dmah = NULL;
}
/*
 * Return the specified buffer (srbdp) to the ring it came from (brp).
 *
 * Note:
 *	If the driver is compiled with only one buffer ring *and* one
 *	return ring, then the buffers must be returned in sequence.
 *	In this case, we don't have to consider anything about the
 *	buffer at all; we can simply advance the cyclic counter.  And
 *	we don't even need the refill mutex <rf_lock>, as the caller
 *	will already be holding the (one-and-only) <rx_lock>.
 *
 *	If the driver supports multiple buffer rings, but only one
 *	return ring, the same still applies (to each buffer ring
 *	separately).
 */
static void
bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp)
{
	uint64_t cur;

	_NOTE(ARGUNUSED(srbdp))

	/* Advance the cyclic counter and tell the chip about it. */
	cur = brp->rf_next;
	brp->rf_next = NEXT(cur, brp->desc.nslots);
	bge_mbx_put(bgep, brp->chip_mbx_reg, cur);
}
/* printflike */ void mylogger(int pri, const char *format, ...) { _NOTE(ARGUNUSED(pri)) va_list args; va_start(args, format); (void) vfprintf(stderr, format, args); (void) fprintf(stderr, "\n"); va_end(args); }
/*
 * This encoding carries no shift state, so every conversion state is
 * (trivially) an initial state; always report "initial".
 */
static int
_none_mbsinit(const mbstate_t *unused)
{
	_NOTE(ARGUNUSED(unused));

	return (1);
}
/*
 * Extracts the value from the rge parameter array and prints
 * the parameter value.  cp points to the required parameter.
 */
static int
rge_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	nd_param_t *ndp = (nd_param_t *)cp;

	_NOTE(ARGUNUSED(q, credp))

	(void) mi_mpprintf(mp, "%d", ndp->ndp_val);
	return (0);
}
/*
 * MAC property-info entry point: describe permissions, ranges and
 * defaults for the properties this driver exposes.  All link-speed
 * and duplex properties are read-only; MTU is range-limited; two
 * private ring-size properties report their defaults.
 */
void
oce_m_propinfo(void *arg, const char *name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	_NOTE(ARGUNUSED(arg));

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
	case MAC_PROP_EN_AUTONEG:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_ADV_10GFDX_CAP:
	case MAC_PROP_EN_10GFDX_CAP:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh, OCE_MIN_MTU, OCE_MAX_MTU);
		break;

	case MAC_PROP_PRIVATE: {
		char valstr[64];
		int defval;

		if (strcmp(name, "_tx_ring_size") == 0)
			defval = OCE_DEFAULT_TX_RING_SIZE;
		else if (strcmp(name, "_rx_ring_size") == 0)
			defval = OCE_DEFAULT_RX_RING_SIZE;
		else
			return;	/* unknown private property */

		(void) snprintf(valstr, sizeof (valstr), "%d", defval);
		mac_prop_info_set_default_str(prh, valstr);
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;
	}
	}
} /* oce_m_propinfo */
/*
 * FileAccessInformation
 *
 * Encode the access mask granted when this ofile was opened.
 */
static uint32_t
smb2_qif_access(smb_request_t *sr, smb_queryinfo_t *qi)
{
	smb_ofile_t *ofile = sr->fid_ofile;

	_NOTE(ARGUNUSED(qi))

	(void) smb_mbc_encodef(&sr->raw_data, "l", ofile->f_granted_access);
	return (0);
}
/*
 * This is used for 8-bit encodings: each byte maps directly to one
 * wide character.  Fails for multibyte locales.
 */
int
towide_none(wchar_t *c, const char *mb, unsigned n)
{
	_NOTE(ARGUNUSED(n));

	if (mb_cur_max == 1) {
		*c = (uint8_t)*mb;
		return (1);
	}

	werr("invalid or unsupported multibyte locale");
	return (-1);
}
/*
 * Thread worker: run the wcsrtombs iteration test over every locale
 * in the table, NUMITR times, then mark the test as passed.
 */
void
test_wcsrtombs_thr_work(test_t t, void *arg)
{
	int iter, li;

	_NOTE(ARGUNUSED(arg));

	for (iter = 0; iter < NUMITR; iter++) {
		test_debugf(t, "iteration %d", iter);
		for (li = 0; locales[li].locale != NULL; li++) {
			test_wcsrtombs_thr_iter(t, locales[li].locale,
			    locales[li].test);
		}
	}
	test_passed(t);
}
/*
 * Object-walk callback: print one data object as "[index] id name
 * (symidx)", padding so names line up in a 15-character column, and
 * bump the running object count.
 */
static int
ctfdump_objects_cb(const char *name, ctf_id_t id, ulong_t symidx, void *arg)
{
	int width;

	_NOTE(ARGUNUSED(arg));

	/* Measure the "[index] id" prefix to compute the pad width. */
	width = snprintf(NULL, 0, " [%lu] %ld", g_stats.cs_ndata, id);
	ctfdump_printf(CTFDUMP_OBJECTS, " [%lu] %ld %*s%s (%lu)\n",
	    g_stats.cs_ndata, id, MAX(15 - width, 0), "", name, symidx);
	g_stats.cs_ndata++;
	return (0);
}
/*
 * audio1575_open()
 *
 * Description:
 *	Opens a DMA engine for use: resets the frame counter and hands
 *	the framework the port's frame count and sample buffer.
 *
 * Arguments:
 *	void		*arg		The DMA engine to set up
 *	int		flag		Open flags (unused)
 *	unsigned	*nframesp	Receives number of frames
 *	caddr_t		*bufp		Receives kernel data buffer
 *
 * Returns:
 *	0 always
 */
static int
audio1575_open(void *arg, int flag, unsigned *nframesp, caddr_t *bufp)
{
	audio1575_port_t *pp = arg;

	_NOTE(ARGUNUSED(flag));

	pp->count = 0;
	*nframesp = pp->nframes;
	*bufp = pp->samp_kaddr;
	return (0);
}
/*
 * Initialize FMA resources for child devices.
 * Called when a child calls ddi_fm_init(); hands back this nexus's
 * iblock cookie and returns its FM capability level.
 */
static int
acpinex_fm_init_child(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
	acpinex_softstate_t *sp;

	_NOTE(ARGUNUSED(tdip, cap));

	sp = ddi_get_soft_state(acpinex_softstates, ddi_get_instance(dip));
	*ibc = sp->ans_fm_ibc;
	return (sp->ans_fm_cap);
}
/*
 * Packets from the mac device come here.  We pass them to the peer if
 * the destination mac address matches or it's a multicast/broadcast
 * address.
 */
static void
xnbo_from_mac_filter(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
    boolean_t loopback)
{
	_NOTE(ARGUNUSED(loopback));
	xnb_t *xnbp = arg;
	xnbo_t *xnbop = xnbp->xnb_flavour_data;
	mblk_t *next, *keep, *keep_head, *free, *free_head;

	keep = keep_head = free = free_head = NULL;

	/*
	 * ADD appends bp to the tail of the named list.  `list' is the
	 * tail pointer; token pasting derives the matching head pointer
	 * (keep_head/free_head).  Multi-statement without do/while, so
	 * every use below is inside a braced block or at statement level.
	 */
#define	ADD(list, bp)				\
	if (list != NULL)			\
		list->b_next = bp;		\
	else					\
		list##_head = bp;		\
	list = bp;

	/* Partition the chain into "keep" (forward) and "free" (drop). */
	for (; mp != NULL; mp = next) {
		mac_header_info_t hdr_info;

		next = mp->b_next;
		mp->b_next = NULL;

		/* Unparseable header: drop. */
		if (mac_header_info(xnbop->o_mh, mp, &hdr_info) != 0) {
			ADD(free, mp);
			continue;
		}

		/* Broadcast/multicast: always forward to the peer. */
		if ((hdr_info.mhi_dsttype == MAC_ADDRTYPE_BROADCAST) ||
		    (hdr_info.mhi_dsttype == MAC_ADDRTYPE_MULTICAST)) {
			ADD(keep, mp);
			continue;
		}

		/* Unicast destined for our peer's mac address: forward. */
		if (bcmp(hdr_info.mhi_daddr, xnbp->xnb_mac_addr,
		    sizeof (xnbp->xnb_mac_addr)) == 0) {
			ADD(keep, mp);
			continue;
		}

		/* Unicast for someone else: drop. */
		ADD(free, mp);
	}
#undef ADD

	if (keep_head != NULL)
		xnbo_from_mac(xnbp, mrh, keep_head, B_FALSE);

	if (free_head != NULL)
		freemsgchain(free_head);
}
/*
 * FilePositionInformation
 *
 * Encode the ofile's current seek position, sampled under the
 * ofile mutex.
 */
static uint32_t
smb2_qif_position(smb_request_t *sr, smb_queryinfo_t *qi)
{
	smb_ofile_t *ofile = sr->fid_ofile;
	uint64_t seekpos;

	_NOTE(ARGUNUSED(qi))

	mutex_enter(&ofile->f_mutex);
	seekpos = ofile->f_seek_pos;
	mutex_exit(&ofile->f_mutex);

	(void) smb_mbc_encodef(&sr->raw_data, "q", seekpos);
	return (0);
}
/*
 * Dispatch an ioctl to the fake (in-process) smbsrv driver.  Fails
 * with EBADF if the driver hasn't been opened; config requests are
 * adjusted for the test harness before dispatch.  The length argument
 * is unused here.
 */
int
smb_kmod_ioctl(int cmd, smb_ioc_header_t *ioc, uint32_t len)
{
	_NOTE(ARGUNUSED(len));

	if (!smbdrv_opened)
		return (EBADF);

	if (cmd == SMB_IOC_CONFIG)
		fksmbd_adjust_config(ioc);

	return (fksmbsrv_drv_ioctl(cmd, ioc));
}
/*
 * signal_thread:
 *	Dedicated signal-handling thread.  Blocks in sigwait() on the
 *	full signal set and dispatches each delivered signal; SIGTERM
 *	and SIGINT trigger a clean shutdown via lldp_quit() followed by
 *	exit(0).  Never returns normally.
 */
static void *
signal_thread(void *ignoreme)
{
	NOTE(ARGUNUSED(ignoreme))
	sigset_t sigset;
	int signum;
	boolean_t stop = B_FALSE;

	DMSG(D_THREAD, "signal thread awaiting signals.");

	/* Wait on every signal; delivery happens only via sigwait(). */
	(void) sigfillset(&sigset);

	while (!stop) {
		const char *sigstr;

		if (sigwait(&sigset, &signum) != 0) {
			DMSG(D_THREAD, "sigwait failed: %s",
			    strerror(errno));
			continue;
		}

		/*
		 * Out-of-range signal numbers fall back to entry 0 of
		 * the name table for logging purposes.
		 */
		sigstr = (signum < _sys_siglistn) ? _sys_siglist[signum] :
		    _sys_siglist[0];
		DMSG(D_THREAD, "signal %d (%s) caught.", signum, sigstr);

		switch (signum) {
		case SIGHUP:
			break;
		case SIGTERM:
		case SIGINT:
			/* Shut down; `continue' re-tests the loop guard. */
			lldp_quit();
			stop = B_TRUE;
			continue;
		case SIGWAITING:
			break;
		default:
			DMSG(D_THREAD, "ignoring signal %d.", signum);
		}
	}

	DMSG(D_THREAD, "exiting");
	exit(0);

	/* Unreachable; keeps the compiler's return-value check happy. */
	NOTE(NOTREACHED)
	return (NULL);
}
/*
 * Checks for pending rx buffers with Stack.
 * Polls up to `timeout' times, sleeping 10ms between checks, until
 * rq->pending drops to (or below) zero.  Returns the final pending
 * count (non-zero means buffers were still outstanding at timeout).
 */
int
oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
{
	int32_t i;

	_NOTE(ARGUNUSED(dev));

	for (i = 0; i < timeout; i++) {
		if (rq->pending <= 0) {
			rq->pending = 0;
			break;
		}
		OCE_MSDELAY(10);
	}
	return (rq->pending);
}