/*
 * s1394_fini()
 *    cleans up the 1394 Software Framework's structures that were allocated
 *    in s1394_init().
 */
static void
s1394_fini()
{
    TNF_PROBE_0_DEBUG(s1394_fini_enter, S1394_TNF_SL_STACK, "");

    mutex_destroy(&s1394_statep->hal_list_mutex);
    kmem_free(s1394_statep, sizeof (s1394_state_t));

    TNF_PROBE_0_DEBUG(s1394_fini_exit, S1394_TNF_SL_STACK, "");
}
/*
 * hci1394_isr_bus_reset()
 *    Process a 1394 bus reset. This signifies that a bus reset has started.
 *    A bus reset will not be complete until a selfid complete interrupt
 *    comes in.
 */
static void
hci1394_isr_bus_reset(hci1394_state_t *soft_state)
{
    int status;

    ASSERT(soft_state != NULL);
    TNF_PROBE_0_DEBUG(hci1394_isr_bus_reset_enter, HCI1394_TNF_HAL_STACK, "");

    /*
     * Set the driver state to reset. If we cannot, we have been shutdown.
     * The only way we can get in this code is if we have a multi-processor
     * machine and the HAL is shutdown by one processor running in base
     * context while this interrupt handler runs in another processor.
     * We will disable all interrupts and just return. We shouldn't have
     * to disable the interrupts, but we will just in case.
     */
    status = hci1394_state_set(&soft_state->drvinfo, HCI1394_BUS_RESET);
    if (status != DDI_SUCCESS) {
        hci1394_ohci_intr_master_disable(soft_state->ohci);
        return;
    }

    /*
     * Save away the reset generation count so we can detect a
     * self-id-complete interrupt which disappears in the event register.
     * This is discussed in more detail in hci1394_isr().
     */
    soft_state->drvinfo.di_gencnt =
        hci1394_ohci_current_busgen(soft_state->ohci);

    soft_state->drvinfo.di_stats.st_bus_reset_count++;

    /*
     * Mask off busReset until SelfIdComplete comes in. The bus reset
     * interrupt will be asserted until the SelfIdComplete interrupt
     * comes in (i.e. you cannot clear the interrupt until a SelfIdComplete
     * interrupt). Therefore, we disable the interrupt via its mask so we
     * don't get stuck in the ISR indefinitely.
     */
    hci1394_ohci_intr_disable(soft_state->ohci, OHCI_INTR_BUS_RESET);

    /* Reset the ATREQ and ATRESP Q's */
    hci1394_async_atreq_reset(soft_state->async);
    hci1394_async_atresp_reset(soft_state->async);

    /* Inform Services Layer about Bus Reset */
    h1394_bus_reset(soft_state->drvinfo.di_sl_private,
        (void **)&soft_state->sl_selfid_buf);

    TNF_PROBE_0_DEBUG(hci1394_isr_bus_reset_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlist_add()
 *    Add the node to the tail of the linked list. The list is protected by a
 *    mutex at the iblock_cookie passed in during init.
 */
void
hci1394_tlist_add(hci1394_tlist_handle_t tlist_handle,
    hci1394_tlist_node_t *node)
{
    ASSERT(tlist_handle != NULL);
    ASSERT(node != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlist_add_enter, HCI1394_TNF_HAL_STACK, "");

    mutex_enter(&tlist_handle->tl_mutex);

    /* adds always go at the end of the list */
    node->tln_next = NULL;

    /* Set state that this node is currently on the tlist */
    node->tln_on_list = B_TRUE;

    /* enter in the expire time (in nS, to match gethrtime()) */
    if (tlist_handle->tl_timer_enabled == B_TRUE) {
        node->tln_expire_time = gethrtime() +
            tlist_handle->tl_timer_info.tlt_timeout;
    }

    /* if there is nothing in the list */
    if (tlist_handle->tl_tail == NULL) {
        tlist_handle->tl_head = node;
        tlist_handle->tl_tail = node;
        node->tln_prev = NULL;

        if ((tlist_handle->tl_timer_enabled == B_TRUE) &&
            (tlist_handle->tl_state == HCI1394_TLIST_TIMEOUT_OFF)) {
            /* turn the timer on */
            tlist_handle->tl_timeout_id = timeout(
                hci1394_tlist_callback, tlist_handle,
                t1394_tlist_nsectohz(
                tlist_handle->tl_timer_info.tlt_timer_resolution));
            tlist_handle->tl_state = HCI1394_TLIST_TIMEOUT_ON;
        }
    } else {
        /* put the node on the end of the list */
        tlist_handle->tl_tail->tln_next = node;
        node->tln_prev = tlist_handle->tl_tail;
        tlist_handle->tl_tail = node;

        /*
         * if timeouts are enabled, we don't have to call
         * timeout() because the timer is already on.
         */
    }

    mutex_exit(&tlist_handle->tl_mutex);

    TNF_PROBE_0_DEBUG(hci1394_tlist_add_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlabel_reset()
 *    resets the tlabel tracking structures to an initial state where no
 *    tlabels are outstanding and all tlabels are registered as good. This
 *    routine should be called every bus reset.
 */
void
hci1394_tlabel_reset(hci1394_tlabel_handle_t tlabel_handle)
{
    int index;
    int index2;

    ASSERT(tlabel_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlabel_reset_enter, HCI1394_TNF_HAL_STACK, "");

    mutex_enter(&tlabel_handle->tb_mutex);
    TNF_PROBE_0_DEBUG(hci1394_tlabel_reset, HCI1394_TNF_HAL_TLABEL, "");

    /* Bus reset optimization. Handle broadcast writes separately */
    if (tlabel_handle->tb_bcast_sent == B_TRUE) {
        tlabel_handle->tb_free[IEEE1394_BROADCAST_NODEID] =
            (uint64_t)0xFFFFFFFFFFFFFFFF;
        tlabel_handle->tb_bad[IEEE1394_BROADCAST_NODEID] = (uint64_t)0;
        tlabel_handle->tb_bad_timestamp[IEEE1394_BROADCAST_NODEID] =
            (hrtime_t)0;
        tlabel_handle->tb_last[IEEE1394_BROADCAST_NODEID] = 0;

        for (index2 = 0; index2 < TLABEL_RANGE; index2++) {
            tlabel_handle->tb_lookup[IEEE1394_BROADCAST_NODEID][index2] =
                NULL;
        }
    }

    /*
     * Mark all tlabels as free. No bad tlabels. Start the first tlabel
     * alloc at 0. Clean out the lookup table. An optimization to only do
     * this up to the max node we have seen on the bus has been added.
     */
    for (index = 0; index <= tlabel_handle->tb_max_node; index++) {
        tlabel_handle->tb_free[index] = (uint64_t)0xFFFFFFFFFFFFFFFF;
        tlabel_handle->tb_bad[index] = (uint64_t)0;
        tlabel_handle->tb_bad_timestamp[index] = (hrtime_t)0;
        tlabel_handle->tb_last[index] = 0;

        for (index2 = 0; index2 < TLABEL_RANGE; index2++) {
            tlabel_handle->tb_lookup[index][index2] = NULL;
        }
    }

    tlabel_handle->tb_max_node = 0;
    tlabel_handle->tb_bcast_sent = B_FALSE;

    mutex_exit(&tlabel_handle->tb_mutex);

    TNF_PROBE_0_DEBUG(hci1394_tlabel_reset_exit, HCI1394_TNF_HAL_STACK, "");
}
static int
hci1394_ioctl_read_selfid32(hci1394_state_t *soft_state,
    hci1394_ioctl_readselfid32_t *read_selfid, int mode)
{
    int status;
    uint_t offset;
    uint32_t data;

    ASSERT(soft_state != NULL);
    ASSERT(read_selfid != NULL);
    TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_enter,
        HCI1394_TNF_HAL_STACK, "");

    /*
     * make sure we are not trying to copy more data than the selfid buffer
     * can hold. count is in quadlets and OHCI_MAX_SELFID_SIZE is in bytes.
     */
    if ((read_selfid->count * 4) > OHCI_MAX_SELFID_SIZE) {
        TNF_PROBE_0(hci1394_ioctl_read_selfid32_cnt_fail,
            HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_exit,
            HCI1394_TNF_HAL_STACK, "");
        return (EINVAL);
    }

    /*
     * copy the selfid buffer one word at a time into the user buffer. The
     * combination of having to do ddi_get32's (for endian reasons) and a
     * ddi_copyout() makes it easier to do it one word at a time.
     */
    for (offset = 0; offset < read_selfid->count; offset++) {
        /* read word from selfid buffer */
        hci1394_ohci_selfid_read(soft_state->ohci, offset, &data);

        /* copy the selfid word into the user buffer */
        status = ddi_copyout(&data,
            (void *)(uintptr_t)(read_selfid->buf + (offset * 4)), 4, mode);
        if (status != 0) {
            TNF_PROBE_0(hci1394_ioctl_read_selfid32_co_fail,
                HCI1394_TNF_HAL_ERROR, "");
            TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_exit,
                HCI1394_TNF_HAL_STACK, "");
            return (EFAULT);
        }
    }

    TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_exit,
        HCI1394_TNF_HAL_STACK, "");

    return (0);
}
/*
 * s1394_init()
 *    initializes the 1394 Software Framework's structures, i.e. the HAL list
 *    and associated mutex.
 */
static int
s1394_init()
{
    TNF_PROBE_0_DEBUG(s1394_init_enter, S1394_TNF_SL_STACK, "");

    s1394_statep = kmem_zalloc(sizeof (s1394_state_t), KM_SLEEP);

    s1394_statep->hal_head = NULL;
    s1394_statep->hal_tail = NULL;

    mutex_init(&s1394_statep->hal_list_mutex, NULL, MUTEX_DRIVER, NULL);

    TNF_PROBE_0_DEBUG(s1394_init_exit, S1394_TNF_SL_STACK, "");

    return (0);
}
/*
 * hci1394_tlist_peek()
 *    get the node at the head of the linked list. This function does not
 *    remove the node from the list.
 */
void
hci1394_tlist_peek(hci1394_tlist_handle_t tlist_handle,
    hci1394_tlist_node_t **node)
{
    ASSERT(tlist_handle != NULL);
    ASSERT(node != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlist_peek_enter, HCI1394_TNF_HAL_STACK, "");

    mutex_enter(&tlist_handle->tl_mutex);
    *node = tlist_handle->tl_head;
    mutex_exit(&tlist_handle->tl_mutex);

    TNF_PROBE_0_DEBUG(hci1394_tlist_peek_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlist_timeout_update()
 *    update the timeout to a different value. timeout is in nS. The update
 *    does not happen immediately. The new timeout will not take effect
 *    until all of the nodes currently present in the list are gone. It only
 *    makes sense to call this function when you have the timeout feature
 *    enabled.
 */
void
hci1394_tlist_timeout_update(hci1394_tlist_handle_t tlist_handle,
    hrtime_t timeout)
{
    ASSERT(tlist_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlist_update_timeout_enter,
        HCI1394_TNF_HAL_STACK, "");

    /* set timeout to the new timeout */
    tlist_handle->tl_timer_info.tlt_timeout = timeout;

    TNF_PROBE_0_DEBUG(hci1394_tlist_update_timeout_exit,
        HCI1394_TNF_HAL_STACK, "");
}
/*
 * For preop search we do two things:
 * 1) based on the search base, we preselect the acls.
 * 2) also get hold of an acl_pblock for use
 */
static int
aclplugin_preop_search(Slapi_PBlock *pb)
{
    int scope;
    const char *base = NULL;
    Slapi_DN *sdn = NULL;
    int optype;
    int isRoot;
    int isProxy = 0;
    int rc = 0;
    char *errtxt = NULL;
    char *proxy_dn = NULL;

    TNF_PROBE_0_DEBUG(aclplugin_preop_search_start, "ACL", "");

    slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &optype);
    slapi_pblock_get(pb, SLAPI_REQUESTOR_ISROOT, &isRoot);

    if (LDAP_SUCCESS == proxyauth_get_dn(pb, &proxy_dn, &errtxt) && proxy_dn) {
        isProxy = 1;
    }
    slapi_ch_free_string(&proxy_dn);

    if (isRoot && !isProxy) {
        TNF_PROBE_1_DEBUG(aclplugin_preop_search_end, "ACL", "",
            tnf_string, isroot, "");
        return rc;
    }

    slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
    base = slapi_sdn_get_dn(sdn);

    /* For an anonymous client doing a search, nothing needs to be set up */
    if (optype == SLAPI_OPERATION_SEARCH &&
        aclanom_is_client_anonymous(pb) &&
        !slapi_dn_issuffix(base, "cn=monitor")) {
        TNF_PROBE_1_DEBUG(aclplugin_preop_search_end, "ACL", "",
            tnf_string, anon, "");
        return rc;
    }

    if (0 == (rc = aclplugin_preop_common(pb))) {
        slapi_pblock_get(pb, SLAPI_SEARCH_SCOPE, &scope);
        acllist_init_scan(pb, scope, base);
    }

    TNF_PROBE_0_DEBUG(aclplugin_preop_search_end, "ACL", "");

    return rc;
}
/*
 * nx1394_add_eventcall()
 *    This gets called when a child node calls ddi_add_eventcall(). Registers
 *    the specified callback for the requested event cookie with the ndi
 *    event framework.
 *    dip is the hal dip. This routine calls ndi_event_add_callback(),
 *    allowing requests for events we don't generate to pass up the tree.
 */
static int
nx1394_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t cookie, void (*callback)(), void *arg,
    ddi_callback_id_t *cb_id)
{
    int ret;
    s1394_hal_t *hal;
#if defined(DEBUG)
    char *event_name = NULL;
#endif

    hal = s1394_dip_to_hal(dip);
    ASSERT(hal);

    TNF_PROBE_0_DEBUG(nx1394_add_eventcall_enter, S1394_TNF_SL_NEXUS_STACK,
        "");

    ret = ndi_event_add_callback(hal->hal_ndi_event_hdl, rdip, cookie,
        callback, arg, NDI_NOSLEEP, cb_id);

#if defined(DEBUG)
    event_name = ndi_event_cookie_to_name(hal->hal_ndi_event_hdl, cookie);
    if (event_name == NULL)
        event_name = "";

    TNF_PROBE_4_DEBUG(nx1394_add_eventcall_exit, S1394_TNF_SL_NEXUS_STACK,
        "", tnf_opaque, parent_dip, (void *)dip, tnf_opaque, requestor_dip,
        (void *)rdip, tnf_string, event_name, event_name, tnf_int,
        request_status, ret);
#endif

    return (ret);
}
/*
 * nx1394_remove_eventcall()
 *    Called as a result of a child node calling ddi_remove_eventcall().
 *    Unregisters the callback corresponding to the callback id passed in.
 */
static int
nx1394_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
{
    int ret;
    s1394_hal_t *hal;
    ddi_eventcookie_t cookie;
#if defined(DEBUG)
    char *event_name = NULL;
#endif

    ASSERT(cb_id);
    cookie = ((ndi_event_callbacks_t *)cb_id)->ndi_evtcb_cookie;

    hal = s1394_dip_to_hal(dip);
    ASSERT(hal);

    TNF_PROBE_0_DEBUG(nx1394_remove_eventcall_enter,
        S1394_TNF_SL_NEXUS_STACK, "");

    ret = ndi_event_remove_callback(hal->hal_ndi_event_hdl, cb_id);

#if defined(DEBUG)
    event_name = ndi_event_cookie_to_name(hal->hal_ndi_event_hdl, cookie);
    if (event_name == NULL)
        event_name = "";

    TNF_PROBE_4_DEBUG(nx1394_remove_eventcall_exit,
        S1394_TNF_SL_NEXUS_STACK, "", tnf_opaque, parent_dip, (void *)dip,
        tnf_opaque, callback_id, (void *)cb_id, tnf_string, event_name,
        event_name, tnf_int, request_status, ret);
#endif

    return (ret);
}
/*
 * nx1394_dma_allochdl()
 *    Merges the ddi_dma_attr_t passed in by the target (using the
 *    ddi_dma_alloc_handle() call) with that of the hal and passes the alloc
 *    handle request up the device tree by calling ddi_dma_allochdl().
 */
static int
nx1394_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfnp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
    s1394_hal_t *hal;
    ddi_dma_attr_t *hal_attr;
    int status;

    _NOTE(SCHEME_PROTECTS_DATA("unique (per thread)", ddi_dma_attr_t))

    TNF_PROBE_0_DEBUG(nx1394_dma_allochdl_enter, S1394_TNF_SL_NEXUS_STACK,
        "");

    /*
     * If the hal calls ddi_dma_alloc_handle, dip == rdip == hal dip.
     * Unfortunately, we cannot verify this (by way of looking up the hal
     * dip) here because h1394_attach() may happen much later.
     */
    if (dip != rdip) {
        hal = s1394_dip_to_hal(ddi_get_parent(rdip));
        ASSERT(hal);

        hal_attr = &hal->halinfo.dma_attr;
        ASSERT(hal_attr);

        ddi_dma_attr_merge(attr, hal_attr);
    }

    status = ddi_dma_allochdl(dip, rdip, attr, waitfnp, arg, handlep);

    TNF_PROBE_1_DEBUG(nx1394_dma_allochdl_exit, S1394_TNF_SL_NEXUS_STACK,
        "", tnf_int, status, status);

    return (status);
}
static int
hci1394_ioctl_rdvreg(hci1394_state_t *soft_state, void *arg, int mode)
{
    hci1394_ioctl_rdvreg_t rdvreg;
    int status;

    ASSERT(soft_state != NULL);
    ASSERT(arg != NULL);
    TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_enter, HCI1394_TNF_HAL_STACK, "");

    status = ddi_copyin(arg, &rdvreg, sizeof (hci1394_ioctl_rdvreg_t), mode);
    if (status != 0) {
        TNF_PROBE_0(hci1394_ioctl_rdvreg_ci_fail, HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit, HCI1394_TNF_HAL_STACK,
            "");
        return (EFAULT);
    }

    status = hci1394_vendor_reg_read(soft_state->vendor, rdvreg.regset,
        rdvreg.addr, &rdvreg.data);
    if (status != DDI_SUCCESS) {
        TNF_PROBE_0(hci1394_ioctl_rdvreg_vrr_fail, HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit, HCI1394_TNF_HAL_STACK,
            "");
        return (EINVAL);
    }

    status = ddi_copyout(&rdvreg, arg, sizeof (hci1394_ioctl_rdvreg_t), mode);
    if (status != 0) {
        TNF_PROBE_0(hci1394_ioctl_rdvreg_co_fail, HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit, HCI1394_TNF_HAL_STACK,
            "");
        return (EFAULT);
    }

    TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit, HCI1394_TNF_HAL_STACK, "");

    return (0);
}
/*
 * s1394_fa_completion_cb()
 *    FA completion callback: restore command and call original callback
 */
static void
s1394_fa_completion_cb(cmd1394_cmd_t *cmd)
{
    s1394_hal_t *hal = cmd->cmd_callback_arg;

    TNF_PROBE_0_DEBUG(s1394_fa_completion_cb_enter, S1394_TNF_SL_FA_STACK,
        "");

    s1394_fa_restore_cmd(hal, cmd);

    if (cmd->completion_callback) {
        cmd->completion_callback(cmd);
    }

    TNF_PROBE_0_DEBUG(s1394_fa_completion_cb_exit, S1394_TNF_SL_FA_STACK, "");
}
/*
 * hci1394_tlabel_set_reclaim_time()
 *    This function should be called if a change to the reclaim_time is
 *    required after the initial call to init(). It is not necessary to call
 *    this function if the reclaim time never changes.
 *
 *    Currently, bad tlabels are reclaimed in tlabel_alloc().
 *    It looks like the following for a given node:
 *
 *    if bad tlabels exist
 *        if (current time >= (last bad tlabel time + reclaim time))
 *            free all bad tlabels.
 */
void
hci1394_tlabel_set_reclaim_time(hci1394_tlabel_handle_t tlabel_handle,
    hrtime_t reclaim_time_nS)
{
    ASSERT(tlabel_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlabel_set_reclaim_time_enter,
        HCI1394_TNF_HAL_STACK, "");

    /*
     * We do not need to lock the tlabel structure in this routine because
     * we are doing a single write to reclaim_time. If this changes in the
     * future, we may need to add calls to lock() and unlock().
     */
    tlabel_handle->tb_reclaim_time = reclaim_time_nS;

    TNF_PROBE_0_DEBUG(hci1394_tlabel_set_reclaim_time_exit,
        HCI1394_TNF_HAL_STACK, "");
}
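/*
 * Illustrative sketch only -- this helper is NOT part of the driver. It
 * shows how the reclaim test described above could look for one node,
 * using the tb_bad/tb_bad_timestamp/tb_free bookkeeping visible in
 * hci1394_tlabel_reset() and hci1394_tlabel_bad(). Since tlabel_bad()
 * records gethrtime() + tb_reclaim_time in tb_bad_timestamp[], the check
 * reduces to a comparison against the current time. The caller is assumed
 * to hold tb_mutex; the real check lives in tlabel_alloc().
 */
static void
example_tlabel_reclaim(hci1394_tlabel_t *tb, uint_t node_number)
{
    if ((tb->tb_bad[node_number] != 0) &&
        (gethrtime() >= tb->tb_bad_timestamp[node_number])) {
        /* enough time has passed; mark the bad tlabels as free again */
        tb->tb_free[node_number] |= tb->tb_bad[node_number];
        tb->tb_bad[node_number] = (uint64_t)0;
        tb->tb_bad_timestamp[node_number] = (hrtime_t)0;
    }
}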
/*
 * hci1394_isr_isoch_it()
 *    Process each isoch transmit context which has its interrupt asserted.
 *    The interrupt will be asserted when an isoch transmit descriptor with
 *    the interrupt bit is finished being processed.
 */
static void
hci1394_isr_isoch_it(hci1394_state_t *soft_state)
{
    uint32_t i;
    uint32_t mask;
    uint32_t ev;
    int num_it_contexts;
    hci1394_iso_ctxt_t *ctxtp;

    ASSERT(soft_state != NULL);
    TNF_PROBE_0_DEBUG(hci1394_isr_isoch_it_enter, HCI1394_TNF_HAL_STACK, "");

    num_it_contexts = hci1394_isoch_xmit_count_get(soft_state->isoch);

    /*
     * Main isochTx int is not clearable. It is automatically
     * cleared by the hw when the it_intr_event is cleared.
     */
    /* loop until no more IT events */
    while ((ev = hci1394_ohci_it_intr_asserted(soft_state->ohci)) != 0) {
        /* clear the events we just learned about */
        hci1394_ohci_it_intr_clear(soft_state->ohci, ev);

        /* for each interrupting IT context, process the interrupt */
        mask = 0x00000001;
        for (i = 0; i < num_it_contexts; i++) {
            /*
             * if the intr bit is on for a context,
             * call xmit/recv common processing code
             */
            if (ev & mask) {
                ctxtp = hci1394_isoch_xmit_ctxt_get(soft_state->isoch, i);
                hci1394_ixl_interrupt(soft_state, ctxtp, B_FALSE);
            }
            mask <<= 1;
        }
    }

    TNF_PROBE_0_DEBUG(hci1394_isr_isoch_it_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlabel_fini()
 *    Frees up the space allocated in init(). Notice that a pointer to the
 *    handle is used for the parameter. fini() will set your handle to NULL
 *    before returning.
 */
void
hci1394_tlabel_fini(hci1394_tlabel_handle_t *tlabel_handle)
{
    hci1394_tlabel_t *tstruct;

    ASSERT(tlabel_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlabel_fini_enter, HCI1394_TNF_HAL_STACK, "");

    tstruct = (hci1394_tlabel_t *)*tlabel_handle;

    mutex_destroy(&tstruct->tb_mutex);
    kmem_free(tstruct, sizeof (hci1394_tlabel_t));

    /* set handle to null. This helps catch bugs. */
    *tlabel_handle = NULL;

    TNF_PROBE_0_DEBUG(hci1394_tlabel_fini_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlist_timeout_cancel()
 *    cancel any scheduled timeouts. This should be called after the list is
 *    empty and there is no chance for any other nodes to be placed on the
 *    list. This function is meant to be called during a suspend or detach.
 */
void
hci1394_tlist_timeout_cancel(hci1394_tlist_handle_t tlist_handle)
{
    ASSERT(tlist_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlist_timeout_cancel_enter,
        HCI1394_TNF_HAL_STACK, "");

    /*
     * Cancel the timeout. Do NOT use the tlist mutex here. It could cause
     * a deadlock.
     */
    if (tlist_handle->tl_state == HCI1394_TLIST_TIMEOUT_ON) {
        (void) untimeout(tlist_handle->tl_timeout_id);
        tlist_handle->tl_state = HCI1394_TLIST_TIMEOUT_OFF;
    }

    TNF_PROBE_0_DEBUG(hci1394_tlist_timeout_cancel_exit,
        HCI1394_TNF_HAL_STACK, "");
}
/*
 * acl_access_allowed_main
 *    Main interface to the plugin. Calls different access check functions
 *    based on the flag.
 *
 *    Returns:
 *        LDAP_SUCCESS              -- access is granted
 *        LDAP_INSUFFICIENT_ACCESS  -- access denied
 *        <other ldap error>        -- ex: operations error
 */
int
acl_access_allowed_main(Slapi_PBlock *pb, Slapi_Entry *e, char **attrs,
    struct berval *val, int access, int flags, char **errbuf)
{
    int rc = 0;
    char *attr = NULL;

    TNF_PROBE_0_DEBUG(acl_access_allowed_main_start, "ACL", "");

    if (attrs && *attrs)
        attr = attrs[0];

    if (ACLPLUGIN_ACCESS_READ_ON_ENTRY == flags) {
        rc = acl_read_access_allowed_on_entry(pb, e, attrs, access);
    } else if (ACLPLUGIN_ACCESS_READ_ON_ATTR == flags) {
        if (attr == NULL) {
            slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name,
                "acl_access_allowed_main - Missing attribute\n");
            rc = LDAP_OPERATIONS_ERROR;
        } else {
            rc = acl_read_access_allowed_on_attr(pb, e, attr, val, access);
        }
    } else if (ACLPLUGIN_ACCESS_READ_ON_VLV == flags) {
        rc = acl_access_allowed_disjoint_resource(pb, e, attr, val, access);
    } else if (ACLPLUGIN_ACCESS_MODRDN == flags) {
        rc = acl_access_allowed_modrdn(pb, e, attr, val, access);
    } else if (ACLPLUGIN_ACCESS_GET_EFFECTIVE_RIGHTS == flags) {
        rc = acl_get_effective_rights(pb, e, attrs, val, access, errbuf);
    } else {
        rc = acl_access_allowed(pb, e, attr, val, access);
    }

    /* generate the appropriate error message */
    if ((rc != LDAP_SUCCESS) && errbuf &&
        (ACLPLUGIN_ACCESS_GET_EFFECTIVE_RIGHTS != flags) &&
        (access & (SLAPI_ACL_WRITE | SLAPI_ACL_ADD | SLAPI_ACL_DELETE |
        SLAPI_ACL_MODDN))) {
        char *edn = slapi_entry_get_dn(e);

        acl_gen_err_msg(access, edn, attr, errbuf);
    }

    TNF_PROBE_0_DEBUG(acl_access_allowed_main_end, "ACL", "");

    return rc;
}
/*
 * hci1394_tlist_fini()
 *    Frees up the space allocated in init(). Notice that a pointer to the
 *    handle is used for the parameter. fini() will set your handle to NULL
 *    before returning. Make sure that any pending timeouts are canceled.
 */
void
hci1394_tlist_fini(hci1394_tlist_handle_t *tlist_handle)
{
    hci1394_tlist_t *list;

    ASSERT(tlist_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlist_fini_enter, HCI1394_TNF_HAL_STACK, "");

    list = (hci1394_tlist_t *)*tlist_handle;

    hci1394_tlist_timeout_cancel(list);

    mutex_destroy(&list->tl_mutex);
    kmem_free(list, sizeof (hci1394_tlist_t));

    /* set handle to null. This helps catch bugs. */
    *tlist_handle = NULL;

    TNF_PROBE_0_DEBUG(hci1394_tlist_fini_exit, HCI1394_TNF_HAL_STACK, "");
}
static int
hci1394_ioctl_rdphy(hci1394_state_t *soft_state, void *arg, int mode)
{
    hci1394_ioctl_rdphy_t rdphy;
    int status;

    ASSERT(soft_state != NULL);
    ASSERT(arg != NULL);
    TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_enter, HCI1394_TNF_HAL_STACK, "");

    status = ddi_copyin(arg, &rdphy, sizeof (hci1394_ioctl_rdphy_t), mode);
    if (status != 0) {
        TNF_PROBE_0(hci1394_ioctl_rdphy_ci_fail, HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit, HCI1394_TNF_HAL_STACK,
            "");
        return (EFAULT);
    }

    status = hci1394_ohci_phy_read(soft_state->ohci, rdphy.addr,
        &rdphy.data);
    if (status != DDI_SUCCESS) {
        TNF_PROBE_0(hci1394_ioctl_rdphy_pr_fail, HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit, HCI1394_TNF_HAL_STACK,
            "");
        return (EINVAL);
    }

    status = ddi_copyout(&rdphy, arg, sizeof (hci1394_ioctl_rdphy_t), mode);
    if (status != 0) {
        TNF_PROBE_0(hci1394_ioctl_rdphy_co_fail, HCI1394_TNF_HAL_ERROR, "");
        TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit, HCI1394_TNF_HAL_STACK,
            "");
        return (EFAULT);
    }

    TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit, HCI1394_TNF_HAL_STACK, "");

    return (0);
}
int
s1394_cmp_register(s1394_target_t *target, t1394_cmp_evts_t *evts)
{
    s1394_hal_t *hal = target->on_hal;
    static t1394_cmp_evts_t default_evts = { NULL, NULL };

    TNF_PROBE_0_DEBUG(s1394_cmp_register_enter, S1394_TNF_SL_CMP_STACK, "");

    rw_enter(&hal->target_list_rwlock, RW_WRITER);

    /*
     * if registering the first target, claim and initialize addresses
     */
    if (s1394_fa_list_is_empty(hal, S1394_FA_TYPE_CMP)) {
        if (s1394_fa_claim_addr(hal, S1394_FA_TYPE_CMP_OMPR,
            &s1394_cmp_ompr_descr) != DDI_SUCCESS) {
            rw_exit(&hal->target_list_rwlock);
            return (DDI_FAILURE);
        }

        if (s1394_fa_claim_addr(hal, S1394_FA_TYPE_CMP_IMPR,
            &s1394_cmp_impr_descr) != DDI_SUCCESS) {
            s1394_fa_free_addr(hal, S1394_FA_TYPE_CMP_OMPR);
            rw_exit(&hal->target_list_rwlock);
            return (DDI_FAILURE);
        }

        s1394_cmp_init(hal);
    }

    /* Add on the target list (we only use one list) */
    s1394_fa_list_add(hal, target, S1394_FA_TYPE_CMP);

    if (evts == NULL) {
        evts = &default_evts;
    }
    target->target_fa[S1394_FA_TYPE_CMP].fat_u.cmp.cm_evts = *evts;

    rw_exit(&hal->target_list_rwlock);

    TNF_PROBE_0_DEBUG(s1394_cmp_register_exit, S1394_TNF_SL_CMP_STACK, "");

    return (DDI_SUCCESS);
}
/*
 * s1394_fa_free_addr()
 *    Free the fixed address block.
 */
void
s1394_fa_free_addr(s1394_hal_t *hal, s1394_fa_type_t type)
{
    s1394_fa_hal_t *falp = &hal->hal_fa[type];
    int ret;

    TNF_PROBE_0_DEBUG(s1394_fa_free_addr_enter, S1394_TNF_SL_FA_STACK, "");

    /* Might have been freed already */
    if (falp->fal_addr_blk != NULL) {
        ret = s1394_free_addr_blk(hal, falp->fal_addr_blk);
        if (ret != DDI_SUCCESS) {
            TNF_PROBE_1(s1394_fa_free_addr_error, S1394_TNF_SL_FA_STACK, "",
                tnf_int, ret, ret);
        }
        falp->fal_addr_blk = NULL;
    }

    TNF_PROBE_0_DEBUG(s1394_fa_free_addr_exit, S1394_TNF_SL_FA_STACK, "");
}
/*
 * hci1394_tlist_remove()
 *    This is an internal function which removes the given node from the
 *    list. The list MUST be locked before calling this function.
 */
static void
hci1394_tlist_remove(hci1394_tlist_t *list, hci1394_tlist_node_t *node)
{
    ASSERT(list != NULL);
    ASSERT(node != NULL);
    ASSERT(node->tln_on_list == B_TRUE);
    ASSERT(MUTEX_HELD(&list->tl_mutex));
    TNF_PROBE_0_DEBUG(hci1394_tlist_remove_enter, HCI1394_TNF_HAL_STACK, "");

    /* if this is the only node on the list */
    if ((list->tl_head == node) && (list->tl_tail == node)) {
        list->tl_head = NULL;
        list->tl_tail = NULL;

    /* if the node is at the head of the list */
    } else if (list->tl_head == node) {
        list->tl_head = node->tln_next;
        node->tln_next->tln_prev = NULL;

    /* if the node is at the tail of the list */
    } else if (list->tl_tail == node) {
        list->tl_tail = node->tln_prev;
        node->tln_prev->tln_next = NULL;

    /* if the node is in the middle of the list */
    } else {
        node->tln_prev->tln_next = node->tln_next;
        node->tln_next->tln_prev = node->tln_prev;
    }

    /* Set state that this node has been removed from the list */
    node->tln_on_list = B_FALSE;

    /* cleanup the node's link pointers */
    node->tln_prev = NULL;
    node->tln_next = NULL;

    TNF_PROBE_0_DEBUG(hci1394_tlist_remove_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlabel_init()
 *    Initialize the tlabel structures. These structures will be protected
 *    by a mutex at the iblock_cookie passed in. Bad tlabels will be usable
 *    when > reclaim_time_nS has gone by. init() returns a handle to be used
 *    for the rest of the tlabel functions.
 */
void
hci1394_tlabel_init(hci1394_drvinfo_t *drvinfo, hrtime_t reclaim_time_nS,
    hci1394_tlabel_handle_t *tlabel_handle)
{
    hci1394_tlabel_t *tstruct;

    ASSERT(tlabel_handle != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlabel_init_enter, HCI1394_TNF_HAL_STACK, "");

    /* alloc space for tlabel data */
    tstruct = kmem_alloc(sizeof (hci1394_tlabel_t), KM_SLEEP);

    /* setup handle which is returned from this function */
    *tlabel_handle = tstruct;

    /*
     * Initialize the tlabel structure. We start with max node set to the
     * maximum node we could have so that we make sure the arrays are
     * initialized correctly in hci1394_tlabel_reset().
     */
    tstruct->tb_drvinfo = drvinfo;
    tstruct->tb_reclaim_time = reclaim_time_nS;
    tstruct->tb_max_node = TLABEL_RANGE - 1;
    tstruct->tb_bcast_sent = B_FALSE;

    mutex_init(&tstruct->tb_mutex, NULL, MUTEX_DRIVER,
        drvinfo->di_iblock_cookie);

    /*
     * The mutex must be initialized before tlabel_reset()
     * is called. This is because tlabel_reset is also
     * used in normal tlabel processing (i.e. not just during
     * initialization)
     */
    hci1394_tlabel_reset(tstruct);

    TNF_PROBE_0_DEBUG(hci1394_tlabel_init_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * hci1394_tlist_get()
 *    get the node at the head of the linked list. This function also removes
 *    the node from the list.
 */
void
hci1394_tlist_get(hci1394_tlist_handle_t tlist_handle,
    hci1394_tlist_node_t **node)
{
    ASSERT(tlist_handle != NULL);
    ASSERT(node != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlist_get_enter, HCI1394_TNF_HAL_STACK, "");

    mutex_enter(&tlist_handle->tl_mutex);

    /* set the return parameter */
    *node = tlist_handle->tl_head;

    /* remove the node from the tlist */
    if (*node != NULL) {
        hci1394_tlist_remove(tlist_handle, *node);
    }

    mutex_exit(&tlist_handle->tl_mutex);

    TNF_PROBE_0_DEBUG(hci1394_tlist_get_exit, HCI1394_TNF_HAL_STACK, "");
}
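/*
 * Illustrative usage sketch only -- this helper is NOT part of the driver.
 * It shows the intended pairing of hci1394_tlist_add() and
 * hci1394_tlist_get(): a caller queues a node at the tail and later pulls
 * the oldest entry back off the head. The tlist handle setup (init) and the
 * embedding of the node in a larger command structure are assumed to have
 * been done elsewhere.
 */
static void
example_tlist_usage(hci1394_tlist_handle_t tlist, hci1394_tlist_node_t *node)
{
    hci1394_tlist_node_t *oldest;

    /* queue the node at the tail; the expire time is stamped inside add() */
    hci1394_tlist_add(tlist, node);

    /* later, remove and return the node at the head of the list */
    hci1394_tlist_get(tlist, &oldest);
    if (oldest != NULL) {
        /* process the oldest entry (e.g. check tln_expire_time) */
    }
}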
/*
 * hci1394_tlabel_bad()
 *    Register the specified tlabel as bad. tlabel_lookup() will no longer
 *    return a registered opaque command and this tlabel will not be returned
 *    from alloc() until > reclaim_time has passed. See set_reclaim_time()
 *    for more info.
 */
void
hci1394_tlabel_bad(hci1394_tlabel_handle_t tlabel_handle,
    hci1394_tlabel_info_t *tlabel_info)
{
    uint_t node_number;
    uint_t tlabel;

    ASSERT(tlabel_handle != NULL);
    ASSERT(tlabel_info != NULL);
    TNF_PROBE_0_DEBUG(hci1394_tlabel_bad_enter, HCI1394_TNF_HAL_STACK, "");

    /* figure out what node and tlabel we are using */
    node_number = IEEE1394_NODE_NUM(tlabel_info->tbi_destination);
    tlabel = tlabel_info->tbi_tlabel & TLABEL_MASK;

    mutex_enter(&tlabel_handle->tb_mutex);

    TNF_PROBE_2(hci1394_tlabel_timeout, HCI1394_TNF_HAL_ERROR, "",
        tnf_uint, nodeid, node_number, tnf_uint, bad_tlabel,
        tlabel_info->tbi_tlabel);

    /*
     * Put the tlabel in the bad list and NULL out the (void *) in the
     * lookup structure. We may see this tlabel shortly if the device is
     * late in responding. We want to make sure to drop the message if we
     * do. Set the bad timestamp to the current time plus the reclaim time.
     * This is the "new" time when all of the bad tlabels for this node will
     * be free'd.
     */
    tlabel_handle->tb_bad_timestamp[node_number] = gethrtime() +
        tlabel_handle->tb_reclaim_time;
    tlabel_handle->tb_bad[node_number] |= ((uint64_t)1 << tlabel);
    tlabel_handle->tb_lookup[node_number][tlabel] = NULL;

    mutex_exit(&tlabel_handle->tb_mutex);

    TNF_PROBE_0_DEBUG(hci1394_tlabel_bad_exit, HCI1394_TNF_HAL_STACK, "");
}
/*
 * s1394_fa_claim_addr()
 *    Claim the fixed address block.
 */
int
s1394_fa_claim_addr(s1394_hal_t *hal, s1394_fa_type_t type,
    s1394_fa_descr_t *descr)
{
    t1394_alloc_addr_t addr;
    s1394_fa_hal_t *falp = &hal->hal_fa[type];
    int ret;

    TNF_PROBE_0_DEBUG(s1394_fa_claim_addr_enter, S1394_TNF_SL_FA_STACK, "");

    /* Might have been claimed already */
    if (falp->fal_addr_blk != NULL) {
        TNF_PROBE_0_DEBUG(s1394_fa_claim_addr_exit, S1394_TNF_SL_FA_STACK,
            "");
        return (DDI_SUCCESS);
    }

    falp->fal_descr = descr;

    bzero(&addr, sizeof (addr));
    addr.aa_type = T1394_ADDR_FIXED;
    addr.aa_address = descr->fd_addr;
    addr.aa_length = descr->fd_size;
    addr.aa_enable = descr->fd_enable;
    addr.aa_evts = descr->fd_evts;
    addr.aa_arg = hal;

    ret = s1394_claim_addr_blk(hal, &addr);
    if (ret != DDI_SUCCESS) {
        TNF_PROBE_2(s1394_fa_claim_addr_error, S1394_TNF_SL_FA_ERROR, "",
            tnf_int, type, type, tnf_int, ret, ret);
    } else {
        falp->fal_addr_blk = (s1394_addr_space_blk_t *)addr.aa_hdl;
    }

    TNF_PROBE_0_DEBUG(s1394_fa_claim_addr_exit, S1394_TNF_SL_FA_STACK, "");

    return (ret);
}
static void
s1394_cmp_impr_recv_read_request(cmd1394_cmd_t *req)
{
    s1394_hal_t *hal = req->cmd_callback_arg;
    s1394_cmp_hal_t *cmp = &hal->hal_cmp;

    TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_read_request_enter,
        S1394_TNF_SL_CMP_STACK, "");

    if (req->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
    } else {
        rw_enter(&cmp->cmp_impr_rwlock, RW_READER);
        req->cmd_u.q.quadlet_data = cmp->cmp_impr_val;
        rw_exit(&cmp->cmp_impr_rwlock);
        req->cmd_result = IEEE1394_RESP_COMPLETE;
    }

    (void) s1394_send_response(hal, req);

    TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_read_request_exit,
        S1394_TNF_SL_CMP_STACK, "");
}
static void
s1394_cmp_impr_recv_lock_request(cmd1394_cmd_t *req)
{
    s1394_hal_t *hal = req->cmd_callback_arg;
    s1394_cmp_hal_t *cmp = &hal->hal_cmp;
    boolean_t notify = B_TRUE;

    TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_lock_request_enter,
        S1394_TNF_SL_CMP_STACK, "");

    if ((req->cmd_type != CMD1394_ASYNCH_LOCK_32) ||
        (req->cmd_u.l32.lock_type != CMD1394_LOCK_COMPARE_SWAP)) {
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
        notify = B_FALSE;
    } else {
        rw_enter(&cmp->cmp_impr_rwlock, RW_WRITER);

        req->cmd_u.l32.old_value = cmp->cmp_impr_val;
        if (cmp->cmp_impr_val == req->cmd_u.l32.arg_value) {
            /* write only allowed bits */
            cmp->cmp_impr_val =
                (req->cmd_u.l32.data_value & IEC61883_CMP_IMPR_LOCK_MASK) |
                (cmp->cmp_impr_val & ~IEC61883_CMP_IMPR_LOCK_MASK);
        }

        rw_exit(&cmp->cmp_impr_rwlock);
        req->cmd_result = IEEE1394_RESP_COMPLETE;
    }

    (void) s1394_send_response(hal, req);

    /* notify all targets */
    if (notify) {
        s1394_cmp_notify_reg_change(hal, T1394_CMP_IMPR, NULL);
    }

    TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_lock_request_exit,
        S1394_TNF_SL_CMP_STACK, "");
}