__private_extern__ void ipf_unref(void) { lck_mtx_lock(kipf_lock); if (kipf_ref == 0) panic("ipf_unref: kipf_ref == 0\n"); kipf_ref--; if (kipf_ref == 0 && kipf_delayed_remove != 0) { struct ipfilter *filter; while ((filter = TAILQ_FIRST(&tbr_filters))) { ipf_detach_func ipf_detach = filter->ipf_filter.ipf_detach; void* cookie = filter->ipf_filter.cookie; TAILQ_REMOVE(filter->ipf_head, filter, ipf_link); TAILQ_REMOVE(&tbr_filters, filter, ipf_tbr); kipf_delayed_remove--; if (ipf_detach) { lck_mtx_unlock(kipf_lock); ipf_detach(cookie); lck_mtx_lock(kipf_lock); /* In case some filter got to run while we released the lock */ if (kipf_ref != 0) break; } } } lck_mtx_unlock(kipf_lock); }
/*
 * Remove a previously attached IP filter.
 *
 * filter_ref must name a filter attached to either the IPv4 or the IPv6
 * filter list; anything else yields EINVAL, and a reference that is not
 * actually on its list yields ENOENT.  If filters are currently running
 * (kipf_ref != 0) the removal is deferred: the entry is parked on
 * tbr_filters with its input/output hooks cleared, to be detached and
 * reclaimed later by ipf_unref().  Otherwise the entry is unlinked,
 * its detach callback is invoked without the lock held, and the entry
 * is freed with the filter count and route generation updated.
 */
errno_t
ipf_remove(
	ipfilter_t filter_ref)
{
	struct ipfilter *match = (struct ipfilter*)filter_ref;
	struct ipfilter_list *head;
	struct ipfilter *entry;

	/* Reject NULL and references not claiming membership in either list */
	if (match == 0 ||
	    (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters))
		return EINVAL;

	head = match->ipf_head;

	lck_mtx_lock(kipf_lock);
	TAILQ_FOREACH(entry, head, ipf_link) {
		if (entry != match)
			continue;

		ipf_detach_func detach_fn = entry->ipf_filter.ipf_detach;
		void *detach_cookie = entry->ipf_filter.cookie;

		if (kipf_ref) {
			/*
			 * Cannot detach while filters are running: park the entry
			 * for deferred removal and neutralize its hooks so it no
			 * longer sees traffic.
			 */
			kipf_delayed_remove++;
			TAILQ_INSERT_TAIL(&tbr_filters, entry, ipf_tbr);
			entry->ipf_filter.ipf_input = 0;
			entry->ipf_filter.ipf_output = 0;
			lck_mtx_unlock(kipf_lock);
		} else {
			TAILQ_REMOVE(head, entry, ipf_link);
			/* Drop the lock before calling out; the callback may block */
			lck_mtx_unlock(kipf_lock);
			if (detach_fn)
				detach_fn(detach_cookie);
			FREE(entry, M_IFADDR);
			/* This will force TCP to re-evaluate its use of TSO */
			OSAddAtomic(-1, &kipf_count);
			if (use_routegenid)
				routegenid_update();
		}
		return 0;
	}
	lck_mtx_unlock(kipf_lock);

	return ENOENT;
}