/*
 * npf_config_flush: build a config with the flush flag set and submit it,
 * instructing the kernel to drop the active configuration.
 *
 * Returns 0 on success, ENOMEM if the config cannot be allocated, or the
 * error from npf_config_submit().
 */
int
npf_config_flush(int fd)
{
	nl_config_t *ncf = npf_config_create();
	int rv;

	if (ncf == NULL)
		return ENOMEM;
	ncf->ncf_flush = true;
	rv = npf_config_submit(ncf, fd);
	npf_config_destroy(ncf);
	return rv;
}
/*
 * Mod_fw_replace: replace the contents of the NPF table identified by
 * TABLE_ID with the CIDR entries in `cidrs` for address family `af`.
 *
 * Returns the number of entries added, 0 if `cidrs` is empty, or -1 on
 * error (allocation or submit failure).
 *
 * NOTE(review): `set_name` is not used by the NPF backend — the table is
 * addressed by TABLE_ID; the parameter is kept for interface compatibility.
 */
int
Mod_fw_replace(FW_handle_T handle, const char *set_name, List_T cidrs, short af)
{
	struct fw_handle *fwh = handle->fwh;
	int nadded = 0;
	char *cidr;
	struct List_entry *entry;
	nl_config_t *ncf = NULL;
	nl_table_t *nt = NULL;
	struct IP_addr m, n;
	int ret;
	unsigned int maskbits;	/* %u requires unsigned int; uint8_t was UB */
	char parsed[INET6_ADDRSTRLEN];

	(void) set_name;	/* unused, see note above */

	if (List_size(cidrs) == 0)
		return 0;

	ncf = npf_config_create();
	if (ncf == NULL)
		goto err;
	nt = npf_table_create(TABLE_ID, NPF_TABLE_HASH);
	if (nt == NULL)
		goto err;

	/* This should somehow be atomic. */
	LIST_EACH(cidrs, entry) {
		if ((cidr = List_entry_value(entry)) != NULL
		    && IP_str_to_addr_mask(cidr, &n, &m) != -1) {
			ret = sscanf(cidr, "%39[^/]/%u", parsed, &maskbits);
			if (ret != 2 || maskbits == 0
			    || maskbits > IP_MAX_MASKBITS)
				continue;
			npf_table_add_entry(nt, af, (npf_addr_t *) &n,
			    (npf_netmask_t) maskbits);
			nadded++;
		}
	}

	npf_table_insert(ncf, nt);
	ret = npf_config_submit(ncf, fwh->npfdev);
	npf_config_destroy(ncf);
	npf_table_destroy(nt);
	return (ret == 0 ? nadded : -1);

err:
	if (nt != NULL)
		npf_table_destroy(nt);
	if (ncf != NULL)
		npf_config_destroy(ncf);
	return -1;
}
/*
 * npf_config_fini: final teardown of the NPF configuration state.
 *
 * Swaps in a fresh (empty) connection database to dispose of the active
 * connections, then destroys the config, the passive-serialization object
 * and the config lock.  The statement order below is deliberate: tracking
 * is disabled and readers are drained before the connection swap.
 */
void
npf_config_fini(void)
{
	/* Empty conndb that will replace the active one. */
	npf_conndb_t *cd = npf_conndb_create();

	/* Flush the connections. */
	mutex_enter(&npf_config_lock);
	/* Disable tracking so no new connections are created meanwhile. */
	npf_conn_tracking(false);
	/* Drain all lockless readers before touching shared state. */
	pserialize_perform(npf_config_psz);
	/* Install the empty conndb; `false` presumably means "do not
	 * re-enable tracking" — confirm against npf_conn_load(). */
	npf_conn_load(cd, false);
	npf_ifmap_flush();
	mutex_exit(&npf_config_lock);

	/* No readers remain: safe to destroy everything. */
	npf_config_destroy(npf_config);
	pserialize_destroy(npf_config_psz);
	mutex_destroy(&npf_config_lock);
}
/*
 * npfctl_config_send: submit the built configuration to the kernel via
 * descriptor `fd`; if `out` is given, mark the config for saving to that
 * path instead.  Exits the process if no default group was defined.
 *
 * Returns the error code from npf_config_submit() (0 on success); the
 * config object is destroyed in all cases.
 */
int
npfctl_config_send(int fd, const char *out)
{
	int rv;

	if (out != NULL) {
		_npf_config_setsubmit(npf_conf, out);
		printf("\nSaving to %s\n", out);
	}
	if (!defgroup_set)
		errx(EXIT_FAILURE, "default group was not defined");

	rv = npf_config_submit(npf_conf, fd);
	if (rv != 0) {
		nl_error_t errinfo;

		_npf_config_error(npf_conf, &errinfo);
		npfctl_print_error(&errinfo);
	}
	npf_config_destroy(npf_conf);
	return rv;
}
/*
 * npf_config_load: the main routine performing configuration load.
 * Performs the necessary synchronisation and destroys the old config.
 *
 * rset/tset/nset/rpset: the new rulesets, tables and rule procedures.
 * conns: new connection database to install, or NULL (conns != NULL
 * implies a "full" load — see `load` below).
 * flush: whether existing connections are to be flushed; also becomes
 * the new config's default-pass flag.
 */
void
npf_config_load(npf_ruleset_t *rset, npf_tableset_t *tset,
    npf_ruleset_t *nset, npf_rprocset_t *rpset,
    npf_conndb_t *conns, bool flush)
{
	const bool load = conns != NULL;
	npf_config_t *nc, *onc;

	nc = kmem_zalloc(sizeof(npf_config_t), KM_SLEEP);
	nc->n_rules = rset;
	nc->n_tables = tset;
	nc->n_nat_rules = nset;
	nc->n_rprocs = rpset;
	nc->n_default_pass = flush;

	/*
	 * Acquire the lock and perform the first phase:
	 * - Scan and use existing dynamic tables, reload only static.
	 * - Scan and use matching NAT policies to preserve the connections.
	 */
	mutex_enter(&npf_config_lock);
	if ((onc = npf_config) != NULL) {
		npf_ruleset_reload(rset, onc->n_rules, load);
		npf_tableset_reload(tset, onc->n_tables);
		npf_ruleset_reload(nset, onc->n_nat_rules, load);
	}

	/*
	 * Set the new config and release the lock.
	 * membar_sync() orders the config-field stores above before the
	 * pointer publication, so lockless readers never see a
	 * half-initialised config.
	 */
	membar_sync();
	npf_config = nc;
	if (onc == NULL) {
		/* Initial load, done. */
		npf_ifmap_flush();
		npf_conn_load(conns, !flush);
		mutex_exit(&npf_config_lock);
		return;
	}

	/*
	 * If we are going to flush the connections or load the new ones,
	 * then disable the connection tracking for the grace period.
	 */
	if (flush || conns) {
		npf_conn_tracking(false);
	}

	/* Synchronise: drain all references. */
	pserialize_perform(npf_config_psz);
	if (flush) {
		npf_ifmap_flush();
	}

	/*
	 * G/C the existing connections and, if passed, load the new ones.
	 * If not flushing - enable the connection tracking.
	 */
	npf_conn_load(conns, !flush);
	mutex_exit(&npf_config_lock);

	/* Finally, it is safe to destroy the old config. */
	npf_config_destroy(onc);
}