static void mpt_set_options(struct mpt_softc *mpt) { int bitmap; bitmap = 0; if (getenv_int("mpt_disable", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->disabled = 1; } } bitmap = 0; if (getenv_int("mpt_debug", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG; } } bitmap = 0; if (getenv_int("mpt_debug1", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG1; } } bitmap = 0; if (getenv_int("mpt_debug2", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG2; } } bitmap = 0; if (getenv_int("mpt_debug3", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG3; } } mpt->cfg_role = MPT_ROLE_DEFAULT; bitmap = 0; if (getenv_int("mpt_nil_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role = 0; } mpt->do_cfg_role = 1; } bitmap = 0; if (getenv_int("mpt_tgt_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role |= MPT_ROLE_TARGET; } mpt->do_cfg_role = 1; } bitmap = 0; if (getenv_int("mpt_ini_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role |= MPT_ROLE_INITIATOR; } mpt->do_cfg_role = 1; } mpt->msi_enable = 0; }
/*
 * Populate opts with the compiled-in defaults, then let environment
 * variables override individual fields.  Every option has a long-form
 * DAG_RECORDER_* name and a short DR_* alias; because || short-circuits,
 * the long form is consulted first and wins when both are set.
 */
void dr_options_default_(dr_options * opts) {
  * opts = dr_options_default_values;
  (void)(getenv_bool("DAG_RECORDER", &opts->on)
	 || getenv_bool("DR", &opts->on));
  (void)(getenv_str("DAG_RECORDER_FILE_PREFIX", &opts->dag_file_prefix)
	 || getenv_str("DR_PREFIX", &opts->dag_file_prefix));
  (void)(getenv_bool("DAG_RECORDER_DAG_FILE", &opts->dag_file_yes)
	 || getenv_bool("DR_DAG", &opts->dag_file_yes));
  (void)(getenv_bool("DAG_RECORDER_STAT_FILE", &opts->stat_file_yes)
	 || getenv_bool("DR_STAT", &opts->stat_file_yes));
  (void)(getenv_bool("DAG_RECORDER_GPL_FILE", &opts->gpl_file_yes)
	 || getenv_bool("DR_GPL", &opts->gpl_file_yes));
  (void)(getenv_bool("DAG_RECORDER_DOT_FILE", &opts->dot_file_yes)
	 || getenv_bool("DR_DOT", &opts->dot_file_yes));
  (void)(getenv_bool("DAG_RECORDER_TEXT_FILE", &opts->text_file_yes)
	 || getenv_bool("DR_TEXT", &opts->text_file_yes));
  /* NOTE: we do not set sqlite_file via environment variables */
  (void)(getenv_int("DAG_RECORDER_GPL_SIZE", &opts->gpl_sz)
	 || getenv_int("DR_GPL_SZ", &opts->gpl_sz));
  (void)(getenv_str("DAG_RECORDER_TEXT_FILE_SEP", &opts->text_file_sep)
	 || getenv_str("DR_TEXT_SEP", &opts->text_file_sep));
  (void)(getenv_byte("DAG_RECORDER_DBG_LEVEL", &opts->dbg_level)
	 || getenv_byte("DR_DBG", &opts->dbg_level));
  (void)(getenv_byte("DAG_RECORDER_VERBOSE_LEVEL", &opts->verbose_level)
	 || getenv_byte("DR_VERBOSE", &opts->verbose_level));
  (void)(getenv_byte("DAG_RECORDER_CHK_LEVEL", &opts->chk_level)
	 || getenv_byte("DR_CHK", &opts->chk_level));
  (void)(getenv_ull("DAG_RECORDER_UNCOLLAPSE_MIN", &opts->uncollapse_min)
	 || getenv_ull("DR_UNCOLLAPSE_MIN", &opts->uncollapse_min));
  (void)(getenv_ull("DAG_RECORDER_COLLAPSE_MAX", &opts->collapse_max)
	 || getenv_ull("DR_COLLAPSE_MAX", &opts->collapse_max));
  (void)(getenv_long("DAG_RECORDER_NODE_COUNT", &opts->node_count_target)
	 || getenv_long("DR_NC", &opts->node_count_target));
  (void)(getenv_long("DAG_RECORDER_PRUNE_THRESHOLD", &opts->prune_threshold)
	 || getenv_long("DR_PRUNE", &opts->prune_threshold));
  (void)(getenv_long("DAG_RECORDER_COLLAPSE_MAX_COUNT", &opts->collapse_max_count)
	 || getenv_long("DR_COLLAPSE_MAX_COUNT", &opts->collapse_max_count));
  (void)(getenv_long("DAG_RECORDER_ALLOC_UNIT_MB", &opts->alloc_unit_mb)
	 || getenv_long("DR_ALLOC_UNIT_MB", &opts->alloc_unit_mb));
  (void)(getenv_long("DAG_RECORDER_PRE_ALLOC_PER_WORKER", &opts->pre_alloc_per_worker)
	 || getenv_long("DR_PRE_ALLOC_PER_WORKER", &opts->pre_alloc_per_worker));
  (void)(getenv_long("DAG_RECORDER_PRE_ALLOC", &opts->pre_alloc)
	 || getenv_long("DR_PRE_ALLOC", &opts->pre_alloc));
}
/* * This *must* be called first before any of the functions above!!! */ void snd_unit_init(void) { int i; if (snd_unit_initialized != 0) return; snd_unit_initialized = 1; if (getenv_int("hw.snd.maxunit", &i) != 0) { if (i < SND_UNIT_UMIN) i = SND_UNIT_UMIN; else if (i > SND_UNIT_UMAX) i = SND_UNIT_UMAX; else i = roundup2(i, 2); for (snd_u_shift = 0; (i >> (snd_u_shift + 1)) != 0; snd_u_shift++) ; /* * Make room for channels/clones allocation unit * to fit within 24bit MAXMINOR limit. */ snd_c_shift = 24 - snd_u_shift - snd_d_shift; } if (bootverbose != 0) printf("%s() u=0x%08x [%d] d=0x%08x [%d] c=0x%08x [%d]\n", __func__, SND_U_MASK, snd_max_u() + 1, SND_D_MASK, snd_max_d() + 1, SND_C_MASK, snd_max_c() + 1); }
/*
 * Report whether JIT code dumping is enabled.  The MKLDNN_JIT_DUMP
 * environment variable is read once, on first call, and cached in
 * jit_dump_flag; any nonzero value enables dumping.
 * NOTE(review): the lazy initialization is not synchronized — confirm
 * the first call happens before concurrent use.
 */
bool jit_dump_enabled() {
    if (jit_dump_flag_initialized)
        return jit_dump_flag != 0;
    jit_dump_flag = getenv_int("MKLDNN_JIT_DUMP");
    jit_dump_flag_initialized = true;
    return jit_dump_flag != 0;
}
/*
 * Attach an e1000 PHY instance to the MII bus: register the PHY with
 * the parent mii(4) layer, reset it, and advertise the supported media
 * types (10/100/1000BaseT plus autoselect).  Always returns 0.
 */
static int
e1000phy_attach(device_t dev)
{
	struct mii_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;

	/* Optional loader tunable enables PHY debug output. */
	getenv_int("e1000phy_debug", &e1000phy_debug);

	sc = device_get_softc(dev);
	ma = device_get_ivars(dev);
	sc->mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);

	/* Wire this PHY into the MII service framework. */
	sc->mii_inst = mii->mii_instance;
	sc->mii_phy = ma->mii_phyno;
	sc->mii_service = e1000phy_service;
	sc->mii_pdata = mii;
	sc->mii_flags |= MIIF_NOISOLATE;
	mii->mii_instance++;

	e1000phy_reset(sc);

	device_printf(dev, " ");
#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
	/* ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst), E1000_CR_ISOLATE); */
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
	    E1000_CR_SPEED_10);
	printf("10baseT, ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
	    E1000_CR_SPEED_10 | E1000_CR_FULL_DUPLEX);
	printf("10baseT-FDX, ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
	    E1000_CR_SPEED_100);
	printf("100baseTX, ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
	    E1000_CR_SPEED_100 | E1000_CR_FULL_DUPLEX);
	printf("100baseTX-FDX, ");
	/*
	 * 1000BT-simplex not supported; driver must ignore this entry,
	 * but it must be present in order to manually set full-duplex.
	 */
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0, sc->mii_inst),
	    E1000_CR_SPEED_1000);
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX, sc->mii_inst),
	    E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
	printf("1000baseTX-FDX, ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0);
	printf("auto\n");
#undef ADD

	MIIBUS_MEDIAINIT(sc->mii_dev);
	return(0);
}
/*
 * Return the merge block size in bytes: the value of the
 * MTBL_MERGE_BLOCK_SIZE environment variable when set, otherwise
 * DEFAULT_BLOCK_SIZE.
 */
static size_t
get_block_size(void)
{
	uint64_t env_sz;

	return (getenv_int("MTBL_MERGE_BLOCK_SIZE", &env_sz) ?
	    (size_t) env_sz : DEFAULT_BLOCK_SIZE);
}
int main(int argc, char *argv[]) { int indent = 0; size_t flags = 0; json_t *json; json_error_t error; if(argc != 1) { fprintf(stderr, "usage: %s\n", argv[0]); return 2; } indent = getenv_int("JSON_INDENT"); if(indent < 0 || indent > 255) { fprintf(stderr, "invalid value for JSON_INDENT: %d\n", indent); return 2; } if(indent > 0) flags |= JSON_INDENT(indent); if(getenv_int("JSON_COMPACT") > 0) flags |= JSON_COMPACT; if(getenv_int("JSON_ENSURE_ASCII")) flags |= JSON_ENSURE_ASCII; if(getenv_int("JSON_PRESERVE_ORDER")) flags |= JSON_PRESERVE_ORDER; if(getenv_int("JSON_SORT_KEYS")) flags |= JSON_SORT_KEYS; json = json_loadf(stdin, 0, &error); if(!json) { fprintf(stderr, "%d\n%s\n", error.line, error.text); return 1; } json_dumpf(json, stdout, flags); json_decref(json); return 0; }
/*
 * Probe the dcons GDB debug-port tunable.  Returns the integer value
 * of the "dcons_gdb" kernel environment variable, or -1 when the
 * variable is not set.
 */
static int
dcons_dbg_probe(void)
{
	int gdb_enable;

	if (!getenv_int("dcons_gdb", &gdb_enable))
		return (-1);
	return (gdb_enable);
}
/*
 * generic PCI ATA device probe
 *
 * On Hyper-V this probe claims emulated IDE controllers (returning
 * BUS_PROBE_VENDOR) so the enlightened storage driver is used instead
 * of the native ATA driver; ENXIO declines the device.
 */
static int
hv_ata_pci_probe(device_t dev)
{
	int ata_disk_enable = 0;

	if(bootverbose)
		device_printf(dev,
		    "hv_ata_pci_probe dev_class/subslcass = %d, %d\n",
		    pci_get_class(dev), pci_get_subclass(dev));

	/* is this a storage class device ? */
	if (pci_get_class(dev) != PCIC_STORAGE)
		return (ENXIO);

	/* is this an IDE/ATA type device ? */
	if (pci_get_subclass(dev) != PCIS_STORAGE_IDE)
		return (ENXIO);

	if(bootverbose)
		device_printf(dev,
		    "Hyper-V probe for disabling ATA-PCI, emulated driver\n");

	/*
	 * On Hyper-V the default is to use the enlightened driver for
	 * IDE disks. However, if the user wishes to use the native
	 * ATA driver, the environment variable
	 * hw_ata.disk_enable must be explicitly set to 1.
	 *
	 * NOTE(review): the check below only tests that the
	 * "hw.ata.disk_enable" tunable is *present* (getenv_int
	 * success) — it never examines ata_disk_enable's value, so
	 * "set to 1" in the comment above does not match the code.
	 * Confirm which behavior is intended.
	 */
	if (hv_check_for_hyper_v()) {
		if (getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
			if(bootverbose)
				device_printf(dev,
				    "hw.ata.disk_enable flag is disabling Hyper-V"
				    " ATA driver support\n");
			return (ENXIO);
		}
	}

	if(bootverbose)
		device_printf(dev, "Hyper-V ATA storage driver enabled.\n");

	return (BUS_PROBE_VENDOR);
}
void read_env ( void ) { JGREYLIST = getenv ( "JGREYLIST" ) ; JGREYLIST_DIR = getenv ( "JGREYLIST_DIR" ) ; JGREYLIST_NOREV = getenv ( "JGREYLIST_NOREV" ) ; TCPREMOTEHOST = getenv ( "TCPREMOTEHOST" ) ; TCPREMOTEIP = getenv ( "TCPREMOTEIP" ) ; JGREYLIST_BY_IP = getenv_int ( "JGREYLIST_BY_IP" , 0 ) ; JGREYLIST_HOLDTIME = getenv_int ( "JGREYLIST_HOLDTIME" , 120 ) ; JGREYLIST_LOG = getenv_int ( "JGREYLIST_LOG" , 1 ) ; JGREYLIST_LOG_PID = getenv_int ( "JGREYLIST_LOG_PID" , 1 ) ; JGREYLIST_LOG_SMTP = getenv_int ( "JGREYLIST_LOG_SMTP" , 0 ) ; JGREYLIST_TIMEOUT = getenv_int ( "JGREYLIST_TIMEOUT" , 60 ) ; JGREYLIST_LIMIT = getenv_int ( "JGREYLIST_LIMIT" , 0 ) ; if ( JGREYLIST_TIMEOUT < 5 ) JGREYLIST_TIMEOUT = 5 ; if ( JGREYLIST_TIMEOUT > 300 ) JGREYLIST_TIMEOUT = 300 ; }
/*
 * Initialize run_erl's logging state: set compiled-in defaults, then
 * apply RUN_ERL_LOG_* environment overrides, and record the log
 * directory / status-file paths.  Returns 0.
 *
 * NOTE(review): in this program getenv_int() evidently returns the
 * variable's string value (a char *), not an int — numeric parsing is
 * done here with atoi().  Confirm against the local helper's definition.
 */
int erts_run_erl_log_init(int daemon, char* logdir) {
  char *p;
#ifdef __OSE__
  run_erl **re_pp;
  /* Per-process data block for OSE, created once per process. */
  if (!run_erl_pp_key)
    ose_create_ppdata("run_erl_ppdata",&run_erl_pp_key);
  re_pp = (run_erl **)ose_get_ppdata(run_erl_pp_key);
  /* NOTE(review): malloc result is not checked for NULL here. */
  *re_pp = malloc(sizeof(run_erl));
#endif
  /* Establish defaults before consulting the environment. */
  STDSTATUS = NULL;
  LOG_GENERATIONS = DEFAULT_LOG_GENERATIONS;
  LOG_MAXSIZE = DEFAULT_LOG_MAXSIZE;
  LOG_ACTIVITY_MINUTES = DEFAULT_LOG_ACTIVITY_MINUTES;
  LOG_ALIVE_IN_GMT = 0;
  RUN_DAEMON = 0;
  LOG_ALIVE_MINUTES = DEFAULT_LOG_ALIVE_MINUTES;
  LFD = 0;
  PROTOCOL_VER = RUN_ERL_LO_VER; /* assume lowest to begin with */

  /* Get values for LOG file handling from the environment */
  if ((p = getenv_int("RUN_ERL_LOG_ALIVE_MINUTES"))) {
    LOG_ALIVE_MINUTES = atoi(p);
    if (!LOG_ALIVE_MINUTES) {
      ERROR1(LOG_ERR,"Minimum value for RUN_ERL_LOG_ALIVE_MINUTES is 1 "
	     "(current value is %s)",p);
    }
    /* Activity checks run three times per alive period, minimum 1. */
    LOG_ACTIVITY_MINUTES = LOG_ALIVE_MINUTES / 3;
    if (!LOG_ACTIVITY_MINUTES) {
      ++LOG_ACTIVITY_MINUTES;
    }
  }
  if ((p = getenv_int( "RUN_ERL_LOG_ACTIVITY_MINUTES"))) {
    LOG_ACTIVITY_MINUTES = atoi(p);
    if (!LOG_ACTIVITY_MINUTES) {
      ERROR1(LOG_ERR,"Minimum value for RUN_ERL_LOG_ACTIVITY_MINUTES is 1 "
	     "(current value is %s)",p);
    }
  }
  if ((p = getenv_int("RUN_ERL_LOG_ALIVE_FORMAT"))) {
    if (strlen(p) > ALIVE_BUFFSIZ) {
      ERROR1(LOG_ERR, "RUN_ERL_LOG_ALIVE_FORMAT can contain a maximum of "
	     "%d characters", ALIVE_BUFFSIZ);
    }
    strn_cpy(LOG_ALIVE_FORMAT, sizeof(LOG_ALIVE_FORMAT), p);
  } else {
    strn_cpy(LOG_ALIVE_FORMAT, sizeof(LOG_ALIVE_FORMAT),
	     DEFAULT_LOG_ALIVE_FORMAT);
  }
  /* Any value other than "0" switches alive timestamps to UTC. */
  if ((p = getenv_int("RUN_ERL_LOG_ALIVE_IN_UTC")) && strcmp(p,"0")) {
    ++LOG_ALIVE_IN_GMT;
  }
  if ((p = getenv_int("RUN_ERL_LOG_GENERATIONS"))) {
    LOG_GENERATIONS = atoi(p);
    if (LOG_GENERATIONS < LOG_MIN_GENERATIONS)
      ERROR1(LOG_ERR,"Minimum RUN_ERL_LOG_GENERATIONS is %d",
	     LOG_MIN_GENERATIONS);
    if (LOG_GENERATIONS > LOG_MAX_GENERATIONS)
      ERROR1(LOG_ERR,"Maximum RUN_ERL_LOG_GENERATIONS is %d",
	     LOG_MAX_GENERATIONS);
  }
  if ((p = getenv_int("RUN_ERL_LOG_MAXSIZE"))) {
    LOG_MAXSIZE = atoi(p);
    if (LOG_MAXSIZE < LOG_MIN_MAXSIZE)
      ERROR1(LOG_ERR,"Minimum RUN_ERL_LOG_MAXSIZE is %d", LOG_MIN_MAXSIZE);
  }
  RUN_DAEMON = daemon;
  /* Remember the log directory and build the status-file path from it. */
  strn_cpy(LOG_DIR, sizeof(LOG_DIR), logdir);
  strn_cpy(STATUSFILE, sizeof(STATUSFILE), LOG_DIR);
  strn_cat(STATUSFILE, sizeof(STATUSFILE), STATUSFILENAME);
  return 0;
}
/*----------------------------------------------------------------------
 * PLA_OMP_SGEMM - Single Precision General Matrix Matrix Multiply
 *
 * OpenMP-parallel wrapper around PLA_sgemm: C is partitioned into a
 * rows x cols grid of blocks (rows*cols must equal the thread count),
 * and each thread computes one block via a serial PLA_sgemm call.
 * Falls back to a single serial call when the problem is too small or
 * only one thread is available.
 *--------------------------------------------------------------------*/
void PLA_OMP_sgemm ( char* transa, char* transb, int m, int n, int k,
		     float* alpha, float* a, int lda, float* b, int ldb,
		     float* beta, float* c, int ldc )
{
  int nt, rows, cols, tid;
  /* Executable Statements */
  /* printf("Entry to PLA_OMP_sgemm\n"); */

  /* Determine how many threads we can get */
#ifdef _OPENMP
  nt = omp_get_max_threads();
  /* printf("omp_get_max_threads returned nt = %d\n", nt ); */
#else
  /* Without OpenMP, honor OMP_NUM_THREADS anyway (0 means unset). */
  if ((nt = getenv_int("OMP_NUM_THREADS")) == 0) nt = 1;
  /* printf("omp disabled. num threads statically set to %d\n", nt ); */
#endif

  /* Make sure there's enough computations to warrant multithreading ... */
  if ( nt <= 1 || n < nt || n*m < 100 ) {
    PLA_sgemm( transa, transb, &m, &n, &k, alpha, a, &lda,
	       b, &ldb, beta, c, &ldc);
  }
  else {
    /* Use OpenMP to parallelize SGEMM computation */
    /* Block-grid shape may be forced via PLA_OMP_ROWS / PLA_OMP_COLS;
       defaults are 1 row of nt columns. */
    if ((rows = getenv_int("PLA_OMP_ROWS")) == 0) rows = 1;
    if ((cols = getenv_int("PLA_OMP_COLS")) == 0) cols = nt;
    if ( rows*cols != nt ) {
      printf("PLA_OMP_gemm: Error: Rows and cols don't match!!\n");
      exit(1);
    }

    /*
     * We decompose C, A, and B into blocks of rows and columns as
     * specified by the rows and cols determined above.
     */
#ifdef _OPENMP
#pragma omp parallel for default(shared) schedule(static,1)
#endif
    for (tid=0; tid < nt; tid++) {
      int zbrow, zbcol;      /* zero based row & col for this thread */
      int mb, nb;            /* number of rows and cols in a typical block
				(not in last row or col) */
      int ib, jb;            /* number of rows and cols in the block for
				this thread */
      float * ablock, * bblock;  /* blocks of A and B to use */

      /* Use the thread id to determine the block of the matrix C that
	 will be computed by this thread.  Threads are assigned blocks
	 of C in column major order.  Thus ... */
      zbrow = tid % rows;
      zbcol = tid / rows;

      /* The upper left corner (of C) of the block for this thread is
	 C( zbrow*mb, zbcol*nb ).  Now we compute the size of the
	 typical block. */
      mb = (m+rows-1) / rows;   /* mb = ceiling( m / rows ) */
      nb = (n+cols-1) / cols;   /* nb = ceiling( n / cols ) */

      /* If the matrix dimensions don't divide evenly into our rows
	 and columns, we make up for it in the last row/col */
      ib = min( mb, m-(zbrow*mb) );
      jb = min( nb, n-(zbcol*nb) );
      /* printf("Tid=%d, ib=%d, jb=%d \n", tid, ib, jb); */

      /* Now get ptrs to blocks of A and B to pass in to the BLAS */
      if ( *transa == 'N' )
	ablock = &a[ (zbrow*mb) ];        /* element A[ zbrow*mb +1 , 1 ] */
      else
	ablock = &a[ (zbrow*mb) * lda ];  /* element A[ 1, zbrow*mb +1 ] */
      if ( *transb == 'N' )
	bblock = &b[ (zbcol*nb) * ldb ];  /* element B[ 1, zbcol*nb + 1 ] */
      else
	bblock = &b[ (zbcol*nb) ];        /* element B[ zbcol*nb + 1, 1 ] */

      PLA_sgemm( transa, transb, &ib, &jb, &k, alpha, ablock, &lda,
		 bblock, &ldb, beta,
		 &c[ (zbcol*nb) * ldc + (zbrow*mb)], &ldc);
      /* element C[ zbrow*mb+1, zbcol*nb+1 ] */
    }
    /* End of parallel section */
  }
  /* printf("Return from PLA_OMP_sgemm\n"); */
  /* End of PLA_OMP_SGEMM */
}
int use_env() { int indent; size_t flags = 0; json_t *json; json_error_t error; #ifdef _WIN32 /* On Windows, set stdout and stderr to binary mode to avoid outputting DOS line terminators */ _setmode(_fileno(stdout), _O_BINARY); _setmode(_fileno(stderr), _O_BINARY); #endif indent = getenv_int("JSON_INDENT"); if(indent < 0 || indent > 255) { fprintf(stderr, "invalid value for JSON_INDENT: %d\n", indent); return 2; } if(indent > 0) flags |= JSON_INDENT(indent); if(getenv_int("JSON_COMPACT") > 0) flags |= JSON_COMPACT; if(getenv_int("JSON_ENSURE_ASCII")) flags |= JSON_ENSURE_ASCII; if(getenv_int("JSON_PRESERVE_ORDER")) flags |= JSON_PRESERVE_ORDER; if(getenv_int("JSON_SORT_KEYS")) flags |= JSON_SORT_KEYS; if(getenv_int("STRIP")) { /* Load to memory, strip leading and trailing whitespace */ size_t size = 0, used = 0; char *buffer = NULL; while(1) { size_t count; size = (size == 0 ? 128 : size * 2); buffer = realloc(buffer, size); if(!buffer) { fprintf(stderr, "Unable to allocate %d bytes\n", (int)size); return 1; } count = fread(buffer + used, 1, size - used, stdin); if(count < size - used) { buffer[used + count] = '\0'; break; } used += count; } json = json_loads(strip(buffer), 0, &error); free(buffer); } else json = json_loadf(stdin, 0, &error); if(!json) { fprintf(stderr, "%d %d %d\n%s\n", error.line, error.column, error.position, error.text); return 1; } json_dumpf(json, stdout, flags); json_decref(json); return 0; }
int main(int argc, char *argv[]) { int indent = 0; size_t flags = 0; json_t *json; json_error_t error; FILE* file = stdin; if(argc > 2) { fprintf(stderr, "usage: %s\n", argv[0]); return 2; } if(argc == 2) { file = fopen(argv[1], "r"); } if( !file ) { perror("invalid file"); return 2; } indent = getenv_int("JSON_INDENT"); if(indent < 0 || indent > 255) { fprintf(stderr, "invalid value for JSON_INDENT: %d\n", indent); return 2; } if(indent > 0) flags |= JSON_INDENT(indent); if(getenv_int("JSON_COMPACT") > 0) flags |= JSON_COMPACT; if(getenv_int("JSON_ENSURE_ASCII")) flags |= JSON_ENSURE_ASCII; if(getenv_int("JSON_PRESERVE_ORDER")) flags |= JSON_PRESERVE_ORDER; if(getenv_int("JSON_SORT_KEYS")) flags |= JSON_SORT_KEYS; if(getenv_int("JSON_ASSUME_OBJECT")) flags |= JSON_ASSUME_OBJECT; if(getenv_int("JSON_ALLOW_EQUAL_SIGN")) flags |= JSON_ALLOW_EQUAL_SIGN; if(getenv_int("JSON_QUOTELESS_KEYS")) flags |= JSON_QUOTELESS_KEYS; if(getenv_int("STRIP")) { /* Load to memory, strip leading and trailing whitespace */ size_t size = 0, used = 0; char *buffer = NULL; while(1) { int count; size = (size == 0 ? 128 : size * 2); buffer = realloc(buffer, size); if(!buffer) { fprintf(stderr, "Unable to allocate %d bytes\n", (int)size); return 1; } count = fread(buffer + used, 1, size - used, stdin); if(count < size - used) { buffer[used + count] = '\0'; break; } used += count; } json = json_loads(strip(buffer), flags, &error); free(buffer); } else json = json_loadf(file, flags, &error); if(!json) { fprintf(stderr, "%d %d %d\n%s\n", error.line, error.column, error.position, error.text); return 1; } json_dumpf(json, stdout, flags); json_decref(json); return 0; }
/*
 * Attach an e1000 PHY instance to the MII bus: register the PHY with
 * the parent mii(4) layer, reset it, and advertise the supported media
 * types (1000/100/10BaseT plus autoselect).  Always returns 0.
 */
static int
e1000phy_attach(device_t dev)
{
	struct mii_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;
	const char *sep = "";

	/* Optional loader tunable enables PHY debug output. */
	getenv_int("e1000phy_debug", &e1000phy_debug);

	sc = device_get_softc(dev);
	ma = device_get_ivars(dev);
	sc->mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);

	/* Wire this PHY into the MII service framework. */
	sc->mii_inst = mii->mii_instance;
	sc->mii_phy = ma->mii_phyno;
	sc->mii_service = e1000phy_service;
	sc->mii_pdata = mii;
	sc->mii_flags |= MIIF_NOISOLATE;
	mii->mii_instance++;

	e1000phy_reset(sc);

#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
	/* PRINT emits a comma separator before every entry but the first. */
#define PRINT(s) printf("%s%s", sep, s); sep = ", "
#if 0
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst),
	    E1000_CR_ISOLATE);
#endif
	device_printf(dev, " ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_TX, IFM_FDX, sc->mii_inst),
	    E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
	PRINT("1000baseTX-FDX");
	/* TODO - apparently 1000BT-simplex not supported?
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_TX, 0, sc->mii_inst),
	    E1000_CR_SPEED_1000);
	PRINT("1000baseTX");
	*/
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
	    E1000_CR_SPEED_100 | E1000_CR_FULL_DUPLEX);
	PRINT("100baseTX-FDX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
	    E1000_CR_SPEED_100);
	PRINT("100baseTX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
	    E1000_CR_SPEED_10 | E1000_CR_FULL_DUPLEX);
	PRINT("10baseTX-FDX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
	    E1000_CR_SPEED_10);
	PRINT("10baseTX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0);
	PRINT("auto");

	printf("\n");
#undef ADD
#undef PRINT

	MIIBUS_MEDIAINIT(sc->mii_dev);
	return(0);
}
/*
 * Pick an IRQ to use for this unrouted link.
 *
 * Preference order: explicit loader tunable override, the IRQ the BIOS
 * routed, the IRQ from _CRS, and finally the lowest-weight IRQ among
 * the link's possible IRQs (with the SCI as an ISA fallback).
 * Returns PCI_INVALID_IRQ if nothing suitable is found.
 */
static uint8_t
acpi_pci_link_choose_irq(device_t dev, struct link *link)
{
	char tunable_buffer[64], link_name[5];
	u_int8_t best_irq, pos_irq;
	int best_weight, pos_weight, i;

	KASSERT(!link->l_routed, ("%s: link already routed", __func__));
	KASSERT(!PCI_INTERRUPT_VALID(link->l_irq),
	    ("%s: link already has an IRQ", __func__));

	/* Check for a tunable override. */
	if (ACPI_SUCCESS(acpi_short_name(acpi_get_handle(dev), link_name,
	    sizeof(link_name)))) {
		/* Per-resource tunable: hw.pci.link.<name>.<index>.irq */
		snprintf(tunable_buffer, sizeof(tunable_buffer),
		    "hw.pci.link.%s.%d.irq", link_name, link->l_res_index);
		if (getenv_int(tunable_buffer, &i) && PCI_INTERRUPT_VALID(i)) {
			if (!link_valid_irq(link, i))
				device_printf(dev,
				    "Warning, IRQ %d is not listed as valid\n",
				    i);
			return (i);
		}
		/* Per-link tunable: hw.pci.link.<name>.irq */
		snprintf(tunable_buffer, sizeof(tunable_buffer),
		    "hw.pci.link.%s.irq", link_name);
		if (getenv_int(tunable_buffer, &i) && PCI_INTERRUPT_VALID(i)) {
			if (!link_valid_irq(link, i))
				device_printf(dev,
				    "Warning, IRQ %d is not listed as valid\n",
				    i);
			return (i);
		}
	}

	/*
	 * If we have a valid BIOS IRQ, use that.  We trust what the BIOS
	 * says it routed over what _CRS says the link thinks is routed.
	 */
	if (PCI_INTERRUPT_VALID(link->l_bios_irq))
		return (link->l_bios_irq);

	/*
	 * If we don't have a BIOS IRQ but do have a valid IRQ from _CRS,
	 * then use that.
	 */
	if (PCI_INTERRUPT_VALID(link->l_initial_irq))
		return (link->l_initial_irq);

	/*
	 * Ok, we have no useful hints, so we have to pick from the
	 * possible IRQs.  For ISA IRQs we only use interrupts that
	 * have already been used by the BIOS.
	 */
	best_irq = PCI_INVALID_IRQ;
	best_weight = INT_MAX;
	for (i = 0; i < link->l_num_irqs; i++) {
		pos_irq = link->l_irqs[i];
		if (pos_irq < NUM_ISA_INTERRUPTS &&
		    (pci_link_bios_isa_irqs & 1 << pos_irq) == 0)
			continue;
		/* Lowest weight (least-shared IRQ) wins. */
		pos_weight = pci_link_interrupt_weights[pos_irq];
		if (pos_weight < best_weight) {
			best_weight = pos_weight;
			best_irq = pos_irq;
		}
	}

	/*
	 * If this is an ISA IRQ, try using the SCI if it is also an ISA
	 * interrupt as a fallback.
	 */
	if (link->l_isa_irq) {
		pos_irq = AcpiGbl_FADT.SciInterrupt;
		pos_weight = pci_link_interrupt_weights[pos_irq];
		if (pos_weight < best_weight) {
			best_weight = pos_weight;
			best_irq = pos_irq;
		}
	}

	if (PCI_INTERRUPT_VALID(best_irq)) {
		if (bootverbose)
			device_printf(dev, "Picked IRQ %u with weight %d\n",
			    best_irq, best_weight);
	} else
		device_printf(dev, "Unable to choose an IRQ\n");
	return (best_irq);
}
/*
 * Attach a MECIA PC-Card controller: create and initialize the pccard
 * bus child, allocate I/O-port and (optionally) IRQ resources, and
 * fall back to polling when no usable interrupt is available.  The
 * slot state is seeded from the card-present bit in MECIA_REG1.
 */
static int
mecia_attach(device_t dev)
{
	int error;
	int irq;
	void *ih;
	device_t kid;
	struct resource *r;
	int rid;
	struct slot *slt;
	struct mecia_slot *sp;

	sp = MECIA_DEVICE2SOFTC(dev);
	sp->unit = validunits++;
	kid = device_add_child(dev, NULL, -1);
	if (kid == NULL) {
		device_printf(dev, "Can't add pccard bus slot 0\n");
		return (ENXIO);
	}
	device_probe_and_attach(kid);
	slt = pccard_init_slot(kid, &mecia_cinfo);
	if (slt == 0) {
		device_printf(dev, "Can't get pccard info slot 0\n");
		return (ENXIO);
	}
	slt->cdata = sp;
	sp->slt = slt;
	/*
	 * NOTE(review): validunits was already incremented when sp->unit
	 * was assigned above; this second increment looks suspicious —
	 * confirm whether it is intentional.
	 */
	validunits++;

	rid = 0;
	r = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE);
	if (!r)
		return (ENXIO);

	irq = bus_get_resource_start(dev, SYS_RES_IRQ, 0);
	if (irq == 0) {
		/* See if the user has requested a specific IRQ */
		if (!getenv_int("machdep.pccard.mecia_irq", &irq))
			irq = 0;
	}
	rid = 0;
	/*
	 * NOTE(review): reusing r here drops the pointer to the I/O-port
	 * resource allocated above without releasing it — verify intent.
	 */
	r = 0;
	if (irq > 0) {
		r = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, irq, irq, 1,
		    RF_ACTIVE);
	}
	/* Reject IRQs the hardware cannot actually deliver. */
	if (r && ((1 << (rman_get_start(r))) & MECIA_INT_MASK_ALLOWED) == 0) {
		device_printf(dev,
		    "Hardware does not support irq %d, trying polling.\n",
		    irq);
		bus_release_resource(dev, SYS_RES_IRQ, rid, r);
		r = 0;
		irq = 0;
	}
	if (r) {
		error = bus_setup_intr(dev, r, INTR_TYPE_MISC, meciaintr,
		    (void *) sp, &ih);
		if (error) {
			bus_release_resource(dev, SYS_RES_IRQ, rid, r);
			return (error);
		}
		irq = rman_get_start(r);
		device_printf(dev, "management irq %d\n", irq);
	} else {
		irq = 0;
	}
	if (irq == 0) {
		/* No usable interrupt: poll the slot twice a second. */
		meciatimeout_ch = timeout(meciatimeout, (void *) sp, hz/2);
		device_printf(dev, "Polling mode\n");
	}

	sp->last_reg1 = inb(MECIA_REG1);
	if (sp->last_reg1 & MECIA_CARDEXIST) {
		/* PCMCIA card exist */
		sp->slt->laststate = sp->slt->state = filled;
		pccard_event(sp->slt, card_inserted);
	} else {
		sp->slt->laststate = sp->slt->state = empty;
	}
	sp->slt->irq = irq;
	return (bus_generic_attach(dev));
}