t_point *rotate_axe(t_point *p, t_rotation *r)
{
    int     x = p->x;
    int     y = p->y;
    int     z = p->z;
    t_point **rot = init_rot(r->theta, r->center);

    /* Apply the full 3x3 matrix-vector product: each output component is the
     * dot product of one matrix row with the original (x, y, z). The previous
     * version multiplied the off-diagonal entries by literal zeros, which
     * scales the point by the diagonal instead of rotating it. */
    p->x = (x * rot[0]->x) + (y * rot[0]->y) + (z * rot[0]->z);
    p->y = (x * rot[1]->x) + (y * rot[1]->y) + (z * rot[1]->z);
    p->z = (x * rot[2]->x) + (y * rot[2]->y) + (z * rot[2]->z);
    free(rot[0]);
    free(rot[1]);
    free(rot[2]);
    free(rot);
    return (p);
}
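Since the fix above restores the full matrix product, here is a minimal standalone sketch of the same row-times-vector logic, using hypothetical vec3/mat_apply names (the project's real t_point and init_rot are not reproduced): a 90-degree rotation about Z must map (1, 0, 0) to (0, 1, 0), which a diagonal-only product cannot do.

/* Build and compile with: cc rotate_demo.c -lm */
#include <math.h>
#include <stdio.h>

typedef struct { double x, y, z; } vec3;

/* Apply a 3x3 matrix, given as three row vectors, to a point. */
static vec3 mat_apply(const vec3 rows[3], vec3 p)
{
    vec3 out;
    out.x = rows[0].x * p.x + rows[0].y * p.y + rows[0].z * p.z;
    out.y = rows[1].x * p.x + rows[1].y * p.y + rows[1].z * p.z;
    out.z = rows[2].x * p.x + rows[2].y * p.y + rows[2].z * p.z;
    return out;
}

int main(void)
{
    double t = M_PI / 2.0;                 /* 90 degrees */
    vec3   rot_z[3] = {                    /* rotation about the Z axis */
        { cos(t), -sin(t), 0.0 },
        { sin(t),  cos(t), 0.0 },
        { 0.0,     0.0,    1.0 }
    };
    vec3   p = mat_apply(rot_z, (vec3){ 1.0, 0.0, 0.0 });

    printf("(%.1f, %.1f, %.1f)\n", p.x, p.y, p.z);   /* prints (0.0, 1.0, 0.0) */
    return 0;
}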
int mdrunner(gmx_hw_opt_t *hw_opt,
             FILE *fplog, t_commrec *cr, int nfile,
             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
             gmx_bool bCompact, int nstglobalcomm,
             ivec ddxyz, int dd_node_order, real rdd, real rconstr,
             const char *dddlb_opt, real dlb_scale,
             const char *ddcsx, const char *ddcsy, const char *ddcsz,
             const char *nbpu_opt, int nstlist_cmdline,
             gmx_int64_t nsteps_cmdline, int nstepout, int resetstep,
             int gmx_unused nmultisim, int repl_ex_nst, int repl_ex_nex,
             int repl_ex_seed, real pforce, real cpt_period, real max_hours,
             int imdport, unsigned long Flags)
{
    gmx_bool                  bForceUseGPU, bTryUseGPU, bRerunMD;
    t_inputrec               *inputrec;
    t_state                  *state = NULL;
    matrix                    box;
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    t_nrnb                   *nrnb;
    gmx_mtop_t               *mtop          = NULL;
    t_mdatoms                *mdatoms       = NULL;
    t_forcerec               *fr            = NULL;
    t_fcdata                 *fcd           = NULL;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    struct gmx_pme_t        **pmedata       = NULL;
    gmx_vsite_t              *vsite         = NULL;
    gmx_constr_t              constr;
    int                       nChargePerturbed = -1, nTypePerturbed = 0, status;
    gmx_wallcycle_t           wcycle;
    gmx_bool                  bReadEkin;
    gmx_walltime_accounting_t walltime_accounting = NULL;
    int                       rc;
    gmx_int64_t               reset_counters;
    gmx_edsam_t               ed           = NULL;
    int                       nthreads_pme = 1;
    int                       nthreads_pp  = 1;
    gmx_membed_t              membed       = NULL;
    gmx_hw_info_t            *hwinfo       = NULL;
    /* The master rank decides early on bUseGPU and broadcasts this later */
    gmx_bool                  bUseGPU      = FALSE;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    snew(inputrec, 1);
    snew(mtop, 1);

    if (Flags & MD_APPENDFILES)
    {
        fplog = NULL;
    }

    bRerunMD     = (Flags & MD_RERUN);
    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;

    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    hwinfo = gmx_detect_hardware(fplog, cr, bTryUseGPU);

    gmx_print_detected_hardware(fplog, cr, hwinfo);

    if (fplog != NULL)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }

    snew(state, 1);
    if (SIMMASTER(cr))
    {
        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, state, NULL, mtop);

        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            /* Here the master rank decides if all ranks will use GPUs */
            bUseGPU = (hwinfo->gpu_info.n_dev_compatible > 0 ||
                       getenv("GMX_EMULATE_GPU") != NULL);

            /* TODO add GPU kernels for this and replace this check by:
             * (bUseGPU && (ir->vdwtype == evdwPME &&
             *              ir->ljpme_combination_rule == eljpmeLB))
             * update the message text and the content of nbnxn_acceleration_supported.
             */
            if (bUseGPU &&
                !nbnxn_gpu_acceleration_supported(fplog, cr, inputrec, bRerunMD))
            {
                /* Fallback message printed by nbnxn_acceleration_supported */
                if (bForceUseGPU)
                {
                    gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
                }
                bUseGPU = FALSE;
            }

            prepare_verlet_scheme(fplog, cr,
                                  inputrec, nstlist_cmdline, mtop, state->box,
                                  bUseGPU);
        }
        else
        {
            if (nstlist_cmdline > 0)
            {
                gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
            }

            if (hwinfo->gpu_info.n_dev_compatible > 0)
            {
                md_print_warn(cr, fplog,
                              "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                              "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n");
            }

            if (bForceUseGPU)
            {
                gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");
            }

#ifdef GMX_TARGET_BGQ
            md_print_warn(cr, fplog,
                          "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                          "      BlueGene/Q. You will observe better performance from using the\n"
                          "      Verlet cut-off scheme.\n");
#endif
        }

        if (inputrec->eI == eiSD2)
        {
            md_print_warn(cr, fplog, "The stochastic dynamics integrator %s is deprecated, since\n"
                          "it is slower than integrator %s and is slightly less accurate\n"
                          "with constraints. Use the %s integrator.",
                          ei_names[inputrec->eI], ei_names[eiSD1], ei_names[eiSD1]);
        }
    }

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(hw_opt, cr);

    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(fplog, cr,
                                  hw_opt, hwinfo->nthreads_hw_avail, FALSE);

#ifdef GMX_THREAD_MPI
    if (SIMMASTER(cr))
    {
        if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
        }

        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks. */
        check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

        /* NOW the threads will be started: */
        hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                 hw_opt,
                                                 inputrec, mtop,
                                                 cr, fplog, bUseGPU);

        if (hw_opt->nthreads_tmpi > 1)
        {
            t_commrec *cr_old = cr;
            /* now start the threads. */
            cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
                                        oenv, bVerbose, bCompact, nstglobalcomm,
                                        ddxyz, dd_node_order, rdd, rconstr,
                                        dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
                                        nbpu_opt, nstlist_cmdline,
                                        nsteps_cmdline, nstepout, resetstep, nmultisim,
                                        repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
                                        cpt_period, max_hours,
                                        Flags);
            /* the main thread continues here with a new cr. We don't deallocate
               the old cr because other threads may still be reading it. */
            if (cr == NULL)
            {
                gmx_comm("Failed to spawn threads");
            }
        }
    }
#endif
    /* END OF CAUTION: cr is now reliable */

    /* g_membed initialisation
     *
     * Because we change the mtop, init_membed is called before the init_parallel
     * (in case we ever want to make it run in parallel)
     */
    if (opt2bSet("-membed", nfile, fnm))
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
    }

    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(cr, inputrec, mtop);

        /* The master rank decided on the use of GPUs,
         * broadcast this information to all ranks.
         */
        gmx_bcast_sim(sizeof(bUseGPU), &bUseGPU, cr);
    }

    if (fplog != NULL)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) &&
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option requests a parallel simulation, "
#ifndef GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#ifdef GMX_THREAD_MPI
                  "but the number of threads (option -nt) is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
                  , output_env_get_program_display_name(oenv)
                  );
    }

    if (bRerunMD &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (cr->npmenodes > 0)
        {
            gmx_fatal_collective(FARGS, cr, NULL,
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        cr->npmenodes = 0;
    }

    if (bUseGPU && cr->npmenodes < 0)
    {
        /* With GPUs we don't automatically use PME-only ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        cr->npmenodes = 0;
    }

#ifdef GMX_FAHCORE
    if (MASTER(cr))
    {
        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    }
#endif

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, fcd, state, repl_ex_nst > 0);

    init_orires(fplog, mtop, state->x, inputrec, cr, &(fcd->orires), state);

    if (DEFORM(*inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(state->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }

    if (opt2bSet("-cpi", nfile, fnm))
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr) )
        {
            load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                            cr, ddxyz,
                            inputrec, state, &bReadEkin,
                            (Flags & MD_APPENDFILES),
                            (Flags & MD_APPENDFILESSET));

            if (bReadEkin)
            {
                Flags |= MD_READ_EKIN;
            }
        }
    }

    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     Flags, &fplog);
    }

    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);

    if (SIMMASTER(cr))
    {
        copy_mat(state->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }

    /* Essential dynamics */
    if (opt2bSet("-ei", nfile, fnm))
    {
        /* Open input and output files, allocate space for ED data structure */
        ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);
    }

    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           mtop, inputrec,
                                           box, state->x,
                                           &ddbox, &npme_major, &npme_minor);

        make_dd_communicators(fplog, cr, dd_node_order);

        /* Set overallocation to avoid frequent reallocation of arrays */
        set_over_alloc_dd(TRUE);
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);
        npme_major    = 1;
        npme_minor    = 1;

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }

    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators.
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);
#ifdef GMX_MPI
    if (MULTISIM(cr))
    {
        md_print_info(cr, fplog,
                      "This is simulation %d out of %d running as a composite GROMACS\n"
                      "multi-simulation job. Setup for this simulation:\n\n",
                      cr->ms->sim, cr->ms->nsim);
    }
    md_print_info(cr, fplog, "Using %d MPI %s\n",
                  cr->nnodes,
#ifdef GMX_THREAD_MPI
                  cr->nnodes == 1 ? "thread" : "threads"
#else
                  cr->nnodes == 1 ?
"process" : "processes" #endif ); fflush(stderr); #endif /* Check and update hw_opt for the cut-off scheme */ check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme); /* Check and update hw_opt for the number of MPI ranks */ check_and_update_hw_opt_3(hw_opt); gmx_omp_nthreads_init(fplog, cr, hwinfo->nthreads_hw_avail, hw_opt->nthreads_omp, hw_opt->nthreads_omp_pme, (cr->duty & DUTY_PP) == 0, inputrec->cutoff_scheme == ecutsVERLET); #ifndef NDEBUG if (integrator[inputrec->eI].func != do_tpi && inputrec->cutoff_scheme == ecutsVERLET) { gmx_feenableexcept(); } #endif if (bUseGPU) { /* Select GPU id's to use */ gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU, &hw_opt->gpu_opt); } else { /* Ignore (potentially) manually selected GPUs */ hw_opt->gpu_opt.n_dev_use = 0; } /* check consistency across ranks of things like SIMD * support and number of GPUs selected */ gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU); /* Now that we know the setup is consistent, check for efficiency */ check_resource_division_efficiency(hwinfo, hw_opt, Flags & MD_NTOMPSET, cr, fplog); if (DOMAINDECOMP(cr)) { /* When we share GPUs over ranks, we need to know this for the DLB */ dd_setup_dlb_resource_sharing(cr, hwinfo, hw_opt); } /* getting number of PP/PME threads PME: env variable should be read only on one node to make sure it is identical everywhere; */ /* TODO nthreads_pp is only used for pinning threads. * This is a temporary solution until we have a hw topology library. */ nthreads_pp = gmx_omp_nthreads_get(emntNonbonded); nthreads_pme = gmx_omp_nthreads_get(emntPME); wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme); if (PAR(cr)) { /* Master synchronizes its value of reset_counters with all nodes * including PME only nodes */ reset_counters = wcycle_get_reset_counters(wcycle); gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr); wcycle_set_reset_counters(wcycle, reset_counters); } snew(nrnb, 1); if (cr->duty & DUTY_PP) { bcast_state(cr, state); /* Initiate forcerecord */ fr = mk_forcerec(); fr->hwinfo = hwinfo; fr->gpu_opt = &hw_opt->gpu_opt; init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box, opt2fn("-table", nfile, fnm), opt2fn("-tabletf", nfile, fnm), opt2fn("-tablep", nfile, fnm), opt2fn("-tableb", nfile, fnm), nbpu_opt, FALSE, pforce); /* version for PCA_NOT_READ_NODE (see md.c) */ /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE, "nofile","nofile","nofile","nofile",FALSE,pforce); */ /* Initialize QM-MM */ if (fr->bQMMM) { init_QMMMrec(cr, mtop, inputrec, fr); } /* Initialize the mdatoms structure. * mdatoms is not filled with atom data, * as this can not be done now with domain decomposition. */ mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO); /* Initialize the virtual site communication */ vsite = init_vsite(mtop, cr, FALSE); calc_shifts(box, fr->shift_vec); /* With periodic molecules the charge groups should be whole at start up * and the virtual sites should not be far from their proper positions. */ if (!inputrec->bContinuation && MASTER(cr) && !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols)) { /* Make molecules whole at start of run */ if (fr->ePBC != epbcNONE) { do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x); } if (vsite) { /* Correct initial vsite positions are required * for the initial distribution in the domain decomposition * and for the initial shell prediction. 
                 */
                construct_vsites_mtop(vsite, mtop, state->x);
            }
        }

        if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            ewaldcoeff_q  = fr->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ewaldcoeff_lj;
            pmedata       = &fr->pmedata;
        }
        else
        {
            pmedata = NULL;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */
        done_state(state);

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
        snew(pmedata, 1);
    }

    if (hw_opt->thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * - which indicates that probably the OpenMP library has changed it
         * since we first checked).
         */
        gmx_check_thread_affinity_set(fplog, cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(fplog, cr, hw_opt, hwinfo);
    }

    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdatoms)
        {
            nChargePerturbed = mdatoms->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed   = mdatoms->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                  mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                  (Flags & MD_REPRODUCIBLE), nthreads_pme);
            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }

    if (integrator[inputrec->eI].func == do_md)
    {
        /* Turn on signal handling on all nodes */
        /*
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.
         */
        signal_handler_install();
    }

    if (cr->duty & DUTY_PP)
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
                          mtop, cr, oenv, inputrec->fepvals->init_lambda,
                          EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,
                     bVerbose, Flags);
        }

        if (inputrec->eSwapCoords != eswapNO)
        {
            /* Initialize ion swapping code */
            init_swapcoords(fplog, bVerbose, inputrec, opt2fn_master("-swap", nfile, fnm, cr),
                            mtop, state->x, state->box, &state->swapstate, cr, oenv, Flags);
        }

        constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);

        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);

            set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, &ddbox);

            setup_dd_grid(fplog, cr->dd);
        }

        /* Now do whatever the user wants us to do (how flexible...)
         */
        integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
                                      oenv, bVerbose, bCompact,
                                      nstglobalcomm,
                                      vsite, constr,
                                      nstepout, inputrec, mtop,
                                      fcd, state,
                                      mdatoms, nrnb, wcycle, ed, fr,
                                      repl_ex_nst, repl_ex_nex, repl_ex_seed,
                                      membed,
                                      cpt_period, max_hours,
                                      imdport,
                                      Flags,
                                      walltime_accounting);

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }

        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
        /* do PME only */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting,
                    ewaldcoeff_q, ewaldcoeff_lj, inputrec);
    }

    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : NULL,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));

    /* Free GPU memory and context */
    free_gpu_resources(fr, cr, &hwinfo->gpu_info, fr ? fr->gpu_opt : NULL);

    if (opt2bSet("-membed", nfile, fnm))
    {
        sfree(membed);
    }

    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    /* PLUMED */
    if (plumedswitch)
    {
        plumed_finalize(plumedmain);
    }
    /* END PLUMED */

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

    done_ed(&ed);

#ifdef GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif

    return rc;
}
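mdrunner() above repeatedly uses a master-decides-then-broadcasts pattern: the master rank alone reads the tpr file and decides bUseGPU, then gmx_bcast_sim() distributes the decision to every rank. A minimal plain-MPI sketch of the same pattern follows; the GROMACS wrappers and the real detection logic are not reproduced, and use_gpu is a stand-in for the master's decision.

/* Build with: mpicc bcast_demo.c && mpirun -np 4 ./a.out */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, use_gpu = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
    {
        /* Only the master inspects the input/hardware and decides. */
        use_gpu = 1;   /* stand-in for the real detection logic */
    }

    /* Every rank receives the master's decision, as gmx_bcast_sim() does. */
    MPI_Bcast(&use_gpu, 1, MPI_INT, 0, MPI_COMM_WORLD);

    printf("rank %d: use_gpu = %d\n", rank, use_gpu);
    MPI_Finalize();
    return 0;
}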
/*****************************************************************************
 * MODULE   : sizrot2_init_module
 * FUNCTION : initialize module
 * RETURN   : 0        : success
 *          : negative : fail
 * NOTE     : none
 ******************************************************************************/
static int sizrot2_init_module(void)
{
	int ret = 0;

	dbg_printk((_DEBUG_SIZROT2 & 0x01), "sizrot2_init_module() <start>\n");

	/* register_chrdev_region */
	dev_t_siz = MKDEV(DEV_MAJOR, DEV_MINOR_SIZ);
	ret = register_chrdev_region(dev_t_siz, 1, "siz");
	if (ret < 0) {
		printk(KERN_ERR " @sizrot2: Failed to register device (SIZ).\n");
		goto fail_register_chrdev_siz;
	}
	dev_t_rot0 = MKDEV(DEV_MAJOR, DEV_MINOR_ROT0);
	ret = register_chrdev_region(dev_t_rot0, 1, "rot0");
	if (ret < 0) {
		printk(KERN_ERR " @sizrot2: Failed to register device (ROT0).\n");
		goto fail_register_chrdev_rot0;
	}

	/* cdev_init */
	cdev_init(&siz_cdev, &sizrot2_fops);
	siz_cdev.owner = THIS_MODULE;
	cdev_init(&rot0_cdev, &sizrot2_fops);
	rot0_cdev.owner = THIS_MODULE;

	/* cdev_add */
	ret = cdev_add(&siz_cdev, dev_t_siz, 1);
	if (ret) {
		printk(KERN_ERR " @sizrot2: Failed to add cdev (SIZ).\n");
		goto fail_cdev_add_siz;
	}
	ret = cdev_add(&rot0_cdev, dev_t_rot0, 1);
	if (ret) {
		printk(KERN_ERR " @sizrot2: Failed to add cdev (ROT0).\n");
		goto fail_cdev_add_rot0;
	}

	/* class_create */
	sizrot2_class = class_create(THIS_MODULE, sizrot2_dev_name);
	if (IS_ERR(sizrot2_class)) {
		printk(KERN_ERR " @sizrot2: Failed to create class.\n");
		ret = PTR_ERR(sizrot2_class);
		goto fail_class_create;
	}

	/* device_create */
	class_dev_siz = device_create(sizrot2_class, NULL, dev_t_siz, NULL, "siz");
	if (IS_ERR(class_dev_siz)) {
		printk(KERN_ERR " @sizrot2: Failed to create class device (SIZ).\n");
		ret = PTR_ERR(class_dev_siz);
		goto fail_class_device_create_siz;
	}
	class_dev_rot0 = device_create(sizrot2_class, NULL, dev_t_rot0, NULL, "rot0");
	if (IS_ERR(class_dev_rot0)) {
		printk(KERN_ERR " @sizrot2: Failed to create class device (ROT0).\n");
		ret = PTR_ERR(class_dev_rot0);
		goto fail_class_device_create_rot0;
	}

	/* platform_device_register; keep the error code so the function does
	 * not report success when registration fails */
	dev_set_drvdata(&sizrot2_device.dev, NULL);
	ret = platform_device_register(&sizrot2_device);
	if (ret < 0) {
		printk(KERN_ERR " @sizrot2: Failed to register platform_device\n");
		goto fail_platform_device;
	}

	/* platform_driver_register */
	ret = platform_driver_register(&sizrot2_driver);
	if (ret < 0) {
		printk(KERN_ERR " @sizrot2: Failed to register platform_driver\n");
		goto fail_platform_driver;
	}

	spin_lock_init(&sizrot2_lock);

	/* init SIZ */
	ret = init_siz();
	if (ret < 0) {
		printk(KERN_INFO " @sizrot2: SIZ driver initialize <failed>\n");
		goto fail_init_hw;
	}

	/* init ROT */
	ret = init_rot();
	if (ret < 0) {
		printk(KERN_INFO " @sizrot2: ROT driver initialize <failed>\n");
		goto fail_init_hw;
	}

	dbg_printk(_DEBUG_SIZROT2, "register_chrdev %d\n", DEV_MAJOR);
	dbg_printk(_DEBUG_SIZROT2, "sizrot2 driver initialize <success>\n");
	goto success;

fail_init_hw:
	platform_driver_unregister(&sizrot2_driver);
fail_platform_driver:
	platform_device_unregister(&sizrot2_device);
fail_platform_device:
	device_destroy(sizrot2_class, MKDEV(DEV_MAJOR, DEV_MINOR_ROT0));
fail_class_device_create_rot0:
	device_destroy(sizrot2_class, MKDEV(DEV_MAJOR, DEV_MINOR_SIZ));
fail_class_device_create_siz:
	class_destroy(sizrot2_class);
fail_class_create:
	cdev_del(&rot0_cdev);
fail_cdev_add_rot0:
	cdev_del(&siz_cdev);
fail_cdev_add_siz:
	unregister_chrdev_region(dev_t_rot0, 1);
fail_register_chrdev_rot0:
	unregister_chrdev_region(dev_t_siz, 1);
fail_register_chrdev_siz:
success:
	dbg_printk((_DEBUG_SIZROT2 & 0x02), "sizrot2_init_module() <end> return (%d)\n", ret);
	return ret;
}
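sizrot2_init_module() is a textbook instance of the kernel's goto-unwind error handling: each successfully acquired resource adds one label to the ladder, and a failure jumps to the label that releases everything acquired so far, in reverse order. A compact userspace sketch of the same idiom, with hypothetical resources:

#include <stdlib.h>

/* Acquire two resources; on failure, release whatever was acquired so far. */
static int acquire_all(void)
{
	int   ret = 0;
	char *a, *b;

	a = malloc(16);                 /* resource 1 */
	if (a == NULL) {
		ret = -1;
		goto fail_a;
	}
	b = malloc(16);                 /* resource 2 */
	if (b == NULL) {
		ret = -1;
		goto fail_b;
	}

	/* ... use a and b, then release them in reverse order ... */
	free(b);
	free(a);
	return 0;

fail_b:
	free(a);                        /* undo resource 1 */
fail_a:
	return ret;                     /* nothing acquired before resource 1 */
}

int main(void)
{
	return acquire_all() == 0 ? 0 : 1;
}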
void main()
{
    // TLC5940 dependent
    // init_5940();
    init_7_seg_4_digit();
    init_rot();
    init_timer();
    init();

    TRISA.B0 = bit_out;

    // testing light matrix
    /* TLC5940 dependent
    for(i = 0; i < STEPS; i++){
        change(sData, i, pattern[channel][i]);
    }
    sendData(sData, bitcount);
    */

    INTCON2 = 0x04;
    GIEH_bit = 1;
    GIEL_bit = 1;
    T0IE_bit = 1;     // RE-ENABLE AFTER TESTING MIDI!
    TMR0ON_bit = 1;   // clocks every 10us
    RC2IE_bit = 0;    // MIDI receive interrupt

    drum_0 = 0;
    drum_1 = 0;
    drum_2 = 0;
    drum_3 = 0;

    delay_ms(100);

    // init display
    push_light_matrix(pattern, channel);
    show_light_matrix();

    state = FORWARD;

    // main loop
    while(1){
        if(switch_up == 0)
            state = FORWARD;
        else if(switch_down == 0)
            state = BACKWARD;
        else {
            state = STOPPED;
            current_step = 0;
        }

        if(MIDI_state == MIDI_RECEIVED){
            pattern[0][this_MIDI_note - 0x24] = (~pattern[0][this_MIDI_note - 0x24] & 1);
            MIDI_state = IDLE;
            update_light_matrix(channel, selected_step, BRIGHTNESS, 0, 0);
        }

        // toggle the pattern bit at selected_step with switch_0
        if ((switch_0 == _set) && (switch_0_flag != _set)) {
            pattern[channel][selected_step] = (~pattern[channel][selected_step] & _set);
            update_light_matrix(channel, selected_step, BRIGHTNESS, 0, 0);
            switch_0_flag = _set;
        }
        else if (switch_0 == _clr) {
            switch_0_flag = _clr;
        }

        // advance or retreat one step if running
        if (sixteenthcount >= (MS_BPM / tempo) && state != STOPPED) {
            sixteenthcount = 0;
            drum_0 = pattern[0][current_step];
            drum_1 = pattern[1][current_step];
            drum_2 = pattern[2][current_step];
            drum_3 = pattern[3][current_step];

            current_step += state;   // forward or backwards
            if(current_step <= -1){current_step = 15;}
            else if(current_step >= 16){current_step = 0;}
        }

        if (state != STOPPED)
            send_7seg(to_bcd(current_step * 100 + selected_step));
        else
            send_7seg(to_bcd(tempo));

        // gate time: turn the drum outputs off a quarter of a step after they fire
        if (sixteenthcount >= ((MS_BPM / tempo) / 4)) {
            drum_0 = off;
            drum_1 = off;
            drum_2 = off;
            drum_3 = off;
        }

        // show blinking cursor
        current_status = pattern[channel][selected_step];
        if (blinkcount >= (BLINK_TIME /*>> (current_status * 2)*/)){
            blinkcount = 0;
            blink = ~blink;
            push_light_matrix(pattern, channel);
            if(blink == 0)
                update_light_matrix(channel, selected_step, current_status * BRIGHTNESS, 0, 0);
            else
                update_light_matrix(channel, selected_step, 0, 0, BRIGHTNESS);
            show_light_matrix();
        }

        // left rotary encoder moves the step cursor (see the check_rotary sketch below)
        last_rot_0 = this_rot_0;
        this_rot_0 = (rot_0b << 1) | rot_0a;
        selected_step += check_rotary(this_rot_0, last_rot_0);
        if(selected_step <= -1){selected_step = 15;}
        else if(selected_step >= 16){selected_step = 0;}

        // right rotary encoder adjusts channel if running, tempo if stopped
        if(state != STOPPED){
            last_rot_1 = this_rot_1;
            this_rot_1 = (rot_1a << 1) | rot_1b;
            channel += check_rotary(this_rot_1, last_rot_1);
            if(channel <= -1){channel = 0;}
            else if(channel >= 5){channel = 4;}
        }else{
            last_rot_1 = this_rot_1;
            this_rot_1 = (rot_1a << 1) | rot_1b;
            tempo += check_rotary(this_rot_1, last_rot_1);
            if(tempo <= 9){tempo = 10;}
            else if(tempo >= 251){tempo = 250;}
        }

        // TLC 5940 dep
        //sendData(sData, bitcount);
        // TLC 5940 dep
        //gsClock();
    }
}
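The main loop above calls check_rotary(), whose body is not part of this section. A plausible sketch follows, assuming the 2-bit quadrature state ((B << 1) | A) built in the loop and decoding it with a Gray-code transition table; the sign convention (which physical direction counts up) is an assumption.

/* Hypothetical sketch of the check_rotary() helper: index the table with
 * (previous state << 2) | current state. Valid transitions in one direction
 * return +1, the other direction -1; bounces and skips return 0. */
signed char check_rotary(unsigned char this_rot, unsigned char last_rot)
{
    static const signed char transition[16] = {
         0, -1, +1,  0,
        +1,  0,  0, -1,
        -1,  0,  0, +1,
         0, +1, -1,  0
    };
    return transition[((last_rot & 3) << 2) | (this_rot & 3)];
}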