static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
{
        u8 byte;

        IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __FUNCTION__,
                   speed, dev->speed);

        /* dongle already reset, dongle and port at default speed (9600) */

        /* Set RTS low for 1 ms */
        sirdev_set_dtr_rts(dev, TRUE, FALSE);
        mdelay(1);

        /* Write control byte */
        byte = get_control_byte(speed);
        sirdev_raw_write(dev, &byte, sizeof(byte));

        /* Wait at least 10ms: fake wait_until_sent - 10 bits at 9600 baud */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(15));         /* old ma600 uses 15ms */

#if 1
        /* read-back of the control byte. ma600 is the first dongle driver
         * which uses this so there might be some unidentified issues.
         * Disable this in case of problems with readback. */
        sirdev_raw_read(dev, &byte, sizeof(byte));
        if (byte != get_control_byte(speed)) {
                WARNING("%s(): bad control byte read-back %02x != %02x\n",
                        __FUNCTION__, (unsigned) byte,
                        (unsigned) get_control_byte(speed));
                return -1;
        }
        IRDA_DEBUG(2, "%s() control byte write/read OK\n", __FUNCTION__);
#endif

        /* Set DTR, set RTS */
        sirdev_set_dtr_rts(dev, TRUE, TRUE);

        /* Wait at least 10ms */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(10));

        /* dongle is now switched to the new speed */
        dev->speed = speed;

        return 0;
}
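The read-back check above compares against the same mapping used for the write, so the whole speed-change protocol hinges on get_control_byte(). That helper is not part of this excerpt; the following is a minimal sketch of such a speed-to-control-byte lookup, with the MA600_* constants as hypothetical placeholders rather than values from the dongle's data sheet:

/* Hypothetical sketch of the speed-to-control-byte mapping assumed by
 * ma600_change_speed() above.  The MA600_* constants are placeholders;
 * the real encoding comes from the MA600 data sheet. */
static u8 get_control_byte(unsigned speed)
{
        switch (speed) {
        case 115200:
                return MA600_115200;    /* placeholder constant */
        case 57600:
                return MA600_57600;     /* placeholder constant */
        case 38400:
                return MA600_38400;     /* placeholder constant */
        case 19200:
                return MA600_19200;     /* placeholder constant */
        case 9600:
        default:
                return MA600_9600;      /* fall back to the default speed */
        }
}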
/**
 * For each token of the text, we compute its associated control byte.
 * We use the unknown word file 'err' in order to determine if a token
 * must be matched by <!DIC>.
 */
void compute_token_controls(Alphabet* alph,const char* err,struct locate_parameters* p) {
struct string_hash* ERR=load_key_list(err,p->mask_encoding_compatibility_input);
int n=p->tokens->size;
for (int i=0; i<n; i++) {
   p->token_control[i]=get_control_byte(p->tokens->value[i],alph,ERR,p->tokenization_policy);
}
free_string_hash(ERR);
}
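The control bytes filled in here are consumed later as bit masks during the Locate operation. A minimal sketch of such a test, reusing the DIC_TOKEN_BIT_MASK flag that appears in the functions below (the helper name itself is illustrative, not part of the codebase):

/* Hypothetical sketch: testing a token's control byte during Locate.
 * Only DIC_TOKEN_BIT_MASK is taken from the surrounding code. */
static inline int token_is_in_dictionary(struct locate_parameters* p,int token_number) {
return (p->token_control[token_number] & DIC_TOKEN_BIT_MASK)!=0;
}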
void
enable_video(int on)
{
        int s, ena;

        s = splhigh();
        ena = get_control_byte(SYSTEM_ENAB);
        if (on)
                ena |= ENA_VIDEO;
        else
                ena &= ~ENA_VIDEO;
        set_control_byte(SYSTEM_ENAB, ena);
        splx(s);
}
/**
 * This function checks, for each tag token like "{extended,extend.V:K}",
 * whether it matches some patterns. Its behaviour is very similar to that
 * of the load_dic_for_locate function. However, as a side effect, this
 * function fills 'tag_token_list' with the list of tag token numbers.
 * This list is later used during Locate preprocessings.
 */
void check_patterns_for_tag_tokens(Alphabet* alphabet,int number_of_patterns,
                                   struct lemma_node* root,struct locate_parameters* parameters,
                                   Abstract_allocator prv_alloc) {
struct string_hash* tokens=parameters->tokens;
for (int i=0; i<tokens->size; i++) {
   if (tokens->value[i][0]=='{' && u_strcmp(tokens->value[i],"{S}")
       && u_strcmp(tokens->value[i],"{STOP}")) {
      /* If the token is a tag like "{today,.ADV}", we add its number to the tag token list */
      parameters->tag_token_list=head_insert(i,parameters->tag_token_list,prv_alloc);
      /* And we look for the patterns that can match it */
      struct dela_entry* entry=tokenize_tag_token(tokens->value[i]);
      if (entry==NULL) {
         /* This should never happen */
         fatal_error("Invalid tag token in function check_patterns_for_tag_tokens\n");
      }
      /* We add the inflected form to the list of forms associated to the lemma.
       * This will be used to replace patterns like "<be>" by the actual list of
       * forms that can be matched by it, for optimization reasons */
      add_inflected_form_for_lemma(tokens->value[i],entry->lemma,root);
      parameters->token_control[i]=(unsigned char)(get_control_byte(tokens->value[i],alphabet,NULL,parameters->tokenization_policy)|DIC_TOKEN_BIT_MASK);
      if (number_of_patterns) {
         /* We look for matching patterns only if there are some */
         struct list_pointer* list=get_matching_patterns(entry,parameters->pattern_tree_root);
         if (list!=NULL) {
            if (parameters->matching_patterns[i]==NULL) {
               /* We allocate the bit array if needed */
               parameters->matching_patterns[i]=new_bit_array(number_of_patterns,ONE_BIT);
            }
            struct list_pointer* tmp=list;
            while (tmp!=NULL) {
               set_value(parameters->matching_patterns[i],((struct constraint_list*)(tmp->pointer))->pattern_number,1);
               tmp=tmp->next;
            }
            free_list_pointer(list);
         }
      }
      /* Unlike DLC lines, a compound word tag like "{all around,.ADV}"
       * does not need to be put in the compound word tree, since the tag is already
       * characterized by its token number. */
      free_dela_entry(entry);
   }
}
}
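Once the matching_patterns bit arrays have been filled as above, testing whether a given pattern number can match a given token is a single bit lookup. A hedged sketch, assuming a get_value() counterpart to the set_value() call used above (the wrapper name is illustrative):

/* Hypothetical sketch: querying the per-token pattern bit array filled
 * by the functions in this file.  The structures are the ones used above;
 * the wrapper itself is illustrative. */
static inline int token_matches_pattern(struct locate_parameters* p,int token_number,int pattern_number) {
if (p->matching_patterns[token_number]==NULL) {
   /* No pattern at all can match this token */
   return 0;
}
return get_value(p->matching_patterns[token_number],pattern_number);
}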
/**
 * This function loads a DLF or a DLC. It computes information about tokens
 * that will be used during the Locate operation. For instance, if we have the
 * following line:
 *
 * extended,.A
 *
 * and if the .fst2 to be applied to the text contains the pattern <A> with
 * number 456, then the function will mark the "extended" token to be matched
 * by the pattern 456. Moreover, all case variations will be taken into account,
 * so that the "Extended" and "EXTENDED" tokens will also be updated.
 *
 * The two parameters 'is_DIC_pattern' and 'is_CDIC_pattern'
 * indicate if the .fst2 contains the corresponding patterns. For instance, if
 * the pattern "<CDIC>" is used in the grammar, it means that any token sequence that is a
 * compound word must be marked as matched by this pattern.
 */
void load_dic_for_locate(const char* dic_name,int mask_encoding_compatibility_input,Alphabet* alphabet,
                         int number_of_patterns,int is_DIC_pattern,int is_CDIC_pattern,
                         struct lemma_node* root,struct locate_parameters* parameters) {
struct string_hash* tokens=parameters->tokens;
U_FILE* f;
unichar line[DIC_LINE_SIZE];
f=u_fopen_existing_versatile_encoding(mask_encoding_compatibility_input,dic_name,U_READ);
if (f==NULL) {
   error("Cannot open dictionary %s\n",dic_name);
   return;
}
/* We parse all the lines */
int lines=0;
char name[FILENAME_MAX];
remove_path(dic_name,name);
while (EOF!=u_fgets(line,f)) {
   lines++;
   if (lines%10000==0) {
      u_printf("%s: %d lines loaded... \r",name,lines);
   }
   if (line[0]=='/') {
      /* NOTE: DLF and DLC files are not supposed to contain comment
       * lines, but we test for them, just in case */
      continue;
   }
   struct dela_entry* entry=tokenize_DELAF_line(line,1);
   if (entry==NULL) {
      /* This case should never happen */
      error("Invalid dictionary line in load_dic_for_locate\n");
      continue;
   }
   /* We add the inflected form to the list of forms associated to the lemma.
    * This will be used to replace patterns like "<be>" by the actual list of
    * forms that can be matched by it, for optimization reasons */
   add_inflected_form_for_lemma(entry->inflected,entry->lemma,root);
   /* We get the list of all tokens that can be matched by the inflected form of
    * this entry, with regard to case variations (see the "extended" example above). */
   struct list_int* ptr=get_token_list_for_sequence(entry->inflected,alphabet,tokens);
   /* We save the list pointer to free it later */
   struct list_int* ptr_copy=ptr;
   /* Here, we will deal with all simple words */
   while (ptr!=NULL) {
      int i=ptr->n;
      /* If the current token can be matched, then it can be recognized by the "<DIC>" pattern */
      parameters->token_control[i]=(unsigned char)(get_control_byte(tokens->value[i],alphabet,NULL,parameters->tokenization_policy)|DIC_TOKEN_BIT_MASK);
      if (number_of_patterns) {
         /* We look for matching patterns only if there are some */
         struct list_pointer* list=get_matching_patterns(entry,parameters->pattern_tree_root);
         if (list!=NULL) {
            /* If we have some patterns to add */
            if (parameters->matching_patterns[i]==NULL) {
               /* We allocate the pattern bit array, if needed */
               parameters->matching_patterns[i]=new_bit_array(number_of_patterns,ONE_BIT);
            }
            struct list_pointer* tmp=list;
            while (tmp!=NULL) {
               /* Then we add all the pattern numbers to the bit array */
               set_value(parameters->matching_patterns[i],((struct constraint_list*)(tmp->pointer))->pattern_number,1);
               tmp=tmp->next;
            }
            /* Finally, we free the constraint list */
            free_list_pointer(list);
         }
      }
      ptr=ptr->next;
   }
   /* Finally, we free the token list */
   free_list_int(ptr_copy);
   if (!is_a_simple_word(entry->inflected,parameters->tokenization_policy,alphabet)) {
      /* If the inflected form is a compound word */
      if (is_DIC_pattern || is_CDIC_pattern) {
         /* If the .fst2 contains "<DIC>" and/or "<CDIC>", then we
          * must note that all compound words can be matched by them */
         add_compound_word_with_no_pattern(entry->inflected,alphabet,tokens,parameters->DLC_tree,parameters->tokenization_policy);
      }
      if (number_of_patterns) {
         /* We look for matching patterns only if there are some.
          * We check whether the compound word can be matched by some patterns */
         struct list_pointer* list=get_matching_patterns(entry,parameters->pattern_tree_root);
         struct list_pointer* tmp=list;
         while (tmp!=NULL) {
            /* If the word is matched by at least one pattern, we store it */
            int pattern_number=((struct constraint_list*)(tmp->pointer))->pattern_number;
            add_compound_word_with_pattern(entry->inflected,pattern_number,alphabet,tokens,parameters->DLC_tree,parameters->tokenization_policy);
            tmp=tmp->next;
         }
         free_list_pointer(list);
      }
   }
   free_dela_entry(entry);
}
if (lines>10000) {
   u_printf("\n");
}
u_fclose(f);
}
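Since the function handles both simple-word (DLF) and compound-word (DLC) dictionaries, a caller typically invokes it once per file before running Locate. A hedged usage sketch; the file paths are illustrative, and the other arguments are the ones named in the signature above:

/* Hypothetical sketch: loading the text's DLF and DLC dictionaries
 * before the Locate operation.  Paths are illustrative only. */
load_dic_for_locate("text_snd/dlf",mask_encoding_compatibility_input,alphabet,
                    number_of_patterns,is_DIC_pattern,is_CDIC_pattern,
                    root,parameters);
load_dic_for_locate("text_snd/dlc",mask_encoding_compatibility_input,alphabet,
                    number_of_patterns,is_DIC_pattern,is_CDIC_pattern,
                    root,parameters);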
int
get_segmap(vaddr_t va)
{
        return (get_control_byte(CONTROL_ADDR_BUILD(SEGMAP_BASE, va)));
}
int
get_context(void)
{
        return (get_control_byte(CONTEXT_REG) & CONTEXT_MASK);
}
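get_segmap() and get_context() are read-only accessors over the control space; writes go through the set_control_byte() primitive seen in enable_video() above. A hedged sketch of the symmetric context setter (this function is an assumption, not part of this excerpt):

/*
 * Hypothetical sketch: the write-side counterpart of get_context(),
 * assuming the set_control_byte() primitive used in enable_video().
 */
void
set_context(int c)
{
        set_control_byte(CONTEXT_REG, c & CONTEXT_MASK);
}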