/* Send the contents of the buffer open on the current secondary address
 * to the IEEE-488 bus until the data is exhausted or the controller
 * aborts the transfer.
 *
 * Returns 0 on a clean finish, 1 if ieee_putc reported an error/abort.
 * NOTE(review): the return type is uint8_t, so "return -1" (no buffer
 * open, or refill failure) actually yields 0xff -- confirm callers
 * expect that value.
 */
static uint8_t ieee_talk_handler (void)
{
  buffer_t *buf;
  uint8_t finalbyte;
  uint8_t c;
  uint8_t res;

  buf = find_buffer(ieee_data.secondary_address);
  if(buf == NULL)
    return -1;      /* no buffer open on this secondary (0xff as uint8_t) */

  while (buf->read) {
    do {
      /* EOI is asserted together with the very last byte of the file */
      finalbyte = (buf->position == buf->lastused);
      c = buf->data[buf->position];
      if (finalbyte && buf->sendeoi) {
        /* Send with EOI */
        res = ieee_putc(c, 1);
        if(!res) uart_puts_p("EOI: ");
      } else {
        /* Send without EOI */
        res = ieee_putc(c, 0);
      }
      if(res) {
        /* Nonzero result: the byte was not accepted by the bus */
        if(res==0xfc) {
          uart_puts_P(PSTR("*** TIMEOUT ABORT***"));
          uart_putcrlf();
        }
        if(res!=0xfd) {
          uart_putc('c');
          uart_puthex(res);
        }
        return 1;
      } else {
        /* Debug trace of the byte that was just sent */
        uart_putc('>');
        uart_puthex(c);
        uart_putc(' ');
        if(isprint(c))
          uart_putc(c);
        else
          uart_putc('?');
        uart_putcrlf();
      }
    } while (buf->position++ < buf->lastused);

    /* End of data: stop unless this channel must stay open (command
     * channel 15, REL files, or direct-access buffers keep reading). */
    if(buf->sendeoi &&
       ieee_data.secondary_address != 0x0f &&
       !buf->recordlen &&
       buf->refill != directbuffer_refill) {
      buf->read = 0;
      break;
    }

    if (buf->refill(buf)) {
      return -1;    /* refill failed (0xff as uint8_t) */
    }

    /* Search the buffer again, it can change when using large buffers */
    /* NOTE(review): buf is not NULL-checked after this re-lookup -- confirm
     * refill success guarantees the buffer still exists. */
    buf = find_buffer(ieee_data.secondary_address);
  }
  return 0;
}
Integer buffer_get_length(Buffer buffer, Error *err) { buf_T *buf = find_buffer(buffer, err); if (!buf) { return 0; } return buf->b_ml.ml_line_count; }
// Reassemble a fragmented SimpleSignal.  Fragments are accumulated in a
// per-(node, fragment-id) DefragBuffer; when the last fragment arrives the
// signal's first section is replaced by the fully defragmented payload.
// Returns true when the signal is complete (unfragmented or last fragment),
// false while more fragments are still expected.
bool defragment(SimpleSignal* sig) {
  if (!sig->isFragmented())
    return true;          // nothing to do for an unfragmented signal

  Uint32 fragId = sig->getFragmentId();
  NodeId nodeId = refToNode(sig->header.theSendersBlockRef);
  DefragBuffer* dbuf;
  if(sig->isFirstFragment()){
    // Make sure buffer does not exist
    if (find_buffer(nodeId, fragId))
      abort();
    dbuf = new DefragBuffer(nodeId, fragId);
    m_buffers.push_back(dbuf);
  } else {
    // Continuation fragment: the buffer must already exist
    dbuf = find_buffer(nodeId, fragId);
    if (dbuf == NULL)
      abort();
  }
  // Append this fragment's first section to the accumulated payload
  if (dbuf->m_buffer.append(sig->ptr[0].p, sig->ptr[0].sz * sizeof(Uint32)))
    abort(); // OOM
  if (!sig->isLastFragment())
    return false;

  // Copy defragmented data into signal...
  int length = dbuf->m_buffer.length();
  delete[] sig->ptr[0].p;
  sig->ptr[0].sz = (length+3)/4;       // byte length -> word count, rounded up
  sig->ptr[0].p = new Uint32[sig->ptr[0].sz];
  memcpy(sig->ptr[0].p, dbuf->m_buffer.get_data(), length);

  // erase the buffer data
  erase_buffer(dbuf);
  return true;
}
/* DolphinDOS XZ command */
/* Receive file data over the DolphinDOS parallel cable and store it in the
 * buffer already opened on secondary address 1.  The transfer ends when the
 * sender signals EOI (sampled from the IEC CLOCK line after each byte). */
void save_dolphin(void) {
  buffer_t *buf;
  uint8_t eoi;

  /* find the already open file */
  buf = find_buffer(1);
  if (!buf)
    return;

  /* reset buffer position */
  buf->position = 2;
  buf->lastused = 2;

  /* experimental delay to avoid hangs */
  delay_us(100);

  /* handshaking */
  parallel_set_dir(PARALLEL_DIR_IN);
  set_data(0);
  parallel_clear_rxflag();
  parallel_send_handshake();
  uart_flush();

  /* receive data */
  do {
    /* flush buffer if full */
    if (buf->mustflush)
      if (buf->refill(buf))
        return; // FIXME: check error handling in Dolphin

    /* busy-wait for the next byte from the host */
    while (!parallel_rxflag) ;

    buf->data[buf->position] = parallel_read();
    mark_buffer_dirty(buf);

    if (buf->lastused < buf->position)
      buf->lastused = buf->position;
    buf->position++;

    /* mark for flushing on wrap */
    if (buf->position == 0)
      buf->mustflush = 1;

    /* transfer ends when CLOCK is set after a byte */
    eoi = !!IEC_CLOCK;

    parallel_clear_rxflag();
    parallel_send_handshake();
  } while (!eoi);

  /* the file will be closed with ATN+0xe1 by DolphinDOS */
}
/* Try to map a whole page worth of buffers directly onto the page at
 * 'address' for the block list b[].  Bails out (freeing the prepared
 * buffer heads) if any of the blocks is already present in the cache or
 * is zero.  Returns 'address' on success, 0 on failure. */
static unsigned long try_to_load_aligned(unsigned long address,
	dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;
	/* do any of the buffers already exist? punt if so.. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	/* Claim every buffer head on the page and hash it in.
	 * 'block' doubles as the index into arr[] here. */
	tmp = bh;
	p = b;
	block = 0;
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_uptodate = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		nr_buffers++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;	/* close the circular per-page list */
	mem_map[MAP_NR(address)]++;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->maj_flt;	/* this counts as a major page fault */
	return address;
not_aligned:
	/* walk the per-page chain and release the unused buffer heads */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}
/* Look up (dev, block) in the buffer cache.  On a hit the buffer is
 * returned with its reference count raised and any pending I/O waited
 * out; NULL means the block is not cached.  The identity re-check after
 * waiting guards against the buffer being recycled while we slept. */
struct buffer_head *get_hash_table(kdev_t dev, block_t block)
{
    register struct buffer_head *bh;

    for (;;) {
	bh = find_buffer(dev, block);
	if (bh == NULL)
	    return NULL;
	bh->b_count++;
	wait_on_buffer(bh);
	/* still the same block after possibly sleeping? */
	if (bh->b_dev == dev && bh->b_blocknr == block)
	    return bh;
	/* reused for another block while we slept -- drop and retry */
	bh->b_count--;
    }
}
/* Return a buffer for (dev, block): either the cached one, or the least
 * "bad" free buffer, recycled for this block.  May sleep; every sleep is
 * followed by a re-check and possibly a full restart via 'repeat'. */
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	/* already cached?  (bh is NULL after a miss) */
	if (bh = get_hash_table(dev,block))
		return bh;
	/* scan the free list for the best (lowest BADNESS) candidate */
	buffers = NR_BUFFERS;
	tmp = free_list;
	do {
		tmp = tmp->b_next_free;
		if (tmp->b_count)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* clean and unlocked: good enough */
		}
		/* start writeback on dirty buffers as we pass them */
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
/* and repeat until we find something good */
	} while (buffers--);
	/* nothing free: sleep until a buffer is released, then retry */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if (bh->b_count)
		goto repeat;	/* grabbed by someone else while we waited */
	/* flush the buffer before reuse; each wait can invalidate it */
	while (bh->b_dirt) {
		sync_dev(bh->b_dev);
		wait_on_buffer(bh);
		if (bh->b_count)
			goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	/* re-hash the buffer under its new identity */
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * Return a buffer for (dev, block): either the cached one, or a free
 * buffer taken off the free list and re-labelled for this block.
 */
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * tmp;

repeat:
	/* cache hit: done */
	if ((tmp=get_hash_table(dev,block)))
		return tmp;
	/* walk the free list once, looking for an unused buffer */
	tmp = free_list;
	do {
		if (!tmp->b_count) {
			wait_on_buffer(tmp);	/* we still have to wait */
			if (!tmp->b_count)	/* on it, it might be dirty */
				break;
		}
		tmp = tmp->b_next_free;
	} while (tmp != free_list || (tmp=NULL));
	/* Kids, don't try THIS at home ^^^^^. Magic */
	/* (the || clause zeroes tmp exactly when the scan wraps around) */
	if (!tmp) {
		printk("Sleeping on free buffer ..");
		sleep_on(&buffer_wait);
		printk("ok\n");
		goto repeat;
	}
	tmp->b_count++;
	remove_from_queues(tmp);
/*
 * Now, when we know nobody can get to this node (as it's removed from the
 * free list), we write it out. We can sleep here without fear of race-
 * conditions.
 */
	if (tmp->b_dirt)
		sync_dev(tmp->b_dev);
/* update buffer contents */
	tmp->b_dev=dev;
	tmp->b_blocknr=block;
	tmp->b_dirt=0;
	tmp->b_uptodate=0;
/* NOTE!! While we possibly slept in sync_dev(), somebody else might have
 * added "this" block already, so check for that. Thank God for goto's.
 */
	if (find_buffer(dev,block)) {
		tmp->b_dev=0;		/* ok, someone else has beaten us */
		tmp->b_blocknr=0;	/* to it - free this block and */
		tmp->b_count=0;		/* try again */
		insert_into_queues(tmp);
		goto repeat;
	}
/* and then insert into correct position */
	insert_into_queues(tmp);
	return tmp;
}
/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 *
 * Returns the cached buffer for (dev, block) with its reference count
 * raised, or NULL when the block is not in the cache.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
	struct buffer_head * bh;

repeat:
	bh = find_buffer(dev,block);
	if (!bh)
		return NULL;
	bh->b_count++;
	wait_on_buffer(bh);
	/* identity may have changed while we slept -- verify it */
	if (bh->b_dev != dev || bh->b_blocknr != block) {
		bh->b_count--;
		goto repeat;
	}
	return bh;
}
/* Make the buffer name in bname unique by appending a decimal counter.
 * If no buffer with that name exists the name is left untouched;
 * otherwise the name is truncated to 14 characters and "<base><num>" is
 * tried for num = 0, 1, ... until an unused name is found.  The result
 * is written back into bname (caller's array, assumed NBUFN bytes).
 * Aborts via assert after 100 failed attempts. */
void make_buffer_name_uniq(char *bname)
{
	int num = 0;
	char basen[NBUFN];
	char bufn[NBUFN];

	if (NULL == find_buffer(bname, FALSE))
		return;	/* already unique */

	strcpy(basen, bname);
	basen[14] = '\0';	/* leave room for the numeric suffix */
	basen[15] = '\0';

	while(TRUE) {
		/* snprintf instead of sprintf: 14 base chars + 2 digits +
		 * NUL can exceed NBUFN, so bound the write explicitly */
		snprintf(bufn, sizeof(bufn), "%s%d", basen, num++);
		if (NULL == find_buffer(bufn, FALSE)) {
			strcpy(bname, bufn);
			return;
		}
		assert(num < 100);	/* fail after 100 */
	}
}
/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 *
 * Returns the cached buffer for (dev, block) with b_count raised,
 * or NULL on a cache miss.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
	struct buffer_head * bh;

repeat:
	if (!(bh=find_buffer(dev,block)))
		return NULL;
	bh->b_count++;
	wait_on_buffer(bh);
	/* the buffer may have been recycled while we slept: re-check and,
	 * unlike the b_count-- variants, release it through brelse() */
	if (bh->b_dev != dev || bh->b_blocknr != block) {
		brelse(bh);
		goto repeat;
	}
	return bh;
}
/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 *
 * Size-aware variant: a hit must match dev, block AND buffer size.
 * On success the buffer is also moved to the tail of the free list
 * (put_last_free) before being returned with b_count raised.
 */
struct buffer_head * get_hash_table(int dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		/* verify identity after possibly sleeping */
		if (bh->b_dev == dev && bh->b_blocknr == block
		    && bh->b_size == size) {
			put_last_free(bh);
			return bh;
		}
		bh->b_count--;	/* recycled while we slept -- retry */
	}
}
/* Return a buffer for (dev, block): the cached one if present, otherwise
 * the best (lowest BADNESS) unused buffer from the free list, recycled
 * for this block.  May sleep and restarts from 'repeat' after waking. */
struct buffer_head * getblk(int dev,int block)
{
//	printk("getblk -------------------------------- 1\n");
	struct buffer_head *tmp,*bh;

repeat:
	/* cache hit?  (bh is NULL after a miss) */
	if((bh = get_hash_buffer(dev,block)))
		return bh;
	/* scan the whole free list for the least-bad unused buffer */
	tmp = buffer_free_list;
	do{
		if(tmp->b_count)
			continue;
		if(!bh || (BADNESS(bh) > BADNESS(tmp))) {
			bh = tmp;
			if(!BADNESS(tmp))
				break;	/* clean and unlocked: take it */
		}
	}while((tmp = tmp->b_next_free) != buffer_free_list);
	/* all buffers busy: sleep until one is freed, then start over */
	if(!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if(bh->b_count)
		goto repeat;	/* someone grabbed it while we waited */
	/* dirty-buffer writeback is disabled in this version:
	while(bh->b_dirt) {
		sync_dev(bh->b_dev);
		wait_on_buffer(bh);
		if(bh_b_count)
			goto repeat;
	}
	*/
	/* the block may have been added to the cache by another process
	 * while we slept -- if so, start over */
	if(find_buffer(dev,block))
		goto repeat;
	/* claim the buffer and re-hash it under its new identity */
	bh->b_count = 1;
	bh->b_dirt = 0;
	bh->b_uptodate = 0;
	remove_from_queues(bh);
	bh->b_dev = dev;
	bh->b_blocknr = block;
	insert_into_queues(bh);
	return bh;
}
/*
 * Why like this, I hear you say... The reason is race-conditions.  As we
 * don't lock buffers (unless we are reading them), something might happen
 * to a buffer while we sleep (e.g. a read error could force it bad).
 * This shouldn't really happen currently, but the code is ready for it.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
	struct buffer_head * bh;

	for (;;) {
		/* look the block up in the cache; NULL means a miss */
		if (!(bh=find_buffer(dev,block)))
			return NULL;
		/* raise the reference count, then wait for any pending
		 * I/O / lock on the buffer to complete */
		bh->b_count++;
		wait_on_buffer(bh);
		/* we may have slept: verify the buffer still belongs to
		 * this (dev, block) before returning it */
		if (bh->b_dev == dev && bh->b_blocknr == block)
			return bh;
		/* identity changed while we slept: drop our reference
		 * and search again */
		bh->b_count--;
	}
}
/* Positioned read through the read-buffer cache.  Flushes the write
 * buffer first (so reads observe pending writes), then satisfies the
 * request from per-handle read buffers, reloading a buffer from disk
 * whenever it cannot supply any bytes.  Returns the number of bytes
 * read (possibly short at EOF) or a negative couchstore error code. */
static ssize_t buffered_pread(couch_file_handle handle, void *buf, size_t nbyte, off_t offset)
{
#if LOG_BUFFER
    //fprintf(stderr, "r");
#endif
    buffered_file_handle *h = (buffered_file_handle*)handle;
    // Flush the write buffer before trying to read anything:
    couchstore_error_t err = flush_buffer(h->write_buffer);
    if (err < 0) {
        return err;
    }

    ssize_t total_read = 0;
    while (nbyte > 0) {
        file_buffer* buffer = find_buffer(h, offset);
        // Read as much as we can from the current buffer:
        ssize_t nbyte_read = read_from_buffer(buffer, buf, nbyte, offset);
        if (nbyte_read == 0) {
            /*if (nbyte > buffer->capacity) {
                // Remainder won't fit in a single buffer, so just read it directly:
                nbyte_read = h->raw_ops->pread(h->raw_ops_handle, buf, nbyte, offset);
                if (nbyte_read < 0) {
                    return nbyte_read;
                }
            } else*/ {
                // Move the buffer to cover the remainder of the data to be read.
                // Loads start on a READ_BUFFER_CAPACITY-aligned boundary.
                off_t block_start = offset - (offset % READ_BUFFER_CAPACITY);
                err = load_buffer_from(buffer, block_start, (size_t)(offset + nbyte - block_start));
                if (err < 0) {
                    return err;
                }
                nbyte_read = read_from_buffer(buffer, buf, nbyte, offset);
                if (nbyte_read == 0)
                    break; // must be at EOF
            }
        }
        // Advance the destination pointer and file position by what we got
        buf = (char*)buf + nbyte_read;
        nbyte -= nbyte_read;
        offset += nbyte_read;
        total_read += nbyte_read;
    }
    return total_read;
}
/* Return a buffer for (dev, block): the cached one if present, otherwise
 * a freshly claimed free buffer initialized for this block (contents not
 * yet read -- b_uptodate is 0). */
struct buffer_head *getblk(kdev_t dev, block_t block)
{
    register struct buffer_head *bh;

    /* If there are too many dirty buffers, we wake up the update process
     * now so as to ensure that there are still clean buffers available
     * for user processes to use (and dirty) */
    do {
	bh = get_hash_table(dev, block);
	if (bh != NULL) {
	    /* cache hit: promote clean, up-to-date buffers in the LRU */
	    if (buffer_clean(bh) && buffer_uptodate(bh))
		put_last_lru(bh);
	    return bh;
	}

	/* I think the following check is redundant
	 * So I will remove it for now
	 */
    } while(find_buffer(dev, block));

    /*
     *      Create a buffer for this job.
     */
    bh = get_free_buffer();

/* OK, FINALLY we know that this buffer is the only one of its kind,
 * and that it's unused (b_count=0), unlocked (buffer_locked=0), and clean
 */
    bh->b_count = 1;
    bh->b_dirty = 0;
    bh->b_lock = 0;
    bh->b_uptodate = 0;
    bh->b_dev = dev;
    bh->b_blocknr = block;
    bh->b_seg = kernel_ds;
    return bh;
}
/* Recursively validate the part of the reiserfs tree that is present in
 * the buffer cache, starting at (dev, block, size).  Dies on any cached
 * block that is stale, outside the tree, of the wrong node type, or that
 * fails the leaf/internal sanity checks.  Blocks not in the cache are
 * silently skipped. */
static void reiserfsck_check_cached_tree (int dev, int block, int size)
{
    struct buffer_head * bh;
    int what_node;

    bh = find_buffer(dev, block, size);
    if (bh == 0)
	return;		/* not cached: nothing to verify */
    if (!buffer_uptodate (bh)) {
	die ("reiserfsck_check_cached_tree: found notuptodate buffer");
    }

    bh->b_count ++;	/* hold the buffer while we inspect it */

    if (!B_IS_IN_TREE (bh)) {
	die ("reiserfsck_check_cached_tree: buffer (%b %z) not in tree", bh, bh);
    }

    what_node = who_is_this (bh->b_data, bh->b_size);
    if ((what_node != THE_LEAF && what_node != THE_INTERNAL) ||
	!is_block_used (bh->b_blocknr) ||
	(is_leaf_node (bh) && is_leaf_bad (bh)) ||
	(is_internal_node(bh) && is_internal_bad (bh)))
	die ("reiserfsck_check_cached_tree: bad node in the tree");
    if (is_internal_node (bh)) {
	int i;
	struct disk_child * dc;

	/* recurse into every child; g_dkey tracks the delimiting key */
	dc = B_N_CHILD (bh, 0);
	for (i = 0; i <= B_NR_ITEMS (bh); i ++, dc ++) {
	    reiserfsck_check_cached_tree (dev, dc_block_number(dc), size);
	    g_dkey = B_N_PDELIM_KEY (bh, i);
	}
    } else if (is_leaf_node (bh)) {
	brelse (bh);
	return;
    } else {
	reiserfs_panic ("reiserfsck_check_cached_tree: block %lu has bad block type (%b)",
			bh->b_blocknr, bh);
    }
    brelse (bh);
}
/// Sets the current buffer /// /// @param id The buffer handle /// @param[out] err Details of an error that may have occurred void vim_set_current_buffer(Buffer buffer, Error *err) { buf_T *buf = find_buffer(buffer, err); if (!buf) { return; } try_start(); if (do_buffer(DOBUF_GOTO, DOBUF_FIRST, FORWARD, buf->b_fnum, 0) == FAIL) { if (try_end(err)) { return; } char msg[256]; snprintf(msg, sizeof(msg), "failed to switch to buffer %d", (int)buffer); set_api_error(msg, err); return; } try_end(err); }
//检查指定(设备号和块号)的缓冲区是否已经在高速缓冲中。如果指定块已经在高速缓冲中,则返回 //对应缓冲区头指针退出;如果不在,就需要在高速缓冲中设置一个对应设备号和块号的新项。返回相应 //缓冲区头指针 struct buffer_head * getblk(int dev,int block) { struct buffer_head * tmp, * bh; repeat: if ( (bh = get_hash_table(dev,block)) ) return bh; tmp = free_list; do { if (tmp->b_count) continue; if (!bh || BADNESS(tmp)<BADNESS(bh)) { bh = tmp; if (!BADNESS(tmp)) break; } /* and repeat until we find something good */ } while ((tmp = tmp->b_next_free) != free_list); //如果循环检查所有缓冲块都正在被使用(所有缓冲块的头部引用计数都>0)中,则睡眠等待有空闲 //缓冲块可用。当有空闲块可用时本进程会被明确地唤醒。然后我们就跳转到函数开始处重新查找空闲 //缓冲块 if (!bh) { sleep_on(&buffer_wait); goto repeat; } //如果跑到这里,说明已经找到一个空闲的缓冲区块。于是先等待该缓冲区解锁(如果已经被上锁的话)。 wait_on_buffer(bh); if (bh->b_count) goto repeat; //如果该缓冲区已被修改,则将数据写盘,并再次等待缓冲区解锁。 while (bh->b_dirt) { sync_dev(bh->b_dev); wait_on_buffer(bh); if (bh->b_count) goto repeat; } /* NOTE!! While we slept waiting for this block, somebody else might */ /* already have added "this" block to the cache. check it */ //当进程为了等待该缓冲块而睡眠时,其他进程可能已经将该缓冲块进入高速缓冲中,因此我们也要对此进行检查 if (find_buffer(dev,block)) goto repeat; /* OK, FINALLY we know that this buffer is the only one of it's kind, */ /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */ //到了这里,最终我们知道该缓冲块是指定参数的唯一一块,而且目前还没有被占用(b_count=0) //也没有被上锁(b_lock=0),而且是干净的(b_dirt=0) //于是我们占用此缓冲块。置引用计数为1,复位修改标志和有效标志 bh->b_count=1; bh->b_dirt=0; bh->b_uptodate=0; //从hash队列和空闲块链表中移出该缓冲区头,让该缓冲区用于指定设备和其上的指定块。然后根据 //此新的设备号和块号重新插入空闲链表和hash队列新位置处。并最终返回缓冲头指针。 remove_from_queues(bh); bh->b_dev=dev; bh->b_blocknr=block; insert_into_queues(bh); return bh; }
/* Size-aware getblk: return a buffer of exactly 'size' bytes for
 * (dev, block), growing the buffer pool from free pages when possible
 * before falling back to recycling or sleeping. */
struct buffer_head * getblk(int dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	if (bh = get_hash_table(dev, block, size))
		return bh;
	/* plenty of free memory: enlarge the pool instead of recycling */
	if (nr_free_pages > 30)
		grow_buffers(size);
	/* bounded scan of the free list for the least-bad candidate of
	 * the right size */
	buffers = nr_buffers;
	bh = NULL;
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
#endif
	}
	/* no candidate found: try growing once more before sleeping */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}
/* and repeat until we find something good */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	/* each sleep invalidates our assumptions -- re-check everything */
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	/* re-hash the buffer under its new identity */
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
/* Main IEEE-488 bus loop: a state machine over ieee_data.bus_state that
 * idles on the bus, wakes on ATN, reads a command byte and dispatches
 * TALK/LISTEN/UNTALK/UNLISTEN/DATA/OPEN/CLOSE.  Never returns. */
void ieee_mainloop(void) {
  int16_t cmd = 0;

  set_error(ERROR_DOSVERSION);

  ieee_data.bus_state = BUS_IDLE;
  ieee_data.device_state = DEVICE_IDLE;
  for(;;) {
    switch(ieee_data.bus_state) {
      case BUS_SLEEP:                               /* BUS_SLEEP */
        /* Power-down state: release the bus and ignore ATN */
        set_atn_irq(0);
        ieee_bus_idle();
        set_error(ERROR_OK);
        set_busy_led(0);
        uart_puts_P(PSTR("ieee.c/sleep "));
        set_dirty_led(1);

        /* Wait until the sleep key is used again */
        while (!key_pressed(KEY_SLEEP))
          system_sleep();
        reset_key(KEY_SLEEP);

        set_atn_irq(1);
        update_leds();

        ieee_data.bus_state = BUS_IDLE;
        break;

      case BUS_IDLE:                                /* BUS_IDLE */
        /* Wait for ATN, servicing keys and the display meanwhile */
        ieee_bus_idle();
        while(IEEE_ATN) {   ;               /* wait for ATN */
          if (key_pressed(KEY_NEXT | KEY_PREV | KEY_HOME)) {
            change_disk();
          } else if (key_pressed(KEY_SLEEP)) {
            reset_key(KEY_SLEEP);
            ieee_data.bus_state = BUS_SLEEP;
            break;
          } else if (display_found && key_pressed(KEY_DISPLAY)) {
            display_service();
            reset_key(KEY_DISPLAY);
          }
          system_sleep();
        }

        if (ieee_data.bus_state != BUS_SLEEP)
          ieee_data.bus_state = BUS_FOUNDATN;
        break;

      case BUS_FOUNDATN:                            /* BUS_FOUNDATN */
        /* ATN seen: fetch the command byte */
        ieee_data.bus_state = BUS_ATNPROCESS;
        cmd = ieee_getc();
        break;

      case BUS_ATNPROCESS:                          /* BUS_ATNPROCESS */
        /* negative cmd means the read was aborted */
        if(cmd < 0) {
          uart_putc('c');
          ieee_data.bus_state = BUS_IDLE;
          break;
        } else cmd &= 0xFF;
        uart_puts_p("ATN ");
        uart_puthex(cmd);
        uart_putcrlf();

        if (cmd == 0x3f) {                          /* UNLISTEN */
          if(ieee_data.device_state == DEVICE_LISTEN) {
            ieee_data.device_state = DEVICE_IDLE;
            uart_puts_p("UNLISTEN\r\n");
          }
          ieee_data.bus_state = BUS_IDLE;
          break;
        } else if (cmd == 0x5f) {                   /* UNTALK */
          if(ieee_data.device_state == DEVICE_TALK) {
            ieee_data.device_state = DEVICE_IDLE;
            uart_puts_p("UNTALK\r\n");
          }
          ieee_data.bus_state = BUS_IDLE;
          break;
        } else if (cmd == (0x40 + device_address)) { /* TALK */
          uart_puts_p("TALK ");
          uart_puthex(device_address);
          uart_putcrlf();
          ieee_data.device_state = DEVICE_TALK;
          /* disk drives never talk immediatly after TALK, so stay idle
             and wait for a secondary address given by 0x60-0x6f DATA */
          ieee_data.bus_state = BUS_IDLE;
          break;
        } else if (cmd == (0x20 + device_address)) { /* LISTEN */
          ieee_data.device_state = DEVICE_LISTEN;
          uart_puts_p("LISTEN ");
          uart_puthex(device_address);
          uart_putcrlf();
          ieee_data.bus_state = BUS_IDLE;
          break;
        } else if ((cmd & 0xf0) == 0x60) {          /* DATA */
          /* 8250LP sends data while ATN is still active, so wait
             for bus controller to release ATN or we will misinterpret
             data as a command */
          while(!IEEE_ATN);
          if(ieee_data.device_state == DEVICE_LISTEN) {
            cmd = ieee_listen_handler(cmd);
            cmd_handler();
            break;
          } else if (ieee_data.device_state == DEVICE_TALK) {
            ieee_data.secondary_address = cmd & 0x0f;
            uart_puts_p("DATA T ");
            uart_puthex(ieee_data.secondary_address);
            uart_putcrlf();
            if(ieee_talk_handler() == TIMEOUT_ABORT) {
              ieee_data.device_state = DEVICE_IDLE;
            }
            ieee_data.bus_state = BUS_IDLE;
            break;
          } else {
            ieee_data.bus_state = BUS_IDLE;
            break;
          }
        } else if (ieee_data.device_state == DEVICE_IDLE) {
          /* command addressed to some other device */
          ieee_data.bus_state = BUS_IDLE;
          break;
          /* ----- if we reach this, we're LISTENer or TALKer ----- */
        } else if ((cmd & 0xf0) == 0xe0) {          /* CLOSE */
          ieee_data.secondary_address = cmd & 0x0f;
          uart_puts_p("CLOSE ");
          uart_puthex(ieee_data.secondary_address);
          uart_putcrlf();
          /* Close all buffers if sec. 15 is closed */
          if(ieee_data.secondary_address == 15) {
            free_multiple_buffers(FMB_USER_CLEAN);
          } else {
            /* Close a single buffer */
            buffer_t *buf;
            buf = find_buffer (ieee_data.secondary_address);
            if (buf != NULL) {
              buf->cleanup(buf);
              free_buffer(buf);
            }
          }
          ieee_data.bus_state = BUS_IDLE;
          break;
        } else if ((cmd & 0xf0) == 0xf0) {          /* OPEN */
          cmd = ieee_listen_handler(cmd);
          cmd_handler();
          break;
        } else {
          /* Command for other device or unknown command */
          ieee_data.bus_state = BUS_IDLE;
        }
        break;
    }   /* switch */
  }   /* for() */
}
//// 取高速缓冲中指定的缓冲区。 // 检查所指定的缓冲区是否已经在高速缓冲中,如果不在,就需要在高速缓冲中建立一个对应的新项。 // 返回相应缓冲区头指针。 struct buffer_head * getblk(int dev,int block) { struct buffer_head * tmp, * bh; repeat: // 搜索hash 表,如果指定块已经在高速缓冲中,则返回对应缓冲区头指针,退出。 if (bh = get_hash_table(dev,block)) return bh; // 扫描空闲数据块链表,寻找空闲缓冲区。 // 首先让tmp 指向空闲链表的第一个空闲缓冲区头。 tmp = free_list; do { // 如果该缓冲区正被使用(引用计数不等于0),则继续扫描下一项。 if (tmp->b_count) continue; // 如果缓冲头指针bh 为空,或者tmp 所指缓冲头的标志(修改、锁定)权重小于bh 头标志的权重, // 则让bh 指向该tmp 缓冲区头。如果该tmp 缓冲区头表明缓冲区既没有修改也没有锁定标志置位, // 则说明已为指定设备上的块取得对应的高速缓冲区,则退出循环。 if (!bh || BADNESS(tmp)<BADNESS(bh)) { bh = tmp; if (!BADNESS(tmp)) break; } /* 重复操作直到找到适合的缓冲区 */ } while ((tmp = tmp->b_next_free) != free_list); // 如果所有缓冲区都正被使用(所有缓冲区的头部引用计数都>0), // 则睡眠,等待有空闲的缓冲区可用。 if (!bh) { sleep_on(&buffer_wait); goto repeat; } // 等待该缓冲区解锁(如果已被上锁的话)。 wait_on_buffer(bh); // 如果该缓冲区又被其它任务使用的话,只好重复上述过程。 if (bh->b_count) goto repeat; // 如果该缓冲区已被修改,则将数据写盘,并再次等待缓冲区解锁。如果该缓冲区又被其它任务使用 // 的话,只好再重复上述过程。 while (bh->b_dirt) { sync_dev(bh->b_dev); wait_on_buffer(bh); if (bh->b_count) goto repeat; } /* 注意!!当进程为了等待该缓冲块而睡眠时,其它进程可能已经将该缓冲块 */ /* 加入进高速缓冲中,所以要对此进行检查。 */ // 在高速缓冲hash 表中检查指定设备和块的缓冲区是否已经被加入进去。如果是的话,就再次重复 // 上述过程。 if (find_buffer(dev,block)) goto repeat; /* OK,最终我们知道该缓冲区是指定参数的唯一一块, */ /* 而且还没有被使用(b_count=0),未被上锁(b_lock=0),并且是干净的(未被修改的) */ // 于是让我们占用此缓冲区。置引用计数为1,复位修改标志和有效(更新)标志。 bh->b_count=1; bh->b_dirt=0; bh->b_uptodate=0; // 从hash 队列和空闲块链表中移出该缓冲区头,让该缓冲区用于指定设备和其上的指定块。 remove_from_queues(bh); bh->b_dev=dev; bh->b_blocknr=block; // 然后根据此新的设备号和块号重新插入空闲链表和hash 队列新位置处。并最终返回缓冲头指针。 insert_into_queues(bh); return bh; }
static int16_t ieee_listen_handler (uint8_t cmd)
/* Receive characters from IEEE-bus and write them to the
   listen buffer adressed by ieee_data.secondary_address.
   If a new command is received (ATN set), return it
*/
{
  buffer_t *buf;
  int16_t c;

  ieee_data.secondary_address = cmd & 0x0f;
  buf = find_buffer(ieee_data.secondary_address);

  /* Abort if there is no buffer or it's not open for writing */
  /* and it isn't an OPEN command */
  if ((buf == NULL || !buf->write) && (cmd & 0xf0) != 0xf0) {
    uart_putc('c');
    return -1;
  }

  /* debug trace of the command type */
  switch(cmd & 0xf0) {
    case 0x60:
      uart_puts_p("DATA L ");
      break;
    case 0xf0:
      uart_puts_p("OPEN ");
      break;
    default:
      uart_puts_p("Unknown LH! ");
      break;
  }
  uart_puthex(ieee_data.secondary_address);
  uart_putcrlf();

  c = -1;
  for(;;) {
    /* Get a character ignoring timeout but watching ATN */
    while((c = ieee_getc()) < 0);
    if (c  & FLAG_ATN)
      return c;         /* new command under ATN: hand it back */

    uart_putc('<');
    if (c & FLAG_EOI) {
      uart_puts_p("EOI ");
      ieee_data.ieeeflags |= EOI_RECVD;
    } else
      ieee_data.ieeeflags &= ~EOI_RECVD;

    uart_puthex(c);
    uart_putc(' ');
    c &= 0xff; /* needed for isprint */
    if(isprint(c))
      uart_putc(c);
    else
      uart_putc('?');
    uart_putcrlf();

    if((cmd & 0x0f) == 0x0f || (cmd & 0xf0) == 0xf0) {
      /* secondary 15 or OPEN: bytes go into the command buffer */
      if (command_length < CONFIG_COMMAND_BUFFER_SIZE)
        command_buffer[command_length++] = c;
      if (ieee_data.ieeeflags & EOI_RECVD)
        /* Filenames are just a special type of command =) */
        ieee_data.ieeeflags |= COMMAND_RECVD;
    } else {
      /* Flush buffer if full */
      if (buf->mustflush) {
        if (buf->refill(buf))
          return -2;
        /* Search the buffer again, */
        /* it can change when using large buffers */
        buf = find_buffer(ieee_data.secondary_address);
      }

      buf->data[buf->position] = c;
      mark_buffer_dirty(buf);

      if (buf->lastused < buf->position)
        buf->lastused = buf->position;
      buf->position++;

      /* Mark buffer for flushing if position wrapped */
      if (buf->position == 0)
        buf->mustflush = 1;

      /* REL files must be syncronized on EOI */
      if(buf->recordlen && (ieee_data.ieeeflags & EOI_RECVD)) {
        if (buf->refill(buf))
          return -2;
      }
    }   /* else-buffer */
  }   /* for(;;) */
}
//// 取高速缓冲中指定的缓冲区。 // 检查所指定的缓冲区是否已经在高速缓冲中,如果不在,就需要在高速缓冲中建立一个对应的新项。 // 返回相应缓冲区头指针。 struct buffer_head * getblk( int dev, int block ) { struct buffer_head *tmp, *bh; repeat: // 搜索hash 表,如果指定块已经在高速缓冲中,则返回对应缓冲区头指针,退出。 if( bh = get_hash_table( dev, block ) ) { return bh; } // 扫描空闲数据块链表,寻找空闲缓冲区。 // 首先让tmp 指向空闲链表的第一个空闲缓冲区头。 tmp = free_list; do { // 如果该缓冲区正被使用(引用计数不等于0),则继续扫描下一项。 if( tmp->b_count ) { continue; } // 如果缓冲头指针bh 为空,或者tmp 所指缓冲头的标志(修改、锁定)权重小于bh 头标志的权重, // 则让bh 指向该tmp 缓冲区头。如果该tmp 缓冲区头表明缓冲区既没有修改也没有锁定标志置位, // 则说明已为指定设备上的块取得对应的高速缓冲区,则退出循环。 if( !bh || BADNESS( tmp ) < BADNESS( bh ) ) { bh = tmp; if( !BADNESS( tmp ) ) { break; } } /* and repeat until we find something good *//* 重复操作直到找到适合的缓冲区 */ } while( ( tmp = tmp->b_next_free ) != free_list ); // 如果所有缓冲区都正被使用(所有缓冲区的头部引用计数都>0),则睡眠,等待有空闲的缓冲区可用。 if( !bh ) { sleep_on( &buffer_wait ); goto repeat; } // 等待该缓冲区解锁(如果已被上锁的话)。 wait_on_buffer( bh ); // 如果该缓冲区又被其它任务使用的话,只好重复上述过程。 if( bh->b_count ) { goto repeat; } // 如果该缓冲区已被修改,则将数据写盘,并再次等待缓冲区解锁。如果该缓冲区又被其它任务使用 // 的话,只好再重复上述过程。 while( bh->b_dirt ) { sync_dev( bh->b_dev ); wait_on_buffer( bh ); if( bh->b_count ) { goto repeat; } } /* NOTE!! While we slept waiting for this block, somebody else might */ /* already have added "this" block to the cache. check it */ /* 注意!!当进程为了等待该缓冲块而睡眠时,其它进程可能已经将该缓冲块 */ ** / // 在高速缓冲hash 表中检查指定设备和块的缓冲区是否已经被加入进去。如果是的话,就再次重复 // 上述过程。 if( find_buffer( dev, block ) ) { goto repeat; } /* OK, FINALLY we know that this buffer is the only one of it's kind, */ /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */ /* OK,最终我们知道该缓冲区是指定参数的唯一一块,*/ /* 而且还没有被使用(b_count=0),未被上锁(b_lock=0),并且是干净的(未被修改的)*/ // 于是让我们占用此缓冲区。置引用计数为1,复位修改标志和有效(更新)标志。 bh->b_count = 1; bh->b_dirt = 0; bh->b_uptodate = 0; // 从hash 队列和空闲块链表中移出该缓冲区头,让该缓冲区用于指定设备和其上的指定块。 remove_from_queues( bh ); bh->b_dev = dev; bh->b_blocknr = block; // 然后根据此新的设备号和块号重新插入空闲链表和hash 队列新位置处。并最终返回缓冲头指针。 insert_into_queues( bh ); return bh; }
/**
 * file_open - open a file on given secondary
 * @secondary: secondary address used in OPEN call
 *
 * This function opens the file named in command_buffer on the given
 * secondary address. All special names and prefixes/suffixed are handled
 * here, e.g. $/#/@/,S,W
 */
void file_open(uint8_t secondary) {
  buffer_t *buf;
  uint8_t i = 0;
  uint8_t recordlen = 0;

  /* If the secondary is already in use, close the existing buffer */
  buf = find_buffer(secondary);
  if (buf != NULL) {
    /* FIXME: What should we do if an error occurs? */
    cleanup_and_free_buffer(buf);
  }

  /* Assume everything will go well unless proven otherwise */
  set_error(ERROR_OK);

  /* Strip 0x0d characters from end of name (C2BD-C2CA) */
  if (command_length > 1) {
    if (command_buffer[command_length-1] == 0x0d)
      command_length -= 1;
    else if (command_buffer[command_length-2] == 0x0d)
      command_length -= 2;
  }

  /* Clear the remainder of the command buffer, simplifies parsing */
  memset(command_buffer+command_length, 0, sizeof(command_buffer)-command_length);

  uart_trace(command_buffer,0,command_length);

  /* Direct access? */
  if (command_buffer[0] == '#') {
    open_buffer(secondary);
    return;
  }

  /* Parse type+mode suffixes (up to two ","-separated suffixes) */
  uint8_t *ptr = command_buffer;
  enum open_modes mode = OPEN_READ;
  uint8_t filetype = TYPE_DEL;

  while(i++ < 2 && *ptr && (ptr = ustrchr(ptr, ','))) {
    *ptr = 0;   /* terminate the name at the comma */
    ptr++;
    switch (*ptr) {
    case 0:
      break;

    case 'R': /* Read */
      mode = OPEN_READ;
      break;

    case 'W': /* Write */
      mode = OPEN_WRITE;
      break;

    case 'A': /* Append */
      mode = OPEN_APPEND;
      break;

    case 'M': /* Modify */
      mode = OPEN_MODIFY;
      break;

    case 'D': /* DEL */
      filetype = TYPE_DEL;
      break;

    case 'S': /* SEQ */
      filetype = TYPE_SEQ;
      break;

    case 'P': /* PRG */
      filetype = TYPE_PRG;
      break;

    case 'U': /* USR */
      filetype = TYPE_USR;
      break;

    case 'L': /* REL */
      filetype = TYPE_REL;
      mode = OPEN_WRITE;
      /* the byte after the next comma is the record length */
      if((ptr = ustrchr(ptr, ',')))
        recordlen = *(++ptr);
      i = 2;  // stop the scan
      break;
    }
  }

  /* Load directory? */
  if (command_buffer[0] == '$') {
    load_directory(secondary);
    return;
  }

  /* Parse path+partition numbers */
  uint8_t *fname;
  int8_t res;
  cbmdirent_t dent;
  path_t path;

  /* Parse path and file name */
  if (parse_path(command_buffer, &path, &fname, 0))
    return;

#ifdef CONFIG_M2I
  /* For M2I only: Remove trailing spaces from name */
  if (partition[path.part].fop == &m2iops) {
    res = ustrlen(fname);
    while (--res && fname[res] == ' ')
      fname[res] = 0;
  }
#endif

  /* Filename matching */
  if (opendir(&matchdh, &path))
    return;

  do {
    res = next_match(&matchdh, fname, NULL, NULL, FLAG_HIDDEN, &dent);
    if (res > 0)
      /* Error, abort */
      return;

    /* Don't match on DEL or DIR */
    if ((dent.typeflags & TYPE_MASK) != TYPE_DEL &&
        (dent.typeflags & TYPE_MASK) != TYPE_DIR)
      break;

    /* But do match if it's for writing */
    if (mode == OPEN_WRITE || secondary == 1)
      break;
  } while (res == 0);

  /* a new REL file needs an explicit record length */
  if(res && filetype == TYPE_REL && !recordlen) {
    set_error(ERROR_SYNTAX_UNABLE);
    return;
  }

  /* If match found is a REL... */
  if(!res && (dent.typeflags & TYPE_MASK) == TYPE_REL) {
    /* requested type must be REL or DEL */
    if(filetype != TYPE_REL && filetype != TYPE_DEL) {
      set_error(ERROR_FILE_TYPE_MISMATCH);
      return;
    }
    filetype = TYPE_REL;
    mode = OPEN_MODIFY;
  }

  /* Force mode+type for secondaries 0/1 */
  switch (secondary) {
  case 0:
    mode = OPEN_READ;
    if (filetype == TYPE_DEL)
      filetype = TYPE_PRG;
    break;

  case 1:
    mode = OPEN_WRITE;
    if (filetype == TYPE_DEL)
      filetype = TYPE_PRG;
    break;

  default:
    if (filetype == TYPE_DEL)
      filetype = TYPE_SEQ;
  }

  if (mode == OPEN_WRITE) {
    if (res == 0) {
      /* Match found */
      if (command_buffer[0] == '@') {
        /* Make sure there is a free buffer to open the new file later */
        if (!check_free_buffers()) {
          set_error(ERROR_NO_CHANNEL);
          return;
        }

        /* Copy dent because file_delete may change it */
        cbmdirent_t dentcopy = dent;

        /* Rewrite existing file: Delete the old one */
        if (file_delete(&path, &dentcopy) == 255)
          return;

        /* Force fatops to create a new name based on the (long) CBM- */
        /* name instead of creating one with the old SFN and no LFN.  */
        if (dent.opstype == OPSTYPE_FAT || dent.opstype == OPSTYPE_FAT_X00)
          dent.pvt.fat.realname[0] = 0;
      } else {
        /* Write existing file without replacement: Raise error */
        set_error(ERROR_FILE_EXISTS);
        return;
      }
    } else {
      /* Normal write or non-existing rewrite */
      /* Doesn't exist: Copy name to dent */
      memset(&dent, 0, sizeof(dent));
      ustrncpy(dent.name, fname, CBM_NAME_LENGTH);
      set_error(ERROR_OK); // because first_match has set FNF
    }
  } else if (res != 0) {
    /* File not found */
    set_error(ERROR_FILE_NOT_FOUND);
    return;
  }

  /* Grab a buffer */
  buf = alloc_buffer();
  if (!buf)
    return;

  buf->secondary = secondary;

  if(filetype == TYPE_REL) {
    display_filename_write(path.part,CBM_NAME_LENGTH,dent.name);
    open_rel(&path, &dent, buf, recordlen, (mode == OPEN_MODIFY));
    return;
  }

  switch (mode) {
  case OPEN_MODIFY:
  case OPEN_READ:
    /* Modify is the same as read, but allows reading *ed files.        */
    /* FAT doesn't have anything equivalent, so both are mapped to READ */
    display_filename_read(path.part,CBM_NAME_LENGTH,dent.name);
    open_read(&path, &dent, buf);
    break;

  case OPEN_WRITE:
  case OPEN_APPEND:
    display_filename_write(path.part,CBM_NAME_LENGTH,dent.name);
    open_write(&path, &dent, filetype, buf, (mode == OPEN_APPEND));
    break;
  }
}
/* GI-Joe fastloader: repeatedly receive a two-character file name over the
 * serial bus, open the matching file ("XY*") and stream its contents with
 * the GI-Joe byte protocol (0xac escape; 0xac 0xff = end of file,
 * 0xac 0xc3 = more sectors, 0xfe 0xfe 0xac 0xf7 = error). */
void load_gijoe(UNUSED_PARAMETER) {
  buffer_t *buf;

  set_data(1);
  set_clock(1);
  set_atn_irq(0);

  /* Wait until the bus has settled */
  delay_ms(10);
  while (!IEC_DATA || !IEC_CLOCK) ;

  while (1) {
    /* Handshake */
    set_clock(0);
    while (IEC_DATA)
      if (check_keys())
        return;   /* user abort while waiting for the host */

    set_clock(1);
    uart_flush();

    /* First byte is ignored */
    if (gijoe_read_byte() < 0)
      return;

    /* Read two file name characters */
    command_buffer[0] = gijoe_read_byte();
    command_buffer[1] = gijoe_read_byte();

    set_clock(0);

    /* complete the name as a wildcard pattern */
    command_buffer[2] = '*';
    command_buffer[3] = 0;
    command_length = 3;

    /* Open the file */
    file_open(0);
    uart_flush();
    buf = find_buffer(0);
    if (!buf) {
      /* open failed: send the error marker and wait for the next name */
      set_clock(1);
      gijoe_send_byte(0xfe);
      gijoe_send_byte(0xfe);
      gijoe_send_byte(0xac);
      gijoe_send_byte(0xf7);
      continue;
    }

    /* file is open, transfer */
    while (1) {
      uint8_t i = buf->position;

      set_clock(1);
      delay_us(2);

      do {
        /* escape 0xac by doubling it */
        if (buf->data[i] == 0xac)
          gijoe_send_byte(0xac);
        gijoe_send_byte(buf->data[i]);
      } while (i++ < buf->lastused);

      /* Send end marker and wait for the next name */
      if (buf->sendeoi) {
        gijoe_send_byte(0xac);
        gijoe_send_byte(0xff);

        cleanup_and_free_buffer(buf);
        break;
      }

      /* Send "another sector following" marker */
      gijoe_send_byte(0xac);
      gijoe_send_byte(0xc3);
      delay_us(50);
      set_clock(0);

      /* Read next block */
      if (buf->refill(buf)) {
        /* Send error marker */
        gijoe_send_byte(0xfe);
        gijoe_send_byte(0xfe);
        gijoe_send_byte(0xac);
        gijoe_send_byte(0xf7);

        cleanup_and_free_buffer(buf);
        break;
      }
    }
  }
}
/* Return whether the buffer named 'b' is currently visible in a window.
 * NOTE(review): assumes buffer_visible copes with the result of a failed
 * find_buffer lookup -- confirm against their definitions. */
int jed_buffer_visible (char *b)
{
   return buffer_visible (find_buffer(b));
}
/* DolphinDOS XQ command */
/* Send the file open on secondary 0 over the DolphinDOS parallel cable:
 * full sectors first (bytes 2..255 of each buffer, using uint8_t index
 * wraparound), then the partial last sector up to lastused. */
void load_dolphin(void) {
  /* find the already open buffer */
  buffer_t *buf = find_buffer(0);

  if (!buf)
    return;

  buf->position = 2;   /* skip the two link bytes of the sector */

  /* initial handshaking */
  // note about the delays: 100us work, not optimized
  // (doesn't matter much outside the loop)
  delay_us(100); // experimental delay
  parallel_set_dir(PARALLEL_DIR_OUT);
  set_clock(0);
  parallel_clear_rxflag();
  delay_us(100); // experimental delay
  parallel_send_handshake();
  uart_flush();
  delay_us(100); // experimental delay

  /* every sector except the last */
  uint8_t i;

  while (!buf->sendeoi) {
    iec_bus_t bus_state = iec_bus_read();

    /* transmit first byte */
    dolphin_write_hs(buf->data[2]);

    /* check DATA state before transmission */
    /* (DATA set at this point means the host aborted the transfer) */
    if (bus_state & IEC_BIT_DATA) {
      cleanup_and_free_buffer(buf);
      return;
    }

    /* transmit the rest of the sector */
    /* (i is uint8_t: 3,4,...,255 then wraps to 0, ending the loop) */
    for (i = 3; i != 0; i++)
      dolphin_write_hs(buf->data[i]);

    /* read next sector */
    if (buf->refill(buf)) {
      cleanup_and_free_buffer(buf);
      return;
    }
  }

  /* last sector */
  i = 2;
  do {
    dolphin_write_hs(buf->data[i]);
  } while (i++ < buf->lastused);

  /* final handshake */
  set_clock(1);
  while (!IEC_DATA) ;
  parallel_send_handshake();
  parallel_set_dir(PARALLEL_DIR_IN);

  cleanup_and_free_buffer(buf);
}
/* Size-aware getblk with incremental pool growth: return a buffer of
 * 'size' bytes for (dev, block), growing the pool gradually (one page of
 * new buffers per PAGE_SIZE worth of requests) before recycling. */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* clean, up-to-date hits go to the end of the free list */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	/* throttled growth: add a page of buffers every PAGE_SIZE bytes
	 * of misses while free memory is plentiful */
	grow_size -= size;
	if (nr_free_pages > min_free_pages && grow_size <= 0) {
		if (grow_buffers(GFP_BUFFER, size))
			grow_size = PAGE_SIZE;
	}
	/* bounded scan for the least-bad unused buffer of the right size
	 * whose page is not shared (mem_map count 1) */
	buffers = nr_buffers;
	bh = NULL;

	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt) {
			tmp->b_count++;
			ll_rw_block(WRITEA, 1, &tmp);
			tmp->b_count--;
		}
#endif
	}

	/* no candidate: try to grow, fall back to an atomic grow, and
	 * only sleep when even that fails */
	if (!bh) {
		if (nr_free_pages > 5)
			if (grow_buffers(GFP_BUFFER, size))
				goto repeat;
		if (!grow_buffers(GFP_ATOMIC, size))
			sleep_on(&buffer_wait);
		goto repeat;
	}

	wait_on_buffer(bh);
	/* each sleep invalidates our assumptions -- re-check everything */
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(0,0);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	bh->b_req=0;
	/* re-hash the buffer under its new identity */
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}