Module load/unload flow: `insmod` → `init_module()` → `register_blkdev()` and `blk_init_queue()`, which hook the driver into `blk_dev[]` (the `request()` handler and default queue) and `blkdevs[]` (the `block_device_operations` table); `rmmod` → `cleanup_module()` → `unregister_blkdev()` and `blk_cleanup_queue()`.
static struct { const char *name; struct block_device_operations *bdops; } blkdevs[max_blkdev]; /*MAX_BLKDEV = 255
int register_blkdev(unsigned int major, const char *name, struct block_device_operations *bdops); { if (major == 0){ for (major = MAX_BLKDEV-1; major > 0; major--) { if (blkdevs[major].bdops == NULL) { blkdevs[major].name = name; blkdevs[major].bdops = bdops; return major;} }return EBUSY; } if (major >= MAX_BLKDEV) return EINVAL; if (blkdevs[major].bdops && blkdevs[major].bdops!= bdops) return EBUSY; blkdevs[major].name = name; blkdevs[major].bdops = bdops; return 0; }
int unregister_blkdev(unsigned int major, const char *name); { } if (major >= MAX_BLKDEV) return EINVAL; if (!blkdevs[major].bdops) return EINVAL; if ( strcmp(blkdevs[major].name, name)) return EINVAL; blkdevs[major].name = NULL; blkdevs[major].bdops = NULL; return 0;
/*
 * Driver entry points for a block device (Linux 2.4).  Registered via
 * register_blkdev() and looked up through blkdevs[major].
 */
struct block_device_operations {
	int (*open)(struct inode *, struct file *);	/* device open */
	int (*release)(struct inode *, struct file *);	/* device close */
	int (*ioctl)(struct inode *, struct file *,
		     unsigned, unsigned long);		/* I/O control */
	int (*check_media_change)(kdev_t);	/* removable media changed? */
	int (*revalidate)(kdev_t);		/* re-read partition table etc. */
};
struct file_operations def_blk_fops= { open : blkdev_open, /* release : blkdev_close, /* llseek : block_llseek, /* read : block_read, /* write : block_write, /* fsync : block_fsync, /* ioctl : blkdev_ioctl, /* };
I/O path: User Application → Block Device File (/dev/xxx) → VFS → block_read()/block_write() → bread()/breada() → getblk() → Buffer Cache (in RAM) → ll_rw_block() → Block Device Driver handler.
struct buffer_head { struct buffer_head *b_next; unsigned long b_blocknr; unsigned short b_size; unsigned short b_list; kdev_t b_dev; atomic_t b_count; kdev_t b_rdev; unsigned long b_state; unsigned long b_flushtime; /* /* /* /* /* ID /* /*real ID /* ( ) /*dirty struct buffer_head *b_next_free; /* struct buffer_head *b_prev_free; /* struct buffer_head *b_this_page; /* struct buffer_head *b_reqnext; /*Request struct buffer_head **b_pprev; /* char *b_data; /* struct page *b_page; /* mappingpage void (*b_end_io) (struct buffer_head * bh, int uptodate); /*I/O routine void *b_private; /* ID unsigned long b_resector; wait_queue_head_t b_wait; /* /* wait }; struct inode *b_inode; /* inode struct list_head b_inode_buffers; /*
Buffer state flags (b_state): BH_Uptodate (data is valid), BH_Dirty (modified — needs write-back), BH_Lock (buffer is locked), BH_Req (buffer is part of an I/O request), BH_Mapped (mapped to a disk block), BH_New (newly allocated, awaiting first write), BH_Protected (protected from being freed). On b_dev vs b_rdev: under RAID (Redundant Array of Independent Disks) the virtual device (b_dev) and the real underlying device (b_rdev) may differ.
/* Per-major block device descriptor; blk_dev[] holds one per major number.
 * request_queue is the default queue for the major; if queue is non-NULL it
 * maps a kdev_t to a per-device queue, with data passed as its private
 * argument (NOTE(review): presumed from queue_proc's signature — confirm
 * against ll_rw_blk.c). */
struct blk_dev_struct{ request_queue_t request_queue; queue_proc *queue; void *data; };
struct request { struct list_head queue; int elevator_sequence; struct list_head table; struct list_head *free_list; volatile int rq_status; #define RQ_INACTIVE (-1) #define RQ_ACTIVE 1 #define RQ_SCSI_BUSY 0xffff #define RQ_SCSI_DONE 0xfffe #define RQ_SCSI_DISCONNECTING /* /*elevator sequence /* /* /* 0xffe0 }; kdev_t rq_dev; /* ID int cmd; /* (read/write) int errors; /* unsigned long sector; /*sector unsigned long nr_sectors; /* sector unsigned long hard_sectors, hard_nr_sectors; /* sector sector unsigned int nr_segments; /*segment unsigned int nr_hw_segments; /* hw_segment unsigned long current_nr_sectors; /* sector void *special; /* char *buffer; /*I/O struct semaphore *sem; /* struct buffer_head *bh; /* struct buffer_head *bhtail; /* request_queue_t *q; /* elevator_t *e; /*elevator algorithm
typedef struct request_queue request_queue_t; typedef request_queue_t *(queue_proc) (kdev_t dev); struct request_queue { struct list_head request_freelist[2]; struct list_head queue_head; elevator_t elelvator; request_fn_proc *request_fn; merge_request_fn *black_merge_fn; merge_request_fn *front_merge_fn; merge_requests_fn *merge_requests_fn; make_request_fn *make_request_fn; plug_device_fn *plug_device_fn; void *queuedata; }; struct tq_struct plug_tq; char plugged; char head_active; spinlock_t request_lock; wait_queue_head_t wait_for_request;
static inline void blkdev_dequeue_request (struct request *req) { if (req -> e){ req -> e -> dequeue_fn(req); req -> e = NULL; } list_del (&req -> queue); } static inline void end_request(int uptodate){ struct request *req = CURRENT; if ( end_that_request_first(req, uptodate, DEVICE_NAME)) return; #ifdef DEVICE_NO_RANDOM add_blkdev_randomness(major(req -> req_dev)); #endif DEVICE_OFF(req -> rq_dev); blkdev_dequeue_requst(req); end_that_request_last(req); } /* blkdev_dequeue_request(), end_request()