
Linux kernel md source code walkthrough, part 5: how the raid5 array runs


If you have worked through the run function of the raid1 array, reading raid5's run is very easy, because the two do largely the same things.

raid5's run function is long, but a large part of it has nothing to do with creating and starting the array. In particular, there is a stretch of reshape-related code that most systems never use, so it can simply be skipped. The trimmed-down run function looks like this:

5307 static int run(struct mddev *mddev)  
5308 {  
5309         struct r5conf *conf;  
5310         int working_disks = 0;  
5311         int dirty_parity_disks = 0;  
5312         struct md_rdev *rdev;  
5313         sector_t reshape_offset = 0;  
5314         int i;  
5315         long long min_offset_diff = 0;  
5316         int first = 1;  
...  
5426         if (mddev->private == NULL)  
5427                 conf = setup_conf(mddev);  
5428         else
5429                 conf = mddev->private;  
5430  
5431         if (IS_ERR(conf))  
5432                 return PTR_ERR(conf);  
5433  
5434         conf->min_offset_diff = min_offset_diff;  
5435         mddev->thread = conf->thread;  
5436         conf->thread = NULL;  
5437         mddev->private = conf;  
...  
5491         /* 
5492          * 0 for a fully functional array, 1 or 2 for a degraded array. 
5493          */
5494         mddev->degraded = calc_degraded(conf);  
...  
5503         /* device size must be a multiple of chunk size */
5504         mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);  
5505         mddev->resync_max_sectors = mddev->dev_sectors;  
...  
5556         md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));  
5557  
5558         if (mddev->queue) {  
...  
5628         }  
5629  
5630         return 0;

Doesn't that feel surprisingly simple? Some things look complicated on the surface, but once you sit down and analyze them you find there is a pattern to follow. This run function does the same job as raid1's run: it sets up the context for reads and writes.
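One line in the excerpt above deserves a closer look: 5504, where the comment says the device size must be a multiple of the chunk size. Because the chunk size is required to be a power of two (setup_conf, shown next, rejects anything else around lines 5160-5166), the mask ~(chunk_sectors - 1) just clears the low bits and rounds each member device's usable size down to a whole number of chunks. Here is a minimal standalone sketch of that rounding, outside the kernel and with made-up numbers:

/*
 * Standalone illustration of the rounding at line 5504: when
 * chunk_sectors is a power of two, clearing the low bits rounds
 * dev_sectors down to a whole number of chunks.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long dev_sectors = 1000005; /* hypothetical member size in sectors */
        unsigned int chunk_sectors = 1024;        /* 512KB chunk, a power of two */

        dev_sectors &= ~(unsigned long long)(chunk_sectors - 1);

        /* prints 999424, i.e. 976 complete chunks of 1024 sectors */
        printf("usable sectors: %llu\n", dev_sectors);
        return 0;
}

The few sectors beyond the last full chunk are simply left unused, which keeps the stripe geometry uniform across the whole array.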

Line 5427 creates the struct r5conf. Step into the function:

5131 static struct r5conf *setup_conf(struct mddev *mddev)  
5132 {  
5133         struct r5conf *conf;  
5134         int raid_disk, memory, max_disks;  
5135         struct md_rdev *rdev;  
5136         struct disk_info *disk;  
5137         char pers_name[6];  
5138  
5139         if (mddev->new_level != 5  
5140             && mddev->new_level != 4  
5141             && mddev->new_level != 6) {  
5142                 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",  
5143                        mdname(mddev), mddev->new_level);  
5144                 return ERR_PTR(-EIO);  
5145         }  
5146         if ((mddev->new_level == 5  
5147              && !algorithm_valid_raid5(mddev->new_layout)) ||  
5148             (mddev->new_level == 6  
5149              && !algorithm_valid_raid6(mddev->new_layout))) {  
5150                 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",  
5151                        mdname(mddev), mddev->new_layout);  
5152                 return ERR_PTR(-EIO);  
5153         }  
5154         if (mddev->new_level == 6 && mddev->raid_disks < 4) {  
5155                 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",  
5156                        mdname(mddev), mddev->raid_disks);  
5157                 return ERR_PTR(-EINVAL);  
5158         }  
5159  
5160         if (!mddev->new_chunk_sectors ||  
5161             (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||  
5162             !is_power_of_2(mddev->new_chunk_sectors)) {  
5163                 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",  
5164                        mdname(mddev), mddev->new_chunk_sectors << 9);  
5165                 return ERR_PTR(-EINVAL);  
5166         }  
5167  
5168         conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);  
5169         if (conf == NULL)  
5170                 goto abort;  
5171         spin_lock_init(&conf->device_lock);  
5172         init_waitqueue_head(&conf->wait_for_stripe);  
5173         init_waitqueue_head(&conf->wait_for_overlap);  
5174         INIT_LIST_HEAD(&conf->handle_list);  
5175         INIT_LIST_HEAD(&conf->hold_list);  
5176         INIT_LIST_HEAD(&conf->delayed_list);  
5177         INIT_LIST_HEAD(&conf->bitmap_list);  
5178         INIT_LIST_HEAD(&conf->inactive_list);  
5179         atomic_set(&conf->active_stripes, 0);  
5180         atomic_set(&conf->preread_active_stripes, 0);  
5181         atomic_set(&conf->active_aligned_reads, 0);  
5182         conf->bypass_threshold = BYPASS_THRESHOLD;  
5183         conf->recovery_disabled = mddev->recovery_disabled - 1;  
5184  
5185         conf->raid_disks = mddev->raid_disks;  
5186         if (mddev->reshape_position == MaxSector)  
5187                 conf->previous_raid_disks = mddev->raid_disks;  
5188         else
5189                 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;  
5190         max_disks = max(conf->raid_disks, conf->previous_raid_disks);  
5191         conf->scribble_len = scribble_len(max_disks);  
5192  
5193         conf->disks = kzalloc(max_disks * sizeof(struct disk_info),  
5194                               GFP_KERNEL);  
5195         if (!conf->disks)  
5196                 goto abort;  
5197  
5198         conf->mddev = mddev;  
5199  
5200         if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)  
5201                 goto abort;  
5202  
5203         conf->level = mddev->new_level;  
5204         if (raid5_alloc_percpu(conf) != 0)  
5205                 goto abort;  
5206  
5207         pr_debug("raid456: run(%s) called.\n", mdname(mddev));  
5208  
5209         rdev_for_each(rdev, mddev) {  
5210                 raid_disk = rdev->raid_disk;  
5211                 if (raid_disk >= max_disks  
5212                     || raid_disk < 0)  
5213                         continue;  
5214                 disk = conf->disks + raid_disk;  
5215  
5216                 if (test_bit(Replacement, &rdev->flags)) {  
5217                         if (disk->replacement)  
5218                                 goto abort;  
5219                         disk->replacement = rdev;  
5220                 } else {  
5221                         if (disk->rdev)  
5222                                 goto abort;  
5223                         disk->rdev = rdev;  
5224                 }  
5225  
5226                 if (test_bit(In_sync, &rdev->flags)) {  
5227                         char b[BDEVNAME_SIZE];  
5228                         printk(KERN_INFO "md/raid:%s: device %s operational as raid"
5229                                " disk %d\n",  
5230                                mdname(mddev), bdevname(rdev->bdev, b), raid_disk);  
5231                 } else if (rdev->saved_raid_disk != raid_disk)  
5232                         /* Cannot rely on bitmap to complete recovery */
5233                         conf->fullsync = 1;  
5234         }  
5235  
5236         conf->chunk_sectors = mddev->new_chunk_sectors;  
5237         conf->level = mddev->new_level;  
5238         if (conf->level == 6)  
5239                 conf->max_degraded = 2;  
5240         else
5241                 conf->max_degraded = 1;  
5242         conf->algorithm = mddev->new_layout;  
5243         conf->max_nr_stripes = NR_STRIPES;  
5244         conf->reshape_progress = mddev->reshape_position;  
5245         if (conf->reshape_progress != MaxSector) {  
5246                 conf->prev_chunk_sectors = mddev->chunk_sectors;  
5247                 conf->prev_algo = mddev->layout;  
5248         }  
5249  
5250         memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +  
5251                  max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;  
5252         if (grow_stripes(conf, conf->max_nr_stripes)) {  
5253                 printk(KERN_ERR  
5254                        "md/raid:%s: couldn't allocate %dkB for buffers\n",  
5255                        mdname(mddev), memory);  
5256                 goto abort;  
5257         } else
5258                 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",  
5259                        mdname(mddev), memory);  
5260  
5261         sprintf(pers_name, "raid%d", mddev->new_level);  
5262         conf->thread = md_register_thread(raid5d, mddev, pers_name);  
5263         if (!conf->thread) {  
5264                 printk(KERN_ERR  
5265                        "md/raid:%s: couldn't allocate thread.\n",  
5266                        mdname(mddev));  
5267                 goto abort;  
5268         }  
5269  
5270         return conf;
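Before returning, setup_conf sizes and fills the stripe cache. The estimate at 5250-5251 counts, for each of the max_nr_stripes stripe heads, one struct stripe_head plus, per member disk, one struct bio and one page of data, and reports the total in kB; grow_stripes then does the actual allocation. A rough standalone calculation of that figure, using assumed struct sizes (the real ones depend on the kernel configuration):

/*
 * Rough illustration of the memory estimate at lines 5250-5251.
 * The struct sizes are made-up placeholders; real values depend on
 * the kernel version and configuration.
 */
#include <stdio.h>

int main(void)
{
        int max_nr_stripes = 256;     /* assumed NR_STRIPES default */
        int max_disks = 4;            /* hypothetical 4-disk raid5 */
        long stripe_head_size = 256;  /* assumed sizeof(struct stripe_head) */
        long bio_size = 128;          /* assumed sizeof(struct bio) */
        long page_size = 4096;        /* PAGE_SIZE on most systems */

        long memory_kb = max_nr_stripes *
                (stripe_head_size + max_disks * (bio_size + page_size)) / 1024;

        /* with these numbers: 256 * (256 + 4 * 4224) / 1024 = 4288 kB */
        printf("stripe cache estimate: %ld kB\n", memory_kb);
        return 0;
}

Since the page payload dominates, the cache works out to roughly one megabyte (256 stripes x 4 KB) per member disk, plus struct overhead.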
