data_root = 'iccv09Data'
img_dir = 'images'
ann_dir = 'labels'
 
classes = ('sky', 'tree', 'road', 'grass', 'water', 'bldg', 'mntn', 'fg obj')
palette = [[128, 128, 128], [129, 127, 38], [120, 69, 125], [53, 125, 34],
           [0, 11, 123], [118, 20, 12], [122, 81, 25], [241, 134, 51]]
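# A quick way to eyeball the palette on one annotation map. This is an
# optional sketch, not part of the original steps; it assumes Pillow and
# numpy are installed and that at least one label .png already exists.
import glob
import os.path as osp

import numpy as np
from PIL import Image

sample = sorted(glob.glob(osp.join(data_root, ann_dir, '*.png')))[0]
seg = Image.open(sample).convert('P')
seg.putpalette(np.array(palette, dtype=np.uint8))
seg.save('label_preview.png')  # class indices rendered with the palette above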
from mmseg.registry import DATASETS
from mmseg.datasets import BaseSegDataset
@DATASETS.register_module()
class StanfordBackgroundDataset(BaseSegDataset):
    """Stanford Background dataset: .jpg images paired with .png label maps."""
    METAINFO = dict(classes=classes, palette=palette)

    def __init__(self, **kwargs):
        super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)
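# The dataloader configs below read sample lists from 'splits/train.txt' and
# 'splits/val.txt'. A minimal sketch for generating them, assuming a 4/5 vs 1/5
# train/val split over the annotation files (the ratio is an assumption):
import mmengine

split_dir = osp.join(data_root, 'splits')
mmengine.mkdir_or_exist(split_dir)
# One sample name per line, without the file extension.
filenames = [
    osp.splitext(f)[0]
    for f in mmengine.scandir(osp.join(data_root, ann_dir), suffix='.png')
]
train_len = int(len(filenames) * 4 / 5)  # assumed 4/5 train, 1/5 val
with open(osp.join(split_dir, 'train.txt'), 'w') as f:
    f.writelines(name + '\n' for name in filenames[:train_len])
with open(osp.join(split_dir, 'val.txt'), 'w') as f:
    f.writelines(name + '\n' for name in filenames[train_len:])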
from mmengine import Config
cfg = Config.fromfile('config.py')
# Use plain BN instead of SyncBN (single-GPU training) and a smaller crop size.
cfg.norm_cfg = dict(type='BN', requires_grad=True)
cfg.crop_size = (256, 256)
cfg.model.data_preprocessor.size = cfg.crop_size
cfg.model.backbone.norm_cfg = cfg.norm_cfg
cfg.model.decode_head.norm_cfg = cfg.norm_cfg
cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg
# Match both heads to the 8 classes of the new dataset.
cfg.model.decode_head.num_classes = 8
cfg.model.auxiliary_head.num_classes = 8
 
cfg.dataset_type = 'StanfordBackgroundDataset'
cfg.data_root = data_root
  cfg.train_dataloader.batch_size = 8
cfg.train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='RandomResize', scale=(320, 240), ratio_range=(0.5, 2.0), keep_ratio=True),
    dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackSegInputs')
]
cfg.test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=(320, 240), keep_ratio=True),
    # Annotations are loaded after Resize so the ground truth is not resized.
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs')
]
 
cfg.train_dataloader.dataset.type = cfg.dataset_type
cfg.train_dataloader.dataset.data_root = cfg.data_root
cfg.train_dataloader.dataset.data_prefix = dict(img_path=img_dir, seg_map_path=ann_dir)
cfg.train_dataloader.dataset.pipeline = cfg.train_pipeline
cfg.train_dataloader.dataset.ann_file = 'splits/train.txt'
cfg.val_dataloader.dataset.type = cfg.dataset_type
cfg.val_dataloader.dataset.data_root = cfg.data_root
cfg.val_dataloader.dataset.data_prefix = dict(img_path=img_dir, seg_map_path=ann_dir)
cfg.val_dataloader.dataset.pipeline = cfg.test_pipeline
cfg.val_dataloader.dataset.ann_file = 'splits/val.txt'
  cfg.test_dataloader = cfg.val_dataloader
 
 
# Initialize the model from pretrained weights before fine-tuning.
cfg.load_from = 'checkpoint.pth'
 
# Directory where logs and checkpoints are written.
cfg.work_dir = './work_dirs/tutorial'
 
# Shorten the schedule for the tutorial: 800 iterations, evaluating and
# checkpointing every 400, logging every 100.
cfg.train_cfg.max_iters = 800
cfg.train_cfg.val_interval = 400
cfg.default_hooks.logger.interval = 100
cfg.default_hooks.checkpoint.interval = 400
 
# Fix the random seed so the run is reproducible.
cfg['randomness'] = dict(seed=0)
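# With the config complete, training can be launched through MMEngine's Runner.
# A minimal sketch using the standard MMEngine API (not part of the original
# snippet); pretty_text renders the resolved config for a final check.
print(cfg.pretty_text)

from mmengine.runner import Runner

runner = Runner.from_cfg(cfg)  # builds the model, dataloaders and hooks from cfg
runner.train()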
 