
Baseline for cloud segmentation

라텔만 2023. 5. 28. 23:07

### Check the GPU spec
!nvidia-smi
### Mount Google Drive
from google.colab import drive
drive.mount('/gdrive')
### Set the workspace path
workspace_path = '/gdrive/My Drive/Colab Notebooks/CS2023/competition'  # set this to the folder where the files were uploaded
### Load required packages
!pip install albumentations==0.4.6
!pip install yacs
import os
import torch
import torch.nn.functional as F
import yaml
import numpy as np
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
import random
import torch.backends.cudnn as cudnn
import time
from torchvision import models
from tqdm import tqdm
### Reproducibility settings
def init_seeds(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seed CUDA as well
    cudnn.deterministic = True        # reproducible (but slower) cuDNN kernels
    cudnn.benchmark = False
init_seeds(0)
### Load the data
rgb_path = os.path.join(workspace_path, 'data/train/rgb/')
ngr_path = os.path.join(workspace_path, 'data/train/ngr/')
label_path = os.path.join(workspace_path, 'data/train/label/')
rgb_images = sorted(os.listdir(rgb_path))  # sort so the RGB/NGR/label lists stay aligned
rgb_images = [os.path.join(rgb_path, x) for x in rgb_images]
ngr_images = sorted(os.listdir(ngr_path))
ngr_images = [os.path.join(ngr_path, x) for x in ngr_images]
label_images = sorted(os.listdir(label_path))
label_images = [os.path.join(label_path, x) for x in label_images]
### Define the dataset class
class CloudDataset(torch.utils.data.Dataset):
    def __init__(self, image_path, label_path, patch_size = 400, patch_stride = 100, is_train = True, cache_dir = './cache', transforms = None):
        self.image_path = image_path
        self.label_path = label_path
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.is_train = is_train
        self.transforms = transforms
        
        self.patch_images = []
        self.patch_labels = []
        
        
        os.makedirs(cache_dir, exist_ok=True)
        if is_train:
            for img_path in self.image_path:
                img = cv2.imread(img_path)
                img_count = 0
                for x in range(0, img.shape[0]-self.patch_size+1, self.patch_stride):
                    for y in range(0, img.shape[1]-self.patch_size+1, self.patch_stride):
                        patch_image = img[x:x+self.patch_size, y:y+self.patch_size, :].copy()
                        patch_path = f'rgb_{os.path.splitext(os.path.basename(img_path))[0]}_{img_count}.png'
                        if not os.path.isfile(os.path.join(cache_dir, patch_path)):
                            cv2.imwrite(os.path.join(cache_dir, patch_path), patch_image)
                        self.patch_images.append(os.path.join(cache_dir, patch_path))
                        img_count += 1

            for lbl_path in self.label_path:
                img = cv2.imread(lbl_path)
                img_count = 0
                for x in range(0, img.shape[0]-self.patch_size+1, self.patch_stride):
                    for y in range(0, img.shape[1]-self.patch_size+1, self.patch_stride):
                        patch_image = img[x:x+self.patch_size, y:y+self.patch_size, :].copy()
                        patch_path = f'label_{os.path.splitext(os.path.basename(lbl_path))[0]}_{img_count}.png'
                        if not os.path.isfile(os.path.join(cache_dir, patch_path)):
                            cv2.imwrite(os.path.join(cache_dir, patch_path), patch_image)
                        self.patch_labels.append(os.path.join(cache_dir, patch_path))
                        img_count += 1
        else:
            self.patch_images = self.image_path
            self.patch_labels = self.label_path
    def __len__(self):
        return len(self.patch_images)
        
    def __getitem__(self, idx):
        img = cv2.imread(self.patch_images[idx])
        
        if self.is_train:
            label = cv2.imread(self.patch_labels[idx])
            # build an (H, W) class-index target from the BGR color-coded label
            h, w = label.shape[:2]

            target = np.zeros((h, w), dtype=np.uint8)
            pos = np.where(np.all(label == [0, 0, 255], axis=-1))  # thick cloud (red in BGR)
            target[pos] = 1
            pos = np.where(np.all(label == [0, 255, 0], axis=-1))  # thin cloud (green)
            target[pos] = 2
            pos = np.where(np.all(label == [0, 255, 255], axis=-1))  # cloud shadow (yellow)
            target[pos] = 3
        else:
            target = img
        if self.transforms is not None:
            img, target = self.transforms(img, target)
            
        if self.is_train:
            return img, target
        else:
            return img, self.patch_images[idx]
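As a quick sanity check (a minimal sketch, assuming the data paths above exist and the cache directory is writable; the underscore-prefixed names are hypothetical), pull one sample and confirm that a patch and its target line up:
_check_ds = CloudDataset(rgb_images[:1], label_images[:1],
                         cache_dir=os.path.join(workspace_path, 'cache'))
_img, _target = _check_ds[0]          # no transforms -> raw numpy arrays
print(_img.shape, _target.shape)      # expected: (400, 400, 3) (400, 400)
print(np.unique(_target))             # class indices drawn from {0, 1, 2, 3}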
### Parameter settings
batch_size = 8
epochs = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
patch_size = 400
patch_stride = 100
num_workers = 0

num_classes = 4
class_names = ['thick cloud', 'thin cloud', 'cloud shadow']  # class 0 is background

train_data_rate = 0.7

model_name = 'dilated_unet'

loss_func = 'dice'
### Data augmentation
class ImageAug:
    def __init__(self):
        self.aug = A.Compose([A.HorizontalFlip(p=0.5),
                             A.VerticalFlip(p=0.5),
                             A.ShiftScaleRotate(p=0.5),
                             A.RandomBrightnessContrast(p=0.3),
                             A.Normalize(),
                             ToTensorV2()])

    def __call__(self, img, label):
        transformed = self.aug(image=img, mask=label)
        return transformed['image'], transformed['mask']

class DefaultAug:
    def __init__(self):
        self.aug = A.Compose([A.Normalize(),
                             ToTensorV2()])

    def __call__(self, img, label):
        transformed = self.aug(image=img, mask=label)
        return transformed['image'], transformed['mask']
train_transforms = ImageAug()
val_transforms = DefaultAug()
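A quick check of what the pipelines emit (a sketch on a dummy patch; the underscore-prefixed names are hypothetical):
_dummy_img = np.random.randint(0, 256, (400, 400, 3), dtype=np.uint8)
_dummy_mask = np.zeros((400, 400), dtype=np.uint8)
_t_img, _t_mask = train_transforms(_dummy_img, _dummy_mask)
print(_t_img.shape, _t_img.dtype)    # torch.Size([3, 400, 400]) torch.float32
print(_t_mask.shape, _t_mask.dtype)  # torch.Size([400, 400]) torch.uint8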
### Define the datasets
#train dataset
train_dataset = CloudDataset(rgb_images[:int(len(rgb_images)*train_data_rate)], label_images[:int(len(label_images)*train_data_rate)],
                            transforms=train_transforms, cache_dir=os.path.join(workspace_path, 'cache'))
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               num_workers=num_workers, pin_memory=True, drop_last=True)

# validation dataset
val_dataset = CloudDataset(rgb_images[int(len(rgb_images)*train_data_rate):], label_images[int(len(label_images)*train_data_rate):],
                            transforms=val_transforms, cache_dir=os.path.join(workspace_path, 'cache'))
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False,  # no shuffling; keep every sample for validation
                                               num_workers=num_workers, pin_memory=True, drop_last=False)
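One batch from the loader should match the shapes the model and loss functions expect (a quick hypothetical check; the first dataset construction above also builds the patch cache, which takes a while):
_imgs, _targets = next(iter(train_dataloader))
print(_imgs.shape)     # torch.Size([8, 3, 400, 400])
print(_targets.shape)  # torch.Size([8, 400, 400])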
### Define the model
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
import torch.nn as nn

class DoubleConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(DoubleConvBlock, self).__init__()
        self.block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                                   nn.ReLU(inplace=True),
                                   nn.BatchNorm2d(out_channels),
                                   nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
                                   nn.ReLU(inplace=True),
                                   nn.BatchNorm2d(out_channels))

    def forward(self, x):
        x = self.block(x)
        return x


class DilatedConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels, dilation, padding):
        super(DilatedConvBlock, self).__init__()
        self.block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding, dilation=dilation),
                                   nn.ReLU(inplace=True),
                                   nn.BatchNorm2d(out_channels))

    def forward(self, x):
        x = self.block(x)
        return x




class ConcatDoubleConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(ConcatDoubleConvBlock, self).__init__()
        self.block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                                   nn.ReLU(inplace=True),
                                   nn.BatchNorm2d(out_channels),
                                   nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
                                   nn.ReLU(inplace=True),
                                   nn.BatchNorm2d(out_channels))

    def forward(self, x, skip):
        x = torch.cat((skip, x), dim=1)
        x = self.block(x)
        return x



class MyDilatedConvUNet(nn.Module):
    def __init__(self, filters=44, depth=3, bottleneck_depth=6):
        super(MyDilatedConvUNet, self).__init__()
        self.depth = depth
        self.encoder_path = nn.ModuleList()
        src_in_channels = 3     # RGB input; the NGR (NIR) imagery loaded earlier is not used by this baseline
        for d in range(depth):
            in_channels = src_in_channels if d == 0 else filters * 2 ** (d-1)
            self.encoder_path.append(
                DoubleConvBlock(in_channels, filters * 2 ** d))
        self.maxpool = nn.MaxPool2d(2, 2, padding=0)
        self.bottleneck_path = nn.ModuleList()
        for d in range(bottleneck_depth):
            in_channels = filters * 2 ** (depth - 1) if d == 0 else filters * 2 ** depth
            self.bottleneck_path.append(DilatedConvBlock(in_channels, filters * 2 ** depth, 2 ** d, 2 ** d))
        self.decoder_path = nn.ModuleList()
        for d in range(depth):
            in_channels = filters * 2 ** (depth - d)
            self.decoder_path.append(ConcatDoubleConvBlock(in_channels, filters * 2 ** (depth - d - 1)))
        self.up_path = nn.ModuleList()
        for d in range(depth):
            in_channels = filters * 2 ** (depth - d)
            self.up_path.append(nn.ConvTranspose2d(in_channels, filters * 2 ** (depth - d - 1),
                                                        kernel_size=4, stride=2, padding=1))
        out_channels = 4     # output channels (3 cloud classes + background)
        self.last_conv = nn.Conv2d(filters, out_channels, kernel_size=1)

    def forward(self, x):
        skip = []
        for block in self.encoder_path:
            x = block(x)
            skip.append(x)
            x = self.maxpool(x)
        dilated = []
        for block in self.bottleneck_path:
            x = block(x)
            dilated.append(x)
        x = torch.stack(dilated, dim=-1).sum(dim=-1)  # sum over list

        # up-sampling and double convolutions
        for d in range(self.depth):
            x = self.up_path[d](x)
            x = self.decoder_path[d](x, skip[-(d+1)])

        return self.last_conv(x)
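Because the encoder pools three times and the decoder upsamples three times, input height and width must be divisible by 8 for the skip connections to align. A forward-pass check on random data (a sketch; not part of training):
_net = MyDilatedConvUNet()
_out = _net(torch.randn(1, 3, 400, 400))
print(_out.shape)  # expected: torch.Size([1, 4, 400, 400])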
# Model
if model_name == 'deeplabv3':
    model = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True, num_classes=4)
elif model_name == 'dilated_unet':
    model = MyDilatedConvUNet()

model.to(device)

print('number of parameters: ', count_parameters(model))
### Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
### Define helper functions
def fitness_test(true, pred, num_classes=4):
    # true: (B, 1, H, W) class-index tensor; pred: (B, C, H, W) logits
    eps = 1e-7
    true_one_hot = F.one_hot(true.squeeze(1), num_classes=num_classes)  # (B, 1, H, W) to (B, H, W, C)
    true_one_hot = true_one_hot.permute(0, 3, 1, 2)  # (B, H, W, C) to (B, C, H, W)
    pred_max = pred.argmax(1)      # (B, C, H, W) to (B, H, W)
    pix_acc = (true == pred_max.unsqueeze(1)).sum().float().div(true.nelement())
    pred_one_hot = F.one_hot(pred_max, num_classes=num_classes)   # (B, H, W) to (B, H, W, C)
    pred_one_hot = pred_one_hot.permute(0, 3, 1, 2)   # (B, H, W, C) to (B, C, H, W)

    true_one_hot = true_one_hot.type(pred_one_hot.type())
    dims = (0,) + tuple(range(2, true.ndimension()))  # dims = (0, 2, 3)
    intersection = torch.sum(pred_one_hot & true_one_hot, dims)
    union = torch.sum(pred_one_hot | true_one_hot, dims)
    m_iou = (intersection / (union + eps)).mean()

    return m_iou.item(), pix_acc.item()
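A toy check (hypothetical tensors): a prediction that matches the target exactly should score an mIoU and pixel accuracy of 1.0.
_true = torch.arange(4).repeat(16).reshape(1, 1, 8, 8)   # all four classes present
_logits = F.one_hot(_true.squeeze(1), num_classes=4).permute(0, 3, 1, 2).float() * 10
print(fitness_test(_true, _logits))  # expected: (~1.0, 1.0)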
# Define the loss functions
def ce_loss(true, logits, ignore=255):
    """Computes the weighted multi-class cross-entropy loss.
    Args:
        true: a tensor of shape [B, 1, H, W].
        logits: a tensor of shape [B, C, H, W]. Corresponds to
            the raw output or logits of the model.
        ignore: the class index to ignore.
    Returns:
        ce_loss: the weighted multi-class cross-entropy loss.
    """
    ce_loss = F.cross_entropy(
        logits.float(),
        true.squeeze(1).long(),    # [B, H, W]
        ignore_index=ignore,
    )
    return ce_loss


def dice_loss(true, logits, eps=1e-7):
    """Computes the Sørensen–Dice loss.
    Note that PyTorch optimizers minimize a loss. In this
    case, we would like to maximize the dice loss so we
    return the negated dice loss.
    Args:
        true: a tensor of shape [B, 1, H, W].
        logits: a tensor of shape [B, C, H, W]. Corresponds to
            the raw output or logits of the model.
        eps: added to the denominator for numerical stability.
    Returns:
        dice_loss: the Sørensen–Dice loss.
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        true_1_hot_f = true_1_hot[:, 0:1, :, :]
        true_1_hot_s = true_1_hot[:, 1:2, :, :]
        true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
        pos_prob = torch.sigmoid(logits)
        neg_prob = 1 - pos_prob
        probas = torch.cat([pos_prob, neg_prob], dim=1)
    else:
        # true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
        true_1_hot = F.one_hot(true.squeeze(1), num_classes=num_classes)   # (B, 1, H, W) to (B, H, W, C)
        true_1_hot = true_1_hot.permute(0, 3, 1, 2)                        # (B, H, W, C) to (B, C, H, W)
        probas = F.softmax(logits, dim=1)
    true_1_hot = true_1_hot.type(logits.type()).contiguous()
    dims = (0,) + tuple(range(2, true.ndimension()))        # dims = (0, 2, 3)
    intersection = torch.sum(probas * true_1_hot, dims)     # intersection w.r.t. the class
    cardinality = torch.sum(probas + true_1_hot, dims)      # cardinality w.r.t. the class
    dice_loss = (2. * intersection / (cardinality + eps)).mean()
    return (1 - dice_loss)


def jaccard_loss(true, logits, eps=1e-7):
    """Computes the Jaccard loss, a.k.a the IoU loss.
    Note that PyTorch optimizers minimize a loss. In this
    case, we would like to maximize the jaccard loss so we
    return the negated jaccard loss.
    Args:
        true: a tensor of shape [B, 1, H, W].
        logits: a tensor of shape [B, C, H, W]. Corresponds to
            the raw output or logits of the model.
        eps: added to the denominator for numerical stability.
    Returns:
        jacc_loss: the Jaccard loss.
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        true_1_hot_f = true_1_hot[:, 0:1, :, :]
        true_1_hot_s = true_1_hot[:, 1:2, :, :]
        true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
        pos_prob = torch.sigmoid(logits)
        neg_prob = 1 - pos_prob
        probas = torch.cat([pos_prob, neg_prob], dim=1)
    else:
        true_1_hot = F.one_hot(true.squeeze(1), num_classes=num_classes)  # (B, 1, H, W) to (B, H, W, C)
        true_1_hot = true_1_hot.permute(0, 3, 1, 2)  # (B, H, W, C) to (B, C, H, W)
        probas = F.softmax(logits, dim=1)
    true_1_hot = true_1_hot.type(logits.type()).contiguous()
    dims = (0,) + tuple(range(2, true.ndimension()))
    intersection = torch.sum(probas * true_1_hot, dims)
    cardinality = torch.sum(probas + true_1_hot, dims)
    union = cardinality - intersection
    jacc_loss = (intersection / (union + eps)).mean()
    return (1 - jacc_loss)
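All three losses should be near zero for a near-perfect prediction (a hypothetical sanity check; the scaled one-hot logits below put ~0.9999 softmax mass on the true class):
_true = torch.arange(4).repeat(16).reshape(1, 1, 8, 8)
_logits = F.one_hot(_true.squeeze(1), num_classes=4).permute(0, 3, 1, 2).float() * 10
print(dice_loss(_true, _logits).item())     # ~0.0
print(jaccard_loss(_true, _logits).item())  # ~0.0
print(ce_loss(_true, _logits).item())       # ~0.0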
### Define the training functions
def train(model, optimizer, train_dataloader, val_dataloader, loss_func, epochs, device, patch_size=400, use_scheduler=False, save_path='./ckpt'):

    # Learning rate scheduler
    if use_scheduler:
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=1)
    else:
        lr_scheduler = None
    start_epoch = 0
    resume = True

    if not os.path.isdir(save_path):
        os.mkdir(save_path)

    weight_file = save_path + '/{}.pt'.format(model_name)

    best_fit = 0.0
    num_epochs = epochs

    if resume:
        if os.path.exists(weight_file):
            checkpoint = torch.load(weight_file)
            model.load_state_dict(checkpoint['model'])
            start_epoch = checkpoint['epoch'] + 1
            best_fit = checkpoint['best_fit']
            print("Starting training for %g epochs..." % start_epoch)

    # Start training

    for epoch in range(start_epoch, num_epochs):
        # loss, metric = train_one_epoch(model, optimizer, dataloader, device, epoch)
        t0 = time.time()
        loss = train_one_epoch(model, optimizer, train_dataloader, loss_func, device, epoch, num_epochs)
        t1 = time.time()
        print('[Epoch %g] loss=%.4f, time=%.1f' % (epoch, loss.item(), t1 - t0))
        if lr_scheduler is not None:
            lr_scheduler.step(loss)
        #tb_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)

        state = {'model_name': model_name, 'epoch': epoch, 'best_fit': best_fit, 'model': model.state_dict()}
        torch.save(state, weight_file)

        #tb_writer.add_scalar('train_epoch_loss', loss, epoch)

        # validation
        fit = val_one_epoch(model, val_dataloader, device, epoch, num_epochs, patch_size)
        if fit > best_fit:
            print("best fit so far=>saved")
            torch.save(state, os.path.join(save_path, '{}_best.pt'.format(model_name)))
            best_fit = fit


def train_one_epoch(model, optimizer, data_loader, loss_func, device, epoch, num_epochs):
    model.train()
    losses = np.array([])
    bi0 = epoch * len(data_loader)  # batch index

    print(('\n' + '%10s' * 2) % ('Epoch', 'loss'))
    pbar = tqdm(enumerate(data_loader), total=len(data_loader))
    s = ('%10s' + '%10.4f') % (
        '-/%g' % (num_epochs - 1), 0.0)
    pbar.set_description(s)
    for i, (imgs, targets) in pbar:
        imgs, targets = imgs.to(device), targets.to(device)
        targets = targets.long().unsqueeze(1)  # (B, H, W) -> (B, 1, H, W), the shape the loss functions expect
        if model_name == 'deeplabv3':
            preds = model(imgs)['out']
        elif model_name == 'hrnet_w18' or model_name == 'hrnet_w48':
            preds = model(imgs)
            h, w = preds.shape[2], preds.shape[3]
            targets = F.interpolate(targets.float(), size=(h, w), mode='nearest').long()
        elif model_name == 'dilated_unet':
            preds = model(imgs)
            
        if loss_func == 'jaccard':
            loss = jaccard_loss(targets, preds)
        elif loss_func == 'dice':
            loss = dice_loss(targets, preds)
        elif loss_func == 'ce':
            loss = ce_loss(targets, preds)
        else:
            raise ValueError('unsupported loss function: {}'.format(loss_func))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            # cv2_imshow(imgs[0], preds[0])
            losses = np.append(losses, loss.item())

            s = ('%10s' + '%10.4f') % (
                '%g/%g' % (epoch, num_epochs - 1), loss.item())
            pbar.set_description(s)
            bi = bi0 + i
            #tb_writer.add_scalar('train_batch_loss', loss.item(), bi)

    epoch_loss = losses.mean()

    return epoch_loss


def val_one_epoch(model, data_loader, device, epoch, num_epochs, patch_size):
    model.eval()
    m_iou_list = np.array([])
    pix_acc_list = np.array([])

    print(('\n' + '%10s' * 3) % ('Epoch(V)', 'mIOU', 'Accuracy'))
    pbar = tqdm(enumerate(data_loader), total=len(data_loader))
    s = ('%10s' + '%10.4f' + ' %8.4f') % (
        '-/%g' % (num_epochs - 1), 0.0, 0.0)
    pbar.set_description(s)

    for i, (imgs, targets) in pbar:
        imgs, targets = imgs.to(device), targets.to(device)
        targets = targets.long().unsqueeze(1)  # (B, H, W) -> (B, 1, H, W) for fitness_test
        with torch.no_grad():
            if model_name == 'deeplabv3':
                preds = model(imgs)['out']
            elif model_name == 'hrnet_w18' or model_name == 'hrnet_w48':
                preds = model(imgs)
                h, w = preds.shape[2], preds.shape[3]
                targets = F.interpolate(targets.float(), size=(h, w), mode='nearest').long()
            elif model_name == 'dilated_unet':
                preds = model(imgs)

            m_iou, pix_acc = fitness_test(targets, preds)

            s = ('%10s' + '%10.4f' + ' %8.4f') % (
                '%g/%g' % (epoch, num_epochs - 1), m_iou, pix_acc)
            pbar.set_description(s)
            m_iou_list = np.append(m_iou_list, m_iou)
            pix_acc_list = np.append(pix_acc_list, pix_acc)
    val_m_iou_mean = m_iou_list.mean()
    val_pix_acc_mean = pix_acc_list.mean()
    print('[V] mIOU={:.3f}, Accuracy={:.3f}'.format(val_m_iou_mean, val_pix_acc_mean))
    #tb_writer.add_scalar('val_epoch_m_iou', val_m_iou_mean, epoch)
    #tb_writer.add_scalar('val_epoch_pix_acc', val_pix_acc_mean, epoch)
    return val_pix_acc_mean

### Start training
train(model, optimizer, train_dataloader, val_dataloader, loss_func, epochs, device, patch_size=patch_size, save_path=os.path.join(workspace_path, 'ckpt'))
### Load the best model
save_path=os.path.join(workspace_path, 'ckpt')

checkpoint_path = os.path.join(save_path,'{}_best.pt'.format(model_name))
checkpoint = torch.load(checkpoint_path)

model.load_state_dict(checkpoint['model'])
model.to(device)

print('model load success')
### Define the test dataset
test_rgb_path = os.path.join(workspace_path, 'data/test/rgb')
test_rgb_images = sorted(os.listdir(test_rgb_path))
test_rgb_images = [os.path.join(test_rgb_path, x) for x in test_rgb_images]
# test labels may not exist for the competition test set; fall back to an empty list
test_label_path = os.path.join(workspace_path, 'data/test/label')
try:
    test_label_images = sorted(os.listdir(test_label_path))
except FileNotFoundError:
    test_label_images = []
test_label_images = [os.path.join(test_label_path, x) for x in test_label_images]
test_dataset = CloudDataset(test_rgb_images, test_label_images,
                            transforms=val_transforms, is_train=False)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False,
                                               num_workers=num_workers, pin_memory=True, drop_last=True)
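Note that inference below runs on the full-size test images rather than 400x400 patches. MyDilatedConvUNet is fully convolutional, so this works as long as each test image's height and width are divisible by 8; otherwise the skip connections no longer align and the images would need padding or patch-wise inference.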
### Save test results
model.eval()

result_path = os.path.join(workspace_path, 'results')
os.makedirs(result_path, exist_ok=True)

with torch.no_grad():
    pbar = tqdm(enumerate(test_dataloader), total=len(test_dataloader))
    for i, (imgs, img_path) in pbar:
        imgs = imgs.to(device)
        if model_name == 'deeplabv3':
            preds = model(imgs)['out']
        #elif model_name == 'hrnet_w18' or model_name == 'hrnet_w48':
        #    preds = model(imgs)
        #    h, w = preds.shape[2], preds.shape[3]
        elif model_name == 'dilated_unet':
            preds = model(imgs)
        
        pred_img = np.zeros((*list(preds.shape[2:]), 3), dtype=np.uint8)
        _, idx = preds.squeeze(0).max(0)
        pos = idx == 0
        pred_img[pos.cpu().numpy()] = [0, 0, 0]      # background (black)
        pos = idx == 1
        pred_img[pos.cpu().numpy()] = [0, 0, 255]    # thick cloud (red in BGR)
        pos = idx == 2
        pred_img[pos.cpu().numpy()] = [0, 255, 0]    # thin cloud (green)
        pos = idx == 3
        pred_img[pos.cpu().numpy()] = [0, 255, 255]  # cloud shadow (yellow)
        
        cv2.imwrite(os.path.join(result_path, os.path.basename(img_path[0])), pred_img)

### Run-Length Encoding
import pandas as pd
def mask2rle(img):
    '''
    img: numpy array, 1 - mask, 0 - background
    Returns the run-length encoding as a space-separated string.
    '''
    pixels = img.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)
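For intuition, a tiny worked example (hypothetical input): flattening the mask below in column-major order gives [0, 0, 1, 1, 1, 0], so the single run starts at pixel 3 (1-indexed) and lasts 3 pixels.
_demo = np.array([[0, 1, 1],
                  [0, 1, 0]], dtype=np.uint8)
print(mask2rle(_demo))  # expected: '3 3'
Note that the loop below feeds the color-coded prediction images straight into mask2rle rather than a binary mask; depending on the competition's submission format, you may need to split pred_img into one binary mask (and one RLE) per class instead.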
test_label_file_list = sorted(os.listdir(result_path))  # deterministic submission order
test_label_path_list = [os.path.join(result_path, x) for x in test_label_file_list]
rle_list = []
for file_path in test_label_path_list:
    img = cv2.imread(file_path)
    rle = mask2rle(img)
    rle_list.append(rle)
my_dict = {'Image_Label': test_label_file_list, 'EncodedPixels': rle_list}
my_df = pd.DataFrame(my_dict)
my_df.to_csv(os.path.join(workspace_path, 'submission.csv'), index=False)