Loading
Feedback

AI Blitz #7

Solution for Rover Classification

Solution for Rover Classification

By derinformatiker


In [2]:
!git clone https://github.com/derInformatiker/AIcrowd-AIBlitz7-Solution.git
!pip install -r AIcrowd-AIBlitz7-Solution/challenge1/requirements.txt
!pip install aicrowd-cli==0.1
fatal: destination path 'AIcrowd-AIBlitz7-Solution' already exists and is not an empty directory.
     |████████████████████████████████| 10.1MB 5.2MB/s 
     |████████████████████████████████| 28.2MB 108kB/s 
     |████████████████████████████████| 829kB 54.0MB/s 
     |████████████████████████████████| 122kB 58.1MB/s 
     |████████████████████████████████| 112kB 59.1MB/s 
     |████████████████████████████████| 276kB 59.0MB/s 
     |████████████████████████████████| 829kB 48.3MB/s 
     |████████████████████████████████| 952kB 52.8MB/s 
     |████████████████████████████████| 1.3MB 54.7MB/s 
     |████████████████████████████████| 296kB 56.1MB/s 
     |████████████████████████████████| 143kB 57.6MB/s 
  Building wheel for efficientnet-pytorch (setup.py) ... done
  Building wheel for albumentations (setup.py) ... done
  Building wheel for PyYAML (setup.py) ... done
  Building wheel for future (setup.py) ... done
ERROR: google-colab 1.0.0 has requirement pandas~=1.1.0; python_version >= "3.0", but you'll have pandas 1.0.5 which is incompatible.
In [ ]:
# NOTE: restart the runtime at this point so the freshly installed package
# versions are actually loaded before the following cells run.
In [3]:
API_KEY = ""  # Please enter your API Key from [https://www.aicrowd.com/participants/me]
!aicrowd login --api-key $API_KEY
API Key valid
Saved API Key successfully!
In [4]:
# Download the challenge archives and CSVs from AIcrowd into the CWD.
!aicrowd dataset download --challenge rover-classification

# Start from a clean data directory so re-runs don't mix stale files.
!rm -rf data
!mkdir data

# Extract the image archives into one folder per split.
!unzip -q train.zip  -d data/train
!unzip -q val.zip -d data/val
!unzip -q test.zip  -d data/test

# Move the label/submission CSVs alongside the extracted images.
!mv train.csv data/train.csv
!mv val.csv data/val.csv
!mv sample_submission.csv data/sample_submission.csv
sample_submission.csv: 100% 164k/164k [00:00<00:00, 1.30MB/s]
test.zip: 100% 66.5M/66.5M [00:03<00:00, 20.8MB/s]
train.csv: 100% 689k/689k [00:00<00:00, 3.15MB/s]
train.zip: 100% 266M/266M [00:10<00:00, 25.9MB/s]
val.csv: 100% 65.0k/65.0k [00:00<00:00, 855kB/s]
val.zip: 100% 26.5M/26.5M [00:01<00:00, 23.0MB/s]
In [5]:
import shutil

# Copy the model and dataset definitions out of the cloned repo into the
# working directory so they can be imported as top-level modules below.
for module_file in ('model.py', 'dataset.py'):
    shutil.copy(f'AIcrowd-AIBlitz7-Solution/challenge1/{module_file}', module_file)
Out[5]:
'dataset.py'
In [1]:
import model
import dataset

import albumentations as A
from albumentations.augmentations.transforms import Flip

import torch
import pytorch_lightning as pl
from pytorch_lightning import Trainer
In [2]:
if __name__ == '__main__':
    # Seed all RNGs so deterministic=True actually yields reproducible runs.
    pl.seed_everything(42)

    # precision=16 selects native AMP automatically (the original log confirmed
    # "Using native 16bit precision"); the apex-only amp_level argument was
    # ignored there, so it is dropped here.
    trainer = Trainer(max_epochs=6, gpus=1, precision=16, deterministic=True)

    # Train-time transforms: fixed 200x200 center crop plus random flips.
    train_tr = A.Compose([
        A.CenterCrop(200, 200, always_apply=True),
        Flip(),
    ])

    # Validation/test transforms: deterministic center crop only.
    val_tr = A.Compose([
        A.CenterCrop(200, 200, always_apply=True),
    ])

    # Use a distinct name so the `model` module is not shadowed by the instance.
    classifier = model.Classifier({'lr': 3e-4, 'batch_size': 64,
                                   'train_tr': train_tr, 'val_tr': val_tr})

    trainer.fit(classifier)
    trainer.test(classifier)
    # Per-batch prediction outputs, consumed by the concatenation cell below.
    out = trainer.predict(classifier)
GPU available: True, used: True
TPU available: None, using: 0 TPU cores
Using native 16bit precision.
Loaded pretrained weights for efficientnet-b3
/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 6 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.
  cpuset_checked))
/content/model.py:23: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
  prob = F.softmax(x)
--------------------------------------------------------------------------------
DATALOADER:0 TEST RESULTS
{'test_f1': 0.48106613755226135, 'test_loss': 0.7106836438179016}
--------------------------------------------------------------------------------

In [3]:
import pandas as pd

def writeSub(p, sample_path='data/sample_submission.csv',
             out_path='data/submission.csv'):
    """Map predicted class indices to rover names and write a submission CSV.

    Args:
        p: 1-D torch tensor of class indices, one per test row
           (0 = perseverance, 1 = curiosity). Float tensors are accepted
           and cast via ``.int()``.
        sample_path: CSV used as the submission template (default keeps
            the original hardcoded location).
        out_path: destination path of the finished submission (default keeps
            the original hardcoded location).
    """
    labelmap = {0: 'perseverance', 1: 'curiosity'}
    test_df = pd.read_csv(sample_path)
    # Cast to int in case predictions arrive as a float tensor.
    labels = [labelmap[i] for i in p.int().tolist()]
    test_df['label'] = labels
    test_df.to_csv(path_or_buf=out_path, index=False)
In [4]:
# Flatten the per-batch predictions into one 1-D tensor of class indices.
# Collect argmaxes in a list and cat once — repeatedly calling torch.cat
# inside the loop (as before) copies the accumulator each iteration (O(n^2)).
# NOTE(review): assumes each element of `out` is a pair whose second item
# holds per-class scores of shape (batch, classes) — confirm against model.py.
batch_preds = [torch.tensor(batch[1]).argmax(1) for batch in out]
output = torch.cat(batch_preds) if batch_preds else torch.tensor([])
In [6]:
writeSub(output)
In [ ]:

↕️  Read More


Comments

You must login before you can post a comment.