test_1_qxy

qianxinyue

In [1]:
import pandas as pd
import os
In [2]:
# Read the sample files and count the training images
#TRAIN = pd.read_csv('./train_label.csv') 
train_logo_path=r'C:/Users/test/Desktop/logo识别练习赛/data/train/logo'
train_without_logo_path=r'C:/Users/test/Desktop/logo识别练习赛/data/train/without_logo'
train_logo_img = os.listdir(train_logo_path)   # list all file names in the folder

l1=len(train_logo_img)
print(l1)
test = pd.read_csv(r'C:/Users/test/Desktop/logo识别练习赛/data/提交示例文件.csv',header=None,encoding='gbk') 
train_without_logo_img= os.listdir(train_without_logo_path)   # list all file names in the folder
l2=len(train_without_logo_img)
print(l2)
l=l1+l2
print(f"{l} training samples in total")
print(f"{len(test)} test samples in total") 
test.head()
269
519
788 training samples in total
300 test samples in total
Out[2]:
0
0 001.jpg
1 002.jpg
2 003.jpg
3 004.jpg
4 005.jpg
In [3]:
!pip install cnn_finetune
Requirement already satisfied: cnn_finetune in d:\anoconda3.7\lib\site-packages (0.6.0)
Requirement already satisfied: torchvision>=0.3.0 in d:\anoconda3.7\lib\site-packages (from cnn_finetune) (0.10.0)
Requirement already satisfied: pretrainedmodels>=0.7.4 in d:\anoconda3.7\lib\site-packages (from cnn_finetune) (0.7.4)
Requirement already satisfied: torch in d:\anoconda3.7\lib\site-packages (from cnn_finetune) (1.9.0)
Requirement already satisfied: tqdm in d:\anoconda3.7\lib\site-packages (from cnn_finetune) (4.64.0)
Requirement already satisfied: scipy in d:\anoconda3.7\lib\site-packages (from cnn_finetune) (1.3.1)
Requirement already satisfied: munch in d:\anoconda3.7\lib\site-packages (from pretrainedmodels>=0.7.4->cnn_finetune) (2.5.0)
Requirement already satisfied: pillow>=5.3.0 in d:\anoconda3.7\lib\site-packages (from torchvision>=0.3.0->cnn_finetune) (9.1.0)
Requirement already satisfied: numpy in d:\anoconda3.7\lib\site-packages (from torchvision>=0.3.0->cnn_finetune) (1.21.5)
Requirement already satisfied: typing-extensions in d:\anoconda3.7\lib\site-packages (from torch->cnn_finetune) (3.7.4.3)
Requirement already satisfied: colorama in d:\anoconda3.7\lib\site-packages (from tqdm->cnn_finetune) (0.4.1)
Requirement already satisfied: six in d:\anoconda3.7\lib\site-packages (from munch->pretrainedmodels>=0.7.4->cnn_finetune) (1.15.0)
In [4]:
import os
import shutil
import glob
from PIL import Image
import pandas as pd
In [5]:
# Destination folder for the merged, renamed training images
determination = r'D:/logo_data/train_new2'
if not os.path.exists(determination):
    os.makedirs(determination)
dirs_without_logo=os.listdir(train_without_logo_path)  # list of image file names
print(dirs_without_logo)
dirs_logo=os.listdir(train_logo_path)  # list of image file names
print('dirs_logo:')
print(dirs_logo)

data_list1=[]
for i in range(1,l+1):
    data_list1.append(str(i).zfill(3)+'.jpg')
print('data_list1:')
print(data_list1)
data_list2=[]
count1=0
count2=0
for i in range(1,l1+1):
    data_list2.append(1)
    count1+=1
for j in range(1,l2+1):
    data_list2.append(0)
    count2+=1
print('data_list2:')
print(data_list2)   
print('len(data_list2):')
print(len(data_list2))
print(count1)
print(count2)
['001.jpg', '002.jpg', '003.jpg', '004.jpg', '005.jpg', '006.jpg', '007.jpg', '008.jpg', '009.jpg', '010.jpg', '011.jpg', '012.jpg', '013.jpg', '014.jpg', '015.jpg', '016.jpg', '017.jpg', '018.jpg', '019.jpg', '020.jpg', '021.jpg', '022.jpg', '023.jpg', '024.jpg', '025.jpg', '026.jpg', '027.jpg', '028.jpg', '029.jpg', '030.jpg', '031.jpg', '032.jpg', '033.jpg', '034.jpg', '035.jpg', '036.jpg', '037.jpg', '038.jpg', '039.jpg', '040.jpg', '041.jpg', '042.jpg', '043.jpg', '044.jpg', '045.jpg', '046.jpg', '047.jpg', '048.jpg', '049.jpg', '050.jpg', '051.jpg', '052.jpg', '053.jpg', '054.jpg', '055.jpg', '056.jpg', '057.jpg', '058.jpg', '059.jpg', '060.jpg', '061.jpg', '062.jpg', '063.jpg', '064.jpg', '065.jpg', '066.jpg', '067.jpg', '068.jpg', '069.jpg', '070.jpg', '071.jpg', '072.jpg', '073.jpg', '074.jpg', '075.jpg', '076.jpg', '077.jpg', '078.jpg', '079.jpg', '080.jpg', '081.jpg', '082.jpg', '083.jpg', '084.jpg', '085.jpg', '086.jpg', '087.jpg', '088.jpg', '089.jpg', '090.jpg', '091.jpg', '092.jpg', '093.jpg', '094.jpg', '095.jpg', '096.jpg', '097.jpg', '098.jpg', '099.jpg', '100.jpg', '101.jpg', '102.jpg', '103.jpg', '104.jpg', '105.jpg', '106.jpg', '107.jpg', '108.jpg', '109.jpg', '110.jpg', '111.jpg', '112.jpg', '113.jpg', '114.jpg', '115.jpg', '116.jpg', '117.jpg', '118.jpg', '119.jpg', '120.jpg', '121.jpg', '122.jpg', '123.jpg', '124.jpg', '125.jpg', '126.jpg', '127.jpg', '128.jpg', '129.jpg', '130.jpg', '131.jpg', '132.jpg', '133.jpg', '134.jpg', '135.jpg', '136.jpg', '137.jpg', '138.jpg', '139.jpg', '140.jpg', '141.jpg', '142.jpg', '143.jpg', '144.jpg', '145.jpg', '146.jpg', '147.jpg', '148.jpg', '149.jpg', '150.jpg', '151.jpg', '152.jpg', '153.jpg', '154.jpg', '155.jpg', '156.jpg', '157.jpg', '158.jpg', '159.jpg', '160.jpg', '161.jpg', '162.jpg', '163.jpg', '164.jpg', '165.jpg', '166.jpg', '167.jpg', '168.jpg', '169.jpg', '170.jpg', '171.jpg', '172.jpg', '173.jpg', '174.jpg', '175.jpg', '176.jpg', '177.jpg', '178.jpg', '179.jpg', '180.jpg', '181.jpg', '182.jpg', '183.jpg', '184.jpg', '185.jpg', '186.jpg', '187.jpg', '188.jpg', '189.jpg', '190.jpg', '191.jpg', '192.jpg', '193.jpg', '194.jpg', '195.jpg', '196.jpg', '197.jpg', '198.jpg', '199.jpg', '200.jpg', '201.jpg', '202.jpg', '203.jpg', '204.jpg', '205.jpg', '206.jpg', '207.jpg', '208.jpg', '209.jpg', '210.jpg', '211.jpg', '212.jpg', '213.jpg', '214.jpg', '215.jpg', '216.jpg', '217.jpg', '218.jpg', '219.jpg', '220.jpg', '221.jpg', '222.jpg', '223.jpg', '224.jpg', '225.jpg', '226.jpg', '227.jpg', '228.jpg', '229.jpg', '230.jpg', '231.jpg', '232.jpg', '233.jpg', '234.jpg', '235.jpg', '236.jpg', '237.jpg', '238.jpg', '239.jpg', '240.jpg', '241.jpg', '242.jpg', '243.jpg', '244.jpg', '245.jpg', '246.jpg', '247.jpg', '248.jpg', '249.jpg', '250.jpg', '251.jpg', '252.jpg', '253.jpg', '254.jpg', '255.jpg', '256.jpg', '257.jpg', '258.jpg', '259.jpg', '260.jpg', '261.jpg', '262.jpg', '263.jpg', '264.jpg', '265.jpg', '266.jpg', '267.jpg', '268.jpg', '269.jpg', '270.jpg', '271.jpg', '272.jpg', '273.jpg', '274.jpg', '275.jpg', '276.jpg', '277.jpg', '278.jpg', '279.jpg', '280.jpg', '281.jpg', '282.jpg', '283.jpg', '284.jpg', '285.jpg', '286.jpg', '287.jpg', '288.jpg', '289.jpg', '290.jpg', '291.jpg', '292.jpg', '293.jpg', '294.jpg', '295.jpg', '296.jpg', '297.jpg', '298.jpg', '299.jpg', '300.jpg', '301.jpg', '302.jpg', '303.jpg', '304.jpg', '305.jpg', '306.jpg', '307.jpg', '308.jpg', '309.jpg', '310.jpg', '311.jpg', '312.jpg', '313.jpg', '314.jpg', '315.jpg', '316.jpg', '317.jpg', '318.jpg', '319.jpg', '320.jpg', '321.jpg', '322.jpg', '323.jpg', 
'324.jpg', '325.jpg', '326.jpg', '327.jpg', '328.jpg', '329.jpg', '330.jpg', '331.jpg', '332.jpg', '333.jpg', '334.jpg', '335.jpg', '336.jpg', '337.jpg', '338.jpg', '339.jpg', '340.jpg', '341.jpg', '342.jpg', '343.jpg', '344.jpg', '345.jpg', '346.jpg', '347.jpg', '348.jpg', '349.jpg', '350.jpg', '351.jpg', '352.jpg', '353.jpg', '354.jpg', '355.jpg', '356.jpg', '357.jpg', '358.jpg', '359.jpg', '360.jpg', '361.jpg', '362.jpg', '363.jpg', '364.jpg', '365.jpg', '366.jpg', '367.jpg', '368.jpg', '369.jpg', '370.jpg', '371.jpg', '372.jpg', '373.jpg', '374.jpg', '375.jpg', '376.jpg', '377.jpg', '378.jpg', '379.jpg', '380.jpg', '381.jpg', '382.jpg', '383.jpg', '384.jpg', '385.jpg', '386.jpg', '387.jpg', '388.jpg', '389.jpg', '390.jpg', '391.jpg', '392.jpg', '393.jpg', '394.jpg', '395.jpg', '396.jpg', '397.jpg', '398.jpg', '399.jpg', '400.jpg', '401.jpg', '402.jpg', '403.jpg', '404.jpg', '405.jpg', '406.jpg', '407.jpg', '408.jpg', '409.jpg', '410.jpg', '411.jpg', '412.jpg', '413.jpg', '414.jpg', '415.jpg', '416.jpg', '417.jpg', '418.jpg', '419.jpg', '420.jpg', '421.jpg', '422.jpg', '423.jpg', '424.jpg', '425.jpg', '426.jpg', '427.jpg', '428.jpg', '429.jpg', '430.jpg', '431.jpg', '432.jpg', '433.jpg', '434.jpg', '435.jpg', '436.jpg', '437.jpg', '438.jpg', '439.jpg', '440.jpg', '441.jpg', '442.jpg', '443.jpg', '444.jpg', '445.jpg', '446.jpg', '447.jpg', '448.jpg', '449.jpg', '450.jpg', '451.jpg', '452.jpg', '453.jpg', '454.jpg', '455.jpg', '456.jpg', '457.jpg', '458.jpg', '459.jpg', '460.jpg', '461.jpg', '462.jpg', '463.jpg', '464.jpg', '465.jpg', '466.jpg', '467.jpg', '468.jpg', '469.jpg', '470.jpg', '471.jpg', '472.jpg', '473.jpg', '474.jpg', '475.jpg', '476.jpg', '477.jpg', '478.jpg', '479.jpg', '480.jpg', '481.jpg', '482.jpg', '483.jpg', '484.jpg', '485.jpg', '486.jpg', '487.jpg', '488.jpg', '489.jpg', '490.jpg', '491.jpg', '492.jpg', '493.jpg', '494.jpg', '495.jpg', '496.jpg', '497.jpg', '498.jpg', '499.jpg', '500.jpg', '501.jpg', '502.jpg', '503.jpg', '504.jpg', '505.jpg', '506.jpg', '507.jpg', '508.jpg', '509.jpg', '510.jpg', '511.jpg', '512.jpg', '513.jpg', '514.jpg', '515.jpg', '516.jpg', '517.jpg', '518.jpg', '519.jpg']
dirs_logo:
['001.jpg', '002.jpg', '003.jpg', '004.jpg', '005.jpg', '006.jpg', '007.jpg', '008.jpg', '009.jpg', '010.jpg', '011.jpg', '012.jpg', '013.jpg', '014.jpg', '015.jpg', '016.jpg', '017.jpg', '018.jpg', '019.jpg', '020.jpg', '021.jpg', '022.jpg', '023.jpg', '024.jpg', '025.jpg', '026.jpg', '027.jpg', '028.jpg', '029.jpg', '030.jpg', '031.jpg', '032.jpg', '033.jpg', '034.jpg', '035.jpg', '036.jpg', '037.jpg', '038.jpg', '039.jpg', '040.jpg', '041.jpg', '042.jpg', '043.jpg', '044.jpg', '045.jpg', '046.jpg', '047.jpg', '048.jpg', '049.jpg', '050.jpg', '051.jpg', '052.jpg', '053.jpg', '054.jpg', '055.jpg', '056.jpg', '057.jpg', '058.jpg', '059.jpg', '060.jpg', '061.jpg', '062.jpg', '063.jpg', '064.jpg', '065.jpg', '066.jpg', '067.jpg', '068.jpg', '069.jpg', '070.jpg', '071.jpg', '072.jpg', '073.jpg', '074.jpg', '075.jpg', '076.jpg', '077.jpg', '078.jpg', '079.jpg', '080.jpg', '081.jpg', '082.jpg', '083.jpg', '084.jpg', '085.jpg', '086.jpg', '087.jpg', '088.jpg', '089.jpg', '090.jpg', '091.jpg', '092.jpg', '093.jpg', '094.jpg', '095.jpg', '096.jpg', '097.jpg', '098.jpg', '099.jpg', '100.jpg', '101.jpg', '102.jpg', '103.jpg', '104.jpg', '105.jpg', '106.jpg', '107.jpg', '108.jpg', '109.jpg', '110.jpg', '111.jpg', '112.jpg', '113.jpg', '114.jpg', '115.jpg', '116.jpg', '117.jpg', '118.jpg', '119.jpg', '120.jpg', '121.jpg', '122.jpg', '123.jpg', '124.jpg', '125.jpg', '126.jpg', '127.jpg', '128.jpg', '129.jpg', '130.jpg', '131.jpg', '132.jpg', '133.jpg', '134.jpg', '135.jpg', '136.jpg', '137.jpg', '138.jpg', '139.jpg', '140.jpg', '141.jpg', '142.jpg', '143.jpg', '144.jpg', '145.jpg', '146.jpg', '147.jpg', '148.jpg', '149.jpg', '150.jpg', '151.jpg', '152.jpg', '153.jpg', '154.jpg', '155.jpg', '156.jpg', '157.jpg', '158.jpg', '159.jpg', '160.jpg', '161.jpg', '162.jpg', '163.jpg', '164.jpg', '165.jpg', '166.jpg', '167.jpg', '168.jpg', '169.jpg', '170.jpg', '171.jpg', '172.jpg', '173.jpg', '174.jpg', '175.jpg', '176.jpg', '177.jpg', '178.jpg', '179.jpg', '180.jpg', '181.jpg', '182.jpg', '183.jpg', '184.jpg', '185.jpg', '186.jpg', '187.jpg', '188.jpg', '189.jpg', '190.jpg', '191.jpg', '192.jpg', '193.jpg', '194.jpg', '195.jpg', '196.jpg', '197.jpg', '198.jpg', '199.jpg', '200.jpg', '201.jpg', '202.jpg', '203.jpg', '204.jpg', '205.jpg', '206.jpg', '207.jpg', '208.jpg', '209.jpg', '210.jpg', '211.jpg', '212.jpg', '213.jpg', '214.jpg', '215.jpg', '216.jpg', '217.jpg', '218.jpg', '219.jpg', '220.jpg', '221.jpg', '222.jpg', '223.jpg', '224.jpg', '225.jpg', '226.jpg', '227.jpg', '228.jpg', '229.jpg', '230.jpg', '231.jpg', '232.jpg', '233.jpg', '234.jpg', '235.jpg', '236.jpg', '237.jpg', '238.jpg', '239.jpg', '240.jpg', '241.jpg', '242.jpg', '243.jpg', '244.jpg', '245.jpg', '246.jpg', '247.jpg', '248.jpg', '249.jpg', '250.jpg', '251.jpg', '252.jpg', '253.jpg', '254.jpg', '255.jpg', '256.jpg', '257.jpg', '258.jpg', '259.jpg', '260.jpg', '261.jpg', '262.jpg', '263.jpg', '264.jpg', '265.jpg', '266.jpg', '267.jpg', '268.jpg', '269.jpg']
data_list1:
['001.jpg', '002.jpg', '003.jpg', '004.jpg', '005.jpg', '006.jpg', '007.jpg', '008.jpg', '009.jpg', '010.jpg', '011.jpg', '012.jpg', '013.jpg', '014.jpg', '015.jpg', '016.jpg', '017.jpg', '018.jpg', '019.jpg', '020.jpg', '021.jpg', '022.jpg', '023.jpg', '024.jpg', '025.jpg', '026.jpg', '027.jpg', '028.jpg', '029.jpg', '030.jpg', '031.jpg', '032.jpg', '033.jpg', '034.jpg', '035.jpg', '036.jpg', '037.jpg', '038.jpg', '039.jpg', '040.jpg', '041.jpg', '042.jpg', '043.jpg', '044.jpg', '045.jpg', '046.jpg', '047.jpg', '048.jpg', '049.jpg', '050.jpg', '051.jpg', '052.jpg', '053.jpg', '054.jpg', '055.jpg', '056.jpg', '057.jpg', '058.jpg', '059.jpg', '060.jpg', '061.jpg', '062.jpg', '063.jpg', '064.jpg', '065.jpg', '066.jpg', '067.jpg', '068.jpg', '069.jpg', '070.jpg', '071.jpg', '072.jpg', '073.jpg', '074.jpg', '075.jpg', '076.jpg', '077.jpg', '078.jpg', '079.jpg', '080.jpg', '081.jpg', '082.jpg', '083.jpg', '084.jpg', '085.jpg', '086.jpg', '087.jpg', '088.jpg', '089.jpg', '090.jpg', '091.jpg', '092.jpg', '093.jpg', '094.jpg', '095.jpg', '096.jpg', '097.jpg', '098.jpg', '099.jpg', '100.jpg', '101.jpg', '102.jpg', '103.jpg', '104.jpg', '105.jpg', '106.jpg', '107.jpg', '108.jpg', '109.jpg', '110.jpg', '111.jpg', '112.jpg', '113.jpg', '114.jpg', '115.jpg', '116.jpg', '117.jpg', '118.jpg', '119.jpg', '120.jpg', '121.jpg', '122.jpg', '123.jpg', '124.jpg', '125.jpg', '126.jpg', '127.jpg', '128.jpg', '129.jpg', '130.jpg', '131.jpg', '132.jpg', '133.jpg', '134.jpg', '135.jpg', '136.jpg', '137.jpg', '138.jpg', '139.jpg', '140.jpg', '141.jpg', '142.jpg', '143.jpg', '144.jpg', '145.jpg', '146.jpg', '147.jpg', '148.jpg', '149.jpg', '150.jpg', '151.jpg', '152.jpg', '153.jpg', '154.jpg', '155.jpg', '156.jpg', '157.jpg', '158.jpg', '159.jpg', '160.jpg', '161.jpg', '162.jpg', '163.jpg', '164.jpg', '165.jpg', '166.jpg', '167.jpg', '168.jpg', '169.jpg', '170.jpg', '171.jpg', '172.jpg', '173.jpg', '174.jpg', '175.jpg', '176.jpg', '177.jpg', '178.jpg', '179.jpg', '180.jpg', '181.jpg', '182.jpg', '183.jpg', '184.jpg', '185.jpg', '186.jpg', '187.jpg', '188.jpg', '189.jpg', '190.jpg', '191.jpg', '192.jpg', '193.jpg', '194.jpg', '195.jpg', '196.jpg', '197.jpg', '198.jpg', '199.jpg', '200.jpg', '201.jpg', '202.jpg', '203.jpg', '204.jpg', '205.jpg', '206.jpg', '207.jpg', '208.jpg', '209.jpg', '210.jpg', '211.jpg', '212.jpg', '213.jpg', '214.jpg', '215.jpg', '216.jpg', '217.jpg', '218.jpg', '219.jpg', '220.jpg', '221.jpg', '222.jpg', '223.jpg', '224.jpg', '225.jpg', '226.jpg', '227.jpg', '228.jpg', '229.jpg', '230.jpg', '231.jpg', '232.jpg', '233.jpg', '234.jpg', '235.jpg', '236.jpg', '237.jpg', '238.jpg', '239.jpg', '240.jpg', '241.jpg', '242.jpg', '243.jpg', '244.jpg', '245.jpg', '246.jpg', '247.jpg', '248.jpg', '249.jpg', '250.jpg', '251.jpg', '252.jpg', '253.jpg', '254.jpg', '255.jpg', '256.jpg', '257.jpg', '258.jpg', '259.jpg', '260.jpg', '261.jpg', '262.jpg', '263.jpg', '264.jpg', '265.jpg', '266.jpg', '267.jpg', '268.jpg', '269.jpg', '270.jpg', '271.jpg', '272.jpg', '273.jpg', '274.jpg', '275.jpg', '276.jpg', '277.jpg', '278.jpg', '279.jpg', '280.jpg', '281.jpg', '282.jpg', '283.jpg', '284.jpg', '285.jpg', '286.jpg', '287.jpg', '288.jpg', '289.jpg', '290.jpg', '291.jpg', '292.jpg', '293.jpg', '294.jpg', '295.jpg', '296.jpg', '297.jpg', '298.jpg', '299.jpg', '300.jpg', '301.jpg', '302.jpg', '303.jpg', '304.jpg', '305.jpg', '306.jpg', '307.jpg', '308.jpg', '309.jpg', '310.jpg', '311.jpg', '312.jpg', '313.jpg', '314.jpg', '315.jpg', '316.jpg', '317.jpg', '318.jpg', '319.jpg', '320.jpg', '321.jpg', '322.jpg', '323.jpg', 
'324.jpg', '325.jpg', '326.jpg', '327.jpg', '328.jpg', '329.jpg', '330.jpg', '331.jpg', '332.jpg', '333.jpg', '334.jpg', '335.jpg', '336.jpg', '337.jpg', '338.jpg', '339.jpg', '340.jpg', '341.jpg', '342.jpg', '343.jpg', '344.jpg', '345.jpg', '346.jpg', '347.jpg', '348.jpg', '349.jpg', '350.jpg', '351.jpg', '352.jpg', '353.jpg', '354.jpg', '355.jpg', '356.jpg', '357.jpg', '358.jpg', '359.jpg', '360.jpg', '361.jpg', '362.jpg', '363.jpg', '364.jpg', '365.jpg', '366.jpg', '367.jpg', '368.jpg', '369.jpg', '370.jpg', '371.jpg', '372.jpg', '373.jpg', '374.jpg', '375.jpg', '376.jpg', '377.jpg', '378.jpg', '379.jpg', '380.jpg', '381.jpg', '382.jpg', '383.jpg', '384.jpg', '385.jpg', '386.jpg', '387.jpg', '388.jpg', '389.jpg', '390.jpg', '391.jpg', '392.jpg', '393.jpg', '394.jpg', '395.jpg', '396.jpg', '397.jpg', '398.jpg', '399.jpg', '400.jpg', '401.jpg', '402.jpg', '403.jpg', '404.jpg', '405.jpg', '406.jpg', '407.jpg', '408.jpg', '409.jpg', '410.jpg', '411.jpg', '412.jpg', '413.jpg', '414.jpg', '415.jpg', '416.jpg', '417.jpg', '418.jpg', '419.jpg', '420.jpg', '421.jpg', '422.jpg', '423.jpg', '424.jpg', '425.jpg', '426.jpg', '427.jpg', '428.jpg', '429.jpg', '430.jpg', '431.jpg', '432.jpg', '433.jpg', '434.jpg', '435.jpg', '436.jpg', '437.jpg', '438.jpg', '439.jpg', '440.jpg', '441.jpg', '442.jpg', '443.jpg', '444.jpg', '445.jpg', '446.jpg', '447.jpg', '448.jpg', '449.jpg', '450.jpg', '451.jpg', '452.jpg', '453.jpg', '454.jpg', '455.jpg', '456.jpg', '457.jpg', '458.jpg', '459.jpg', '460.jpg', '461.jpg', '462.jpg', '463.jpg', '464.jpg', '465.jpg', '466.jpg', '467.jpg', '468.jpg', '469.jpg', '470.jpg', '471.jpg', '472.jpg', '473.jpg', '474.jpg', '475.jpg', '476.jpg', '477.jpg', '478.jpg', '479.jpg', '480.jpg', '481.jpg', '482.jpg', '483.jpg', '484.jpg', '485.jpg', '486.jpg', '487.jpg', '488.jpg', '489.jpg', '490.jpg', '491.jpg', '492.jpg', '493.jpg', '494.jpg', '495.jpg', '496.jpg', '497.jpg', '498.jpg', '499.jpg', '500.jpg', '501.jpg', '502.jpg', '503.jpg', '504.jpg', '505.jpg', '506.jpg', '507.jpg', '508.jpg', '509.jpg', '510.jpg', '511.jpg', '512.jpg', '513.jpg', '514.jpg', '515.jpg', '516.jpg', '517.jpg', '518.jpg', '519.jpg', '520.jpg', '521.jpg', '522.jpg', '523.jpg', '524.jpg', '525.jpg', '526.jpg', '527.jpg', '528.jpg', '529.jpg', '530.jpg', '531.jpg', '532.jpg', '533.jpg', '534.jpg', '535.jpg', '536.jpg', '537.jpg', '538.jpg', '539.jpg', '540.jpg', '541.jpg', '542.jpg', '543.jpg', '544.jpg', '545.jpg', '546.jpg', '547.jpg', '548.jpg', '549.jpg', '550.jpg', '551.jpg', '552.jpg', '553.jpg', '554.jpg', '555.jpg', '556.jpg', '557.jpg', '558.jpg', '559.jpg', '560.jpg', '561.jpg', '562.jpg', '563.jpg', '564.jpg', '565.jpg', '566.jpg', '567.jpg', '568.jpg', '569.jpg', '570.jpg', '571.jpg', '572.jpg', '573.jpg', '574.jpg', '575.jpg', '576.jpg', '577.jpg', '578.jpg', '579.jpg', '580.jpg', '581.jpg', '582.jpg', '583.jpg', '584.jpg', '585.jpg', '586.jpg', '587.jpg', '588.jpg', '589.jpg', '590.jpg', '591.jpg', '592.jpg', '593.jpg', '594.jpg', '595.jpg', '596.jpg', '597.jpg', '598.jpg', '599.jpg', '600.jpg', '601.jpg', '602.jpg', '603.jpg', '604.jpg', '605.jpg', '606.jpg', '607.jpg', '608.jpg', '609.jpg', '610.jpg', '611.jpg', '612.jpg', '613.jpg', '614.jpg', '615.jpg', '616.jpg', '617.jpg', '618.jpg', '619.jpg', '620.jpg', '621.jpg', '622.jpg', '623.jpg', '624.jpg', '625.jpg', '626.jpg', '627.jpg', '628.jpg', '629.jpg', '630.jpg', '631.jpg', '632.jpg', '633.jpg', '634.jpg', '635.jpg', '636.jpg', '637.jpg', '638.jpg', '639.jpg', '640.jpg', '641.jpg', '642.jpg', '643.jpg', '644.jpg', '645.jpg', '646.jpg', 
'647.jpg', '648.jpg', '649.jpg', '650.jpg', '651.jpg', '652.jpg', '653.jpg', '654.jpg', '655.jpg', '656.jpg', '657.jpg', '658.jpg', '659.jpg', '660.jpg', '661.jpg', '662.jpg', '663.jpg', '664.jpg', '665.jpg', '666.jpg', '667.jpg', '668.jpg', '669.jpg', '670.jpg', '671.jpg', '672.jpg', '673.jpg', '674.jpg', '675.jpg', '676.jpg', '677.jpg', '678.jpg', '679.jpg', '680.jpg', '681.jpg', '682.jpg', '683.jpg', '684.jpg', '685.jpg', '686.jpg', '687.jpg', '688.jpg', '689.jpg', '690.jpg', '691.jpg', '692.jpg', '693.jpg', '694.jpg', '695.jpg', '696.jpg', '697.jpg', '698.jpg', '699.jpg', '700.jpg', '701.jpg', '702.jpg', '703.jpg', '704.jpg', '705.jpg', '706.jpg', '707.jpg', '708.jpg', '709.jpg', '710.jpg', '711.jpg', '712.jpg', '713.jpg', '714.jpg', '715.jpg', '716.jpg', '717.jpg', '718.jpg', '719.jpg', '720.jpg', '721.jpg', '722.jpg', '723.jpg', '724.jpg', '725.jpg', '726.jpg', '727.jpg', '728.jpg', '729.jpg', '730.jpg', '731.jpg', '732.jpg', '733.jpg', '734.jpg', '735.jpg', '736.jpg', '737.jpg', '738.jpg', '739.jpg', '740.jpg', '741.jpg', '742.jpg', '743.jpg', '744.jpg', '745.jpg', '746.jpg', '747.jpg', '748.jpg', '749.jpg', '750.jpg', '751.jpg', '752.jpg', '753.jpg', '754.jpg', '755.jpg', '756.jpg', '757.jpg', '758.jpg', '759.jpg', '760.jpg', '761.jpg', '762.jpg', '763.jpg', '764.jpg', '765.jpg', '766.jpg', '767.jpg', '768.jpg', '769.jpg', '770.jpg', '771.jpg', '772.jpg', '773.jpg', '774.jpg', '775.jpg', '776.jpg', '777.jpg', '778.jpg', '779.jpg', '780.jpg', '781.jpg', '782.jpg', '783.jpg', '784.jpg', '785.jpg', '786.jpg', '787.jpg', '788.jpg']
data_list2:
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
len(data_list2):
788
269
519
In [6]:
data = {'img_id': pd.Series([ str(i).zfill(3) for i in data_list1]),
        'label': pd.Series([ str(j).zfill(3) for j in data_list2])}
train_label=pd.DataFrame(data)
print(train_label)
train_label.head(10)
      img_id label
0    001.jpg   001
1    002.jpg   001
2    003.jpg   001
3    004.jpg   001
4    005.jpg   001
..       ...   ...
783  784.jpg   000
784  785.jpg   000
785  786.jpg   000
786  787.jpg   000
787  788.jpg   000

[788 rows x 2 columns]
Out[6]:
img_id label
0 001.jpg 001
1 002.jpg 001
2 003.jpg 001
3 004.jpg 001
4 005.jpg 001
5 006.jpg 001
6 007.jpg 001
7 008.jpg 001
8 009.jpg 001
9 010.jpg 001
In [7]:
train_label.tail(10)
Out[7]:
img_id label
778 779.jpg 000
779 780.jpg 000
780 781.jpg 000
781 782.jpg 000
782 783.jpg 000
783 784.jpg 000
784 785.jpg 000
785 786.jpg 000
786 787.jpg 000
787 788.jpg 000
In [8]:
# train_label is ready as a DataFrame; save it without the index
train_label.to_csv(r"C:/Users/test/Desktop/logo识别练习赛/data/train_label.csv", index=False)
In [9]:
#determination = r'D:/logo_data/train'
#train_logo_path=r'C:/Users/test/Desktop/logo识别练习赛/data/train/logo'
#train_without_logo_path=r'C:/Users/test/Desktop/logo识别练习赛/data/train/without_logo'
In [10]:
imgpath = train_logo_path
new_imgpath = determination

 
j = 1  # starting index for the new file names
for root, dirs, files in os.walk(imgpath):
    for i in range(len(files)):
        shutil.copy(os.path.join(imgpath,files[i]), os.path.join(new_imgpath, str(j).zfill(3) + '.jpg'))
        j += 1
 
print('this work is done')
this work is done
In [11]:
j = 270  # starting index for the new file names (continues after the 269 logo images)
for root, dirs, files in os.walk(train_without_logo_path):
    for i in range(len(files)):
        shutil.copy(os.path.join(train_without_logo_path,files[i]), os.path.join(determination, str(j) + '.jpg'))
        j += 1
 
print('this work is done')
this work is done
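A quick optional sanity check (not in the original notebook) after both copy loops, assuming the same destination folder, confirms the merged folder holds all l1 + l2 = 788 renamed images:

# Optional check: the merged training folder should contain l1 + l2 = 788 images.
copied = sorted(os.listdir(determination))
print(len(copied), copied[:3], copied[-3:])
assert len(copied) == l1 + l2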
In [12]:
# The image folder and label file are ready; the data still needs to be shuffled (done later via shuffle=True in the DataLoader)
In [13]:
import pandas as pd
train_label=pd.read_csv(r"C:/Users/test/Desktop/logo识别练习赛/data/train_label.csv")
len(train_label[train_label['label']==1]),len(train_label[train_label['label']==0]) # the positive and negative classes are clearly imbalanced
Out[13]:
(269, 519)
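The split above is 269 logo images against 519 without-logo images. The training code below keeps a plain CrossEntropyLoss; if you wanted to compensate for the imbalance, a minimal sketch (not part of the original notebook) could weight the loss or oversample the minority class:

import torch
import torch.nn as nn
from torch.utils.data import WeightedRandomSampler

# Class counts from the cell above: 519 without_logo (label 0) vs. 269 logo (label 1).
n_neg, n_pos = 519, 269

# Option 1: weight the loss inversely to class frequency (move the tensor to the GPU before use).
class_weights = torch.tensor([(n_neg + n_pos) / (2 * n_neg),
                              (n_neg + n_pos) / (2 * n_pos)])
criterion = nn.CrossEntropyLoss(weight=class_weights)

# Option 2: oversample the minority class at the DataLoader level.
labels = [1] * n_pos + [0] * n_neg            # 0/1 labels in dataset order, as built earlier
sample_weights = [1.0 / n_pos if y == 1 else 1.0 / n_neg for y in labels]
sampler = WeightedRandomSampler(sample_weights, num_samples=len(labels), replacement=True)
# Pass sampler=sampler to the DataLoader instead of shuffle=True.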
In [14]:
class Config(object):
    backbone = 'xception'#
    num_classes = 2 #
    use_smooth_label=False
    loss = 'CrossEntropyLoss'#focal_loss/CrossEntropyLoss
    input_size = 384
    train_batch_size = 16  # batch size
    val_batch_size = 12
    test_batch_size = 1
    optimizer = 'adam'#sam/adam
    lr_scheduler='exp'#cosine/exp/poly
    lr = 3e-4  # adam 0.00001
    sam_lr=1e-3
    MOMENTUM = 0.9
    device = "cuda"  # cuda  or cpu
    gpu_id = [0]
    num_workers = 0  # how many workers for loading data
    max_epoch = 21
    weight_decay = 5e-4
    val_interval = 1
    print_interval = 50
    save_interval = 2
    tensorboard_interval=50
    min_save_epoch=1
    load_from = None
    #
    log_dir = 'log/'
    train_val_data = r"D:/logo_data/train_new"  # note: the copy cells above wrote the renamed images to D:/logo_data/train_new2
    train_label_csv = r"C:/Users/test/Desktop/logo识别练习赛/data/train_label.csv"
    #
    checkpoints_dir = './ckpt/'
    pre_trained = '..'
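The Config above lists focal_loss as an alternative to CrossEntropyLoss, but the notebook only ever uses the latter. For reference, a minimal focal-loss sketch built on top of cross entropy (a hypothetical helper, not the author's implementation):

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    """Minimal multi-class focal loss: down-weights easy, well-classified examples."""
    def __init__(self, gamma=2.0, weight=None):
        super().__init__()
        self.gamma = gamma
        self.weight = weight          # optional per-class weights, as in CrossEntropyLoss

    def forward(self, logits, targets):
        ce = F.cross_entropy(logits, targets, weight=self.weight, reduction='none')
        pt = torch.exp(-ce)           # probability assigned to the true class
        return ((1 - pt) ** self.gamma * ce).mean()

# criterion = FocalLoss(gamma=2.0).to(device)   # drop-in replacement for CrossEntropyLoss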
In [15]:
import os
import glob
from PIL import Image
import torch
from torch.utils.data import Dataset,DataLoader
import numpy as np
from torchvision import transforms as T
import torchvision
import cv2
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
In [16]:
class fuDataset(Dataset):
    def __init__(self, root, train_label_csv, phase='train', input_size=224):
        self.phase = phase
        train_val_label=pd.read_csv(train_label_csv)
        val_ids=[i for i in range(len(train_val_label)) if i%5==0]    # validation indices (every 5th row)
        train_ids=[i for i in range(len(train_val_label)) if i%5!=0]  # training indices
        if phase=='train':
            img_label=train_val_label[train_val_label.index.isin(train_ids)].reset_index()
            self.img_names=[os.path.join(root,i) for i in img_label['img_id'].values]
            self.labels=img_label['label'].values
        else:
            img_label=train_val_label[train_val_label.index.isin(val_ids)].reset_index()
            self.img_names=[os.path.join(root,i) for i in img_label['img_id'].values]
            self.labels=img_label['label'].values
        # Override: train on all of the data (no held-out validation set),
        # so the 'val' phase below re-scores images the model was trained on.
        self.img_names=[os.path.join(root,i) for i in train_val_label['img_id'].values]
        self.labels=train_val_label['label'].values
        #
        normalize = T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        if self.phase == 'train':
            self.transforms = T.Compose([
                T.Resize((input_size,input_size)),
                T.RandomHorizontalFlip(p=0.5),
                T.RandomVerticalFlip(p=0.25),
                T.RandomRotation(degrees=(-20,20)),
                T.ColorJitter(0.2,0.2),
                T.ToTensor(),
                normalize
            ])
        else:
            self.transforms = T.Compose([
                T.Resize((input_size,input_size)),
                T.ToTensor(),
                normalize
            ])

    def __getitem__(self, index):
        img_path = self.img_names[index]
        data = Image.open(img_path)
        data = data.convert('RGB')
        data = self.transforms(data)
        label = np.int32(self.labels[index])
        return data.float(), label

    def __len__(self):
        return len(self.img_names)
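Note the override at the end of __init__: both phases end up using all 788 images, so the validation scores later in the log (reaching valAcc 1.0000) are measured on images the model has already seen. If a genuine held-out split is wanted, a minimal stratified-split sketch (a hypothetical helper, assuming the same train_label.csv) could look like this:

import pandas as pd
from sklearn.model_selection import train_test_split

def split_labels(train_label_csv, val_fraction=0.2, seed=42):
    # Stratified 80/20 split of the label file, so both classes appear in
    # train and val in the same proportion.
    df = pd.read_csv(train_label_csv)
    train_df, val_df = train_test_split(
        df, test_size=val_fraction, stratify=df['label'], random_state=seed)
    return train_df.reset_index(drop=True), val_df.reset_index(drop=True)

# train_df, val_df = split_labels(r"C:/Users/test/Desktop/logo识别练习赛/data/train_label.csv")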
In [17]:
import logging

def get_logger(filename, verbosity=1, name=None):
    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(level_dict[verbosity])

    fh = logging.FileHandler(filename, "w")
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    return logger
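One caveat with get_logger: if the cell is re-run in the same kernel, logging.getLogger(name) returns the same logger object and another FileHandler/StreamHandler gets attached, so every message is written more than once. A small optional guard (not in the original notebook):

def get_logger_once(filename, verbosity=1, name=None):
    # Clear handlers attached by earlier runs so log lines are not duplicated,
    # then delegate to the get_logger defined above.
    logger = logging.getLogger(name)
    logger.handlers.clear()
    return get_logger(filename, verbosity=verbosity, name=name)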
In [18]:
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import time
from sklearn.metrics import accuracy_score
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import numpy as np
from cnn_finetune import make_model
In [19]:
import warnings
warnings.filterwarnings("ignore")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train_model(model,criterion, optimizer, lr_scheduler=None):

    train_dataset = fuDataset(opt.train_val_data, opt.train_label_csv, phase='train', input_size=opt.input_size)
    trainloader = DataLoader(train_dataset,
                             batch_size=opt.train_batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers)

    total_iters=len(trainloader)
    logger.info('total_iters:{}'.format(total_iters))
    model_name=opt.backbone
    since = time.time()
    best_score = 0.0
    best_epoch = 0
    log_acc=0
    log_train=0
    writer = SummaryWriter()  # records training/validation metrics (loss, acc, etc.) for TensorBoard
    logger.info('start training...')
    #
    iters = len(trainloader)
    for epoch in range(1,opt.max_epoch+1):
        model.train(True)
        begin_time=time.time()
        logger.info('learning rate:{}'.format(optimizer.param_groups[-1]['lr']))
        logger.info('Epoch {}/{}'.format(epoch, opt.max_epoch))
        logger.info('-' * 10)
        running_corrects_linear = 0
        count=0
        train_loss = []
        for i, data in enumerate(trainloader):
            count+=1
            inputs, labels = data
            labels = labels.type(torch.LongTensor)
            inputs, labels = inputs.to(device), labels.to(device)
            #
            out_linear= model(inputs)
            _, linear_preds = torch.max(out_linear.data, 1)
            loss = criterion(out_linear, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # step the cosine warm-restart schedule once per iteration
            lr_scheduler.step(epoch + count / iters)

            if i % opt.print_interval == 0 or out_linear.size()[0] < opt.train_batch_size:
                spend_time = time.time() - begin_time
                logger.info(
                    ' Epoch:{}({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format(
                        epoch, count, total_iters,
                        loss.item(), optimizer.param_groups[-1]['lr'],
                        spend_time / count * total_iters // 60 - spend_time // 60))
            #
            running_corrects_linear += torch.sum(linear_preds == labels.data)
            train_loss.append(loss.item())
            writer.add_scalar('train_loss',loss.item(), global_step=log_train)
            log_train+=1
            #
        #lr_scheduler.step()
        val_acc,val_loss= val_model(model, criterion)
        epoch_acc_linear = running_corrects_linear.double() / total_iters / opt.train_batch_size
        logger.info('valLoss: {:.4f} valAcc: {:.4f}'.format(val_loss,val_acc))
        logger.info('Epoch:[{}/{}] train_acc={:.3f} '.format(epoch, opt.max_epoch,
                                                                    epoch_acc_linear))
        #
        model_out_path = model_save_dir + "/" + '{}_'.format(model_name) + str(epoch) + '.pth'
        best_model_out_path = model_save_dir + "/" + '{}_'.format(model_name) + 'best' + '.pth'
        #model_out_path = '{}_'.format(model_name) + str(epoch) + '.pth'
        #save the best model
        if val_acc > best_score:
            best_score = val_acc
            best_epoch=epoch
            torch.save(model.state_dict(), best_model_out_path)
            logger.info("save best epoch: {} best acc: {}".format(best_epoch,val_acc))
        #save based on epoch interval
        if epoch % opt.save_interval == 0 and epoch>opt.min_save_epoch:
            torch.save(model.state_dict(), model_out_path)
    #
    logger.info('Best acc: {:.3f} Best epoch:{}'.format(best_score,best_epoch))
    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    writer.close()

@torch.no_grad()
def val_model(model, criterion):
    val_dataset = fuDataset(opt.train_val_data, opt.train_label_csv, phase='val', input_size=opt.input_size)
    val_loader = DataLoader(val_dataset,
                             batch_size=opt.val_batch_size,
                             shuffle=False,
                             num_workers=opt.num_workers)
    dset_sizes=len(val_dataset)
    model.eval()
    running_loss = 0.0
    running_corrects = 0
    cont = 0
    outPre = []
    outLabel = []
    pres_list=[]
    labels_list=[]
    for data in val_loader:
        inputs, labels = data
        labels = labels.type(torch.LongTensor)
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        loss = criterion(outputs, labels)
        if cont == 0:
            outPre = outputs.data.cpu()
            outLabel = labels.data.cpu()
        else:
            outPre = torch.cat((outPre, outputs.data.cpu()), 0)
            outLabel = torch.cat((outLabel, labels.data.cpu()), 0)
        pres_list+=preds.cpu().numpy().tolist()
        labels_list+=labels.data.cpu().numpy().tolist()
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        cont += 1
    #
    val_acc = accuracy_score(labels_list, pres_list)
    return val_acc,running_loss / dset_sizes

#
if __name__ == "__main__":
    #
    opt = Config()
    torch.cuda.empty_cache()
    #device = torch.device(opt.device)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    criterion = torch.nn.CrossEntropyLoss().to(device)
    model_name=opt.backbone
    model_save_dir =os.path.join(opt.checkpoints_dir , model_name)
    if not os.path.exists(model_save_dir): os.makedirs(model_save_dir)
    logger = get_logger(os.path.join(model_save_dir,'log.log'))
    logger.info('Using: {}'.format(model_name))
    logger.info('InputSize: {}'.format(opt.input_size))
    logger.info('optimizer: {}'.format(opt.optimizer))
    logger.info('lr_init: {}'.format(opt.lr))
    logger.info('batch size: {}'.format(opt.train_batch_size))
    logger.info('criterion: {}'.format(opt.loss))
    logger.info('Using label smooth: {}'.format(opt.use_smooth_label))
    logger.info('lr_scheduler: {}'.format(opt.lr_scheduler))
    logger.info('Using the GPU: {}'.format(str(opt.gpu_id)))

    model  = make_model('{}'.format('xception'), num_classes=2,
                        pretrained=True)
    model.to(device)
    optimizer = optim.AdamW(model.parameters(), lr=3e-4 ,weight_decay=5e-4)
    #lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=3, T_mult=2, eta_min=1e-6, last_epoch=-1)
    train_model(model, criterion, optimizer,
              lr_scheduler=lr_scheduler)
    #
[2022-06-22 09:40:18,125][<ipython-input-19-053217a93bf8>][INFO] Using: xception
[2022-06-22 09:40:18,126][<ipython-input-19-053217a93bf8>][INFO] InputSize: 384
[2022-06-22 09:40:18,127][<ipython-input-19-053217a93bf8>][INFO] optimizer: adam
[2022-06-22 09:40:18,129][<ipython-input-19-053217a93bf8>][INFO] lr_init: 0.0003
[2022-06-22 09:40:18,130][<ipython-input-19-053217a93bf8>][INFO] batch size: 16
[2022-06-22 09:40:18,132][<ipython-input-19-053217a93bf8>][INFO] criterion: CrossEntropyLoss
[2022-06-22 09:40:18,134][<ipython-input-19-053217a93bf8>][INFO] Using label smooth: False
[2022-06-22 09:40:18,137][<ipython-input-19-053217a93bf8>][INFO] lr_scheduler: exp
[2022-06-22 09:40:18,139][<ipython-input-19-053217a93bf8>][INFO] Using the GPU: [0]
[2022-06-22 09:40:19,232][<ipython-input-19-053217a93bf8>][INFO] total_iters:50
[2022-06-22 09:40:19,248][<ipython-input-19-053217a93bf8>][INFO] start training...
[2022-06-22 09:40:19,252][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.0003
[2022-06-22 09:40:19,253][<ipython-input-19-053217a93bf8>][INFO] Epoch 1/21
[2022-06-22 09:40:19,254][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 09:40:38,184][<ipython-input-19-053217a93bf8>][INFO]  Epoch:1(1/50) loss:0.589 lr:0.0002225 epoch_Time:15.0min:
[2022-06-22 09:56:26,455][<ipython-input-19-053217a93bf8>][INFO]  Epoch:1(50/50) loss:0.450 lr:0.0000758 epoch_Time:0.0min:
[2022-06-22 10:01:40,813][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.3324 valAcc: 0.8579
[2022-06-22 10:01:40,815][<ipython-input-19-053217a93bf8>][INFO] Epoch:[1/21] train_acc=0.762 
[2022-06-22 10:01:40,948][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 1 best acc: 0.8578680203045685
[2022-06-22 10:01:40,949][<ipython-input-19-053217a93bf8>][INFO] learning rate:7.575000000000001e-05
[2022-06-22 10:01:40,950][<ipython-input-19-053217a93bf8>][INFO] Epoch 2/21
[2022-06-22 10:01:40,951][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 10:01:57,168][<ipython-input-19-053217a93bf8>][INFO]  Epoch:2(1/50) loss:0.184 lr:0.0000731 epoch_Time:13.0min:
[2022-06-22 10:16:32,952][<ipython-input-19-053217a93bf8>][INFO]  Epoch:2(50/50) loss:0.690 lr:0.0003000 epoch_Time:0.0min:
[2022-06-22 10:21:49,910][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.2111 valAcc: 0.9213
[2022-06-22 10:21:49,913][<ipython-input-19-053217a93bf8>][INFO] Epoch:[2/21] train_acc=0.876 
[2022-06-22 10:21:50,102][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 2 best acc: 0.9213197969543148
[2022-06-22 10:21:50,236][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.0003
[2022-06-22 10:21:50,239][<ipython-input-19-053217a93bf8>][INFO] Epoch 3/21
[2022-06-22 10:21:50,240][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 10:22:07,693][<ipython-input-19-053217a93bf8>][INFO]  Epoch:3(1/50) loss:0.174 lr:0.0003000 epoch_Time:14.0min:
[2022-06-22 10:35:36,866][<ipython-input-19-053217a93bf8>][INFO]  Epoch:3(50/50) loss:1.119 lr:0.0002800 epoch_Time:0.0min:
[2022-06-22 10:40:37,180][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.1973 valAcc: 0.9289
[2022-06-22 10:40:37,182][<ipython-input-19-053217a93bf8>][INFO] Epoch:[3/21] train_acc=0.880 
[2022-06-22 10:40:37,294][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 3 best acc: 0.9289340101522843
[2022-06-22 10:40:37,297][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.0002799707978657736
[2022-06-22 10:40:37,297][<ipython-input-19-053217a93bf8>][INFO] Epoch 4/21
[2022-06-22 10:40:37,298][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 10:40:54,183][<ipython-input-19-053217a93bf8>][INFO]  Epoch:4(1/50) loss:0.119 lr:0.0002792 epoch_Time:14.0min:
[2022-06-22 10:55:03,644][<ipython-input-19-053217a93bf8>][INFO]  Epoch:4(50/50) loss:0.062 lr:0.0002252 epoch_Time:0.0min:
[2022-06-22 11:00:04,649][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.1006 valAcc: 0.9670
[2022-06-22 11:00:04,651][<ipython-input-19-053217a93bf8>][INFO] Epoch:[4/21] train_acc=0.882 
[2022-06-22 11:00:04,764][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 4 best acc: 0.9670050761421319
[2022-06-22 11:00:04,878][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00022524999999999995
[2022-06-22 11:00:04,880][<ipython-input-19-053217a93bf8>][INFO] Epoch 5/21
[2022-06-22 11:00:04,880][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 11:00:22,965][<ipython-input-19-053217a93bf8>][INFO]  Epoch:5(1/50) loss:0.102 lr:0.0002239 epoch_Time:15.0min:
[2022-06-22 11:14:06,194][<ipython-input-19-053217a93bf8>][INFO]  Epoch:5(50/50) loss:0.254 lr:0.0001505 epoch_Time:0.0min:
[2022-06-22 11:19:23,827][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0535 valAcc: 0.9797
[2022-06-22 11:19:23,830][<ipython-input-19-053217a93bf8>][INFO] Epoch:[5/21] train_acc=0.939 
[2022-06-22 11:19:23,945][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 5 best acc: 0.9796954314720813
[2022-06-22 11:19:23,947][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00015049999999999997
[2022-06-22 11:19:23,948][<ipython-input-19-053217a93bf8>][INFO] Epoch 6/21
[2022-06-22 11:19:23,949][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 11:19:41,990][<ipython-input-19-053217a93bf8>][INFO]  Epoch:6(1/50) loss:0.039 lr:0.0001489 epoch_Time:15.0min:
[2022-06-22 11:33:14,178][<ipython-input-19-053217a93bf8>][INFO]  Epoch:6(50/50) loss:0.255 lr:0.0000758 epoch_Time:0.0min:
[2022-06-22 11:38:14,501][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0163 valAcc: 0.9962
[2022-06-22 11:38:14,503][<ipython-input-19-053217a93bf8>][INFO] Epoch:[6/21] train_acc=0.950 
[2022-06-22 11:38:14,617][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 6 best acc: 0.9961928934010152
[2022-06-22 11:38:14,726][<ipython-input-19-053217a93bf8>][INFO] learning rate:7.575000000000001e-05
[2022-06-22 11:38:14,728][<ipython-input-19-053217a93bf8>][INFO] Epoch 7/21
[2022-06-22 11:38:14,729][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 11:38:32,129][<ipython-input-19-053217a93bf8>][INFO]  Epoch:7(1/50) loss:0.034 lr:0.0000744 epoch_Time:14.0min:
[2022-06-22 11:55:55,143][<ipython-input-19-053217a93bf8>][INFO]  Epoch:7(50/50) loss:0.082 lr:0.0000210 epoch_Time:0.0min:
[2022-06-22 12:01:22,878][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0177 valAcc: 0.9949
[2022-06-22 12:01:22,880][<ipython-input-19-053217a93bf8>][INFO] Epoch:[7/21] train_acc=0.961 
[2022-06-22 12:01:22,881][<ipython-input-19-053217a93bf8>][INFO] learning rate:2.102920213422641e-05
[2022-06-22 12:01:22,882][<ipython-input-19-053217a93bf8>][INFO] Epoch 8/21
[2022-06-22 12:01:22,883][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 12:01:39,520][<ipython-input-19-053217a93bf8>][INFO]  Epoch:8(1/50) loss:0.281 lr:0.0000203 epoch_Time:13.0min:
[2022-06-22 12:15:35,729][<ipython-input-19-053217a93bf8>][INFO]  Epoch:8(50/50) loss:0.022 lr:0.0003000 epoch_Time:0.0min:
[2022-06-22 12:20:37,520][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0131 valAcc: 0.9975
[2022-06-22 12:20:37,522][<ipython-input-19-053217a93bf8>][INFO] Epoch:[8/21] train_acc=0.961 
[2022-06-22 12:20:37,638][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 8 best acc: 0.9974619289340102
[2022-06-22 12:20:37,755][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.0003
[2022-06-22 12:20:37,757][<ipython-input-19-053217a93bf8>][INFO] Epoch 9/21
[2022-06-22 12:20:37,759][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 12:20:54,505][<ipython-input-19-053217a93bf8>][INFO]  Epoch:9(1/50) loss:0.054 lr:0.0003000 epoch_Time:13.0min:
[2022-06-22 12:34:04,171][<ipython-input-19-053217a93bf8>][INFO]  Epoch:9(50/50) loss:0.023 lr:0.0002949 epoch_Time:0.0min:
[2022-06-22 12:38:59,509][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0771 valAcc: 0.9721
[2022-06-22 12:38:59,511][<ipython-input-19-053217a93bf8>][INFO] Epoch:[9/21] train_acc=0.929 
[2022-06-22 12:38:59,512][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00029490591103021566
[2022-06-22 12:38:59,513][<ipython-input-19-053217a93bf8>][INFO] Epoch 10/21
[2022-06-22 12:38:59,513][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 12:39:15,934][<ipython-input-19-053217a93bf8>][INFO]  Epoch:10(1/50) loss:0.060 lr:0.0002947 epoch_Time:13.0min:
[2022-06-22 12:52:51,365][<ipython-input-19-053217a93bf8>][INFO]  Epoch:10(50/50) loss:1.050 lr:0.0002800 epoch_Time:0.0min:
[2022-06-22 12:57:46,097][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.2310 valAcc: 0.9099
[2022-06-22 12:57:46,099][<ipython-input-19-053217a93bf8>][INFO] Epoch:[10/21] train_acc=0.907 
[2022-06-22 12:57:46,211][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.0002799707978657736
[2022-06-22 12:57:46,212][<ipython-input-19-053217a93bf8>][INFO] Epoch 11/21
[2022-06-22 12:57:46,213][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 12:58:02,883][<ipython-input-19-053217a93bf8>][INFO]  Epoch:11(1/50) loss:0.042 lr:0.0002796 epoch_Time:13.0min:
[2022-06-22 13:11:09,037][<ipython-input-19-053217a93bf8>][INFO]  Epoch:11(50/50) loss:0.203 lr:0.0002562 epoch_Time:0.0min:
[2022-06-22 13:16:05,250][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0222 valAcc: 0.9949
[2022-06-22 13:16:05,253][<ipython-input-19-053217a93bf8>][INFO] Epoch:[11/21] train_acc=0.926 
[2022-06-22 13:16:05,254][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00025621246378738883
[2022-06-22 13:16:05,254][<ipython-input-19-053217a93bf8>][INFO] Epoch 12/21
[2022-06-22 13:16:05,255][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 13:16:22,069][<ipython-input-19-053217a93bf8>][INFO]  Epoch:12(1/50) loss:0.132 lr:0.0002557 epoch_Time:14.0min:
[2022-06-22 13:29:32,568][<ipython-input-19-053217a93bf8>][INFO]  Epoch:12(50/50) loss:0.004 lr:0.0002252 epoch_Time:0.0min:
[2022-06-22 13:34:24,829][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0128 valAcc: 0.9962
[2022-06-22 13:34:24,831][<ipython-input-19-053217a93bf8>][INFO] Epoch:[12/21] train_acc=0.969 
[2022-06-22 13:34:24,943][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00022524999999999995
[2022-06-22 13:34:24,944][<ipython-input-19-053217a93bf8>][INFO] Epoch 13/21
[2022-06-22 13:34:24,945][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 13:34:42,607][<ipython-input-19-053217a93bf8>][INFO]  Epoch:13(1/50) loss:0.197 lr:0.0002246 epoch_Time:14.0min:
[2022-06-22 13:48:23,127][<ipython-input-19-053217a93bf8>][INFO]  Epoch:13(50/50) loss:0.251 lr:0.0001892 epoch_Time:0.0min:
[2022-06-22 13:53:28,529][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0666 valAcc: 0.9708
[2022-06-22 13:53:28,531][<ipython-input-19-053217a93bf8>][INFO] Epoch:[13/21] train_acc=0.958 
[2022-06-22 13:53:28,533][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00018919344724282683
[2022-06-22 13:53:28,533][<ipython-input-19-053217a93bf8>][INFO] Epoch 14/21
[2022-06-22 13:53:28,534][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 13:53:44,503][<ipython-input-19-053217a93bf8>][INFO]  Epoch:14(1/50) loss:0.046 lr:0.0001884 epoch_Time:13.0min:
[2022-06-22 14:07:50,858][<ipython-input-19-053217a93bf8>][INFO]  Epoch:14(50/50) loss:0.009 lr:0.0001505 epoch_Time:0.0min:
[2022-06-22 14:12:55,910][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0064 valAcc: 0.9987
[2022-06-22 14:12:55,913][<ipython-input-19-053217a93bf8>][INFO] Epoch:[14/21] train_acc=0.963 
[2022-06-22 14:12:56,027][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 14 best acc: 0.998730964467005
[2022-06-22 14:12:56,136][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00015049999999999997
[2022-06-22 14:12:56,138][<ipython-input-19-053217a93bf8>][INFO] Epoch 15/21
[2022-06-22 14:12:56,139][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 14:13:13,328][<ipython-input-19-053217a93bf8>][INFO]  Epoch:15(1/50) loss:0.267 lr:0.0001497 epoch_Time:14.0min:
[2022-06-22 14:26:47,126][<ipython-input-19-053217a93bf8>][INFO]  Epoch:15(50/50) loss:0.638 lr:0.0001118 epoch_Time:0.0min:
[2022-06-22 14:31:46,065][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0049 valAcc: 1.0000
[2022-06-22 14:31:46,067][<ipython-input-19-053217a93bf8>][INFO] Epoch:[15/21] train_acc=0.960 
[2022-06-22 14:31:46,192][<ipython-input-19-053217a93bf8>][INFO] save best epoch: 15 best acc: 1.0
[2022-06-22 14:31:46,194][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.00011180655275717314
[2022-06-22 14:31:46,194][<ipython-input-19-053217a93bf8>][INFO] Epoch 16/21
[2022-06-22 14:31:46,195][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 14:32:02,554][<ipython-input-19-053217a93bf8>][INFO]  Epoch:16(1/50) loss:0.148 lr:0.0001111 epoch_Time:13.0min:
[2022-06-22 14:45:28,550][<ipython-input-19-053217a93bf8>][INFO]  Epoch:16(50/50) loss:0.005 lr:0.0000758 epoch_Time:0.0min:
[2022-06-22 14:50:29,942][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0059 valAcc: 0.9987
[2022-06-22 14:50:29,944][<ipython-input-19-053217a93bf8>][INFO] Epoch:[16/21] train_acc=0.965 
[2022-06-22 14:50:30,055][<ipython-input-19-053217a93bf8>][INFO] learning rate:7.575000000000001e-05
[2022-06-22 14:50:30,058][<ipython-input-19-053217a93bf8>][INFO] Epoch 17/21
[2022-06-22 14:50:30,059][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 14:50:46,806][<ipython-input-19-053217a93bf8>][INFO]  Epoch:17(1/50) loss:0.002 lr:0.0000751 epoch_Time:13.0min:
[2022-06-22 15:04:14,634][<ipython-input-19-053217a93bf8>][INFO]  Epoch:17(50/50) loss:0.153 lr:0.0000448 epoch_Time:0.0min:
[2022-06-22 15:09:16,299][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0016 valAcc: 1.0000
[2022-06-22 15:09:16,301][<ipython-input-19-053217a93bf8>][INFO] Epoch:[17/21] train_acc=0.978 
[2022-06-22 15:09:16,302][<ipython-input-19-053217a93bf8>][INFO] learning rate:4.478753621261114e-05
[2022-06-22 15:09:16,303][<ipython-input-19-053217a93bf8>][INFO] Epoch 18/21
[2022-06-22 15:09:16,304][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 15:09:33,272][<ipython-input-19-053217a93bf8>][INFO]  Epoch:18(1/50) loss:0.002 lr:0.0000442 epoch_Time:14.0min:
[2022-06-22 15:22:54,400][<ipython-input-19-053217a93bf8>][INFO]  Epoch:18(50/50) loss:2.682 lr:0.0000210 epoch_Time:0.0min:
[2022-06-22 15:27:55,470][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0042 valAcc: 0.9987
[2022-06-22 15:27:55,473][<ipython-input-19-053217a93bf8>][INFO] Epoch:[18/21] train_acc=0.973 
[2022-06-22 15:27:55,582][<ipython-input-19-053217a93bf8>][INFO] learning rate:2.102920213422641e-05
[2022-06-22 15:27:55,584][<ipython-input-19-053217a93bf8>][INFO] Epoch 19/21
[2022-06-22 15:27:55,585][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 15:28:17,020][<ipython-input-19-053217a93bf8>][INFO]  Epoch:19(1/50) loss:0.822 lr:0.0000206 epoch_Time:17.0min:
[2022-06-22 15:41:29,662][<ipython-input-19-053217a93bf8>][INFO]  Epoch:19(50/50) loss:0.152 lr:0.0000061 epoch_Time:0.0min:
[2022-06-22 15:46:25,136][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0024 valAcc: 0.9987
[2022-06-22 15:46:25,138][<ipython-input-19-053217a93bf8>][INFO] Epoch:[19/21] train_acc=0.976 
[2022-06-22 15:46:25,140][<ipython-input-19-053217a93bf8>][INFO] learning rate:6.0940889697843025e-06
[2022-06-22 15:46:25,142][<ipython-input-19-053217a93bf8>][INFO] Epoch 20/21
[2022-06-22 15:46:25,143][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 15:46:41,150][<ipython-input-19-053217a93bf8>][INFO]  Epoch:20(1/50) loss:0.029 lr:0.0000059 epoch_Time:13.0min:
[2022-06-22 15:59:41,963][<ipython-input-19-053217a93bf8>][INFO]  Epoch:20(50/50) loss:0.003 lr:0.0003000 epoch_Time:0.0min:
[2022-06-22 16:04:34,179][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0025 valAcc: 0.9987
[2022-06-22 16:04:34,181][<ipython-input-19-053217a93bf8>][INFO] Epoch:[20/21] train_acc=0.983 
[2022-06-22 16:04:34,291][<ipython-input-19-053217a93bf8>][INFO] learning rate:0.0003
[2022-06-22 16:04:34,294][<ipython-input-19-053217a93bf8>][INFO] Epoch 21/21
[2022-06-22 16:04:34,295][<ipython-input-19-053217a93bf8>][INFO] ----------
[2022-06-22 16:04:50,633][<ipython-input-19-053217a93bf8>][INFO]  Epoch:21(1/50) loss:0.002 lr:0.0003000 epoch_Time:13.0min:
[2022-06-22 16:17:55,972][<ipython-input-19-053217a93bf8>][INFO]  Epoch:21(50/50) loss:0.148 lr:0.0002987 epoch_Time:0.0min:
[2022-06-22 16:22:50,241][<ipython-input-19-053217a93bf8>][INFO] valLoss: 0.0042 valAcc: 0.9987
[2022-06-22 16:22:50,244][<ipython-input-19-053217a93bf8>][INFO] Epoch:[21/21] train_acc=0.969 
[2022-06-22 16:22:50,245][<ipython-input-19-053217a93bf8>][INFO] Best acc: 1.000 Best epoch:15
[2022-06-22 16:22:50,245][<ipython-input-19-053217a93bf8>][INFO] Training complete in 402m 31s
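Note the learning-rate pattern in the log: the rate decays and then jumps back to 3e-4 at the ends of epochs 2, 8 and 20. That comes from CosineAnnealingWarmRestarts(T_0=3, T_mult=2) being stepped with a fractional epoch each iteration; the Config strings 'adam' and 'exp' are only logged, while the cell actually builds AdamW and the warm-restart scheduler. A standalone sketch (dummy parameter, hypothetical, not part of the notebook) that reproduces the restart schedule:

import torch

# Dummy parameter/optimizer just to visualise the schedule used above.
param = torch.nn.Parameter(torch.zeros(1))
opt_demo = torch.optim.AdamW([param], lr=3e-4)
sched = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
    opt_demo, T_0=3, T_mult=2, eta_min=1e-6)

iters_per_epoch = 50
for epoch in range(1, 22):                      # same 21 epochs as the training run
    for it in range(1, iters_per_epoch + 1):
        opt_demo.step()
        # the training loop above passes a fractional epoch to step()
        sched.step(epoch + it / iters_per_epoch)
    print(f"epoch {epoch:2d}  lr at end of epoch: {opt_demo.param_groups[0]['lr']:.7f}")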
In [20]:
submit=pd.read_csv(r'C:/Users/test/Desktop/logo识别练习赛/data/提交示例文件.csv',header=None)
submit.columns=['name']
model  = make_model('{}'.format('xception'), num_classes=2,
                        pretrained=False)
net_weight='./ckpt/xception/xception_20.pth'
model.load_state_dict(torch.load(net_weight))
model = model.to(device)
model.eval()
#
infer_transforms=T.Compose([
                T.Resize((opt.input_size,opt.input_size)),
                T.ToTensor(),
                T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
            ])
result=[]
test_dir='C:/Users/test/Desktop/logo识别练习赛/data/test/'
for name in submit['name'].values:
    img_path=os.path.join(test_dir,name)
    data = Image.open(img_path)
    data = data.convert('RGB')
    data = infer_transforms(data)
    data=data.unsqueeze(0)
    inputs= data.to(device)
    with torch.no_grad():
        outputs = model(inputs)
    _, preds = torch.max(outputs.data, 1)
    result.append(preds.cpu().data.numpy()[0])
    #
submit['label']=result
submit.to_csv(r'C:/Users/test/Desktop/submit2.csv',index=False,header=None)
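The inference cell loads the epoch-20 weights (xception_20.pth), while train_model() also saved a best-on-validation checkpoint, written as xception_best.pth at epoch 15 (keeping in mind that "validation" here reused training images). To submit with that checkpoint instead, only the weight path changes:

# Alternative: load the best-on-validation checkpoint saved by train_model()
net_weight = './ckpt/xception/xception_best.pth'
model.load_state_dict(torch.load(net_weight, map_location=device))
model = model.to(device)
model.eval()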