diff --git a/models/intel_optimized_models/int8/inceptionv3_int8.prototxt b/models/intel_optimized_models/int8/inceptionv3_int8.prototxt index 70a52b698..40a810628 100644 --- a/models/intel_optimized_models/int8/inceptionv3_int8.prototxt +++ b/models/intel_optimized_models/int8/inceptionv3_int8.prototxt @@ -1,3 +1,4 @@ +# For INT8 reference name: "InceptionV3" layer { name: "data" @@ -90,7 +91,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 75.3399963379 - scale_out: 24.3700008392 + scale_out: 48.9399986267 scale_params: 125.419998169 } } @@ -134,7 +135,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 48.9399986267 - scale_out: 33.9199981689 + scale_out: 68.1200027466 scale_params: 238.300003052 } } @@ -190,7 +191,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 68.1200027466 - scale_out: 37.0099983215 + scale_out: 74.3199996948 scale_params: 115.459999084 } } @@ -234,7 +235,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 74.3199996948 - scale_out: 61.7999992371 + scale_out: 124.080001831 scale_params: 490.480010986 } } @@ -290,7 +291,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 124.080001831 - scale_out: 62.7400016785 + scale_out: 125.970001221 scale_params: 239.179992676 } } @@ -334,7 +335,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 124.080001831 - scale_out: 79.2399978638 + scale_out: 159.11000061 scale_params: 303.589996338 } } @@ -378,7 +379,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 159.11000061 - scale_out: 69.2699966431 + scale_out: 139.080001831 scale_params: 495.859985352 } } @@ -422,7 +423,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 124.080001831 - scale_out: 72.8399963379 + scale_out: 146.25 scale_params: 215.38999939 } } @@ -466,7 +467,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 146.25 - scale_out: 85.2699966431 + scale_out: 171.210006714 scale_params: 322.309997559 } } @@ -510,7 +511,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 171.210006714 - scale_out: 57.8400001526 + scale_out: 116.150001526 scale_params: 303.209991455 } } @@ -566,7 +567,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 164.720001221 - scale_out: 87.5400009155 + scale_out: 175.770004272 scale_params: 160.130004883 } } @@ -621,8 +622,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 57.8400001526 - scale_out: 63.8800010681 + scale_in: 116.150001526 + scale_out: 128.270004272 scale_params: 229.300003052 } } @@ -665,8 +666,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 57.8400001526 - scale_out: 48.7599983215 + scale_in: 116.150001526 + scale_out: 97.9100036621 scale_params: 171.490005493 } } @@ -710,7 +711,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 97.9100036621 - scale_out: 54.2099990845 + scale_out: 108.839996338 scale_params: 516.630004883 } } @@ -753,8 +754,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 57.8400001526 - scale_out: 77.5999984741 + scale_in: 116.150001526 + scale_out: 155.809997559 scale_params: 194.460006714 } } @@ -798,7 +799,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 155.809997559 - scale_out: 100.75 + scale_out: 202.300003052 scale_params: 243.229995728 } } @@ -842,7 +843,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 202.300003052 - scale_out: 52.8199996948 + scale_out: 106.050003052 scale_params: 90.2200012207 } } @@ -897,8 +898,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 80.2900009155 - scale_out: 100.050003052 + scale_in: 161.210006714 + scale_out: 200.88999939 scale_params: 146.809997559 } } @@ -953,8 +954,8 @@ 
layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 52.8199996948 - scale_out: 78.9400024414 + scale_in: 106.050003052 + scale_out: 158.509994507 scale_params: 251.970001221 } } @@ -997,8 +998,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 52.8199996948 - scale_out: 50.7700004578 + scale_in: 106.050003052 + scale_out: 101.940002441 scale_params: 211.789993286 } } @@ -1042,7 +1043,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 101.940002441 - scale_out: 62.0 + scale_out: 124.5 scale_params: 537.25 } } @@ -1085,8 +1086,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 52.8199996948 - scale_out: 80.1200027466 + scale_in: 106.050003052 + scale_out: 160.86000061 scale_params: 225.440002441 } } @@ -1130,7 +1131,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 160.86000061 - scale_out: 62.7799987793 + scale_out: 126.050003052 scale_params: 141.570007324 } } @@ -1174,7 +1175,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 126.050003052 - scale_out: 65.6800003052 + scale_out: 131.869995117 scale_params: 423.859985352 } } @@ -1229,8 +1230,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 85.0 - scale_out: 95.9300003052 + scale_in: 170.679992676 + scale_out: 192.61000061 scale_params: 127.099998474 } } @@ -1285,8 +1286,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 62.0 - scale_out: 76.8199996948 + scale_in: 124.5 + scale_out: 154.25 scale_params: 251.36000061 } } @@ -1329,8 +1330,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 62.0 - scale_out: 96.2900009155 + scale_in: 124.5 + scale_out: 193.350006104 scale_params: 278.760009766 } } @@ -1374,7 +1375,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 193.350006104 - scale_out: 132.729995728 + scale_out: 266.5 scale_params: 457.459991455 } } @@ -1418,7 +1419,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 266.5 - scale_out: 58.7700004578 + scale_out: 118.0 scale_params: 270.910003662 } } @@ -1484,8 +1485,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 58.7700004578 - scale_out: 75.8399963379 + scale_in: 118.0 + scale_out: 152.289993286 scale_params: 153.630004883 } } @@ -1528,8 +1529,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 58.7700004578 - scale_out: 55.1300010681 + scale_in: 118.0 + scale_out: 110.699996948 scale_params: 182.440002441 } } @@ -1575,7 +1576,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 110.699996948 - scale_out: 48.0499992371 + scale_out: 96.4899978638 scale_params: 157.910003662 } } @@ -1621,7 +1622,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 96.4899978638 - scale_out: 62.3899993896 + scale_out: 125.279998779 scale_params: 241.160003662 } } @@ -1664,8 +1665,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 58.7700004578 - scale_out: 145.690002441 + scale_in: 118.0 + scale_out: 292.529998779 scale_params: 284.149993896 } } @@ -1711,7 +1712,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 292.529998779 - scale_out: 125.720001221 + scale_out: 252.419998169 scale_params: 290.660003662 } } @@ -1757,7 +1758,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 252.419998169 - scale_out: 69.25 + scale_out: 139.050003052 scale_params: 173.460006714 } } @@ -1803,7 +1804,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 139.050003052 - scale_out: 79.2600021362 + scale_out: 159.149993896 scale_params: 185.009994507 } } @@ -1849,7 +1850,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 159.149993896 - scale_out: 76.5999984741 + 
scale_out: 153.809997559 scale_params: 206.669998169 } } @@ -1904,8 +1905,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 100.790000916 - scale_out: 93.4000015259 + scale_in: 202.369995117 + scale_out: 187.529998779 scale_params: 116.709999084 } } @@ -1960,8 +1961,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 62.3899993896 - scale_out: 69.3000030518 + scale_in: 125.279998779 + scale_out: 139.149993896 scale_params: 192.5 } } @@ -2004,8 +2005,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 62.3899993896 - scale_out: 54.7299995422 + scale_in: 125.279998779 + scale_out: 109.88999939 scale_params: 252.080001831 } } @@ -2051,7 +2052,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 109.88999939 - scale_out: 54.1800003052 + scale_out: 108.779998779 scale_params: 196.350006104 } } @@ -2097,7 +2098,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 108.779998779 - scale_out: 47.1699981689 + scale_out: 94.7200012207 scale_params: 218.880004883 } } @@ -2140,8 +2141,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 62.3899993896 - scale_out: 107.379997253 + scale_in: 125.279998779 + scale_out: 215.61000061 scale_params: 294.489990234 } } @@ -2187,7 +2188,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 215.61000061 - scale_out: 72.3499984741 + scale_out: 145.259994507 scale_params: 186.509994507 } } @@ -2233,7 +2234,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 145.259994507 - scale_out: 62.2599983215 + scale_out: 125.019996643 scale_params: 243.020004272 } } @@ -2279,7 +2280,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 125.019996643 - scale_out: 82.0299987793 + scale_out: 164.699996948 scale_params: 320.420013428 } } @@ -2325,7 +2326,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 164.699996948 - scale_out: 78.0699996948 + scale_out: 156.759994507 scale_params: 216.5 } } @@ -2380,8 +2381,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 101.949996948 - scale_out: 64.6999969482 + scale_in: 204.699996948 + scale_out: 129.899993896 scale_params: 149.190002441 } } @@ -2436,8 +2437,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 47.1699981689 - scale_out: 67.3399963379 + scale_in: 94.7200012207 + scale_out: 135.210006714 scale_params: 165.38999939 } } @@ -2480,8 +2481,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 47.1699981689 - scale_out: 72.6299972534 + scale_in: 94.7200012207 + scale_out: 145.830001831 scale_params: 194.589996338 } } @@ -2527,7 +2528,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 145.830001831 - scale_out: 71.2099990845 + scale_out: 142.979995728 scale_params: 200.419998169 } } @@ -2573,7 +2574,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 142.979995728 - scale_out: 71.7600021362 + scale_out: 144.089996338 scale_params: 248.820007324 } } @@ -2616,8 +2617,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 47.1699981689 - scale_out: 89.7200012207 + scale_in: 94.7200012207 + scale_out: 180.13999939 scale_params: 276.739990234 } } @@ -2663,7 +2664,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 180.13999939 - scale_out: 51.8300018311 + scale_out: 104.069999695 scale_params: 231.600006104 } } @@ -2709,7 +2710,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 104.069999695 - scale_out: 58.4500007629 + scale_out: 117.370002747 scale_params: 192.809997559 } } @@ -2755,7 +2756,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 117.370002747 - scale_out: 54.4000015259 + scale_out: 
109.230003357 scale_params: 210.199996948 } } @@ -2801,7 +2802,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 109.230003357 - scale_out: 86.8899993896 + scale_out: 174.460006714 scale_params: 312.559997559 } } @@ -2856,8 +2857,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 72.2799987793 - scale_out: 104.019996643 + scale_in: 145.119995117 + scale_out: 208.850006104 scale_params: 119.339996338 } } @@ -2912,8 +2913,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 67.3399963379 - scale_out: 67.4499969482 + scale_in: 135.210006714 + scale_out: 135.440002441 scale_params: 252.38999939 } } @@ -2956,8 +2957,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 67.3399963379 - scale_out: 64.8700027466 + scale_in: 135.210006714 + scale_out: 130.25 scale_params: 369.850006104 } } @@ -3003,7 +3004,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 130.25 - scale_out: 52.3899993896 + scale_out: 105.180000305 scale_params: 260.700012207 } } @@ -3049,7 +3050,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 105.180000305 - scale_out: 76.4199981689 + scale_out: 153.440002441 scale_params: 316.489990234 } } @@ -3092,8 +3093,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 67.3399963379 - scale_out: 80.0599975586 + scale_in: 135.210006714 + scale_out: 160.740005493 scale_params: 264.869995117 } } @@ -3139,7 +3140,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 160.740005493 - scale_out: 72.5599975586 + scale_out: 145.690002441 scale_params: 273.700012207 } } @@ -3185,7 +3186,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 145.690002441 - scale_out: 67.7699966431 + scale_out: 136.080001831 scale_params: 273.950012207 } } @@ -3231,7 +3232,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 136.080001831 - scale_out: 68.7900009155 + scale_out: 138.130004883 scale_params: 271.679992676 } } @@ -3277,7 +3278,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 138.130004883 - scale_out: 83.0400009155 + scale_out: 166.729995728 scale_params: 331.309997559 } } @@ -3332,8 +3333,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 116.220001221 - scale_out: 93.6600036621 + scale_in: 233.350006104 + scale_out: 188.050003052 scale_params: 129.759994507 } } @@ -3388,8 +3389,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 67.4499969482 - scale_out: 75.5800018311 + scale_in: 135.440002441 + scale_out: 151.75 scale_params: 245.309997559 } } @@ -3433,7 +3434,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 151.75 - scale_out: 40.4199981689 + scale_out: 81.1500015259 scale_params: 171.190002441 } } @@ -3476,8 +3477,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 67.4499969482 - scale_out: 73.7399978638 + scale_in: 135.440002441 + scale_out: 148.070007324 scale_params: 135.559997559 } } @@ -3523,7 +3524,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 148.070007324 - scale_out: 57.2099990845 + scale_out: 114.86000061 scale_params: 123.540000916 } } @@ -3569,7 +3570,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 114.86000061 - scale_out: 96.2099990845 + scale_out: 193.179992676 scale_params: 310.769989014 } } @@ -3613,7 +3614,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 193.179992676 - scale_out: 67.4700012207 + scale_out: 135.479995728 scale_params: 313.369995117 } } @@ -3680,8 +3681,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 40.4199981689 - scale_out: 156.029998779 + scale_in: 81.1500015259 + scale_out: 313.279998779 scale_params: 
400.739990234 } } @@ -3724,8 +3725,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 40.4199981689 - scale_out: 87.5299987793 + scale_in: 81.1500015259 + scale_out: 175.759994507 scale_params: 289.25 } } @@ -3771,7 +3772,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 175.759994507 - scale_out: 103.349998474 + scale_out: 207.509994507 scale_params: 346.429992676 } } @@ -3817,7 +3818,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 175.759994507 - scale_out: 93.3399963379 + scale_out: 187.419998169 scale_params: 263.369995117 } } @@ -3860,8 +3861,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 40.4199981689 - scale_out: 70.1999969482 + scale_in: 81.1500015259 + scale_out: 140.960006714 scale_params: 241.759994507 } } @@ -3905,7 +3906,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 140.960006714 - scale_out: 63.2599983215 + scale_out: 127.019996643 scale_params: 514.510009766 } } @@ -3951,7 +3952,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 127.019996643 - scale_out: 98.9100036621 + scale_out: 198.600006104 scale_params: 570.369995117 } } @@ -3997,7 +3998,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 127.019996643 - scale_out: 99.2799987793 + scale_out: 199.339996338 scale_params: 365.929992676 } } @@ -4052,8 +4053,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 75.75 - scale_out: 153.800003052 + scale_in: 152.100006104 + scale_out: 308.799987793 scale_params: 119.88999939 } } @@ -4111,8 +4112,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 93.3399963379 - scale_out: 16.8299999237 + scale_in: 187.419998169 + scale_out: 33.7900009155 scale_params: 19.4200000763 } } @@ -4155,8 +4156,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 93.3399963379 - scale_out: 61.1500015259 + scale_in: 187.419998169 + scale_out: 122.779998779 scale_params: 105.769996643 } } @@ -4202,7 +4203,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 122.779998779 - scale_out: 9.39999961853 + scale_out: 18.8600006104 scale_params: 35.6800003052 } } @@ -4248,7 +4249,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 122.779998779 - scale_out: 9.22000026703 + scale_out: 18.5200004578 scale_params: 37.3199996948 } } @@ -4291,8 +4292,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 93.3399963379 - scale_out: 60.1800003052 + scale_in: 187.419998169 + scale_out: 120.839996338 scale_params: 92.4100036621 } } @@ -4336,7 +4337,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 120.839996338 - scale_out: 94.8700027466 + scale_out: 190.479995728 scale_params: 345.420013428 } } @@ -4382,7 +4383,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 190.479995728 - scale_out: 8.35000038147 + scale_out: 16.7700004578 scale_params: 44.3199996948 } } @@ -4428,7 +4429,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 190.479995728 - scale_out: 10.9600000381 + scale_out: 22.0100002289 scale_params: 44.2700004578 } } @@ -4483,8 +4484,8 @@ layer { bw_layer_in: 8 bw_layer_out: 8 bw_params: 8 - scale_in: 93.3399963379 - scale_out: 19.9799995422 + scale_in: 187.419998169 + scale_out: 40.1100006104 scale_params: 81.5699996948 } } @@ -4567,33 +4568,4 @@ layer { } } } -layer { - name: "loss" - type: "SoftmaxWithLoss" - bottom: "fc1" - bottom: "label" - top: "loss" -} -layer { - name: "acc/top-1" - type: "Accuracy" - bottom: "fc1" - bottom: "label" - top: "acc/top-1" - include { - phase: TEST - } -} -layer { - name: "acc/top-5" - type: "Accuracy" - bottom: "fc1" - bottom: "label" - top: "acc/top-5" - 
include { - phase: TEST - } - accuracy_param { - top_k: 5 - } -} + diff --git a/models/intel_optimized_models/int8/resnet50_int8.prototxt b/models/intel_optimized_models/int8/resnet50_int8.prototxt index c48020181..8d6aacd66 100644 --- a/models/intel_optimized_models/int8/resnet50_int8.prototxt +++ b/models/intel_optimized_models/int8/resnet50_int8.prototxt @@ -1,3 +1,4 @@ +# For INT8 reference name: "ResNet-50" layer { name: "data" @@ -6,10 +7,10 @@ layer { dummy_data_param { data_filler { type: "constant" - value: 0.0099999997764825821 + value: 0.01 } shape { - dim: 50 + dim: 64 dim: 3 dim: 224 dim: 224 @@ -25,7 +26,7 @@ layer { type: "constant" } shape { - dim: 50 + dim: 64 } } } @@ -263,7 +264,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 19.9799995422 - scale_out: 5.73999977112 + scale_out: 11.5200004578 scale_params: 72.1699981689 } } @@ -434,7 +435,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 19.5799999237 - scale_out: 5.73999977112 + scale_out: 11.5200004578 scale_params: 88.4599990845 } } @@ -605,7 +606,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 13.5799999237 - scale_out: 5.73999977112 + scale_out: 11.5200004578 scale_params: 103.910003662 } } @@ -822,7 +823,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 25.5400009155 - scale_out: 4.78000020981 + scale_out: 9.59000015259 scale_params: 65.7200012207 } } @@ -993,7 +994,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 20.4899997711 - scale_out: 4.78000020981 + scale_out: 9.59000015259 scale_params: 55.6899986267 } } @@ -1164,7 +1165,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 3.74000000954 - scale_out: 4.78000020981 + scale_out: 9.59000015259 scale_params: 66.2300033569 } } @@ -1335,7 +1336,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 15.0600004196 - scale_out: 4.78000020981 + scale_out: 9.59000015259 scale_params: 113.980003357 } } @@ -1552,7 +1553,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 15.3699998856 - scale_out: 3.70000004768 + scale_out: 7.42999982834 scale_params: 75.6200027466 } } @@ -1723,7 +1724,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 6.44000005722 - scale_out: 3.70000004768 + scale_out: 7.42999982834 scale_params: 67.4400024414 } } @@ -1894,7 +1895,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 6.94999980927 - scale_out: 3.70000004768 + scale_out: 7.42999982834 scale_params: 76.9700012207 } } @@ -2065,7 +2066,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 5.40000009537 - scale_out: 3.70000004768 + scale_out: 7.42999982834 scale_params: 77.1500015259 } } @@ -2236,7 +2237,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 6.51999998093 - scale_out: 3.70000004768 + scale_out: 7.42999982834 scale_params: 98.8199996948 } } @@ -2407,7 +2408,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 5.01999998093 - scale_out: 3.70000004768 + scale_out: 7.42999982834 scale_params: 93.3000030518 } } @@ -2624,7 +2625,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 25.2000007629 - scale_out: 0.939999997616 + scale_out: 1.87999999523 scale_params: 27.1299991608 } } @@ -2795,7 +2796,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 29.2800006866 - scale_out: 0.939999997616 + scale_out: 1.87999999523 scale_params: 35.3400001526 } } @@ -2966,7 +2967,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 14.7100000381 - scale_out: 0.939999997616 + scale_out: 1.87999999523 scale_params: 23.2399997711 } } @@ -3031,26 +3032,4 @@ layer { } } } -layer { - name: "accuracy/top-1" - type: "Accuracy" - bottom: "fc1000" - bottom: "label" - top: "accuracy-top1" - include { - 
phase: TEST - } -} -layer { - name: "accuracy/top-5" - type: "Accuracy" - bottom: "fc1000" - bottom: "label" - top: "accuracy-top5" - include { - phase: TEST - } - accuracy_param { - top_k: 5 - } -} + diff --git a/models/intel_optimized_models/int8/resnet50_sparse_int8.prototxt b/models/intel_optimized_models/int8/resnet50_sparse_int8.prototxt index 402b24a63..22eb796ff 100644 --- a/models/intel_optimized_models/int8/resnet50_sparse_int8.prototxt +++ b/models/intel_optimized_models/int8/resnet50_sparse_int8.prototxt @@ -1,3 +1,4 @@ +# For INT8 reference name: "ResNet-50" layer { name: "data" @@ -263,7 +264,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 19.9799995422 - scale_out: 6.03000020981 + scale_out: 12.1099996567 scale_params: 72.1699981689 } } @@ -434,7 +435,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 19.5799999237 - scale_out: 6.03000020981 + scale_out: 12.1099996567 scale_params: 88.4599990845 } } @@ -616,7 +617,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 16.9200000763 - scale_out: 5.73999977112 + scale_out: 11.5200004578 scale_params: 103.910003662 } } @@ -833,7 +834,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 25.5400009155 - scale_out: 4.65000009537 + scale_out: 9.32999992371 scale_params: 65.7200012207 } } @@ -1004,7 +1005,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 20.4899997711 - scale_out: 4.65000009537 + scale_out: 9.32999992371 scale_params: 55.6899986267 } } @@ -1175,7 +1176,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 3.70000004768 - scale_out: 4.65000009537 + scale_out: 9.32999992371 scale_params: 66.2300033569 } } @@ -1357,7 +1358,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 14.6999998093 - scale_out: 4.88999986649 + scale_out: 9.82999992371 scale_params: 113.980003357 } } @@ -1574,7 +1575,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 14.8900003433 - scale_out: 4.28000020981 + scale_out: 8.60000038147 scale_params: 75.6200027466 } } @@ -1745,7 +1746,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 6.30999994278 - scale_out: 4.28000020981 + scale_out: 8.60000038147 scale_params: 67.4400024414 } } @@ -1916,7 +1917,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 6.90999984741 - scale_out: 4.28000020981 + scale_out: 8.60000038147 scale_params: 76.9700012207 } } @@ -2087,7 +2088,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 5.40000009537 - scale_out: 4.28000020981 + scale_out: 8.60000038147 scale_params: 77.1500015259 } } @@ -2258,7 +2259,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 6.51999998093 - scale_out: 4.28000020981 + scale_out: 8.60000038147 scale_params: 98.8199996948 } } @@ -2440,7 +2441,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 16.6200008392 - scale_out: 4.23000001907 + scale_out: 8.5 scale_params: 93.3000030518 } } @@ -2657,7 +2658,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 25.2000007629 - scale_out: 0.939999997616 + scale_out: 1.87999999523 scale_params: 27.1299991608 } } @@ -2828,7 +2829,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 26.6200008392 - scale_out: 0.939999997616 + scale_out: 1.87999999523 scale_params: 35.3400001526 } } @@ -2999,7 +3000,7 @@ layer { bw_layer_out: 8 bw_params: 8 scale_in: 14.7100000381 - scale_out: 0.939999997616 + scale_out: 1.87999999523 scale_params: 23.2399997711 } } @@ -3064,36 +3065,4 @@ layer { } } } -layer { - name: "prob" - type: "SoftmaxWithLoss" - bottom: "fc1000" - bottom: "label" - top: "prob" - include { - phase: TRAIN - } -} -layer { - name: "accuracy/top-1" - type: "Accuracy" - bottom: "fc1000" - bottom: "label" - 
top: "accuracy-top1" - include { - phase: TEST - } -} -layer { - name: "accuracy/top-5" - type: "Accuracy" - bottom: "fc1000" - bottom: "label" - top: "accuracy-top5" - include { - phase: TEST - } - accuracy_param { - top_k: 5 - } -} + diff --git a/models/intel_optimized_models/int8/ssd_int8.prototxt b/models/intel_optimized_models/int8/ssd_int8.prototxt index 26e1a28dc..4a0ed0332 100644 --- a/models/intel_optimized_models/int8/ssd_int8.prototxt +++ b/models/intel_optimized_models/int8/ssd_int8.prototxt @@ -1,3 +1,4 @@ +# For INT8 reference name: "VGG_VOC0712_SSD_300x300_test" layer { name: "data" @@ -1946,33 +1947,9 @@ layer { nms_threshold: 0.449999988079 top_k: 400 } - save_output_param { - output_directory: "data/ssd_out/VOC2007/SSD_300x300" - output_name_prefix: "comp4_det_test_" - output_format: "VOC" - label_map_file: "data/VOC0712/labelmap_voc.prototxt" - name_size_file: "data/VOC0712/test_name_size.txt" - num_test_image: 4952 - } code_type: CENTER_SIZE keep_top_k: 200 confidence_threshold: 0.00999999977648 } } -layer { - name: "detection_eval" - type: "DetectionEvaluate" - bottom: "detection_out" - bottom: "label" - top: "detection_eval" - include { - phase: TEST - } - detection_evaluate_param { - num_classes: 21 - background_label_id: 0 - overlap_threshold: 0.5 - evaluate_difficult_gt: false - name_size_file: "data/VOC0712/test_name_size.txt" - } -} + diff --git a/scripts/calibrator.py b/scripts/calibrator.py index 7dcb7c54a..4bda3bb8d 100644 --- a/scripts/calibrator.py +++ b/scripts/calibrator.py @@ -64,23 +64,21 @@ def read_prototxt(prototxt): try: if not check_existence(prototxt): return None - net = caffe_pb2.NetParameter() - with open(prototxt) as f: txtf.Merge(f.read(), net) return net except Exception as e: - raise ("Failed to read {} due to {}".format(prototxt, str(e))) + print ("Failed to read {} due to {}".format(prototxt, e)) -def get_bottom_layers(layer_name, net, start): +def get_bottom_layers(top_name, net, start): bottom_layers = [] for index, value in enumerate(net.layer[start:]): for sub_index, sub_value in enumerate(value.bottom): - if sub_value == layer_name: + if sub_value == top_name: bottom_layers.append((index, value.name, value.type)) return bottom_layers @@ -123,9 +121,9 @@ def get_all_top_layers(l, net, end, skip_layers, interesting_layers): return all_top_layers -def get_all_bottom_layers(layer_name, net, start, skip_layers, interesting_layers): +def get_all_bottom_layers(top_name, net, start, skip_layers, interesting_layers): all_bottom_layers = [] - bottom_layers = get_bottom_layers(layer_name, net, start) + bottom_layers = get_bottom_layers(top_name, net, start) while True: if len(bottom_layers) == 0: break @@ -149,12 +147,48 @@ def get_all_bottom_layers(layer_name, net, start, skip_layers, interesting_layer return all_bottom_layers -def transform_convolutions(model_path): +def get_fusion_conv_names(compiled_model): + compiled_net = caffe_pb2.NetParameter() + with open(compiled_model) as f: + s = f.read() + txtf.Merge(s, compiled_net) + return [(layer.name, layer.bottom[1]) for _, layer in enumerate(compiled_net.layer) + if layer.type == 'Convolution' and len(layer.bottom) > 1] + + +def filter_fusion_layers(net, fusion_layer, conv_layer): + if not fusion_layer or not conv_layer: + return [] + interesting_layers = ['ReLU'] + skip_layers = ['Convolution', 'Eltwise', 'Concat'] + output_with_relu_layer = [(l.name, net.layer[index].top[0]) for l, index in conv_layer + if len(get_all_bottom_layers(net.layer[index].top[0], net, index + 1, + skip_layers, 
interesting_layers)) == 0] + output_without_dict = {v: k for (k, v) in output_with_relu_layer} + for layer_name, top_name in fusion_layer: + if top_name in output_without_dict.keys(): + del output_without_dict[top_name] + + return output_without_dict.values() + + +def check_relu_existence(net, start, end, exclude_layer): + if net.layer[start].type == 'Convolution' and net.layer[start].name in exclude_layer: + return False + + for i in net.layer[start + 1: end]: + if i.type == 'ReLU': + return True + return False + + +def transform_convolutions(model_path, compiled_model_path): net = caffe_pb2.NetParameter() with open(model_path) as f: s = f.read() txtf.Merge(s, net) + fusion_layer = get_fusion_conv_names(compiled_model_path) new_net = copy.deepcopy(net) convolution_layers = [(value, index) for index, value in enumerate(net.layer) if value.type == 'Convolution'] @@ -164,17 +198,18 @@ def transform_convolutions(model_path): u8_max = 255 s8_max = 127 - + u8_layers = filter_fusion_layers(net, fusion_layer, convolution_layers) for (l, index) in convolution_layers: - outputwith_relu = get_all_bottom_layers(l.name, net, index + 1, skip_layers, interesting_layers) + outputwith_relu = get_all_bottom_layers(net.layer[index].top[0], net, index + 1, skip_layers, + interesting_layers) + conv_relu_flag = check_relu_existence(net, index, + convolution_layers[convolution_layers.index((l, index)) + 1][1] + if (l, index) != convolution_layers[-1] + else len(net.layer), [i[0] for i in fusion_layer]) inputwith_relu = get_all_top_layers(l, net, index, skip_layers, interesting_layers) - # print "Processing", l.type, l.name - - # output_type = 'u8' if outputwith_relu else 's8' - # input_type = 'u8' if inputwith_relu else 's8' for si in range(0, len(new_net.layer[index].quantization_param.scale_out)): - if len(outputwith_relu) > 0: # u8 + if len(outputwith_relu) > 0 or l.name in u8_layers or conv_relu_flag: # u8 new_net.layer[index].quantization_param.scale_out[si] = round(u8_max / new_net.layer[index]. quantization_param.scale_out[si], 2) else: # s8 @@ -182,12 +217,12 @@ def transform_convolutions(model_path): quantization_param.scale_out[si], 2) for si in range(0, len(new_net.layer[index].quantization_param.scale_in)): - if len(inputwith_relu) > 0: # u8 + if len(inputwith_relu) > 0 or l.type == 'Convolution': # u8 new_net.layer[index].quantization_param.scale_in[si] = round(u8_max / new_net.layer[index]. quantization_param.scale_in[si], 2) - else: # s8 - new_net.layer[index].quantization_param.scale_in[si] = round(s8_max / new_net.layer[index]. - quantization_param.scale_in[si], 2) + else: + new_net.layer[index].ClearField('quantization_param') + continue for si in range(0, len(new_net.layer[index].quantization_param.scale_params)): new_net.layer[index].quantization_param.scale_params[si] = round(s8_max / new_net.layer[index]. 
@@ -198,39 +233,51 @@ def transform_convolutions(model_path): def generate_sample(sample_path, input_model, weights, - quantized_model, model_type, iterations=1, error_margin=1, power=0): + quantized_model, detection, iterations=1, error_margin=1, power=0): cmd = '{0} quantize -model {1} -weights {2} -model_quantized {3} -iterations {4} ' \ '-trimming_mode dynamic_fixed_point -error_margin {5} -power {6}'.format(sample_path, input_model, weights, quantized_model, iterations, error_margin, power) - if model_type == 3: + if detection: cmd += ' --detection=1' os.system(cmd) -def get_the_accuracy(caffe_bin, model_def, model_weights, iterations, model_type): +def get_compiled_net(caffe_bin, model_def, model_weights, detection): + output_log_name = '.compiled_net.txt' + + cmd = '{} test -model {} -weights {} -iterations 1'.format(caffe_bin, model_def, model_weights) + if detection: + cmd += ' -detection' + cmd += ' 2>&1 > {}'.format(output_log_name) + + os.system(cmd) + return os.path.abspath(output_log_name) + + +def get_the_accuracy(caffe_bin, model_def, model_weights, iterations, detection, blob_name): output_log_name = 'calibrator_log.txt' cmd = '{} test -model {} -weights {} -iterations {}'.format(caffe_bin, model_def, model_weights, iterations) - if model_type == 3: + if detection: cmd += ' -detection' cmd += ' 2>&1|tee {}'.format(output_log_name) + os.system(cmd) + with open(output_log_name) as f: data = f.readlines() - try: - if model_type == 1: - top_1 = data[-2].strip() - return float(top_1.split('=')[-1].strip()) - elif model_type == 2: - top_1 = data[-3].strip() - return float(top_1.split('=')[-1].strip()) - elif model_type == 3: - top_1 = data[-1].strip() - return float(top_1.split('=')[-1].strip()) - except Exception as e: - print 'Failed to generate accuracy due to {}'.format(str(e)) - sys.exit(-1) + + for i in data[::-1]: + if i.find('{} = '.format(blob_name)) != -1: + try: + return float(i.split('=')[-1].strip()) + except Exception as e: + print 'Failed to generate accuracy due to {}'.format(str(e)) + sys.exit(-1) + + print 'Failed to get accuracy, please check the parameters and rerun the scripts.' + sys.exit(-1) def remove_top_quantized_parameter(current_quantized_file): @@ -244,17 +291,33 @@ def remove_top_quantized_parameter(current_quantized_file): f.write(str(net)) -def tuning_quantized_topology(base_top1_accuracy, quantized_file, caffe_bin, model_weights, iterations, - enable_floating_point, toleration, model_type): - if enable_floating_point == 0: +def tuning_quantized_topology(base_top1_accuracy, prototxt, caffe_bin, model_weights, iterations, + is_floating_point, accuracy_loss, detection, blob_name): + if is_floating_point == 0: print 'Updating quantization parameter...' - transform_convolutions(quantized_file) - current_top1_accuracy = get_the_accuracy(caffe_bin, quantized_file, model_weights, iterations, model_type) - while abs(current_top1_accuracy - base_top1_accuracy) >= toleration: + + transform_convolutions(prototxt, get_compiled_net(caffe_bin, prototxt, model_weights, detection)) + + current_top1_accuracy = get_the_accuracy(caffe_bin, prototxt, model_weights, iterations, detection, blob_name) + + while abs(current_top1_accuracy - base_top1_accuracy) >= accuracy_loss: print 'Tuning... 
' print abs(current_top1_accuracy - base_top1_accuracy) - remove_top_quantized_parameter(quantized_file) - current_top1_accuracy = get_the_accuracy(caffe_bin, quantized_prototxt, model_weights, iterations, model_type) + remove_top_quantized_parameter(prototxt) + current_top1_accuracy = get_the_accuracy(caffe_bin, prototxt, model_weights, iterations, detection, blob_name) + + +def check_blob_name_existence(prototxt, blob_name): + net = read_prototxt(prototxt) + if not net.layer: + print 'Please check the model prototxt integrity.' + sys.exit(-1) + + for i in net.layer[::-1]: + for _, value in enumerate(i.top): + if value == blob_name: + return True + return False if __name__ == '__main__': @@ -265,36 +328,41 @@ def tuning_quantized_topology(base_top1_accuracy, quantized_file, caffe_bin, mod ' -w pre-trained-fp32 weights ' \ ' -m typology ' \ ' -i iterations ' \ - ' -t resnet|inceptionv3|ssd\n ' + ' -l acceptable accuracy loss value, the default value is 0.01 (stands for 1%%)' \ + ' -d 1 (0 means classification while 1 means detection, the default value is 0)' \ + ' -n blob name which stands for accuracy.\n ' parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-h', '--help', action='help', help=usage_string) parser.add_argument('-i', '--iterations', action='store', dest='iterations', default=10, - help='iterations') + help='the number of iterations needed to complete one epoch.') parser.add_argument('-w', '--weights', action='store', dest='weights', default='', - help='pre-trained-fp32-weights') + help='pre-trained-fp32-weights.') parser.add_argument('-m', '--model', action='store', dest='model', default='', - help='model') + help='topology definition prototxt.') parser.add_argument('-l', '--accuracy_loss', action='store', dest='loss', default=0.01, - help='accuracy-loss') + help='the acceptable accuracy loss caused by 8-bit quantization, ' + 'default value is 0.01 (1%%).') - parser.add_argument('-t', '--type', action='store', dest='input_model_type', default='', - help='model type') + parser.add_argument('-d', '--detection', action='store', dest='is_detection', default=0, + help='0 for classification while 1 for detection, default value is 0.') parser.add_argument('-r', '--root', action='store', dest='root', default='', help='caffe build path') - + + parser.add_argument('-n', '--blob_name', action='store', dest='blob_name', default='', + help='top blob name which stands for accuracy.') params = parser.parse_args() try: - iterations = int(params.iterations) + user_input_iterations = int(params.iterations) except: print 'Set the iterations to the default value 1000' - iterations = 1000 + user_input_iterations = 1000 try: toleration = float(params.loss) @@ -304,41 +372,41 @@ def tuning_quantized_topology(base_top1_accuracy, quantized_file, caffe_bin, mod print 'Set the toleration to 1%.' toleration = 0.01 + try: + detection_flag = 1 if int(params.is_detection) == 1 else 0 + except: + print 'Set the test type to classification.' + detection_flag = 0 + model = os.path.abspath(params.model) - weights = os.path.abspath(params.weights) + user_input_weights = os.path.abspath(params.weights) sample = os.path.abspath(params.root + 'tools/sample') - caffe_bin = os.path.abspath(params.root + 'tools/caffe') + caffe_bin_path = os.path.abspath(params.root + 'tools/caffe') setup_env() - if params.input_model_type == 'resnet': - model_type = 1 - elif params.input_model_type == 'inceptionv3': - model_type = 2 - elif params.input_model_type == 'ssd': - model_type = 3 - else: - print 'Invalid model type!'
+ if not check_existence(model) or not check_existence(user_input_weights) or not check_existence(sample) \ + or not check_existence(caffe_bin_path): + print 'Please check model/weights/sample existence.' sys.exit(-1) - if check_existence(model) is False or check_existence(weights) is False or check_existence(sample) is False or \ - check_existence(caffe_bin) is False: - print 'Please check model/weights/sample existence.' + target_blob_name = params.blob_name + if not target_blob_name or not check_blob_name_existence(model, target_blob_name): + print 'Please specify a valid blob name and rerun the script.' sys.exit(-1) sys.path.insert(0, params.root + '../python') quantized_prototxt = model.rsplit('.')[0] + '_quantized.prototxt' - quantized_weights = weights.rsplit('.')[0] + '_quantized.caffemodel' + quantized_weights = user_input_weights.rsplit('.')[0] + '_quantized.caffemodel' enable_floating_point = 0 print 'Sampling...' - generate_sample(sample, model, weights, - quantized_prototxt, model_type, 10, 100 * toleration, enable_floating_point) - + generate_sample(sample, model, user_input_weights, + quantized_prototxt, detection_flag, 10, 100 * toleration, enable_floating_point) print 'Sampling done' print 'Generating the FP32 accuracy...' - top_1 = get_the_accuracy(caffe_bin, model, weights, iterations, model_type) + top_1 = get_the_accuracy(caffe_bin_path, model, user_input_weights, user_input_iterations, detection_flag, + target_blob_name) print 'FP32 accuracy is: {}'.format(top_1) - - tuning_quantized_topology(top_1, quantized_prototxt, caffe_bin, weights, iterations, enable_floating_point, - toleration, model_type) + tuning_quantized_topology(top_1, quantized_prototxt, caffe_bin_path, user_input_weights, user_input_iterations, + enable_floating_point, toleration, detection_flag, target_blob_name) print 'Updated prototxt {} is generated.'.format(quantized_prototxt)
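
Note on the scale_out changes above: the output scales in these prototxt hunks roughly double (24.37 -> 48.94, 5.74 -> 11.52, and so on) because transform_convolutions() now divides the sampled activation range by the unsigned INT8 maximum (u8_max = 255) instead of the signed maximum (s8_max = 127) whenever the convolution output feeds a ReLU, and 255/127 ~= 2.008; the weight scales (scale_params) stay signed and are untouched. A minimal sketch of that arithmetic, with an assumed sampled activation maximum of 5.2107 chosen so the result reproduces the first InceptionV3 hunk (the prototxt stores the results as float32, hence 48.9399986267 for 48.94):

U8_MAX = 255  # unsigned INT8 range: outputs known to be non-negative (after ReLU)
S8_MAX = 127  # signed INT8 range: everything else, including weights

def activation_scales(sampled_max):
    # Both scales come from the same sampled range and are rounded to two
    # decimals, as in transform_convolutions(); sampled_max is an assumption.
    return round(S8_MAX / sampled_max, 2), round(U8_MAX / sampled_max, 2)

print(activation_scales(5.2107))  # -> (24.37, 48.94): the before/after pair above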
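
Note on the accuracy parsing: the old get_the_accuracy() picked a hard-coded line from the end of the test log per model type (-t resnet|inceptionv3|ssd); the new version scans the 'caffe test' log backwards for the first line containing the blob name passed via -n. A standalone sketch of that lookup, with hypothetical log lines:

def parse_accuracy(log_lines, blob_name):
    # Same idea as the new loop in get_the_accuracy(): search from the end,
    # match '<blob_name> = ', and convert what follows the last '='.
    for line in log_lines[::-1]:
        if '{} = '.format(blob_name) in line:
            return float(line.split('=')[-1].strip())
    return None  # the script treats a missing match as a fatal error

log = ['... acc/top-1 = 0.7612', '... acc/top-5 = 0.9305']  # hypothetical
print(parse_accuracy(log, 'acc/top-1'))  # -> 0.7612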
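
Note on the command construction: get_compiled_net() and get_the_accuracy() both shell out to the caffe binary and append '-detection' only when -d 1 was given. A sketch of the command get_the_accuracy() assembles, with hypothetical paths:

caffe_bin = './build/tools/caffe'  # assumed build location
cmd = '{} test -model {} -weights {} -iterations {}'.format(
    caffe_bin, 'ssd_quantized.prototxt', 'ssd.caffemodel', 100)
cmd += ' -detection'  # added only for detection models (detection_flag == 1)
cmd += ' 2>&1|tee {}'.format('calibrator_log.txt')
print(cmd)

An end-to-end run would then look like python scripts/calibrator.py -r build/ -m ssd.prototxt -w ssd.caffemodel -i 100 -d 1 -n detection_eval (illustrative arguments; the -n blob must exist as a top in the FP32 prototxt, which check_blob_name_existence() enforces).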