{"id":785,"date":"2024-06-24T09:25:20","date_gmt":"2024-06-24T01:25:20","guid":{"rendered":"http:\/\/ai.gitpp.com\/?p=785"},"modified":"2024-06-24T09:25:20","modified_gmt":"2024-06-24T01:25:20","slug":"%e5%ae%9e%e6%88%98-%e9%80%9a%e8%bf%87%e5%be%ae%e8%b0%83segformer%e6%94%b9%e8%bf%9b%e8%bd%a6%e9%81%93%e6%a3%80%e6%b5%8b%e6%95%88%e6%9e%9c%ef%bc%88%e6%95%b0%e6%8d%ae%e9%9b%86-%e6%ba%90%e7%a0%81","status":"publish","type":"post","link":"http:\/\/ai.gitpp.com\/index.php\/2024\/06\/24\/%e5%ae%9e%e6%88%98-%e9%80%9a%e8%bf%87%e5%be%ae%e8%b0%83segformer%e6%94%b9%e8%bf%9b%e8%bd%a6%e9%81%93%e6%a3%80%e6%b5%8b%e6%95%88%e6%9e%9c%ef%bc%88%e6%95%b0%e6%8d%ae%e9%9b%86-%e6%ba%90%e7%a0%81\/","title":{"rendered":"\u5b9e\u6218 | \u901a\u8fc7\u5fae\u8c03SegFormer\u6539\u8fdb\u8f66\u9053\u68c0\u6d4b\u6548\u679c\uff08\u6570\u636e\u96c6 + \u6e90\u7801\uff09"},"content":{"rendered":"\n<p><\/p>\n\n\n\n<p>\u539f\u6587\u94fe\u63a5\uff1a<\/p>\n\n\n\n<figure class=\"wp-block-embed is-type-wp-embed is-provider-learnopencv wp-block-embed-learnopencv\"><div class=\"wp-block-embed__wrapper\">\n<blockquote class=\"wp-embedded-content\" data-secret=\"jiIbhMvZvM\"><a href=\"https:\/\/learnopencv.com\/segformer-fine-tuning-for-lane-detection\/\">SegFormer \ud83e\udd17 : Fine-Tuning for Improved Lane Detection in Autonomous Vehicles<\/a><\/blockquote><iframe loading=\"lazy\" class=\"wp-embedded-content\" sandbox=\"allow-scripts\" security=\"restricted\" style=\"position: absolute; clip: rect(1px, 1px, 1px, 1px);\" title=\"&#8220;SegFormer \ud83e\udd17 : Fine-Tuning for Improved Lane Detection in Autonomous Vehicles&#8221; &#8212; LearnOpenCV\" src=\"https:\/\/learnopencv.com\/segformer-fine-tuning-for-lane-detection\/embed\/#?secret=yedOrIt35f#?secret=jiIbhMvZvM\" data-secret=\"jiIbhMvZvM\" width=\"600\" height=\"338\" frameborder=\"0\" marginwidth=\"0\" marginheight=\"0\" 
scrolling=\"no\"><\/iframe>\n<\/div><\/figure>\n\n\n\n<p><\/p>\n\n\n\n<p><strong>\u80cc\u666f\u4ecb\u7ecd<\/strong><\/p>\n\n\n\n<p><strong>&nbsp;&nbsp;&nbsp; SegFormer\uff1a<\/strong>\u5b9e\u4f8b\u5206\u5272\u5728\u81ea\u52a8\u9a7e\u9a76\u6c7d\u8f66\u6280\u672f\u7684\u5feb\u901f\u53d1\u5c55\u4e2d\u53d1\u6325\u4e86\u5173\u952e\u4f5c\u7528\u3002\u5bf9\u4e8e\u4efb\u4f55\u5728\u9053\u8def\u4e0a\u884c\u9a76\u7684\u8f66\u8f86\u6765\u8bf4\uff0c\u8f66\u9053\u68c0\u6d4b\u90fd\u662f\u5fc5\u4e0d\u53ef\u5c11\u7684\u3002\u8f66\u9053\u662f\u9053\u8def\u4e0a\u7684\u6807\u8bb0\uff0c\u6709\u52a9\u4e8e\u533a\u5206\u9053\u8def\u4e0a\u53ef\u884c\u9a76\u533a\u57df\u548c\u4e0d\u53ef\u884c\u9a76\u533a\u57df\u3002\u8f66\u9053\u68c0\u6d4b\u7b97\u6cd5\u6709\u5f88\u591a\u79cd\uff0c\u6bcf\u79cd\u7b97\u6cd5\u90fd\u6709\u5404\u81ea\u7684\u4f18\u7f3a\u70b9\u3002<img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_png\/rDAib0gF5Ojbibn9HxIPYIQicJ6Ux63PtKQ9fuDrtWCTTLPeZjmnCT01x9XK5mJZ7dU4Gdu6YyP3hyBq6kTouHMyw\/640?wx_fmt=png&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u672c\u6587\u4e2d\uff0c\u6211\u4eec\u5c06\u4f7f\u7528Berkeley Deep Drive\u6570\u636e\u96c6\u5bf9HuggingFace\uff08Enze Xie\u3001Wenhai Wang\u3001Zhiding Yu \u7b49\u4eba\uff09\u4e2d\u975e\u5e38\u8457\u540d\u7684SegFormer \u6a21\u578b\u8fdb\u884c\u5fae\u8c03\uff0c\u4ee5\u5bf9\u8f66\u8f86\u7684POV\u89c6\u9891\u8fdb\u884c\u8f66\u9053\u68c0\u6d4b\u3002\u6b64\u5b9e\u9a8c\u751a\u81f3\u9002\u7528\u4e8e\u5904\u7406\u8d77\u6765\u5f88\u590d\u6742\u7684\u591c\u95f4\u9a7e\u9a76\u573a\u666f\u3002<\/p>\n\n\n\n<p><strong>\u8f66\u9053\u68c0\u6d4b\u5728ADAS\u4e2d\u7684\u4f5c\u7528<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u603b\u4f53\u800c\u8a00\uff0c\u8f66\u9053\u68c0\u6d4b\u5bf9ADAS\u7cfb\u7edf\u4ea7\u751f\u4e86\u6df1\u8fdc\u5f71\u54cd\u3002\u8ba9\u6211\u4eec\u5728\u8fd9\u91cc\u63a2\u8ba8\u5176\u4e2d\u7684\u51e0\u4e2a\uff1a<\/p>\n\n\n\n<ul 
class=\"wp-block-list\">\n<li><strong>\u8f66\u9053\u4fdd\u6301\uff1a<\/strong>\u9664\u4e86\u8b66\u544a\u7cfb\u7edf\u4e4b\u5916\uff0c\u8f66\u9053\u68c0\u6d4b\u4e5f\u662f\u8f66\u9053\u4fdd\u6301\u8f85\u52a9 (LKA) \u6280\u672f\u4e0d\u53ef\u6216\u7f3a\u7684\u4e00\u90e8\u5206\uff0c\u5b83\u4e0d\u4ec5\u53ef\u4ee5\u63d0\u9192\u9a7e\u9a76\u5458\uff0c\u8fd8\u53ef\u4ee5\u91c7\u53d6\u7ea0\u6b63\u63aa\u65bd\uff0c\u4f8b\u5982\u8f7b\u67d4\u7684\u8f6c\u5411\u5e72\u9884\uff0c\u4ee5\u4f7f\u8f66\u8f86\u4fdd\u6301\u5728\u8f66\u9053\u4e2d\u592e\u3002<\/li>\n\n\n\n<li><strong>\u4ea4\u901a\u6d41\u5206\u6790\uff1a<\/strong>\u8f66\u9053\u68c0\u6d4b\u4f7f\u8f66\u8f86\u80fd\u591f\u4e86\u89e3\u9053\u8def\u51e0\u4f55\u5f62\u72b6\uff0c\u8fd9\u5728\u5408\u5e76\u548c\u53d8\u9053\u7b49\u590d\u6742\u9a7e\u9a76\u573a\u666f\u4e2d\u81f3\u5173\u91cd\u8981\uff0c\u5e76\u4e14\u5bf9\u4e8e\u6839\u636e\u5468\u56f4\u4ea4\u901a\u6d41\u91cf\u8c03\u6574\u901f\u5ea6\u7684\u81ea\u9002\u5e94\u5de1\u822a\u63a7\u5236\u7cfb\u7edf\u81f3\u5173\u91cd\u8981\u3002<\/li>\n\n\n\n<li><strong>\u81ea\u52a8\u5bfc\u822a\uff1a<\/strong>\u5bf9\u4e8e\u534a\u81ea\u52a8\u6216\u81ea\u52a8\u9a7e\u9a76\u6c7d\u8f66\uff0c\u8f66\u9053\u68c0\u6d4b\u662f\u4f7f\u8f66\u8f86\u80fd\u591f\u5728\u9053\u8def\u57fa\u7840\u8bbe\u65bd\u5185\u5bfc\u822a\u548c\u4fdd\u6301\u5176\u4f4d\u7f6e\u7684\u57fa\u672c\u7ec4\u4ef6\u3002\u5b83\u5bf9\u4e8e\u81ea\u52a8\u9a7e\u9a76\u7b97\u6cd5\u4e2d\u7684\u8def\u7ebf\u89c4\u5212\u548c\u51b3\u7b56\u8fc7\u7a0b\u81f3\u5173\u91cd\u8981\u3002<\/li>\n\n\n\n<li><strong>\u9a7e\u9a76\u8212\u9002\u5ea6\uff1a<\/strong>\u4f7f\u7528\u8f66\u9053\u68c0\u6d4b\u7684\u7cfb\u7edf\u53ef\u4ee5\u63a5\u7ba1\u90e8\u5206\u9a7e\u9a76\u4efb\u52a1\uff0c\u51cf\u5c11\u9a7e\u9a76\u5458\u75b2\u52b3\uff0c\u63d0\u4f9b\u66f4\u8212\u9002\u7684\u9a7e\u9a76\u4f53\u9a8c\uff0c\u5c24\u5176\u662f\u5728\u9ad8\u901f\u516c\u8def\u957f\u9014\u884c\u9a76\u65f6\u3002<\/li>\n\n\n\n<li><strong>\u9053\u8def\u72b6\u51b5\u76d1\u6d4b\uff1a<\/strong>\u8f66\u9053\u68c0\u6d4b\
u7cfb\u7edf\u4e5f\u6709\u52a9\u4e8e\u76d1\u6d4b\u9053\u8def\u72b6\u51b5\u3002\u4f8b\u5982\uff0c\u5982\u679c\u7cfb\u7edf\u6301\u7eed\u68c0\u6d4b\u5230\u8f66\u9053\u6807\u8bb0\u4e0d\u6e05\u6670\u6216\u6839\u672c\u6ca1\u6709\u8f66\u9053\u6807\u8bb0\uff0c\u5219\u53ef\u4ee5\u53cd\u9988\u6b64\u4fe1\u606f\u4ee5\u7528\u4e8e\u57fa\u7840\u8bbe\u65bd\u7ef4\u62a4\u548c\u6539\u8fdb\u3002<\/li>\n<\/ul>\n\n\n\n<p><strong>\u4f2f\u514b\u5229Deep Drive\u6570\u636e\u96c6<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp; Berkeley Deep Drive 100K (BDD100K) \u6570\u636e\u96c6\u662f\u4ece\u5404\u4e2a\u57ce\u5e02\u548c\u90ca\u533a\u6536\u96c6\u7684\u5404\u79cd\u9a7e\u9a76\u89c6\u9891\u5e8f\u5217\u7684\u7efc\u5408\u96c6\u5408\u3002\u5176\u4e3b\u8981\u7528\u4e8e\u4fc3\u8fdb\u81ea\u52a8\u9a7e\u9a76\u7684\u7814\u7a76\u548c\u5f00\u53d1\u3002\u8be5\u6570\u636e\u96c6\u975e\u5e38\u5e9e\u5927\uff0c\u5305\u542b\u7ea6100,000 \u4e2a\u89c6\u9891\uff0c\u6bcf\u4e2a\u89c6\u9891\u65f6\u957f 40 \u79d2\uff0c\u6db5\u76d6\u5404\u79cd\u9a7e\u9a76\u573a\u666f\u3001\u5929\u6c14\u6761\u4ef6\u548c\u4e00\u5929\u4e2d\u7684\u65f6\u95f4\u3002BDD100K \u6570\u636e\u96c6\u4e2d\u7684\u6bcf\u4e2a\u89c6\u9891\u90fd\u9644\u6709\u4e00\u7ec4\u4e30\u5bcc\u7684\u5e27\u7ea7\u6ce8\u91ca\u3002\u8fd9\u4e9b\u6ce8\u91ca\u5305\u62ec\u8f66\u9053\u3001\u53ef\u9a7e\u9a76\u533a\u57df\u3001\u7269\u4f53\uff08\u5982\u8f66\u8f86\u3001\u884c\u4eba\u548c\u4ea4\u901a\u6807\u5fd7\uff09\u7684\u6807\u7b7e\u4ee5\u53ca\u5168\u5e27\u5b9e\u4f8b\u5206\u5272\u3002\u6570\u636e\u96c6\u7684\u591a\u6837\u6027\u5bf9\u4e8e\u5f00\u53d1\u5f3a\u5927\u7684\u8f66\u9053\u68c0\u6d4b\u7b97\u6cd5\u81f3\u5173\u91cd\u8981\uff0c\u56e0\u4e3a\u5b83\u53ef\u4ee5\u5c06\u6a21\u578b\u66b4\u9732\u7ed9\u5404\u79cd\u8f66\u9053\u6807\u8bb0\u3001\u9053\u8def\u7c7b\u578b\u548c\u73af\u5883\u6761\u4ef6\u3002<img decoding=\"async\" 
src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_png\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzWfkN6kNqiaM5HYOOMLE9GmM0sJTZDia27Xxmh0mDZ0HtFDHlXtKwFWWA\/640?wx_fmt=png&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u672c\u6587\u4e2d\uff0c BDD100K \u6570\u636e\u96c6\u768410% \u6837\u672c\u7528\u4e8e\u5fae\u8c03 SegFormer \u6a21\u578b\u3002\u8fd9\u79cd\u5b50\u91c7\u6837\u65b9\u6cd5\u5141\u8bb8\u66f4\u6613\u4e8e\u7ba1\u7406\u7684\u6570\u636e\u96c6\u5927\u5c0f\uff0c\u540c\u65f6\u4fdd\u6301\u6574\u4e2a\u6570\u636e\u96c6\u4e2d\u5b58\u5728\u7684\u6574\u4f53\u591a\u6837\u6027\u7684\u4ee3\u8868\u6027\u5b50\u96c6\u300210% \u7684\u6837\u672c\u5305\u62ec10,000 \u5f20\u56fe\u50cf\uff0c\u8fd9\u4e9b\u56fe\u50cf\u662f\u7ecf\u8fc7\u7cbe\u5fc3\u6311\u9009\u4ee5\u4ee3\u8868\u6570\u636e\u96c6\u7684\u5168\u9762\u9a7e\u9a76\u6761\u4ef6\u548c\u573a\u666f\u3002&nbsp;&nbsp;&nbsp;&nbsp;\u8ba9\u6211\u4eec\u770b\u4e00\u4e0b\u793a\u4f8b\u6570\u636e\u96c6\u4e2d\u7684\u4e00\u4e9b\u793a\u4f8b\u56fe\u50cf\u548c\u6807\u6ce8\u63a9\u7801\uff1a<img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_png\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnziaSEa7WJaaj0Tg25tib5ezGYOcBJtP6GEGtjABSKsUjC8nc56a68bJZQ\/640?wx_fmt=png&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_png\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzJuAKe71dVIf3oqxEKiaiaxibqtIKyjqLtkEVnvAia72deG4TodYKXOA53Q\/640?wx_fmt=png&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_png\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzmbmFR9IrhaP0rL3ukicVAejPvoPodOAdX8QnRNBDzuOd3j11wl1yiaEQ\/640?wx_fmt=png&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" 
alt=\"\u56fe\u7247\"><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4ece\u4e0a\u56fe\u53ef\u4ee5\u770b\u51fa\uff0c\u5bf9\u4e8eBDD\u6570\u636e\u96c6\u4e2d\u7684\u6bcf\u4e2a\u56fe\u50cf\uff0c\u90fd\u6709\u4e00\u4e2a\u6709\u6548\u7684\u771f\u5b9e\u4e8c\u8fdb\u5236\u63a9\u7801\uff0c\u53ef\u534f\u52a9\u5b8c\u6210\u8f66\u9053\u68c0\u6d4b\u4efb\u52a1\u3002\u8fd9\u53ef\u4ee5\u89c6\u4e3a\u4e00\u4e2a2 \u7c7b\u5206\u5272\u95ee\u9898\uff0c\u5176\u4e2d\u8f66\u9053\u7531\u4e00\u4e2a\u7c7b\u8868\u793a\uff0c\u80cc\u666f\u662f\u53e6\u4e00\u4e2a\u7c7b\u3002\u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8bad\u7ec3\u96c6\u67097000\u5f20\u56fe\u50cf\u548c\u63a9\u7801\uff0c\u6709\u6548\u96c6\u6709\u5927\u7ea63000\u5f20\u56fe\u50cf\u548c\u63a9\u7801\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u63a5\u4e0b\u6765\uff0c\u8ba9\u6211\u4eec\u4e3a\u8fd9\u4e2a\u5b9e\u9a8c\u6784\u5efa\u8bad\u7ec3\u7ba1\u9053\u3002&nbsp;<\/p>\n\n\n\n<p><strong>\u4ee3\u7801\u6f14\u7ec3<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u672c\u8282\u4e2d\uff0c\u6211\u4eec\u5c06\u63a2\u8ba8\u4f7f\u7528 BDD \u6570\u636e\u96c6\u5fae\u8c03HuggingFace SegFormer \u6a21\u578b\uff08\u672c\u6587\u8fd8\u89e3\u91ca\u4e86\u5185\u90e8\u67b6\u6784\uff09\u6240\u6d89\u53ca\u7684\u5404\u79cd\u8fc7\u7a0b\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;<strong>&nbsp;&nbsp;\u5148\u51b3\u6761\u4ef6<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;&#8216;BDDDataset&#8217; \u7c7b\u7684\u4e3b\u8981\u76ee\u7684\u662f\u9ad8\u6548\u5730\u4ece\u6307\u5b9a\u76ee\u5f55\u52a0\u8f7d\u548c\u9884\u5904\u7406\u56fe\u50cf\u6570\u636e\u53ca\u5176\u76f8\u5e94\u7684\u5206\u5272\u63a9\u7801\u3002\u5b83\u8d1f\u8d23\u4ee5\u4e0b\u529f\u80fd\uff1a&nbsp;<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u4f7f\u7528\u8def\u5f84\u52a0\u8f7d\u56fe\u50cf\u53ca\u5176\u5bf9\u5e94\u7684\u8499\u7248\u3002<\/li>\n\n\n\n<li>\u56fe\u50cf\u8f6c\u6362\u4e3a RGB 
\u683c\u5f0f\uff0c\u800c\u8499\u7248\u8f6c\u6362\u4e3a\u7070\u5ea6\uff08\u5355\u901a\u9053\uff09\u3002<\/li>\n\n\n\n<li>\u7136\u540e\u5c06\u63a9\u7801\u8f6c\u6362\u4e3a\u4e8c\u8fdb\u5236\u683c\u5f0f\uff0c\u5176\u4e2d\u975e\u96f6\u50cf\u7d20\u88ab\u89c6\u4e3a\u8f66\u9053\u7684\u4e00\u90e8\u5206\uff08\u5047\u8bbe\u8f66\u9053\u5206\u5272\u4efb\u52a1\uff09\u3002<\/li>\n\n\n\n<li>\u5c06\u8499\u7248\u8c03\u6574\u5927\u5c0f\u4ee5\u5339\u914d\u56fe\u50cf\u5c3a\u5bf8\uff0c\u7136\u540e\u8f6c\u6362\u4e3a\u5f20\u91cf\u3002<\/li>\n\n\n\n<li>\u6700\u540e\uff0c\u5c06\u63a9\u7801\u9608\u503c\u5316\u56de\u4e8c\u8fdb\u5236\u503c\u5e76\u8f6c\u6362\u4e3a LongTensor\uff0c\u9002\u5408 PyTorch \u4e2d\u7684\u5206\u5272\u4efb\u52a1\u3002<\/li>\n<\/ul>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code>class BDDDataset(Dataset):<\/code><code>    def __init__(self, images_dir, masks_dir, transform=None):<\/code><code>        self.images_dir = images_dir<\/code><code>        self.masks_dir = masks_dir<\/code><code>        self.transform = transform<\/code><code>        self.images = [img for img in os.listdir(images_dir) if img.endswith('.jpg')]<\/code><code>        self.masks = [mask.replace('.jpg', '.png') for mask in self.images]<\/code><code> <\/code><code>    def __len__(self):<\/code><code>        return len(self.images)<\/code><code> <\/code><code>    def 
__getitem__(self, idx):<\/code><code>        image_path = os.path.join(self.images_dir, self.images[idx])<\/code><code>        mask_path = os.path.join(self.masks_dir, self.masks[idx])<\/code><code>        image = Image.open(image_path).convert(\"RGB\")<\/code><code>        mask = Image.open(mask_path).convert('L')  <em># Convert mask to grayscale<\/em><\/code><code>         <\/code><code>        <em># Convert mask to binary format with 0 and 1 values<\/em><\/code><code>        mask = np.array(mask)<\/code><code>        mask = (mask &gt; 0).astype(np.uint8)  <em># Assuming non-zero pixels are lanes<\/em><\/code><code>         <\/code><code>        <em># Convert to PIL Image for consistency in transforms<\/em><\/code><code>        mask = Image.fromarray(mask)<\/code><code> <\/code><code>        if self.transform:<\/code><code>            image = self.transform(image)<\/code><code>            <em># Assuming to_tensor transform is included which scales pixel values between 0-1<\/em><\/code><code>            <em># mask = to_tensor(mask)  # Convert the mask to [0, 1] range<\/em><\/code><code>        mask = TF.functional.resize(img=mask, size=[360, 640], interpolation=Image.NEAREST)<\/code><code>        mask = TF.functional.to_tensor(mask)<\/code><code>        mask = (mask &gt; 0).long()  <em># Threshold back to binary and convert to LongTensor<\/em><\/code><code> <\/code><code>        return image, 
mask<\/code><\/pre>\n\n\n\n<p><strong>\u6570\u636e\u52a0\u8f7d\u5668\u5b9a\u4e49\u548c\u521d\u59cb\u5316<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4f7f\u7528\u4e4b\u524d\u521b\u5efa\u7684\u201cBDDDataset\u201d\u7c7b\uff0c\u6211\u4eec\u9700\u8981\u5b9a\u4e49\u548c\u521d\u59cb\u5316\u6570\u636e\u52a0\u8f7d\u5668\u3002\u4e3a\u6b64\uff0c\u5fc5\u987b\u521b\u5efa\u4e24\u4e2a\u5355\u72ec\u7684\u6570\u636e\u52a0\u8f7d\u5668\uff0c\u4e00\u4e2a\u7528\u4e8e\u8bad\u7ec3\u96c6\uff0c\u53e6\u4e00\u4e2a\u7528\u4e8e\u9a8c\u8bc1\u96c6\u3002\u8bad\u7ec3\u6570\u636e\u52a0\u8f7d\u5668\u8fd8\u9700\u8981\u4e00\u4e9b\u8f6c\u6362\u3002\u4e0b\u9762\u7684\u4ee3\u7801\u7247\u6bb5\u53ef\u7528\u4e8e\u6b64\u76ee\u7684\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Define the appropriate transformations<\/em><\/code><code>transform = TF.Compose([<\/code><code>    TF.Resize((360, 640)),<\/code><code>    TF.ToTensor(),<\/code><code>    TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])<\/code><code>])<\/code><code> <\/code><code><em># Create the dataset<\/em><\/code><code>train_dataset = BDDDataset(images_dir='deep_drive_10K\/train\/images',<\/code><code>                           masks_dir='deep_drive_10K\/train\/masks',<\/code><code>                           transform=transform)<\/code><code> <\/code><code>valid_dataset = BDDDataset(images_dir='deep_drive_10K\/valid\/images',<\/code><code>                           masks_dir='deep_drive_10K\/valid\/masks',<\/code><code>                           transform=transform)<\/code><code> <\/code><code><em># Create the data 
loaders<\/em><\/code><code>train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=6)<\/code><code>valid_loader&nbsp;=&nbsp;DataLoader(valid_dataset,&nbsp;batch_size=4,&nbsp;shuffle=False,&nbsp;num_workers=6)<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u8ba9\u6211\u4eec\u770b\u4e00\u4e0b\u8be5\u7ba1\u9053\u4e2d\u4f7f\u7528\u7684\u8f6c\u6362\u3002&nbsp;<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>TF.Resize((360, 640))\uff1a<\/strong>\u5c06\u56fe\u50cf\u5927\u5c0f\u8c03\u6574\u4e3a 360\u00d7640 \u50cf\u7d20\u7684\u7edf\u4e00\u5927\u5c0f\u3002<\/li>\n\n\n\n<li><strong>TF.ToTensor()\uff1a<\/strong>\u5c06\u56fe\u50cf\u8f6c\u6362\u4e3a PyTorch \u5f20\u91cf\u3002<\/li>\n\n\n\n<li><strong>TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\uff1a<\/strong>\u4f7f\u7528\u6307\u5b9a\u7684\u5e73\u5747\u503c\u548c\u6807\u51c6\u5dee\u5bf9\u56fe\u50cf\u8fdb\u884c\u5f52\u4e00\u5316\uff0c\u8fd9\u4e9b\u5e73\u5747\u503c\u548c\u6807\u51c6\u5dee\u901a\u5e38\u6765\u81ea ImageNet \u6570\u636e\u96c6\u3002\u6b64\u6b65\u9aa4\u5bf9\u4e8e\u5728ImageNet\u4e0a\u9884\u8bad\u7ec3\u7684\u6a21\u578b\u81f3\u5173\u91cd\u8981\u3002<\/li>\n<\/ul>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u6839\u636e\u81ea\u5df1\u7684\u8ba1\u7b97\u8d44\u6e90\uff0c\u60a8\u53ef\u80fd\u5e0c\u671b\u8c03\u6574\u201cbatch_size\u201d\u548c\u201cnum_workers\u201d\u7b49\u53c2\u6570\u3002<\/p>\n\n\n\n<p><strong>HuggingFace SegFormer \ud83e\udd17 \u6a21\u578b\u521d\u59cb\u5316<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Load the pre-trained model<\/em><\/code><code>model = SegformerForSemanticSegmentation.from_pretrained('nvidia\/segformer-b2-finetuned-ade-512-512')<\/code><code> <\/code><code><em># Adjust the number of classes for BDD dataset<\/em><\/code><code>model.config.num_labels = 2  <em># Replace with the actual number of 
classes<\/em><\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4e0a\u9762\u7684\u4ee3\u7801\u7247\u6bb5\u521d\u59cb\u5316\u4e86 HuggingFace \u9884\u8bad\u7ec3\u8bed\u4e49\u5206\u5272\u6a21\u578b\u5e93\u4e2d\u7684 SegFormer-b2 \u6a21\u578b\u3002\u7531\u4e8e\u6211\u4eec\u8bd5\u56fe\u5c06\u8f66\u9053\u4ece\u9053\u8def\u4e2d\u5206\u5272\u51fa\u6765\uff0c\u56e0\u6b64\u8fd9\u5c06\u88ab\u89c6\u4e3a 2 \u7c7b\u5206\u5272\u95ee\u9898\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Check for CUDA acceleration<\/em><\/code><code>device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')<\/code><code>model.to(device);<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u6b64\u8fc7\u7a0b\u4e2d\uff0c\u8bf7\u68c0\u67e5\u60a8\u7684\u6df1\u5ea6\u5b66\u4e60\u73af\u5883\u662f\u5426\u652f\u6301\u4f7f\u7528 Nvidia GPU \u7684CUDA \u52a0\u901f\u3002\u5728\u6b64\u5b9e\u9a8c\u4e2d\uff0c\u4f7f\u7528\u914d\u590712GB vRAM\u7684Nvidia RTX 3080 Ti\u8fdb\u884c\u8bad\u7ec3\u3002<\/p>\n\n\n\n<p><strong>\u8bad\u7ec3\u548c\u9a8c\u8bc1<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u672c\u8282\u4e2d\uff0c\u8ba9\u6211\u4eec\u770b\u4e00\u4e0b\u5fae\u8c03\u6b64\u6a21\u578b\u6240\u9700\u7684\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6d41\u7a0b\u3002\u4f46\u5728\u6b64\u4e4b\u524d\uff0c\u60a8\u5c06\u5982\u4f55\u8bc4\u4f30\u6b64\u6a21\u578b\u7684\u6027\u80fd\uff1f<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5bf9\u4e8e\u50cf\u8fd9\u6837\u7684\u8bed\u4e49\u5206\u5272\u95ee\u9898\uff0cIoU\uff08\u6216\uff09\u5e76\u96c6\u4ea4\u96c6\u662f\u8bc4\u4f30\u7684\u4e3b\u8981\u6307\u6807\u3002\u8fd9\u6709\u52a9\u4e8e\u6211\u4eec\u4e86\u89e3\u9884\u6d4b\u63a9\u7801\u4e0e GT \u63a9\u7801\u7684\u91cd\u53e0\u7a0b\u5ea6\u3002<\/p>\n\n\n\n<ul 
class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code>def mean_iou(preds, labels, num_classes):<\/code><code>    <em># Flatten predictions and labels<\/em><\/code><code>    preds_flat = preds.view(-1)<\/code><code>    labels_flat = labels.view(-1)<\/code><code> <\/code><code>    <em># Check that the number of elements in the flattened predictions<\/em><\/code><code>    <em># and labels are equal<\/em><\/code><code>    if preds_flat.shape[0] != labels_flat.shape[0]:<\/code><code>        raise ValueError(f\"Predictions and labels have mismatched shapes: \"<\/code><code>                         f\"{preds_flat.shape} vs {labels_flat.shape}\")<\/code><code> <\/code><code>    <em># Calculate the Jaccard score for each class<\/em><\/code><code>    iou = jaccard_score(labels_flat.cpu().numpy(), preds_flat.cpu().numpy(),<\/code><code>                        average=None, labels=range(num_classes))<\/code><code> <\/code><code>    <em># Return the mean IoU<\/em><\/code><code>    return np.mean(iou)<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4e0a\u8ff0\u51fd\u6570\u201cmean_iou\u201d\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a&nbsp;<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>\u6241\u5e73\u5316\u9884\u6d4b\u548c\u6807\u7b7e\uff1a<\/strong>\u4f7f\u7528 .view(-1) \u65b9\u6cd5\u6241\u5e73\u5316\u9884\u6d4b\u548c\u6807\u7b7e\u3002\u9700\u8981\u8fdb\u884c\u8fd9\u79cd\u91cd\u5851\uff0c\u4ee5\u4fbf\u9010\u50cf\u7d20\u6bd4\u8f83\u6bcf\u4e2a\u9884\u6d4b\u4e0e\u5176\u5bf9\u5e94\u7684\u6807\u7b7e\u3002<\/li>\n\n\n\n<li><strong>\u5f62\u72b6\u9a8c\u8bc1\uff1a<\/strong>\u8be5\u51fd\u6570\u68c0\u67e5 preds_flat \u548c labels_flat 
\u4e2d\u7684\u5143\u7d20\u6570\u91cf\u662f\u5426\u76f8\u7b49\u3002\u8fd9\u662f\u4e00\u9879\u81f3\u5173\u91cd\u8981\u7684\u68c0\u67e5\uff0c\u4ee5\u786e\u4fdd\u6bcf\u4e2a\u9884\u6d4b\u90fd\u5bf9\u5e94\u4e00\u4e2a\u6807\u7b7e\u3002<\/li>\n\n\n\n<li><strong>\u6770\u5361\u5fb7\u5206\u6570\u8ba1\u7b97\uff1a<\/strong>\u4f7f\u7528 jaccard_score \u51fd\u6570\uff08\u901a\u5e38\u6765\u81ea scikit-learn \u7b49\u5e93\uff09\u8ba1\u7b97\u6bcf\u4e2a\u7c7b\u7684\u6770\u5361\u5fb7\u5206\u6570 (IoU)\u3002IoU \u662f\u5728\u6241\u5e73\u9884\u6d4b\u548c\u6807\u7b7e\u4e4b\u95f4\u8ba1\u7b97\u7684\u3002\u5b83\u662f\u9488\u5bf9\u6bcf\u4e2a\u7c7b\u5355\u72ec\u8ba1\u7b97\u7684\uff0c\u5982 average=None \u548c labels=range(num_classes) \u6240\u793a\u3002<\/li>\n\n\n\n<li><strong>\u5e73\u5747 IoU \u8ba1\u7b97\uff1a<\/strong>\u5e73\u5747 IoU \u662f\u901a\u8fc7\u8ba1\u7b97\u6240\u6709\u7c7b\u522b\u7684 IoU \u5206\u6570\u7684\u5e73\u5747\u503c\u6765\u8ba1\u7b97\u7684\u3002\u8fd9\u63d0\u4f9b\u4e86\u4e00\u4e2a\u5355\u4e00\u7684\u6027\u80fd\u6307\u6807\uff0c\u603b\u7ed3\u4e86\u6a21\u578b\u7684\u9884\u6d4b\u4e0e\u6240\u6709\u7c7b\u522b\u7684\u57fa\u672c\u4e8b\u5b9e\u7684\u4e00\u81f4\u7a0b\u5ea6\u3002<\/li>\n<\/ul>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Define the optimizer<\/em><\/code><code>optimizer = AdamW(model.parameters(), lr=5e-5)<\/code><code> <\/code><code><em># Define the learning rate scheduler<\/em><\/code><code>num_epochs = 30<\/code><code>num_training_steps = num_epochs * len(train_loader)<\/code><code>lr_scheduler = get_scheduler(<\/code><code>    \"linear\",<\/code><code>    optimizer=optimizer,<\/code><code>    num_warmup_steps=0,<\/code><code>    
num_training_steps=num_training_steps<\/code><code>)<\/code><code> <\/code><code><em># Placeholder for best mean IoU and best model weights<\/em><\/code><code>best_iou = 0.0<\/code><code>best_model_wts = copy.deepcopy(model.state_dict())<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5bf9\u4e8e\u6a21\u578b\u4f18\u5316\uff0c\u6211\u4eec\u4f7f\u7528\u4e86\u8457\u540d\u7684 Adam \u4f18\u5316\u5668\uff0c\u5176 `learning_rate` \u4e3a 5e-5\u3002\u5728\u8fd9\u4e2a\u5b9e\u9a8c\u4e2d\uff0c\u5fae\u8c03\u8fc7\u7a0b\u8fdb\u884c\u4e86 30 \u4e2a `epochs`\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code>for epoch in range(num_epochs):<\/code><code>    model.train()<\/code><code>    train_iterator = tqdm(train_loader, desc=f\"Epoch {epoch + 1}\/{num_epochs}\", unit=\"batch\")<\/code><code>    for batch in train_iterator:<\/code><code>        images, masks = batch<\/code><code>        images = images.to(device)<\/code><code>        masks = masks.to(device).long()  <em># Ensure masks are LongTensors<\/em><\/code><code> <\/code><code>        <em># Remove the channel dimension from the masks tensor<\/em><\/code><code>        masks = masks.squeeze(1)  <em># This changes the shape from [batch, 1, H, W] to [batch, H, W]<\/em><\/code><code>        optimizer.zero_grad()<\/code><code> <\/code><code>        <em># Pass pixel_values and labels to the model<\/em><\/code><code>        outputs = model(pixel_values=images, labels=masks,return_dict=True)<\/code><code>         <\/code><code>        loss = outputs[\"loss\"]<\/code><code>     
   loss.backward()<\/code><code> <\/code><code>        optimizer.step()<\/code><code>        lr_scheduler.step()<\/code><code>        outputs = F.interpolate(outputs[\"logits\"], size=masks.shape[-2:], mode=\"bilinear\", align_corners=False)<\/code><code>         <\/code><code>        train_iterator.set_postfix(loss=loss.item())<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4e0a\u9762\u7684\u4ee3\u7801\u7247\u6bb5\u8bf4\u660e\u4e86\u5fae\u8c03\u8fc7\u7a0b\u7684\u8bad\u7ec3\u5faa\u73af\u3002\u5bf9\u4e8e\u6bcf\u4e2a\u65f6\u671f\uff0c\u5faa\u73af\u90fd\u4f1a\u904d\u5386\u8bad\u7ec3\u6570\u636e\u52a0\u8f7d\u5668\u201ctrain_loader\u201d\uff0c\u5b83\u63d0\u4f9b\u6210\u6279\u7684\u56fe\u50cf\u548c\u63a9\u7801\u5bf9\u3002\u8fd9\u4e9b\u662f\u8f66\u9053\u56fe\u50cf\u53ca\u5176\u76f8\u5e94\u7684\u5206\u5272\u63a9\u7801\u3002\u6bcf\u6279\u56fe\u50cf\u548c\u63a9\u7801\u90fd\u4f1a\u79fb\u52a8\u5230\u8ba1\u7b97\u8bbe\u5907\uff08\u5982 GPU\uff0c\u79f0\u4e3a\u201c\u8bbe\u5907\u201d\uff09\u3002\u63a9\u7801\u5f20\u91cf\u7684\u901a\u9053\u7ef4\u5ea6\u88ab\u79fb\u9664\u4ee5\u5339\u914d\u6a21\u578b\u6240\u9700\u7684\u8f93\u5165\u683c\u5f0f\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u8be5\u6a21\u578b\u6267\u884c\u524d\u5411\u4f20\u9012\uff0c\u63a5\u6536\u56fe\u50cf\u548c\u63a9\u7801\u4f5c\u4e3a\u8f93\u5165\u3002\u5728\u672c\u4f8b\u4e2d\uff0c`pixel_values` \u53c2\u6570\u63a5\u6536\u56fe\u50cf\uff0clabels \u53c2\u6570\u63a5\u6536\u63a9\u7801\u3002\u6a21\u578b\u8f93\u51fa\u5305\u62ec\u635f\u5931\u503c\uff08\u7528\u4e8e\u8bad\u7ec3\uff09\u548c logits\uff08\u539f\u59cb\u9884\u6d4b\uff09\u3002\u6b64\u540e\uff0c\u635f\u5931\u53cd\u5411\u4f20\u64ad\u4ee5\u66f4\u65b0\u6a21\u578b\u7684\u6743\u91cd\u3002\u6b64\u540e\uff0c\u4f18\u5316\u5668\u548c\u5b66\u4e60\u7387\u8c03\u5ea6\u7a0b\u5e8f `lr_scheduler` \u5728\u8bad\u7ec3\u671f\u95f4\u8c03\u6574\u5b66\u4e60\u7387\u548c\u5176\u4ed6\u53c2\u6570\u3002\u4f7f\u7528\u53cc\u7ebf\u6027\u63d2\u503c\u8c03\u6574\u6a21\u578b\u4e2d\u7684 logits 
\u7684\u5927\u5c0f\u4ee5\u5339\u914d\u63a9\u7801\u7684\u5927\u5c0f\u3002\u6b64\u6b65\u9aa4\u5bf9\u4e8e\u5c06\u6a21\u578b\u7684\u9884\u6d4b\u4e0e\u5730\u9762\u771f\u5b9e\u63a9\u7801\u8fdb\u884c\u6bd4\u8f83\u81f3\u5173\u91cd\u8981\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Evaluation loop for each epoch<\/em><\/code><code>model.eval()<\/code><code>total_iou = 0<\/code><code>num_batches = 0<\/code><code>valid_iterator = tqdm(valid_loader, desc=\"Validation\", unit=\"batch\")<\/code><code>for batch in valid_iterator:<\/code><code>    images, masks = batch<\/code><code>    images = images.to(device)<\/code><code>    masks = masks.to(device).long()<\/code><code> <\/code><code>    with torch.no_grad():<\/code><code><em>        # Get the logits from the model and apply argmax to get the predictions<\/em><\/code><code>        outputs = model(pixel_values=images,return_dict=True)<\/code><code>        outputs = F.interpolate(outputs[\"logits\"], size=masks.shape[-2:], mode=\"bilinear\", align_corners=False)<\/code><code>        preds = torch.argmax(outputs, dim=1)<\/code><code>        preds = torch.unsqueeze(preds, dim=1)<\/code><code> <\/code><code>    preds = preds.view(-1)<\/code><code>    masks = masks.view(-1)<\/code><code><em> <\/em><\/code><code><em>    # Compute IoU<\/em><\/code><code>    
iou = mean_iou(preds, masks, model.config.num_labels)<\/code><code>    total_iou += iou<\/code><code>    num_batches += 1<\/code><code>    valid_iterator.set_postfix(mean_iou=iou)<\/code><code> <\/code><code>epoch_iou = total_iou \/ num_batches<\/code><code>print(f\"Epoch {epoch+1}\/{num_epochs} - Mean IoU: {epoch_iou:.4f}\")<\/code><code><em> <\/em><\/code><code><em># Check for improvement<\/em><\/code><code>if epoch_iou &gt; best_iou:<\/code><code>    print(f\"Validation IoU improved from {best_iou:.4f} to {epoch_iou:.4f}\")<\/code><code>    best_iou = epoch_iou<\/code><code>    best_model_wts = copy.deepcopy(model.state_dict())<\/code><code>    torch.save(best_model_wts, 'best_model.pth')<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5bf9\u4e8e\u6b64\u8fc7\u7a0b\u7684\u9a8c\u8bc1\u65b9\u9762\uff0c\u6a21\u578b\u8bbe\u7f6e\u4e3a\u8bc4\u4f30\u6a21\u5f0f (model.eval())\uff0c\u8fd9\u4f1a\u7981\u7528\u4ec5\u5728\u8bad\u7ec3\u671f\u95f4\u4f7f\u7528\u7684\u67d0\u4e9b\u5c42\u548c\u884c\u4e3a\uff08\u5982 dropout\uff09\u3002\u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u5bf9\u4e8e\u9a8c\u8bc1\u6570\u636e\u96c6\u4e2d\u7684\u6bcf\u4e2a\u6279\u6b21\uff0c\u6a21\u578b\u90fd\u4f1a\u751f\u6210\u9884\u6d4b\u3002\u8fd9\u4e9b\u9884\u6d4b\u4f1a\u8c03\u6574\u5927\u5c0f\u5e76\u8fdb\u884c\u5904\u7406\uff0c\u4ee5\u8ba1\u7b97\u4ea4\u5e76\u6bd4 (IoU) \u6307\u6807\u3002\u8ba1\u7b97\u5e76\u6c47\u603b\u6bcf\u4e2a\u6279\u6b21\u7684\u5e73\u5747 IoU\uff0c\u4ee5\u5f97\u51fa\u8be5\u65f6\u671f\u7684\u5e73\u5747 IoU\u3002\u5728\u6bcf\u4e2a\u65f6\u671f\u4e4b\u540e\uff0c\u5c06 IoU \u4e0e\u4e4b\u524d\u65f6\u671f\u83b7\u5f97\u7684\u6700\u4f73 IoU \u8fdb\u884c\u6bd4\u8f83\u3002\u5982\u679c\u5f53\u524d IoU 
\u66f4\u9ad8\uff0c\u5219\u8868\u793a\u6709\u6240\u6539\u8fdb\uff0c\u5e76\u4e14\u6a21\u578b\u7684\u72b6\u6001\u5c06\u4fdd\u5b58\u4e3a\u8fc4\u4eca\u4e3a\u6b62\u7684\u6700\u4f73\u6a21\u578b\u3002<\/p>\n\n\n\n<p><strong>\u89c6\u9891\u63a8\u7406<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u597d\u4e86\uff0c\u6211\u4eec\u73b0\u5728\u6709\u4e86\u4e00\u4e2a\u7ecf\u8fc7\u5145\u5206\u5fae\u8c03\u7684 SegFormer\uff0c\u5b83\u4e13\u95e8\u7528\u4e8e\u81ea\u52a8\u9a7e\u9a76\u6c7d\u8f66\u7684\u8f66\u9053\u68c0\u6d4b\u3002\u4f46\u662f\uff0c\u6211\u4eec\u5982\u4f55\u770b\u5f85\u7ed3\u679c\u5462\uff1f\u5728\u672c\u8282\u4e2d\uff0c\u8ba9\u6211\u4eec\u63a2\u7d22\u8fd9\u4e2a\u5b9e\u9a8c\u7684\u63a8\u7406\u90e8\u5206\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u9996\u5148\uff0c\u5fc5\u987b\u52a0\u8f7d\u9884\u5148\u8bad\u7ec3\u7684 SegFormer \u6743\u91cd\u3002\u8fd8\u9700\u8981\u5b9a\u4e49\u7c7b\u7684\u6570\u91cf\u3002\u8fd9\u662f\u4f7f\u7528 `model.config.num_labels=2` \u5b8c\u6210\u7684\uff0c\u56e0\u4e3a\u6211\u4eec\u8981\u5904\u7406 2 \u4e2a\u7c7b\u3002&nbsp;<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4ece\u8fd9\u91cc\u5f00\u59cb\uff0c\u8fd8\u9700\u8981\u52a0\u8f7d\u4e0a\u4e00\u4e2a\u4ee3\u7801\u7247\u6bb5\u5bfc\u51fa\u7684\u201cbest_model.pth\u201d\u6743\u91cd\u6587\u4ef6\u3002\u8fd9\u5305\u542b\u5fae\u8c03\u6a21\u578b\u7684\u6700\u4f73\u8bad\u7ec3\u6743\u91cd\u3002\u6a21\u578b\u5fc5\u987b\u8bbe\u7f6e\u4e3a\u8bc4\u4f30\u6a21\u5f0f\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Load the trained model <\/em><\/code><code>device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')<\/code><code>model = 
SegformerForSemanticSegmentation.from_pretrained('nvidia\/segformer-b2-finetuned-ade-512-512')<\/code><code> <\/code><code><em># Replace with the actual number of classes<\/em><\/code><code>model.config.num_labels = 2 <\/code><code> <\/code><code><em># Load the state from the fine-tuned model and set to model.eval() mode<\/em><\/code><code>model.load_state_dict(torch.load('segformer_inference-360640-b2\/best_model.pth'))<\/code><code>model.to(device)<\/code><code>model.eval()<\/code><code> <\/code><code><em># Video inference<\/em><\/code><code>cap = cv2.VideoCapture('test-footages\/test-2.mp4')<\/code><code>fourcc = cv2.VideoWriter_fourcc(*'XVID')<\/code><code>out = cv2.VideoWriter('output_video.avi', fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4e3a\u4e86\u52a0\u8f7d\u548c\u8bfb\u53d6\u89c6\u9891\uff0c\u4f7f\u7528\u4e86 OpenCV\uff0c\u5e76\u4f7f\u7528 `cv2.VideoWriter` \u65b9\u6cd5\u5bfc\u51fa\u6700\u7ec8\u63a8\u7406\u89c6\u9891\uff0c\u5176\u4e2d\u8499\u7248\u4e0e\u6e90\u89c6\u9891\u7247\u6bb5\u91cd\u53e0\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Perform transformations<\/em><\/code><code>data_transforms = TF.Compose([<\/code><code>    TF.ToPILImage(),<\/code><code>    TF.Resize((360, 640)),<\/code><code>    TF.ToTensor(),<\/code><code>    TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 
0.225])<\/code><code>])<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u9700\u8981\u8bb0\u4f4f\u7684\u4e00\u4ef6\u975e\u5e38\u91cd\u8981\u7684\u4e8b\u60c5\u662f\uff0c\u5728\u6570\u636e\u96c6\u9884\u5904\u7406\u671f\u95f4\u4f7f\u7528\u7684\u76f8\u540c\u201c\u53d8\u6362\u201d\u4e5f\u5fc5\u987b\u5728\u63a8\u7406\u9636\u6bb5\u4f7f\u7528\u3002\u89c6\u9891\u4e2d\u7684\u6bcf\u4e00\u5e27\u90fd\u4f1a\u7ecf\u5386\u4e00\u7cfb\u5217\u53d8\u6362\uff0c\u4ee5\u5339\u914d\u6a21\u578b\u6240\u9700\u7684\u8f93\u5165\u683c\u5f0f\u3002\u8fd9\u4e9b\u53d8\u6362\u5305\u62ec\u8c03\u6574\u5927\u5c0f\u3001\u5f20\u91cf\u8f6c\u6362\u548c\u89c4\u8303\u5316\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n\n\n\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-preformatted\"><code><em># Inference loop <\/em><\/code><code>while(cap.isOpened()):<\/code><code>    ret, frame = cap.read()<\/code><code>    if ret == True:<\/code><code><em>        # Preprocess the frame<\/em><\/code><code>        input_tensor = data_transforms(frame).unsqueeze(0).to(device)<\/code><code>         <\/code><code>        with torch.no_grad():<\/code><code>            outputs = model(pixel_values=input_tensor,return_dict=True)<\/code><code>            outputs = F.interpolate(outputs[\"logits\"], size=(360, 
640), mode=\"bilinear\", align_corners=False)<\/code><code>             <\/code><code>            preds = torch.argmax(outputs, dim=1)<\/code><code>            preds = torch.unsqueeze(preds, dim=1)<\/code><code>            predicted_mask = (torch.sigmoid(preds) &gt; 0.5).float()<\/code><code><em> <\/em><\/code><code><em>        # Create an RGB version of the mask to overlay on the original frame<\/em><\/code><code>        mask_np = predicted_mask.cpu().squeeze().numpy()<\/code><code>        mask_resized = cv2.resize(mask_np, (frame.shape[1], frame.shape[0]))<\/code><code><em>         <\/em><\/code><code><em>        # Modify this section to create a green mask<\/em><\/code><code>        mask_rgb = np.zeros((mask_resized.shape[0], mask_resized.shape[1], 3), dtype=np.uint8)<\/code><code>        mask_rgb[:, :, 1] = (mask_resized * 255).astype(np.uint8)  # Set only the green channel<\/code><code><em> <\/em><\/code><code><em>        # Post-processing for mask smoothening<\/em><\/code><code><em>        # Remove noise<\/em><\/code><code>        kernel = np.ones((3,3), np.uint8)<\/code><code>        opening = cv2.morphologyEx(mask_rgb, cv2.MORPH_OPEN, kernel, iterations=2)<\/code><code><em>         <\/em><\/code><code><em>        # Close small holes<\/em><\/code><code>        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel, iterations=2)<\/code><code><em> <\/em><\/code><code><em>        # Overlay the mask on the frame<\/em><\/code><code>        blended = cv2.addWeighted(frame, 0.65, closing, 0.6, 0)<\/code><code><em>         <\/em><\/code><code><em>        # Write the blended frame to the output video<\/em><\/code><code>        out.write(blended)<\/code><code>    else:<\/code><code>        break<\/code><code> 
<\/code><code>cap.release()<\/code><code>out.release()<\/code><code>cv2.destroyAllWindows()<\/code><\/pre>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u63a8\u7406\u5faa\u73af\u4e2d\uff0c\u6bcf\u4e2a\u9884\u5904\u7406\u8fc7\u7684\u5e27\u90fd\u4f1a\u88ab\u8f93\u5165\u5230\u6a21\u578b\u4e2d\u3002\u6a21\u578b\u8f93\u51fa\u5bf9\u6570\uff0c\u7136\u540e\u5c06\u5176\u63d2\u503c\u5230\u539f\u59cb\u5e27\u5927\u5c0f\u5e76\u901a\u8fc7 argmax \u51fd\u6570\u6765\u83b7\u5f97\u9884\u6d4b\u7684\u5206\u5272\u63a9\u7801\u3002\u9608\u503c\u64cd\u4f5c\u5c06\u8fd9\u4e9b\u9884\u6d4b\u8f6c\u6362\u4e3a\u4e8c\u8fdb\u5236\u63a9\u7801\uff0c\u7a81\u51fa\u663e\u793a\u68c0\u6d4b\u5230\u7684\u8f66\u9053\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4e3a\u4e86\u66f4\u597d\u5730\u8fdb\u884c\u53ef\u89c6\u5316\uff0c\u4e8c\u8fdb\u5236\u63a9\u7801\u88ab\u8f6c\u6362\u4e3a RGB \u683c\u5f0f\uff0c\u8f66\u9053\u989c\u8272\u4e3a\u7eff\u8272\u3002\u5e94\u7528\u4e00\u4e9b\u540e\u5904\u7406\u6b65\u9aa4\uff08\u5982\u566a\u58f0\u6d88\u9664\u548c\u5b54\u6d1e\u586b\u5145\uff09\u6765\u5e73\u6ed1\u63a9\u7801\u3002\u7136\u540e\u5c06\u6b64\u63a9\u7801\u4e0e\u539f\u59cb\u5e27\u6df7\u5408\u4ee5\u521b\u5efa\u68c0\u6d4b\u5230\u7684\u8f66\u9053\u7684\u89c6\u89c9\u53e0\u52a0\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u6700\u540e\uff0c\u5c06\u6df7\u5408\u540e\u7684\u5e27\u5199\u5165\u8f93\u51fa\u89c6\u9891\u6587\u4ef6\uff0c\u811a\u672c\u7ee7\u7eed\u5bf9\u8f93\u5165\u89c6\u9891\u4e2d\u7684\u6240\u6709\u5e27\u6267\u884c\u6b64\u8fc7\u7a0b\u5e76\u5173\u95ed\u6240\u6709\u6587\u4ef6\u6d41\u3002\u8fd9\u6837\u4f1a\u751f\u6210\u4e00\u4e2a\u8f93\u51fa\u89c6\u9891\uff0c\u5176\u4e2d\u68c0\u6d4b\u5230\u7684\u8f66\u9053\u4f1a\u4ee5\u89c6\u89c9\u65b9\u5f0f\u7a81\u51fa\u663e\u793a\uff0c\u4ece\u800c\u5c55\u793a\u8be5\u6a21\u578b\u5728\u73b0\u5b9e\u573a\u666f\u4e2d\u6267\u884c\u8f66\u9053\u68c0\u6d4b\u7684\u80fd\u529b\u3002<\/p>\n\n\n\n<p><strong>\u5b9e\u9a8c\u7ed3\u679c<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u73b0\u5728\u6765\u770b
\u770b\u672c\u6587\u6700\u6709\u8da3\u7684\u90e8\u5206\u2014\u2014\u63a8\u7406\u7ed3\u679c\uff01\u5728\u6700\u540e\u4e00\u90e8\u5206\u4e2d\uff0c\u8ba9\u6211\u4eec\u770b\u4e00\u4e0b\u7ecf\u8fc7\u5fae\u8c03\u7684 HuggingFace SegFormer \u6a21\u578b\u5728\u8f66\u9053\u68c0\u6d4b\u4e2d\u7684\u63a8\u7406\u7ed3\u679c\u3002<img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_gif\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzvkFq71Za5qyKod332v5F2rIhn2Uhib7Yg2kqllSfEWqNia5K6jpANsRw\/640?wx_fmt=gif&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><\/p>\n\n\n\n<figure class=\"wp-block-image\"><img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_gif\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzTw546AGmZNtrQNof8d9h8ia4vWztNC7Dav1ibLoGlLRicqZpP56VfhDRA\/640?wx_fmt=gif&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"\/><\/figure>\n\n\n\n<p><img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_gif\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1Tnz9bZGUgib3dcibH92U4M8sr6YmEdJPyb4ajuM6xiabFgG6k1rWlusxsdHg\/640?wx_fmt=gif&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><\/p>\n\n\n\n<figure class=\"wp-block-image\"><img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_gif\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzAZSu3mTd14PLibVOUkA64BJnGBicMURVc6JJSZBcCVdh3sSHSeJ7hR1Q\/640?wx_fmt=gif&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"\/><\/figure>\n\n\n\n<p><img decoding=\"async\" src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_gif\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1Tnz1IXpNIpfuxfTicwShYZ9wPYFv2lKickwiaftRcazzlBkWAiald8J7eZvKw\/640?wx_fmt=gif&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><img decoding=\"async\" 
src=\"https:\/\/mmbiz.qpic.cn\/sz_mmbiz_gif\/rDAib0gF5OjZ5ktHXsYXZOvDsUVol1TnzibUC7A0VSpAjyjia9PYKpqJkFFM0TOD0AiaBarwzzFZYhCcI9zPX6LIxA\/640?wx_fmt=gif&amp;from=appmsg&amp;tp=webp&amp;wxfrom=5&amp;wx_lazy=1&amp;wx_co=1\" alt=\"\u56fe\u7247\"><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4ece\u4e0a\u9762\u663e\u793a\u7684\u63a8\u7406\u7ed3\u679c\u6765\u770b\uff0c\u6211\u4eec\u53ef\u4ee5\u5f97\u51fa\u7ed3\u8bba\uff0cSegFormer \u5728\u8f66\u9053\u68c0\u6d4b\u65b9\u9762\u6548\u679c\u5f88\u597d\u3002\u6b63\u5982\u672c\u6587\u6240\u8ff0\uff0c&nbsp; SegFormer-b2 \u6a21\u578b\u5728\u5927\u91cf BDD \u6570\u636e\u96c6\u7684\u5b50\u6837\u672c\u4e0a\u8fdb\u884c\u4e86 30 \u4e2a epoch \u7684\u5fae\u8c03\u3002 \u4e3a\u4e86\u589e\u5f3a\u60a8\u7684\u7406\u89e3\u5e76\u4eb2\u624b\u64cd\u4f5c\u4ee3\u7801\uff0c\u8bf7\u5728\u6b64\u5904\u6d4f\u89c8\u4ee3\u7801\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u4e3a\u4e86\u83b7\u5f97\u66f4\u597d\u3001\u66f4\u51c6\u786e\u7684\u7ed3\u679c\uff0c\u5efa\u8bae\u9009\u62e9\u66f4\u5927\u3001\u66f4\u51c6\u786e\u7684SegFormer-b5 \u6a21\u578b\uff0c\u5e76\u53ef\u80fd\u5728\u6574\u4e2a\u6570\u636e\u96c6\u4e0a\u5bf9\u5176\u8fdb\u884c\u66f4\u591a\u6b21\u8bad\u7ec3\u3002<\/p>\n\n\n\n<p><strong>\u7ed3\u8bba<\/strong><\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u5728\u672c\u6b21\u5b9e\u9a8c\u4e2d\uff0c\u6211\u4eec\u5229\u7528 BDD\uff08Berkeley DeepDrive\uff09\u8f66\u9053\u68c0\u6d4b\u6570\u636e\u96c6\u63d0\u4f9b\u7684\u4e30\u5bcc\u591a\u6837\u7684\u6570\u636e\uff0c\u6210\u529f\u5c55\u793a\u4e86\u5fae\u8c03\u7684 SegFormer \u6a21\u578b\u5728\u8f66\u9053\u68c0\u6d4b\u4efb\u52a1\u4e2d\u7684\u5e94\u7528\u3002\u8fd9\u79cd\u65b9\u6cd5\u51f8\u663e\u4e86\u5fae\u8c03\u7684\u6709\u6548\u6027\u4ee5\u53ca SegFormer 
\u67b6\u6784\u5728\u5904\u7406\u81ea\u52a8\u9a7e\u9a76\u548c\u9053\u8def\u5b89\u5168\u4e2d\u7684\u590d\u6742\u8bed\u4e49\u5206\u5272\u4efb\u52a1\u65f6\u7684\u7a33\u5065\u6027\uff0c\u5373\u4f7f\u5728\u6f06\u9ed1\u7684\u591c\u665a\u4e5f\u662f\u5982\u6b64\u3002<\/p>\n\n\n\n<p>&nbsp;&nbsp;&nbsp;&nbsp;\u6700\u7ec8\u7684\u8f93\u51fa\u7ed3\u679c\uff08\u68c0\u6d4b\u5230\u7684\u8f66\u9053\u53e0\u52a0\u5728\u539f\u59cb\u89c6\u9891\u5e27\u4e0a\uff09\u4e0d\u4ec5\u53ef\u4f5c\u4e3a\u6982\u5ff5\u9a8c\u8bc1\uff0c\u8fd8\u5c55\u793a\u4e86\u8be5\u6280\u672f\u5728\u5b9e\u65f6\u5e94\u7528\u4e2d\u7684\u6f5c\u529b\u3002\u8f66\u9053\u68c0\u6d4b\u7684\u6d41\u7545\u6027\u548c\u51c6\u786e\u6027\uff08\u5728\u53e0\u52a0\u7684\u7eff\u8272\u8499\u7248\u4e2d\u53ef\u89c6\u5316\uff09\u8bc1\u660e\u4e86\u8be5\u6a21\u578b\u7684\u6709\u6548\u6027\u3002\u6700\u540e\uff0c\u53ef\u4ee5\u80af\u5b9a\u7684\u662f\uff0c\u5373\u4f7f\u6709\u591a\u79cd\u5c16\u7aef\u7684\u8f66\u9053\u68c0\u6d4b\u7b97\u6cd5\uff0c\u5bf9 SegFormer \u8fd9\u6837\u7684\u6a21\u578b\u8fdb\u884c\u5fae\u8c03\u4e5f\u80fd\u83b7\u5f97\u51fa\u8272\u7684\u7ed3\u679c\uff01<\/p>\n\n\n\n<p><strong>\u53c2\u8003\u94fe\u63a5\uff1a<\/strong><\/p>\n\n\n\n<p>HuggingFace SegFormer\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code>https:&#47;&#47;huggingface.co\/docs\/transformers\/model_doc\/segformer<\/code><\/pre>\n\n\n\n<p>\u4f2f\u514b\u5229 Deep Drive \u6570\u636e\u96c6\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code>https:<em>\/\/deepdrive.berkeley.edu\/<\/em><\/code><\/pre>\n\n\n\n<p><strong>\u6e90\u7801\u4e0b\u8f7d\u94fe\u63a5\uff1a<\/strong><\/p>\n\n\n\n<p><\/p>\n\n\n\n<p>http:\/\/www.gitpp.com\/datasets\/learnopencv-cn\/tree\/master\/Fine-Tuning-SegFormer-For-Lane-Detection<\/p>\n","protected":false},"excerpt":{"rendered":"<p>\u539f\u6587\u94fe\u63a5\uff1a \u80cc\u666f\u4ecb\u7ecd &nbsp;&nbsp;&nbsp; 
SegFormer\uff1a\u5b9e\u4f8b\u5206\u5272\u5728\u81ea\u52a8\u9a7e\u9a76\u6c7d\u8f66\u6280\u672f\u7684\u5feb [&hellip;]<\/p>\n","protected":false},"author":9,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[7,33],"tags":[],"class_list":["post-785","post","type-post","status-publish","format-standard","hentry","category-7","category-33"],"blocksy_meta":"","_links":{"self":[{"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/posts\/785","targetHints":{"allow":["GET"]}}],"collection":[{"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/users\/9"}],"replies":[{"embeddable":true,"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/comments?post=785"}],"version-history":[{"count":1,"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/posts\/785\/revisions"}],"predecessor-version":[{"id":786,"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/posts\/785\/revisions\/786"}],"wp:attachment":[{"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/media?parent=785"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/categories?post=785"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/ai.gitpp.com\/index.php\/wp-json\/wp\/v2\/tags?post=785"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}