首页 最新 热门 推荐

  • 首页
  • 最新
  • 热门
  • 推荐

C# OpenCvSharp DNN 部署yolov3目标检测

  • 25-02-19 03:00
  • 2411
  • 11568
blog.csdn.net

目录

效果

yolov3.cfg

项目

代码

下载


C# OpenCvSharp DNN 部署yolov3目标检测

效果

yolov3.cfg

  1. [net]
  2. # Testing
  3. #batch=1
  4. #subdivisions=1
  5. # Training
  6. batch=16
  7. subdivisions=1
  8. width=416
  9. height=416
  10. channels=3
  11. momentum=0.9
  12. decay=0.0005
  13. angle=0
  14. saturation = 1.5
  15. exposure = 1.5
  16. hue=.1
  17. learning_rate=0.001
  18. burn_in=1000
  19. max_batches = 500200
  20. policy=steps
  21. steps=400000,450000
  22. scales=.1,.1
  23. [convolutional]
  24. batch_normalize=1
  25. filters=32
  26. size=3
  27. stride=1
  28. pad=1
  29. activation=leaky
  30. # Downsample
  31. [convolutional]
  32. batch_normalize=1
  33. filters=64
  34. size=3
  35. stride=2
  36. pad=1
  37. activation=leaky
  38. [convolutional]
  39. batch_normalize=1
  40. filters=32
  41. size=1
  42. stride=1
  43. pad=1
  44. activation=leaky
  45. [convolutional]
  46. batch_normalize=1
  47. filters=64
  48. size=3
  49. stride=1
  50. pad=1
  51. activation=leaky
  52. [shortcut]
  53. from=-3
  54. activation=linear
  55. # Downsample
  56. [convolutional]
  57. batch_normalize=1
  58. filters=128
  59. size=3
  60. stride=2
  61. pad=1
  62. activation=leaky
  63. [convolutional]
  64. batch_normalize=1
  65. filters=64
  66. size=1
  67. stride=1
  68. pad=1
  69. activation=leaky
  70. [convolutional]
  71. batch_normalize=1
  72. filters=128
  73. size=3
  74. stride=1
  75. pad=1
  76. activation=leaky
  77. [shortcut]
  78. from=-3
  79. activation=linear
  80. [convolutional]
  81. batch_normalize=1
  82. filters=64
  83. size=1
  84. stride=1
  85. pad=1
  86. activation=leaky
  87. [convolutional]
  88. batch_normalize=1
  89. filters=128
  90. size=3
  91. stride=1
  92. pad=1
  93. activation=leaky
  94. [shortcut]
  95. from=-3
  96. activation=linear
  97. # Downsample
  98. [convolutional]
  99. batch_normalize=1
  100. filters=256
  101. size=3
  102. stride=2
  103. pad=1
  104. activation=leaky
  105. [convolutional]
  106. batch_normalize=1
  107. filters=128
  108. size=1
  109. stride=1
  110. pad=1
  111. activation=leaky
  112. [convolutional]
  113. batch_normalize=1
  114. filters=256
  115. size=3
  116. stride=1
  117. pad=1
  118. activation=leaky
  119. [shortcut]
  120. from=-3
  121. activation=linear
  122. [convolutional]
  123. batch_normalize=1
  124. filters=128
  125. size=1
  126. stride=1
  127. pad=1
  128. activation=leaky
  129. [convolutional]
  130. batch_normalize=1
  131. filters=256
  132. size=3
  133. stride=1
  134. pad=1
  135. activation=leaky
  136. [shortcut]
  137. from=-3
  138. activation=linear
  139. [convolutional]
  140. batch_normalize=1
  141. filters=128
  142. size=1
  143. stride=1
  144. pad=1
  145. activation=leaky
  146. [convolutional]
  147. batch_normalize=1
  148. filters=256
  149. size=3
  150. stride=1
  151. pad=1
  152. activation=leaky
  153. [shortcut]
  154. from=-3
  155. activation=linear
  156. [convolutional]
  157. batch_normalize=1
  158. filters=128
  159. size=1
  160. stride=1
  161. pad=1
  162. activation=leaky
  163. [convolutional]
  164. batch_normalize=1
  165. filters=256
  166. size=3
  167. stride=1
  168. pad=1
  169. activation=leaky
  170. [shortcut]
  171. from=-3
  172. activation=linear
  173. [convolutional]
  174. batch_normalize=1
  175. filters=128
  176. size=1
  177. stride=1
  178. pad=1
  179. activation=leaky
  180. [convolutional]
  181. batch_normalize=1
  182. filters=256
  183. size=3
  184. stride=1
  185. pad=1
  186. activation=leaky
  187. [shortcut]
  188. from=-3
  189. activation=linear
  190. [convolutional]
  191. batch_normalize=1
  192. filters=128
  193. size=1
  194. stride=1
  195. pad=1
  196. activation=leaky
  197. [convolutional]
  198. batch_normalize=1
  199. filters=256
  200. size=3
  201. stride=1
  202. pad=1
  203. activation=leaky
  204. [shortcut]
  205. from=-3
  206. activation=linear
  207. [convolutional]
  208. batch_normalize=1
  209. filters=128
  210. size=1
  211. stride=1
  212. pad=1
  213. activation=leaky
  214. [convolutional]
  215. batch_normalize=1
  216. filters=256
  217. size=3
  218. stride=1
  219. pad=1
  220. activation=leaky
  221. [shortcut]
  222. from=-3
  223. activation=linear
  224. [convolutional]
  225. batch_normalize=1
  226. filters=128
  227. size=1
  228. stride=1
  229. pad=1
  230. activation=leaky
  231. [convolutional]
  232. batch_normalize=1
  233. filters=256
  234. size=3
  235. stride=1
  236. pad=1
  237. activation=leaky
  238. [shortcut]
  239. from=-3
  240. activation=linear
  241. # Downsample
  242. [convolutional]
  243. batch_normalize=1
  244. filters=512
  245. size=3
  246. stride=2
  247. pad=1
  248. activation=leaky
  249. [convolutional]
  250. batch_normalize=1
  251. filters=256
  252. size=1
  253. stride=1
  254. pad=1
  255. activation=leaky
  256. [convolutional]
  257. batch_normalize=1
  258. filters=512
  259. size=3
  260. stride=1
  261. pad=1
  262. activation=leaky
  263. [shortcut]
  264. from=-3
  265. activation=linear
  266. [convolutional]
  267. batch_normalize=1
  268. filters=256
  269. size=1
  270. stride=1
  271. pad=1
  272. activation=leaky
  273. [convolutional]
  274. batch_normalize=1
  275. filters=512
  276. size=3
  277. stride=1
  278. pad=1
  279. activation=leaky
  280. [shortcut]
  281. from=-3
  282. activation=linear
  283. [convolutional]
  284. batch_normalize=1
  285. filters=256
  286. size=1
  287. stride=1
  288. pad=1
  289. activation=leaky
  290. [convolutional]
  291. batch_normalize=1
  292. filters=512
  293. size=3
  294. stride=1
  295. pad=1
  296. activation=leaky
  297. [shortcut]
  298. from=-3
  299. activation=linear
  300. [convolutional]
  301. batch_normalize=1
  302. filters=256
  303. size=1
  304. stride=1
  305. pad=1
  306. activation=leaky
  307. [convolutional]
  308. batch_normalize=1
  309. filters=512
  310. size=3
  311. stride=1
  312. pad=1
  313. activation=leaky
  314. [shortcut]
  315. from=-3
  316. activation=linear
  317. [convolutional]
  318. batch_normalize=1
  319. filters=256
  320. size=1
  321. stride=1
  322. pad=1
  323. activation=leaky
  324. [convolutional]
  325. batch_normalize=1
  326. filters=512
  327. size=3
  328. stride=1
  329. pad=1
  330. activation=leaky
  331. [shortcut]
  332. from=-3
  333. activation=linear
  334. [convolutional]
  335. batch_normalize=1
  336. filters=256
  337. size=1
  338. stride=1
  339. pad=1
  340. activation=leaky
  341. [convolutional]
  342. batch_normalize=1
  343. filters=512
  344. size=3
  345. stride=1
  346. pad=1
  347. activation=leaky
  348. [shortcut]
  349. from=-3
  350. activation=linear
  351. [convolutional]
  352. batch_normalize=1
  353. filters=256
  354. size=1
  355. stride=1
  356. pad=1
  357. activation=leaky
  358. [convolutional]
  359. batch_normalize=1
  360. filters=512
  361. size=3
  362. stride=1
  363. pad=1
  364. activation=leaky
  365. [shortcut]
  366. from=-3
  367. activation=linear
  368. [convolutional]
  369. batch_normalize=1
  370. filters=256
  371. size=1
  372. stride=1
  373. pad=1
  374. activation=leaky
  375. [convolutional]
  376. batch_normalize=1
  377. filters=512
  378. size=3
  379. stride=1
  380. pad=1
  381. activation=leaky
  382. [shortcut]
  383. from=-3
  384. activation=linear
  385. # Downsample
  386. [convolutional]
  387. batch_normalize=1
  388. filters=1024
  389. size=3
  390. stride=2
  391. pad=1
  392. activation=leaky
  393. [convolutional]
  394. batch_normalize=1
  395. filters=512
  396. size=1
  397. stride=1
  398. pad=1
  399. activation=leaky
  400. [convolutional]
  401. batch_normalize=1
  402. filters=1024
  403. size=3
  404. stride=1
  405. pad=1
  406. activation=leaky
  407. [shortcut]
  408. from=-3
  409. activation=linear
  410. [convolutional]
  411. batch_normalize=1
  412. filters=512
  413. size=1
  414. stride=1
  415. pad=1
  416. activation=leaky
  417. [convolutional]
  418. batch_normalize=1
  419. filters=1024
  420. size=3
  421. stride=1
  422. pad=1
  423. activation=leaky
  424. [shortcut]
  425. from=-3
  426. activation=linear
  427. [convolutional]
  428. batch_normalize=1
  429. filters=512
  430. size=1
  431. stride=1
  432. pad=1
  433. activation=leaky
  434. [convolutional]
  435. batch_normalize=1
  436. filters=1024
  437. size=3
  438. stride=1
  439. pad=1
  440. activation=leaky
  441. [shortcut]
  442. from=-3
  443. activation=linear
  444. [convolutional]
  445. batch_normalize=1
  446. filters=512
  447. size=1
  448. stride=1
  449. pad=1
  450. activation=leaky
  451. [convolutional]
  452. batch_normalize=1
  453. filters=1024
  454. size=3
  455. stride=1
  456. pad=1
  457. activation=leaky
  458. [shortcut]
  459. from=-3
  460. activation=linear
  461. ######################
  462. [convolutional]
  463. batch_normalize=1
  464. filters=512
  465. size=1
  466. stride=1
  467. pad=1
  468. activation=leaky
  469. [convolutional]
  470. batch_normalize=1
  471. size=3
  472. stride=1
  473. pad=1
  474. filters=1024
  475. activation=leaky
  476. [convolutional]
  477. batch_normalize=1
  478. filters=512
  479. size=1
  480. stride=1
  481. pad=1
  482. activation=leaky
  483. [convolutional]
  484. batch_normalize=1
  485. size=3
  486. stride=1
  487. pad=1
  488. filters=1024
  489. activation=leaky
  490. [convolutional]
  491. batch_normalize=1
  492. filters=512
  493. size=1
  494. stride=1
  495. pad=1
  496. activation=leaky
  497. [convolutional]
  498. batch_normalize=1
  499. size=3
  500. stride=1
  501. pad=1
  502. filters=1024
  503. activation=leaky
  504. [convolutional]
  505. size=1
  506. stride=1
  507. pad=1
  508. filters=255
  509. activation=linear
  510. [yolo]
  511. mask = 6,7,8
  512. anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
  513. classes=80
  514. num=9
  515. jitter=.3
  516. ignore_thresh = .7
  517. truth_thresh = 1
  518. random=1
  519. [route]
  520. layers = -4
  521. [convolutional]
  522. batch_normalize=1
  523. filters=256
  524. size=1
  525. stride=1
  526. pad=1
  527. activation=leaky
  528. [upsample]
  529. stride=2
  530. [route]
  531. layers = -1, 61
  532. [convolutional]
  533. batch_normalize=1
  534. filters=256
  535. size=1
  536. stride=1
  537. pad=1
  538. activation=leaky
  539. [convolutional]
  540. batch_normalize=1
  541. size=3
  542. stride=1
  543. pad=1
  544. filters=512
  545. activation=leaky
  546. [convolutional]
  547. batch_normalize=1
  548. filters=256
  549. size=1
  550. stride=1
  551. pad=1
  552. activation=leaky
  553. [convolutional]
  554. batch_normalize=1
  555. size=3
  556. stride=1
  557. pad=1
  558. filters=512
  559. activation=leaky
  560. [convolutional]
  561. batch_normalize=1
  562. filters=256
  563. size=1
  564. stride=1
  565. pad=1
  566. activation=leaky
  567. [convolutional]
  568. batch_normalize=1
  569. size=3
  570. stride=1
  571. pad=1
  572. filters=512
  573. activation=leaky
  574. [convolutional]
  575. size=1
  576. stride=1
  577. pad=1
  578. filters=255
  579. activation=linear
  580. [yolo]
  581. mask = 3,4,5
  582. anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
  583. classes=80
  584. num=9
  585. jitter=.3
  586. ignore_thresh = .7
  587. truth_thresh = 1
  588. random=1
  589. [route]
  590. layers = -4
  591. [convolutional]
  592. batch_normalize=1
  593. filters=128
  594. size=1
  595. stride=1
  596. pad=1
  597. activation=leaky
  598. [upsample]
  599. stride=2
  600. [route]
  601. layers = -1, 36
  602. [convolutional]
  603. batch_normalize=1
  604. filters=128
  605. size=1
  606. stride=1
  607. pad=1
  608. activation=leaky
  609. [convolutional]
  610. batch_normalize=1
  611. size=3
  612. stride=1
  613. pad=1
  614. filters=256
  615. activation=leaky
  616. [convolutional]
  617. batch_normalize=1
  618. filters=128
  619. size=1
  620. stride=1
  621. pad=1
  622. activation=leaky
  623. [convolutional]
  624. batch_normalize=1
  625. size=3
  626. stride=1
  627. pad=1
  628. filters=256
  629. activation=leaky
  630. [convolutional]
  631. batch_normalize=1
  632. filters=128
  633. size=1
  634. stride=1
  635. pad=1
  636. activation=leaky
  637. [convolutional]
  638. batch_normalize=1
  639. size=3
  640. stride=1
  641. pad=1
  642. filters=256
  643. activation=leaky
  644. [convolutional]
  645. size=1
  646. stride=1
  647. pad=1
  648. filters=255
  649. activation=linear
  650. [yolo]
  651. mask = 0,1,2
  652. anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
  653. classes=80
  654. num=9
  655. jitter=.3
  656. ignore_thresh = .7
  657. truth_thresh = 1
  658. random=1

项目

代码

using OpenCvSharp;
using OpenCvSharp.Dnn;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Windows.Forms;

namespace OpenCvSharp_DNN_Demo
{
    /// <summary>
    /// WinForms demo that runs YOLOv3 object detection through OpenCvSharp's DNN
    /// module: loads a Darknet cfg/weights pair, runs inference on a chosen image,
    /// decodes the YOLO output rows, applies NMS and draws the surviving boxes.
    /// </summary>
    public partial class frmMain : Form
    {
        public frmMain()
        {
            InitializeComponent();
        }

        // Fixed: the original filter listed "*.tiff" twice.
        string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tiff;*.png";
        string image_path = "";

        // Timestamps bracketing Forward() so the UI can report inference time.
        DateTime dt1 = DateTime.Now;
        DateTime dt2 = DateTime.Now;

        float confThreshold;   // minimum class score for a detection to be kept
        float nmsThreshold;    // IoU threshold used by non-maximum suppression

        int inpHeight;         // network input height (matches cfg height=416)
        int inpWidth;          // network input width  (matches cfg width=416)

        // Class labels loaded from coco.names. The published listing lost the
        // generic type arguments ("List class_names") to HTML escaping; restored.
        List<string> class_names;
        int num_class;

        Net opencv_net;        // the loaded Darknet model
        Mat BN_image;          // blob handed to the network

        Mat image;             // current input image
        Mat result_image;      // input image with detections drawn on it

        /// <summary>Lets the user pick an image file and loads it for detection.</summary>
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog ofd = new OpenFileDialog();
            ofd.Filter = fileFilter;
            if (ofd.ShowDialog() != DialogResult.OK) return;

            pictureBox1.Image = null;
            pictureBox2.Image = null;
            textBox1.Text = "";

            image_path = ofd.FileName;
            pictureBox1.Image = new Bitmap(image_path);
            image = new Mat(image_path);
        }

        /// <summary>Initializes thresholds, loads the network and the class names.</summary>
        private void Form1_Load(object sender, EventArgs e)
        {
            confThreshold = 0.5f;
            nmsThreshold = 0.4f;

            inpHeight = 416;
            inpWidth = 416;

            opencv_net = CvDnn.ReadNetFromDarknet("model/yolov3.cfg", "model/yolov3.weights");

            class_names = new List<string>();
            // using-statement so the reader is disposed (the original leaked it).
            using (StreamReader sr = new StreamReader("model/coco.names"))
            {
                string line;
                while ((line = sr.ReadLine()) != null)
                {
                    class_names.Add(line);
                }
            }
            // Count property instead of the LINQ Count() extension.
            num_class = class_names.Count;

            image_path = "test_img/dog.jpg";
            pictureBox1.Image = new Bitmap(image_path);
        }

        /// <summary>
        /// Runs inference on the current image. YOLO emits rows of
        /// [cx, cy, w, h, objectness, class scores...] with normalized
        /// coordinates, so boxes are rescaled by the image dimensions.
        /// </summary>
        private unsafe void button2_Click(object sender, EventArgs e)
        {
            if (image_path == "")
            {
                return;
            }
            textBox1.Text = "检测中,请稍等……";
            pictureBox2.Image = null;
            Application.DoEvents();

            image = new Mat(image_path);

            // 1/255 scaling, resize to network size, swapRB=true, no crop.
            BN_image = CvDnn.BlobFromImage(image, 1 / 255.0, new OpenCvSharp.Size(inpWidth, inpHeight), new Scalar(0, 0, 0), true, false);

            // Feed the blob to the network.
            opencv_net.SetInput(BN_image);

            // Run inference, collecting every unconnected output layer
            // (YOLOv3 has three [yolo] heads).
            var outNames = opencv_net.GetUnconnectedOutLayersNames();
            var outs = outNames.Select(_ => new Mat()).ToArray();

            dt1 = DateTime.Now;

            opencv_net.Forward(outs, outNames);

            dt2 = DateTime.Now;

            List<int> classIds = new List<int>();
            List<float> confidences = new List<float>();
            List<Rect> boxes = new List<Rect>();

            for (int i = 0; i < outs.Length; ++i)
            {
                float* data = (float*)outs[i].Data;
                for (int j = 0; j < outs[i].Rows; ++j, data += outs[i].Cols)
                {
                    // Columns 5.. hold the per-class scores.
                    Mat scores = outs[i].Row(j).ColRange(5, outs[i].Cols);

                    double minVal, max_class_socre;
                    OpenCvSharp.Point minLoc, classIdPoint;
                    // Get the value and location of the maximum score.
                    Cv2.MinMaxLoc(scores, out minVal, out max_class_socre, out minLoc, out classIdPoint);

                    if (max_class_socre > confThreshold)
                    {
                        // Center/size are normalized to [0,1]; convert to pixels.
                        int centerX = (int)(data[0] * image.Cols);
                        int centerY = (int)(data[1] * image.Rows);
                        int width = (int)(data[2] * image.Cols);
                        int height = (int)(data[3] * image.Rows);
                        int left = centerX - width / 2;
                        int top = centerY - height / 2;

                        classIds.Add(classIdPoint.X);
                        confidences.Add((float)max_class_socre);
                        boxes.Add(new Rect(left, top, width, height));
                    }
                }
            }

            // Non-maximum suppression to drop overlapping duplicate boxes.
            int[] indices;
            CvDnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, out indices);

            result_image = image.Clone();

            for (int i = 0; i < indices.Length; ++i)
            {
                int idx = indices[i];
                Rect box = boxes[idx];
                Cv2.Rectangle(result_image, new OpenCvSharp.Point(box.X, box.Y), new OpenCvSharp.Point(box.X + box.Width, box.Y + box.Height), new Scalar(0, 0, 255), 2);
                string label = class_names[classIds[idx]] + ":" + confidences[idx].ToString("0.00");
                Cv2.PutText(result_image, label, new OpenCvSharp.Point(box.X, box.Y - 5), HersheyFonts.HersheySimplex, 1, new Scalar(0, 0, 255), 2);
            }

            pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
            textBox1.Text = "推理耗时:" + (dt2 - dt1).TotalMilliseconds + "ms";
        }

        private void pictureBox2_DoubleClick(object sender, EventArgs e)
        {
            Common.ShowNormalImg(pictureBox2.Image);
        }

        private void pictureBox1_DoubleClick(object sender, EventArgs e)
        {
            Common.ShowNormalImg(pictureBox1.Image);
        }
    }
}

  1. using OpenCvSharp;
  2. using OpenCvSharp.Dnn;
  3. using System;
  4. using System.Collections.Generic;
  5. using System.Drawing;
  6. using System.IO;
  7. using System.Linq;
  8. using System.Windows.Forms;
  9. namespace OpenCvSharp_DNN_Demo
  10. {
  11. public partial class frmMain : Form
  12. {
  13. public frmMain()
  14. {
  15. InitializeComponent();
  16. }
  17. string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tiff;*.tiff;*.png";
  18. string image_path = "";
  19. DateTime dt1 = DateTime.Now;
  20. DateTime dt2 = DateTime.Now;
  21. float confThreshold;
  22. float nmsThreshold;
  23. int inpHeight;
  24. int inpWidth;
  25. List<string> class_names;
  26. int num_class;
  27. Net opencv_net;
  28. Mat BN_image;
  29. Mat image;
  30. Mat result_image;
  31. private void button1_Click(object sender, EventArgs e)
  32. {
  33. OpenFileDialog ofd = new OpenFileDialog();
  34. ofd.Filter = fileFilter;
  35. if (ofd.ShowDialog() != DialogResult.OK) return;
  36. pictureBox1.Image = null;
  37. pictureBox2.Image = null;
  38. textBox1.Text = "";
  39. image_path = ofd.FileName;
  40. pictureBox1.Image = new Bitmap(image_path);
  41. image = new Mat(image_path);
  42. }
  43. private void Form1_Load(object sender, EventArgs e)
  44. {
  45. confThreshold = 0.5f;
  46. nmsThreshold = 0.4f;
  47. inpHeight = 416;
  48. inpWidth = 416;
  49. opencv_net = CvDnn.ReadNetFromDarknet("model/yolov3.cfg", "model/yolov3.weights");
  50. class_names = new List<string>();
  51. StreamReader sr = new StreamReader("model/coco.names");
  52. string line;
  53. while ((line = sr.ReadLine()) != null)
  54. {
  55. class_names.Add(line);
  56. }
  57. num_class = class_names.Count();
  58. image_path = "test_img/dog.jpg";
  59. pictureBox1.Image = new Bitmap(image_path);
  60. }
  61. private unsafe void button2_Click(object sender, EventArgs e)
  62. {
  63. if (image_path == "")
  64. {
  65. return;
  66. }
  67. textBox1.Text = "检测中,请稍等……";
  68. pictureBox2.Image = null;
  69. Application.DoEvents();
  70. image = new Mat(image_path);
  71. BN_image = CvDnn.BlobFromImage(image, 1 / 255.0, new OpenCvSharp.Size(inpWidth, inpHeight), new Scalar(0, 0, 0), true, false);
  72. //配置图片输入数据
  73. opencv_net.SetInput(BN_image);
  74. //模型推理,读取推理结果
  75. var outNames = opencv_net.GetUnconnectedOutLayersNames();
  76. var outs = outNames.Select(_ => new Mat()).ToArray();
  77. dt1 = DateTime.Now;
  78. opencv_net.Forward(outs, outNames);
  79. dt2 = DateTime.Now;
  80. List<int> classIds = new List<int>();
  81. List<float> confidences = new List<float>();
  82. List&lt;Rect&gt; boxes = new List&lt;Rect&gt;();
  83. for (int i = 0; i < outs.Length; ++i)
  84. {
  85. float* data = (float*)outs[i].Data;
  86. for (int j = 0; j < outs[i].Rows; ++j, data += outs[i].Cols)
  87. {
  88. Mat scores = outs[i].Row(j).ColRange(5, outs[i].Cols);
  89. double minVal, max_class_socre;
  90. OpenCvSharp.Point minLoc, classIdPoint;
  91. // Get the value and location of the maximum score
  92. Cv2.MinMaxLoc(scores, out minVal, out max_class_socre, out minLoc, out classIdPoint);
  93. if (max_class_socre > confThreshold)
  94. {
  95. int centerX = (int)(data[0] * image.Cols);
  96. int centerY = (int)(data[1] * image.Rows);
  97. int width = (int)(data[2] * image.Cols);
  98. int height = (int)(data[3] * image.Rows);
  99. int left = centerX - width / 2;
  100. int top = centerY - height / 2;
  101. classIds.Add(classIdPoint.X);
  102. confidences.Add((float)max_class_socre);
  103. boxes.Add(new Rect(left, top, width, height));
  104. }
  105. }
  106. }
  107. int[] indices;
  108. CvDnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, out indices);
  109. result_image = image.Clone();
  110. for (int i = 0; i < indices.Length; ++i)
  111. {
  112. int idx = indices[i];
  113. Rect box = boxes[idx];
  114. Cv2.Rectangle(result_image, new OpenCvSharp.Point(box.X, box.Y), new OpenCvSharp.Point(box.X + box.Width, box.Y + box.Height), new Scalar(0, 0, 255), 2);
  115. string label = class_names[classIds[idx]] + ":" + confidences[idx].ToString("0.00");
  116. Cv2.PutText(result_image, label, new OpenCvSharp.Point(box.X, box.Y - 5), HersheyFonts.HersheySimplex, 1, new Scalar(0, 0, 255), 2);
  117. }
  118. pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
  119. textBox1.Text = "推理耗时:" + (dt2 - dt1).TotalMilliseconds + "ms";
  120. }
  121. private void pictureBox2_DoubleClick(object sender, EventArgs e)
  122. {
  123. Common.ShowNormalImg(pictureBox2.Image);
  124. }
  125. private void pictureBox1_DoubleClick(object sender, EventArgs e)
  126. {
  127. Common.ShowNormalImg(pictureBox1.Image);
  128. }
  129. }
  130. }

下载

源码下载

天天代码码天天
微信公众号
.NET 人工智能实践
注:本文转载自blog.csdn.net的天天代码码天天的文章"https://lw112190.blog.csdn.net/article/details/135597451"。版权归原作者所有,此博客不拥有其著作权,亦不承担相应法律责任。如有侵权,请联系我们删除。
复制链接
复制链接
相关推荐
发表评论
登录后才能发表评论和回复 注册

/ 登录

评论记录:

未查询到任何数据!
回复评论:

分类栏目

后端 (14832) 前端 (14280) 移动开发 (3760) 编程语言 (3851) Java (3904) Python (3298) 人工智能 (10119) AIGC (2810) 大数据 (3499) 数据库 (3945) 数据结构与算法 (3757) 音视频 (2669) 云原生 (3145) 云平台 (2965) 前沿技术 (2993) 开源 (2160) 小程序 (2860) 运维 (2533) 服务器 (2698) 操作系统 (2325) 硬件开发 (2492) 嵌入式 (2955) 微软技术 (2769) 软件工程 (2056) 测试 (2865) 网络空间安全 (2948) 网络与通信 (2797) 用户体验设计 (2592) 学习和成长 (2593) 搜索 (2744) 开发工具 (7108) 游戏 (2829) HarmonyOS (2935) 区块链 (2782) 数学 (3112) 3C硬件 (2759) 资讯 (2909) Android (4709) iOS (1850) 代码人生 (3043) 阅读 (2841)

热门文章

101
推荐
关于我们 隐私政策 免责声明 联系我们
Copyright © 2020-2025 蚁人论坛 (iYenn.com) All Rights Reserved.
Scroll to Top