Capturing an object's RGB, depth, infrared (IR) images and point cloud data with the Azure Kinect SDK and saving them locally


I recently wanted to do a 3D reconstruction of an object, so I bought Microsoft's latest depth camera. Although it is a big step up from the previous two generations, there is very little reference material for it. Even the simplest tasks, grabbing images and saving point clouds, took me quite some effort. In the end, by combining the existing material with the camera's SDK sample code, I got image saving and point cloud capture and saving working.

Here is the full code.

I am using Visual Studio 2017 Professional, OpenCV 4.2, and Microsoft.Azure.Kinect.Sensor 1.4.1.

 //C++
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <chrono>
#include <cmath>
#include <cstdio>
 //OpenCV
#include <opencv2/opencv.hpp>
 //Kinect DK
#include <k4a/k4a.h>
#include <k4a/k4a.hpp>

using namespace cv;
using namespace std;

 //macros
 //convenience switch for the std::cout debug output below
#define DEBUG_std_cout 0


static void create_xy_table(const k4a_calibration_t *calibration, k4a_image_t xy_table)
{
	k4a_float2_t *table_data = (k4a_float2_t *)(void *)k4a_image_get_buffer(xy_table);

	int width = calibration->depth_camera_calibration.resolution_width;
	int height = calibration->depth_camera_calibration.resolution_height;

	k4a_float2_t p;
	k4a_float3_t ray;
	int valid;

	for (int y = 0, idx = 0; y < height; y++)
	{
		p.xy.y = (float)y;
		for (int x = 0; x < width; x++, idx++)
		{
			p.xy.x = (float)x;

			k4a_calibration_2d_to_3d(
				calibration, &p, 1.f, K4A_CALIBRATION_TYPE_DEPTH, K4A_CALIBRATION_TYPE_DEPTH, &ray, &valid);

			if (valid)
			{
				table_data[idx].xy.x = ray.xyz.x;
				table_data[idx].xy.y = ray.xyz.y;
			}
			else
			{
				table_data[idx].xy.x = nanf("");
				table_data[idx].xy.y = nanf("");
			}
		}
	}
}
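// Note: because the depth argument passed to k4a_calibration_2d_to_3d() above is fixed at
// 1.0 mm, each table entry stores the X and Y of the viewing ray through that depth pixel at
// unit depth (i.e. X/Z and Y/Z). generate_point_cloud() below only has to multiply these two
// factors by the measured depth to recover the 3D point. The table depends only on the
// calibration, so it can be computed once and reused for every frame.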

static void generate_point_cloud(const k4a::image depth_image,	const k4a_image_t xy_table,	k4a_image_t point_cloud,	int *point_count)
{
	int width = depth_image.get_width_pixels();
	int height = depth_image.get_height_pixels();
	//int height = k4a_image_get_height_pixels(depth_image);

	uint16_t *depth_data = (uint16_t *)(void *)depth_image.get_buffer();
	k4a_float2_t *xy_table_data = (k4a_float2_t *)(void *)k4a_image_get_buffer(xy_table);
	k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);

	*point_count = 0;
	for (int i = 0; i < width * height; i++)
	{
		if (depth_data[i] != 0 && !isnan(xy_table_data[i].xy.x) && !isnan(xy_table_data[i].xy.y))
		{
			point_cloud_data[i].xyz.x = xy_table_data[i].xy.x * (float)depth_data[i];
			point_cloud_data[i].xyz.y = xy_table_data[i].xy.y * (float)depth_data[i];
			point_cloud_data[i].xyz.z = (float)depth_data[i];
			(*point_count)++;
		}
		else
		{
			point_cloud_data[i].xyz.x = nanf("");
			point_cloud_data[i].xyz.y = nanf("");
			point_cloud_data[i].xyz.z = nanf("");
		}
	}
}

static void write_point_cloud(const char *file_name, const k4a_image_t point_cloud, int point_count)
{
	int width = k4a_image_get_width_pixels(point_cloud);
	int height = k4a_image_get_height_pixels(point_cloud);

	k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);

	 //save to the ply file
	std::ofstream ofs(file_name); // text mode first
	ofs << "ply" << std::endl;
	ofs << "format ascii 1.0" << std::endl;
	ofs << "element vertex"
		<< " " << point_count << std::endl;
	ofs << "property float x" << std::endl;
	ofs << "property float y" << std::endl;
	ofs << "property float z" << std::endl;
	ofs << "end_header" << std::endl;
	ofs.close();

	std::stringstream ss;
	for (int i = 0; i < width * height; i++)
	{
		if (isnan(point_cloud_data[i].xyz.x) || isnan(point_cloud_data[i].xyz.y) || isnan(point_cloud_data[i].xyz.z))
		{
			continue;
		}

		ss << (float)point_cloud_data[i].xyz.x << " " << (float)point_cloud_data[i].xyz.y << " "
			<< (float)point_cloud_data[i].xyz.z << std::endl;
	}

	std::ofstream ofs_text(file_name, std::ios::out | std::ios::app);
	ofs_text.write(ss.str().c_str(), (std::streamsize)ss.str().length());
}
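// For reference, the .ply file produced by write_point_cloud() is plain ASCII: the header
// written above is followed by one "x y z" line per valid point, with coordinates in
// millimeters in the depth camera's coordinate system. The layout looks like this (the
// vertex count and coordinates shown here are placeholders, not real output):
//
//   ply
//   format ascii 1.0
//   element vertex <point_count>
//   property float x
//   property float y
//   property float z
//   end_header
//   -123.456 78.9 512
//   ...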



int main(int argc, char *argv[]) {
	/*
		Find and open an Azure Kinect device
	*/
	//count the connected devices

	const uint32_t device_count = k4a::device::get_installed_count();
	if (0 == device_count) {
		cout << "Error: no K4A devices found. " << endl;
		return -1;
	}
	else {
		std::cout << "Found " << device_count << " connected devices. " << std::endl;
		if (1 != device_count)// more than one device also counts as an error here
		{
			std::cout << "Error: more than one K4A devices found. " << std::endl;
			return -1;
		}
		else// this sample only operates on a single device
		{
			std::cout << "Done: found 1 K4A device. " << std::endl;
		}
	}
	//open the (default) device
	k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
	std::cout << "Done: open device. " << std::endl;

	/*
		Retrieve and save Azure Kinect image data
	*/
	//configure and start the device
	k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
	config.camera_fps = K4A_FRAMES_PER_SECOND_30;
	config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
	config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
	config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
	//config.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
	config.synchronized_images_only = true; // ensures that depth and color images are both available in the capture
	device.start_cameras(&config);
	std::cout << "Done: start camera." << std::endl;

	//txt file streams for the image index files
	ofstream rgb_out;
	ofstream d_out;
	ofstream ir_out;

	rgb_out.open("./rgb.txt");
	d_out.open("./depth.txt");
	ir_out.open("./ir.txt");

	rgb_out << "#  color images" << endl;
	rgb_out << "#  file: rgbd_dataset" << endl;
	rgb_out << "#  timestamp" << "    " << "filename" << endl;

	d_out << "#  depth images" << endl;
	d_out << "#  file: rgbd_dataset" << endl;
	d_out << "#  timestamp" << "    " << "filename" << endl;

	ir_out << "#  ir images" << endl;
	ir_out << "#  file: rgbd_dataset" << endl;
	ir_out << "#  timestamp" << "    " << "filename" << endl;

	rgb_out << flush;
	d_out << flush;
	//stabilization
	k4a::capture capture;
	int iAuto = 0;//used to let the camera settle, similar to auto-exposure
	int iAutoError = 0;//counts failed capture attempts while waiting for auto-exposure
	while (true) {
		if (device.get_capture(&capture)) {
			std::cout << iAuto << ". Capture several frames to give auto-exposure" << std::endl;

			//skip the first n (successful) captures to let the camera stabilize
			if (iAuto != 30) {
				iAuto++;
				continue;
			}
			else {
				std::cout << "Done: auto-exposure" << std::endl;
				break;// leave this loop once the camera has stabilized
			}

		}
		else {
			std::cout << iAutoError << ". K4A_WAIT_RESULT_TIMEOUT." << std::endl;
			if (iAutoError != 30) {
				iAutoError++;
				continue;
			}
			else {
				std::cout << "Error: failed to give auto-exposure. " << std::endl;
				return -1;
			}
		}
	}
	std::cout << "-----------------------------------" << std::endl;
	std::cout << "----- Have Started Kinect DK. -----" << std::endl;
	std::cout << "-----------------------------------" << std::endl;
	//grab captures from the device
	k4a::image rgbImage;
	k4a::image depthImage;
	k4a::image irImage;
	k4a::image transformed_depthImage;

	cv::Mat cv_rgbImage_with_alpha;
	cv::Mat cv_rgbImage_no_alpha;
	cv::Mat cv_depth;
	cv::Mat cv_depth_8U;
	cv::Mat cv_irImage;
	cv::Mat cv_irImage_8U;

	//main acquisition loop: grab up to 100 frames; press 'q' in an image window to stop early
	for (size_t i = 0; i < 100; i++)
	{
			if (device.get_capture(&capture, std::chrono::milliseconds(1000))) {
					//rgb
					//* Each pixel of BGRA32 data is four bytes. The first three bytes represent Blue, Green,
					//* and Red data. The fourth byte is the alpha channel and is unused in the Azure Kinect APIs.
					rgbImage = capture.get_color_image();
#if DEBUG_std_cout == 1
					std::cout << "[rgb] " << "
"
						<< "format: " << rgbImage.get_format() << "
"
						<< "device_timestamp: " << rgbImage.get_device_timestamp().count() << "
"
						<< "system_timestamp: " << rgbImage.get_system_timestamp().count() << "
"
						<< "height*width: " << rgbImage.get_height_pixels() << ", " << rgbImage.get_width_pixels()
						<< std::endl;
#endif

					//depth
					//* Each pixel of DEPTH16 data is two bytes of little-endian unsigned depth data. The unit of
					//* the data is millimeters from the origin of the camera.
					depthImage = capture.get_depth_image();
#if DEBUG_std_cout == 1
					std::cout << "[depth] " << "
"
						<< "format: " << depthImage.get_format() << "
"
						<< "device_timestamp: " << depthImage.get_device_timestamp().count() << "
"
						<< "system_timestamp: " << depthImage.get_system_timestamp().count() << "
"
						<< "height*width: " << depthImage.get_height_pixels() << ", " << depthImage.get_width_pixels()
						<< std::endl;
#endif

					//ir
					//* Each pixel of IR16 data is two bytes of little-endian unsigned data. The value of the data
					//* represents brightness.
					irImage = capture.get_ir_image();
#if DEBUG_std_cout == 1
					std::cout << "[ir] " << "
"
						<< "format: " << irImage.get_format() << "
"
						<< "device_timestamp: " << irImage.get_device_timestamp().count() << "
"
						<< "system_timestamp: " << irImage.get_system_timestamp().count() << "
"
						<< "height*width: " << irImage.get_height_pixels() << ", " << irImage.get_width_pixels()
						<< std::endl;
#endif

					//register the depth image to the RGB image
					//Get the camera calibration for the entire K4A device, which is used for all transformation functions.
					k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);

					k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);

					transformed_depthImage = k4aTransformation.depth_image_to_color_camera(depthImage);
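					// depth_image_to_color_camera() re-projects the depth frame into the color camera's
					// geometry, so transformed_depthImage has the color resolution (1920x1080 here) and
					// is aligned pixel-for-pixel with rgbImage.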

					cv_rgbImage_with_alpha = cv::Mat(rgbImage.get_height_pixels(), rgbImage.get_width_pixels(), CV_8UC4,
						(void *)rgbImage.get_buffer());
					cv::cvtColor(cv_rgbImage_with_alpha, cv_rgbImage_no_alpha, cv::COLOR_BGRA2BGR);

					cv_depth = cv::Mat(transformed_depthImage.get_height_pixels(), transformed_depthImage.get_width_pixels(), CV_16U,
						(void *)transformed_depthImage.get_buffer(), static_cast<size_t>(transformed_depthImage.get_stride_bytes()));

					normalize(cv_depth, cv_depth_8U, 0, 256 * 256, NORM_MINMAX);
					cv_depth_8U.convertTo(cv_depth, CV_8U, 1);

					cv_irImage = cv::Mat(irImage.get_height_pixels(), irImage.get_width_pixels(), CV_16U,
						(void *)irImage.get_buffer(), static_cast<size_t>(irImage.get_stride_bytes()));
					normalize(cv_irImage, cv_irImage_8U, 0, 256 * 256, NORM_MINMAX);
					cv_irImage.convertTo(cv_irImage_8U, CV_8U, 1);
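					// Note: normalize() stretches the 16-bit values over the full range purely so that
					// imshow() displays something visible; cv_depth_8U stays a 16-bit Mat, so the depth
					// PNG saved below is still 16-bit (as noted later in the post). The convertTo() just
					// above replaces the normalized IR image with a plain 8-bit cast of the raw IR frame,
					// so bright IR pixels saturate.
					// Optional sketch for a directly viewable 8-bit depth preview (hypothetical variable name):
					//   cv::Mat cv_depth_preview;
					//   cv::normalize(cv_depth_8U, cv_depth_preview, 0, 255, cv::NORM_MINMAX, CV_8U);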

				
					//point cloud (commented-out alternative using the SDK's depth_image_to_point_cloud)
					//k4a::image xyzImage;
					//cv::Mat cv_xyzImage;// 16-bit signed
					//cv::Mat cv_xyzImage_32F;// 32-bit float
					//	/*
					//		Each pixel of the xyz_image consists of three int16_t values, totaling 6 bytes. The three int16_t values are the X, Y, and Z values of the point.
					//		Three signed 16-bit coordinate values are stored per pixel (in millimeters), so the XYZ image stride is set to width * 3 * sizeof(int16_t).
					//		The data is pixel-interleaved: X of pixel 0, Y of pixel 0, Z of pixel 0, X of pixel 1, and so on.
					//		If a pixel cannot be converted to 3D, the function assigns it the value [0,0,0].
					//	*/

					//xyzImage = k4aTransformation.depth_image_to_point_cloud(depthImage, K4A_CALIBRATION_TYPE_DEPTH);
					//cv_xyzImage = cv::Mat(xyzImage.get_height_pixels(), xyzImage.get_width_pixels(), CV_16SC3, (void *)xyzImage.get_buffer(), static_cast<size_t>(xyzImage.get_stride_bytes()));
					//cv_xyzImage.convertTo(cv_xyzImage_32F, CV_32FC3, 1.0 / 1000, 0);// convert to float and from mm to m
					//cv::imshow("xyzimage", cv_xyzImage_32F);
				
			
					//show image
					cv::imshow("color", cv_rgbImage_no_alpha);
					cv::imshow("depth", cv_depth_8U);
					cv::imshow("ir", cv_irImage_8U);

					//save image
					double time_rgb = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
							rgbImage.get_device_timestamp()).count());

					std::string filename_rgb = std::to_string(time_rgb / 1000000) + ".png";

					double time_d = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
						depthImage.get_device_timestamp()).count());

					std::string filename_d = std::to_string(time_d / 1000000) + ".png";

					double time_ir = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
						irImage.get_device_timestamp()).count());
					std::string filename_ir = std::to_string(time_ir / 1000000) + ".png";
					imwrite("./rgb/" + filename_rgb, cv_rgbImage_no_alpha);
					imwrite("./depth/" + filename_d, cv_depth_8U);
					imwrite("./ir/" + filename_ir, cv_irImage_8U);

					
					//const int32_t TIMEOUT_IN_MS = 1000;
					//std::string file_name;
					//uint32_t device_count = 0;

					//k4a_device_t device1 = NULL;
					//k4a_device_configuration_t config1 = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
					//k4a_capture_t capture1 = NULL;
					//k4a_image_t depth_image = NULL;
					//k4a_calibration_t calibration1;

					k4a_image_t xy_table = NULL;
					k4a_image_t point_cloud = NULL;
					int point_count = 0;

					double time_point = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
						rgbImage.get_device_timestamp()).count());
					std::string filename_point = std::to_string(time_point / 1000000) + ".ply";
					//file_name = "./pointcloud.ply";

		/*			device_count1 = k4a_device_get_installed_count();

					if (device_count1 == 0)
					{
						printf("No K4A devices found\n");
						return 0;
					}*/

					/*if (K4A_RESULT_SUCCEEDED != k4a_device_open(K4A_DEVICE_DEFAULT, &device))
					{
						printf("Failed to open device\n");
					}*/


				/*	config1.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
					config1.camera_fps = K4A_FRAMES_PER_SECOND_30;*/
					
				/*	k4a_device_get_calibration(device1, config1.depth_mode, config1.color_resolution, &calibration1);*/

					k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
						k4aCalibration.depth_camera_calibration.resolution_width,
						k4aCalibration.depth_camera_calibration.resolution_height,
						k4aCalibration.depth_camera_calibration.resolution_width * (int)sizeof(k4a_float2_t),
						&xy_table);

					create_xy_table(&k4aCalibration, xy_table);

					k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
						k4aCalibration.depth_camera_calibration.resolution_width,
						k4aCalibration.depth_camera_calibration.resolution_height,
						k4aCalibration.depth_camera_calibration.resolution_width * (int)sizeof(k4a_float3_t),
						&point_cloud);
				/*	k4a_device_start_cameras(device, &config);
					k4a_device_get_capture(device, &capture, TIMEOUT_IN_MS);*/

					//depth_image = k4a_capture_get_depth_image(capture1);
					if (!depthImage)
					{
						printf("Failed to get depth image from capture\n");
					}

					generate_point_cloud(depthImage, xy_table, point_cloud, &point_count);
					
					write_point_cloud(filename_point.c_str(), point_cloud, point_count);

				/*	k4a_image_release(depthImage);
					k4a_capture_release(capture);*/
					k4a_image_release(xy_table);
					k4a_image_release(point_cloud);
					//returnCode = 0;
					//k4a_device_close(device1);


					std::cout << "Acquiring!" << endl;

					//append entries to rgb.txt, depth.txt and ir.txt
					rgb_out << std::to_string(time_rgb / 1000000) << "    " << "rgb/" << filename_rgb << endl;
					d_out << std::to_string(time_d / 1000000) << "    " << "depth/" << filename_d << endl;
					ir_out << std::to_string(time_ir / 1000000) << "    " << "ir/" << filename_ir << endl;

					rgb_out << flush;
					d_out << flush;
					ir_out << flush;

					k4aTransformation.destroy();

					cv_rgbImage_with_alpha.release();
					cv_rgbImage_no_alpha.release();
					cv_depth.release();
					cv_depth_8U.release();
					cv_irImage.release();
					cv_irImage_8U.release();
					capture.reset();

					if (cv::waitKey() == 'q')
					{//capture is driven by key presses: each frame waits for a key, and 'q' exits the loop and ends the capture
						std::cout << "----------------------------------" << std::endl;
						std::cout << "------------- closed -------------" << std::endl;
						std::cout << "----------------------------------" << std::endl;
						break;
					}
				}
				else {
					std::cout << "false: K4A_WAIT_RESULT_TIMEOUT." << std::endl;
				}
		}
	cv::destroyAllWindows();
	rgb_out << flush;
	d_out << flush;
	ir_out << flush;
	rgb_out.close();
	d_out.close();
	ir_out.close();


	//release everything and close the device
	rgbImage.reset();
	depthImage.reset();
	irImage.reset();
	capture.reset();
	device.close();
	

	return 0;
}

Installing the required packages

The OpenCV 4.2 and depth-camera SDK packages used here were installed directly as NuGet packages; the screenshots below show each step.
Right-click References and choose Manage NuGet Packages.
Click Install and wait for the installation to finish.
OpenCV is installed the same way. With lower OpenCV versions I kept getting errors, and it only worked once I moved to 4.2. Some posts online say that older OpenCV releases don't support VS2017; whether or not that is the real cause, a newer version fixed it for me. Another advantage of installing OpenCV through NuGet is that it saves all the manual configuration: once the package is installed it is ready to use. (The SDK package can presumably also be added from the NuGet Package Manager Console with Install-Package Microsoft.Azure.Kinect.Sensor -Version 1.4.1.)

Program output

A few screenshots of the program's output:

RGB image
IR image
The depth image is displayed and saved using the normalization approach from another blog post; the saved depth image is still 16-bit. Later I may try the (5,6,5) display method from Microsoft's demo.
Depth image
Point cloud visualization
The point cloud is visualized from the saved .ply file; C++ really isn't friendly to an outsider like me.
The MATLAB code used to display it is as follows:

ptCloud = pcread('F:\Depth_image_C++\open_and_save\open_and_save\14.551055.ply'); % read from a PLY file
figure
pcshow(ptCloud);
title('Original Data');

Saving files

You need to create a few folders in advance to hold the saved files (rgb, depth and ir, next to the executable), as shown in the screenshot.
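If you would rather have the program create these folders itself, a minimal sketch is shown below. It assumes a C++17 toolset (std::filesystem shipped with VS2017 15.7 and later), and the helper name ensure_output_dirs is my own, not part of the original code. Call it once at the top of main(), before the capture loop.

#include <filesystem>
#include <system_error>
#include <iostream>

// Create the output folders used by imwrite() if they do not already exist.
static void ensure_output_dirs()
{
	namespace fs = std::filesystem;
	for (const char *dir : { "./rgb", "./depth", "./ir" })
	{
		std::error_code ec;
		fs::create_directories(dir, ec); // no-op if the folder is already there
		if (ec)
			std::cerr << "Failed to create " << dir << ": " << ec.message() << std::endl;
	}
}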

Closing notes

This is mainly a record of my own learning process; if it also helps you, even better.

Reference posts:
https://blog.csdn.net/qq_40936780/article/details/102634734?utm_medium=distribute.pc_relevant.none-task-blog-title-3&spm=1001.2101.3001.4242
https://blog.csdn.net/Zlp19970106/article/details/107120743/?utm_medium=distribute.pc_relevant.none-task-blog-title-2&spm=1001.2101.3001.4242
There is one more reference blog post that I can no longer find; I'll add it once I track it down.
