API
 
Loading...
Searching...
No Matches
nnReconstructor.hpp
Go to the documentation of this file.
1/** \file nnReconstructor.hpp
2 * \brief The MagAO-X generic ImageStreamIO stream integrator
3 *
4 * \ingroup app_files
5 */
6
7 #ifndef nnReconstructor_hpp
8 #define nnReconstructor_hpp
9
10 #include <NvInfer.h>
11 #include <cuda_fp16.h>
12 #include <cuda_runtime_api.h>
13 #include <iostream>
14 #include <fstream>
15 #include <vector>
16 #include <limits>
17
18 #include <mx/improc/eigenCube.hpp>
19 #include <mx/improc/eigenImage.hpp>
20
21 #include "../../libMagAOX/libMagAOX.hpp" //Note this is included on command line to trigger pch
22 #include "../../magaox_git_version.h"
23
24 using namespace nvinfer1;
25
26 // Logger for TensorRT info/warning/errors
27 class Logger : public ILogger {
28 void log(Severity severity, const char* msg) noexcept override {
29 if (severity <= Severity::kWARNING) {
30 std::cout << msg << std::endl;
31 }
32 }
33 };
34
35 // #define MAGAOX_CURRENT_SHA1 0
36 // #define MAGAOX_REPO_MODIFIED 0
37
38 namespace MagAOX
39 {
40 namespace app
41 {
42
/// Neural-network wavefront reconstructor: monitors a PWFS shmim, runs a
/// TensorRT engine on the pre-processed pupils, and publishes modal
/// coefficients to an output shmim.
43 class nnReconstructor : public MagAOXApp<true>, public dev::shmimMonitor<nnReconstructor>
44 {
45 // Give the test harness access.
// NOTE(review): embedded lines 46-49 are missing from this extraction —
// presumably the friend declaration and related access grants; confirm against the repo.
47
49
50 // The base shmimMonitor type
// NOTE(review): embedded line 51 is missing — presumably
// `typedef dev::shmimMonitor<nnReconstructor> shmimMonitorT;` (the name is used below).
52
53 /// Floating point type in which to do all calculations.
54 typedef float realT;
55
56 public:
57 /** \name app::dev Configurations
58 *@{
59 */
60
61 ///@}
62
63 protected:
64 /** \name Configurable Parameters
65 *@{
66 */
67 std::string dataDirs; // Location where the data (onnx file, engine, WFS reference) is stored
68 std::string onnxFileName; // Name of the onnx files
69 std::string engineName; // Name of the engine
70 std::string engineDirs; // Directory where the TRT engine is stored
71 bool rebuildEngine; // If true, it will rebuild the engine and save it at engineName
72
73 Logger logger; // The tensorRT logger
74 std::vector<char> engineData; // for loading the engine file.
75
76 IRuntime* runtime {nullptr};
77 ICudaEngine* engine {nullptr};
// NOTE(review): embedded line 78 is missing — presumably
// `IExecutionContext* context {nullptr};` (context is assigned and used later in this file).
79 int inputC {0};
80 int inputH {0};
81 int inputW {0};
82 int inputSize {0};
83 int input2Size {0};
84 int outputSize {0};
85
// Device (GPU) buffers for the network's two inputs and one output.
86 float* d_input {nullptr};
87 float* d_input2 {nullptr};
88 float* d_output {nullptr};
89
90 float imageNorm; // Normalization constant for the image intensities
91 float modalNorm; // Normalization constant for the modal coefficients
92
93 int m_pupPix; // Number of pixels in the pupil used for the Neural Network
94 int pup_offset1_x; // Horizontal offset to the first set of pupils
95 int pup_offset1_y; // Vertical offset to the first set of pupils
96 int pup_offset2_x; // Horizontal offset to the second set of pupils
97 int pup_offset2_y; // Vertical offset to the second set of pupils
99 unsigned long frame_counter{ 0 };
100
101 int Npup{ 4 }; // Number of pupils
102 int zeroPad { 2 }; // Zero-padding (pixels) applied around each extracted pupil
103 float *modeval{ nullptr }; // Host buffer for the network's modal output
104 float *pp_image{ nullptr }; // Host buffer for the pre-processed pupil images
105 float *pupIs{ nullptr }; // Per-pupil summed intensities (length Npup)
106 //auto pupIs = std::make_unique<float[]>(4);
107
108 size_t m_pwfsWidth{ 0 }; ///< The width of the image
109 size_t m_pwfsHeight{ 0 }; ///< The height of the image.
110
111 uint8_t m_pwfsDataType{ 0 }; ///< The ImageStreamIO type code.
112 size_t m_pwfsTypeSize{ 0 }; ///< The size of the type, in bytes.
113
114 // variables for sending the output to aol_modevals
115 std::string m_modevalChannel;
// NOTE(review): embedded line 116 is missing — presumably the IMAGE/stream handle
// member (m_modevalStream), which is used throughout this file.
117 uint32_t m_modevalWidth {0}; ///< The width of the shmim
118 uint32_t m_modevalHeight {0}; ///< The height of the shmim
119 uint8_t m_modevalDataType {0}; ///< The ImageStreamIO type code.
120 size_t m_modevalTypeSize {0}; ///< The size of the type, in bytes.
121
122 bool m_modevalOpened {false};
123 bool m_modevalRestart {false};
124
125 public:
126 /// Default c'tor.
// NOTE(review): embedded lines 127-128 and 130-133 (c'tor/d'tor declarations)
// are missing from this extraction.
128
129 /// D'tor, declared and defined for noexcept.
133
134 virtual void setupConfig();
135
136 /// Implementation of loadConfig logic, separated for testing.
137 /** This is called by loadConfig().
138 */
139 int loadConfigImpl(
140 mx::app::appConfigurator &_config /**< [in] an application configuration from which to load values*/ );
141
142 virtual void loadConfig();
143
144 /// Startup function
145 /**
146 *
147 */
148 virtual int appStartup();
149
150 /// Implementation of the FSM for nnReconstructor.
151 /**
152 * \returns 0 on no critical error
153 * \returns -1 on an error requiring shutdown
154 */
155 virtual int appLogic();
156
157 /// Shutdown the app.
158 /**
159 *
160 */
161 virtual int appShutdown();
162
163 // Custom functions
164 int send_to_shmim();
165
166 void load_engine(const std::string filename);
// NOTE(review): embedded lines 167-170 are missing — presumably declarations of the
// runtime-setup and buffer-allocation helpers defined later in this file.
171
172 // void build_engine(){};
173
174 protected:
175 int allocate( const dev::shmimT &dummy /**< [in] tag to differentiate shmimMonitor parents.*/ );
176
177 int processImage( void *curr_src, ///< [in] pointer to start of current frame.
178 const dev::shmimT &dummy ///< [in] tag to differentiate shmimMonitor parents.
179 );
180 };
181
182 void nnReconstructor::load_engine(const std::string filename) {
183
184 std::ifstream file(filename, std::ios::binary);
185 if (!file) {
186 std::cout << "Error opening " << filename << std::endl;
187 }
188
189 file.seekg(0, std::ios::end);
190
191 engineData = std::vector<char>(file.tellg());
192 file.seekg(0, std::ios::beg);
193 file.read(engineData.data(), engineData.size());
194
195 }
196
// NOTE(review): the enclosing function's signature (embedded line 197) and the
// `runtime = createInferRuntime(logger);` call (embedded line 200) are missing
// from this extraction — confirm against the repo before editing this body.
198
199 // Create the runtime and deserialize the engine
201 if (!runtime) {
202 std::cout << "Failed to createInferRuntime\n";
203 }
204
// Rebuild the ICudaEngine from the bytes loaded by load_engine().
205 engine = runtime->deserializeCudaEngine(engineData.data(), engineData.size());
206 if (!engine) {
207 std::cout << "Failed to deserialize CUDA engine.\n";
208 } else {
209 std::cout << "Deserialized CUDA engine.\n";
210 }
211
// Execution context holds per-inference state for executeV2() later on.
212 context = engine->createExecutionContext();
213
214
215 int numIOTensors = engine->getNbIOTensors();
216 std::cout << "Number of IO Tensors: " << numIOTensors << std::endl;
217
// Tensor order is assumed fixed by the exported model: input, input2, output
// — TODO confirm against the ONNX export.
218 auto inputName = engine->getIOTensorName(0);
219 auto input2Name = engine->getIOTensorName(1);
220 auto outputName = engine->getIOTensorName(2);
221 std::cout << "Tensor IO names: " << inputName << " " << outputName << std::endl;
222
223 const auto inputDims = engine->getTensorShape(inputName);
224 const auto input2Dims = engine->getTensorShape(input2Name);
225 const auto outputDims = engine->getTensorShape(outputName);
// d[0] is the batch dimension; channel/height/width follow.
226 inputC = inputDims.d[1];
227 inputH = inputDims.d[2];
228 inputW = inputDims.d[3];
// NOTE(review): embedded line 229 is missing — presumably
// `inputSize = inputC * inputH * inputW;` (inputSize is printed below but not
// assigned anywhere in this view).
230 input2Size = input2Dims.d[1];
231
232 outputSize = outputDims.d[1];
233 std::cout << "Tensor input dimensions: " << inputC << "x" << inputH << "x" << inputW << std::endl;
234 std::cout << "Tensor input size : " << inputSize << std::endl;
235 std::cout << "Tensor input2 size : " << input2Size << std::endl;
236 std::cout << "Tensor output dimensions: " << outputSize << std::endl;
237
238 }
239
// NOTE(review): the enclosing function's signature (embedded line 240) is missing
// from this extraction. The body allocates the device-side input/output buffers
// sized from the engine's tensor shapes; cudaMalloc return codes are not checked.
241
242 // Allocate device memory for input and output
243 cudaMalloc((void**)&d_input, inputSize * sizeof(float));
244 cudaMalloc((void**)&d_input2, input2Size * sizeof(float));
245 cudaMalloc((void**)&d_output, outputSize * sizeof(float));
246
247 //cudaMalloc((void**)&d_input, inputSize * sizeof(float));
248 //cudaMalloc((void**)&d_output, outputSize * sizeof(float));
249
250 }
251
// NOTE(review): the destructor's signature (embedded line 262, presumably
// `inline nnReconstructor::~nnReconstructor() noexcept`) is missing from this
// extraction. Deleting TensorRT objects with `delete` matches the TRT 8+ API
// (destroy() is deprecated) — TODO confirm the TRT version in use.
261
263 if(context)
264 delete context;
265 if(engine)
266 delete engine;
267 if(runtime)
268 delete runtime;
269 };
270
// NOTE(review): the signature (embedded line 271, presumably
// `inline int nnReconstructor::send_to_shmim()`) plus embedded lines 277 and 281
// (presumably the memcpy of modeval into the stream and the semaphore post) are
// missing from this extraction — confirm against the repo.
272 {
273 // Check if processImage is running
274 // while(m_dmStream.md[0].write == 1);
275
// Standard ImageStreamIO write protocol: raise write flag, update, bump cnt0, clear flag.
276 m_modevalStream.md[0].write = 1;
278 m_modevalStream.md[0].cnt0++;
279 m_modevalStream.md[0].write = 0;
280
282
283 return 0;
284 }
285
286 inline nnReconstructor::nnReconstructor() : MagAOXApp( MAGAOX_CURRENT_SHA1, MAGAOX_REPO_MODIFIED )
287 {
288 return;
289 }
290
// NOTE(review): the signature (embedded lines 290-291, presumably
// `inline void nnReconstructor::setupConfig()`) and embedded line 294
// (presumably `shmimMonitorT::setupConfig(config);`) are missing from this
// extraction. Registers all "parameters.*" config keys read by loadConfigImpl().
292 {
293 std::cout << "setupConfig()" << std::endl;
295
296 config.add( "parameters.dataDirs",
297 "",
298 "parameters.dataDirs",
299 argType::Required,
300 "parameters",
301 "dataDirs",
302 false,
303 "string",
304 "The path to the directory with the onnx model." );
305
306 config.add( "parameters.engineDirs",
307 "",
308 "parameters.engineDirs",
309 argType::Required,
310 "parameters",
311 "engineDirs",
312 false,
313 "string",
314 "The path to the directory with the TRT engine." );
315
316 config.add( "parameters.onnxFileName",
317 "",
318 "parameters.onnxFileName",
319 argType::Required,
320 "parameters",
321 "onnxFileName",
322 false,
323 "string",
324 "Name of the Neural Net ONNX file" );
325
326 config.add( "parameters.engineName",
327 "",
328 "parameters.engineName",
329 argType::Required,
330 "parameters",
331 "engineName",
332 false,
333 "string",
334 "Name of the TRT engine." );
335 config.add( "parameters.rebuildEngine",
336 "",
337 "parameters.rebuildEngine",
338 argType::Required,
339 "parameters",
340 "rebuildEngine",
341 false,
342 "bool",
343 "If true the engine will be rebuild." );
344
345 config.add( "parameters.imageNorm",
346 "",
347 "parameters.imageNorm",
348 argType::Required,
349 "parameters",
350 "imageNorm",
351 false,
352 "float",
353 "Normalization term for the preprocessed images." );
354
355 config.add( "parameters.modalNorm",
356 "",
357 "parameters.modalNorm",
358 argType::Required,
359 "parameters",
360 "modalNorm",
361 false,
362 "float",
363 "Normalization term for the modal coefficients." );
364
365 config.add( "parameters.channel",
366 "",
367 "parameters.channel",
368 argType::Required,
369 "parameters",
370 "channel",
371 false,
372 "string",
373 "The output channel." );
374
375 config.add( "parameters.m_pupPix",
376 "",
377 "parameters.m_pupPix",
378 argType::Required,
379 "parameters",
380 "m_pupPix",
381 false,
382 "int",
383 "Number of pixels across a PWFS pupil." );
384
385 config.add( "parameters.pup_offset1_x",
386 "",
387 "parameters.pup_offset1_x",
388 argType::Required,
389 "parameters",
390 "pup_offset1_x",
391 false,
392 "int",
393 "Horizontal offset to the top left of the closest set op PWFS pupils." );
394
395 config.add( "parameters.pup_offset1_y",
396 "",
397 "parameters.pup_offset1_y",
398 argType::Required,
399 "parameters",
400 "pup_offset1_y",
401 false,
402 "int",
403 "Vertical offset to the top left of the closest set op PWFS pupils." );
404
405 config.add( "parameters.pup_offset2_x",
406 "",
407 "parameters.pup_offset2_x",
408 argType::Required,
409 "parameters",
410 "pup_offset2_x",
411 false,
412 "int",
413 "Horizontal offset to the top left of the furthest set op PWFS pupils." );
414
415 config.add( "parameters.pup_offset2_y",
416 "",
417 "parameters.pup_offset2_y",
418 argType::Required,
419 "parameters",
420 "pup_offset2_y",
421 false,
422 "int",
423 "Vertical offset to the top left of the furthest set op PWFS pupils." );
424
425 }
426
427 inline int nnReconstructor::loadConfigImpl( mx::app::appConfigurator &_config )
428 {
429 std::cout << "loadConfigImpl()" << std::endl;
431
432 _config( dataDirs, "parameters.dataDirs" );
433 _config( engineDirs, "parameters.engineDirs" );
434 _config( onnxFileName, "parameters.onnxFileName" );
435 _config( engineName, "parameters.engineName" );
436 _config( rebuildEngine, "parameters.rebuildEngine" );
437
438 _config( imageNorm, "parameters.imageNorm" );
439 _config( modalNorm, "parameters.modalNorm" );
440 _config( m_modevalChannel, "parameters.channel");
441
442 _config( m_pupPix, "parameters.m_pupPix" );
443 _config( pup_offset1_x, "parameters.pup_offset1_x" );
444 _config( pup_offset1_y, "parameters.pup_offset1_y" );
445 _config( pup_offset2_x, "parameters.pup_offset2_x" );
446 _config( pup_offset2_y, "parameters.pup_offset2_y" );
447
448 if( true )
449 {
450 std::cout << "Debug configuration loading: " << std::endl;
451 std::cout << "dataDirs: " << dataDirs << std::endl;
452 std::cout << "engineDirs: " << engineDirs << std::endl;
453 std::cout << "onnxFileName: " << onnxFileName << std::endl;
454 std::cout << "engineName: " << engineName << std::endl;
455 std::cout << "rebuildEngine: " << rebuildEngine << std::endl;
456 std::cout << "imageNorm: " << imageNorm << std::endl;
457 std::cout << "modalNorm: " << modalNorm << std::endl;
458 std::cout << "modeval Channel: " << m_modevalChannel << std::endl;
459
460 std::cout << "m_pupPix: " << m_pupPix << std::endl;
461 std::cout << "pup_offset1_x: " << pup_offset1_x << std::endl;
462 std::cout << "pup_offset1_y: " << pup_offset1_y << std::endl;
463 std::cout << "pup_offset2_x: " << pup_offset2_x << std::endl;
464 std::cout << "pup_offset2_y: " << pup_offset2_y << std::endl;
465 }
466
467 return 0;
468 }
469
// NOTE(review): the signature (embedded line 470, presumably
// `inline void nnReconstructor::loadConfig()`) is missing from this extraction.
// Simply delegates to the testable loadConfigImpl() with the app's configurator.
471 {
472 loadConfigImpl( config );
473 }
474
// NOTE(review): the signature (embedded line 475, presumably
// `inline int nnReconstructor::appStartup()`) and embedded lines 485-487 and 490
// (presumably the load_engine(full_filepath) call and runtime/buffer setup) are
// missing from this extraction — confirm against the repo.
476 {
477 if( shmimMonitorT::appStartup() < 0 )
478 {
479 return log<software_error, -1>( { __FILE__, __LINE__ } );
480 }
481
// Compose the full path to the serialized TRT engine from the configured pieces.
482 std::string full_filepath = engineDirs + "/" + engineName;
483 std::cout << "file: " << full_filepath << std::endl;
484
488
489 // state(stateCodes::READY);
491 return 0;
492 }
493
// NOTE(review): the signature (embedded line 494, presumably
// `inline int nnReconstructor::appLogic()`) and embedded line 505 (presumably the
// error log inside the updateINDI failure branch — which is why that branch looks
// empty here) are missing from this extraction.
495 {
496 if( shmimMonitorT::appLogic() < 0 )
497 {
498 return log<software_error, -1>( { __FILE__, __LINE__ } );
499 }
500
// Serialize INDI property updates against the rest of the app.
501 std::unique_lock<std::mutex> lock( m_indiMutex );
502
503 if( shmimMonitorT::updateINDI() < 0 )
504 {
506 }
507
508 return 0;
509 }
510
// NOTE(review): the signature (embedded line 511, presumably
// `inline int nnReconstructor::appShutdown()`) and embedded lines 513 and 528-529
// (presumably shmimMonitorT::appShutdown() and the cudaFree of the device buffers)
// are missing from this extraction. Releases the host-side buffers allocated in
// allocate().
512 {
514
515 if( pp_image )
516 {
517 delete[] pp_image;
518 }
519 if( pupIs)
520 {
521 delete[] pupIs;
522 }
523 if( modeval )
524 {
525 delete[] modeval;
526 }
527
530
531 return 0;
532 }
533
// NOTE(review): embedded lines 540-541, 544, 555, 562 and 564 are missing from
// this extraction — presumably the m_pwfsWidth/Height assignment from the
// monitored stream, the `pixels_per_quadrant` declaration (used below but never
// declared in this view), the ImageStreamIO close/open calls, and the open-failure
// log. The apparent brace imbalance around the `sem < 10` check comes from the
// lost openIm line. Confirm against the repo before editing this body.
534 inline int nnReconstructor::allocate( const dev::shmimT &dummy )
535 {
536 std::cout << "allocate()" << std::endl;
537 static_cast<void>( dummy ); // be unused
538
539 // Wavefront sensor setup
542 std::cout << "Width: " << m_pwfsWidth << " Height: " << m_pwfsHeight << std::endl;
543
545 std::cout << "Pixels: " << pixels_per_quadrant << std::endl;
// Host buffers: one pre-processed image per pupil, per-pupil intensities,
// and the network's modal output; all zero-initialized.
546 pp_image = new float[Npup * pixels_per_quadrant];
547 pupIs = new float[4];
548 modeval = new float[outputSize];
549 memset( pp_image, 0, sizeof( float ) * Npup * pixels_per_quadrant );
550 memset( pupIs, 0, sizeof( float ) * 4);
551 memset( modeval, 0, sizeof( float) * outputSize);
552 std::cout << "Close shmims" << std::endl;
553 // Allocate the DM shmim interface
554 if(m_modevalOpened){
556 }
557
558 std::cout << "Open shmims" << std::endl;
559 m_modevalOpened = false;
560 m_modevalRestart = false; //Set this up front, since we're about to restart.
561
// Require enough semaphores on the stream before declaring it usable.
563 if(m_modevalStream.md[0].sem < 10){
565 }else{
566 m_modevalOpened = true;
567 }
568 }
569
570 std::cout << "Done!" << std::endl;
571 if(!m_modevalOpened){
573 return -1;
574 }else{
575 m_modevalWidth = m_modevalStream.md->size[0];
576 m_modevalHeight = m_modevalStream.md->size[1];
577
578 m_modevalDataType = m_modevalStream.md->datatype;
579 m_modevalTypeSize = sizeof(float);
580
581 log<text_log>( "Opened " + m_modevalChannel + " " + std::to_string(m_modevalWidth) + " x " + std::to_string(m_modevalHeight) + " with data type: " + std::to_string(m_modevalDataType), logPrio::LOG_NOTICE);
582 }
583
584
585 return 0;
586 }
587
// NOTE(review): embedded lines 610-612, 615-630, 640-641, 649 and 653-654 are
// missing from this extraction — presumably the core pupil-extraction /
// normalization statements inside the if/else, the cudaMemcpy host-to-device
// copies, the device-to-host copy of the result, and the send_to_shmim() /
// frame_counter++ calls. Confirm against the repo before editing this body.
588 inline int nnReconstructor::processImage( void *curr_src, const dev::shmimT &dummy )
589 {
590 static_cast<void>( dummy ); // be unused
591
592 // aol_imwfs2 is reference and dark subtracted and is power normalized.
593 //Eigen::Map<eigenImage<unsigned short>> pwfsIm(static_cast<float *>( curr_src ), m_pwfsHeight, m_pwfsWidth );
594 //Eigen::Map<eigenImage<unsigned short>> pwfsIm(static_cast<unsigned short *>( curr_src ), m_pwfsHeight, m_pwfsWidth );
// Zero-copy view of the incoming frame as a float image.
595 Eigen::Map<eigenImage<float>> pwfsIm(reinterpret_cast<float*>(curr_src), m_pwfsHeight, m_pwfsWidth);
596
597 // Split up the four pupils for the Neural Network.
598 int ki = 0;
// Only the first 3 of the 4 pupil intensities are reset/normalized below while
// pupIs[3] is pinned to a constant — presumably intentional (4th pupil treated
// as the reference); TODO confirm.
599 for ( int pupI_i =0; pupI_i < 3; ++pupI_i){
600 pupIs[pupI_i] = 0.;
601 }
602 pupIs[3] = 0.0003;
603
// Walk a zero-padded (m_pupPix x m_pupPix) window; out-of-pupil samples are
// written as 0, in-pupil samples were filled by the (missing) else branch.
604 for( int col_i = -zeroPad; col_i < (m_pupPix - zeroPad); ++col_i )
605 {
606 for( int row_i = -zeroPad; row_i < (m_pupPix - zeroPad); ++row_i )
607 {
608 if ((col_i < 0) or (col_i >= (m_pupPix - 2*zeroPad)) or (row_i < 0) or (row_i >= (m_pupPix - 2*zeroPad))){
609 pp_image[ki] = 0;
613 }
614 else {
631 }
632 ++ki;
633 }
634 }
// NOTE(review): the 64*64 divisor looks like a hard-coded pupil size that should
// presumably track m_pupPix — verify against the trained network's preprocessing.
635 for ( int pupI_i =0; pupI_i < 3; ++pupI_i){
636 pupIs[pupI_i] = 3 * pupIs[pupI_i] / (64*64);
637 }
638
639 // Copy input data to device
642
643
644 // Run inference
// Buffer order must match the engine's IO tensor order: input, input2, output.
645 void* buffers[] = {d_input, d_input2, d_output};
646 context->executeV2(buffers);
647
648 // Copy output data back to host
650
// Periodic heartbeat to stdout (every 2000 frames).
651 if(frame_counter % 2000 == 0)
652 std::cout << "HOWDY" << std::endl;
655
656 return 0;
657 }
658
659 } // namespace app
660 } // namespace MagAOX
661
662 #endif // nnReconstructor_hpp
663
void log(Severity severity, const char *msg) noexcept override
The base-class for MagAO-X applications.
Definition MagAOXApp.hpp:73
stateCodes::stateCodeT state()
Get the current state code.
static int log(const typename logT::messageT &msg, logPrioT level=logPrio::LOG_DEFAULT)
Make a log entry.
std::mutex m_indiMutex
Mutex for locking INDI communications.
uint32_t m_width
The width of the images in the stream.
int setupConfig(mx::app::appConfigurator &config)
Setup the configuration system.
int updateINDI()
Update the INDI properties for this device controller.
int appLogic()
Checks the shmimMonitor thread.
uint32_t m_height
The height of the images in the stream.
int appShutdown()
Shuts down the shmimMonitor thread.
int loadConfig(mx::app::appConfigurator &config)
load the configuration system results
float realT
Floating point type in which to do all calculations.
size_t m_pwfsWidth
The width of the image.
virtual int appShutdown()
Shutdown the app.
size_t m_pwfsHeight
The height of the image.
int loadConfigImpl(mx::app::appConfigurator &_config)
Implementation of loadConfig logic, separated for testing.
int allocate(const dev::shmimT &dummy)
uint32_t m_modevalHeight
The height of the shmim.
uint8_t m_pwfsDataType
The ImageStreamIO type code.
virtual int appStartup()
Startup function.
size_t m_modevalTypeSize
The size of the type, in bytes.
virtual int appLogic()
Implementation of the FSM for nnReconstructor.
void load_engine(const std::string filename)
int processImage(void *curr_src, const dev::shmimT &dummy)
size_t m_pwfsTypeSize
The size of the type, in bytes.
uint8_t m_modevalDataType
The ImageStreamIO type code.
~nnReconstructor() noexcept
D'tor, declared and defined for noexcept.
uint32_t m_modevalWidth
The width of the shmim.
dev::shmimMonitor< nnReconstructor > shmimMonitorT
@ OPERATING
The device is operating, other than homing.
std::unique_lock< std::mutex > lock(m_indiMutex)
Definition dm.hpp:26
static constexpr logPrioT LOG_NOTICE
A normal but significant condition.
Software ERR log entry.