#ifndef nnReconstructor_hpp
#define nnReconstructor_hpp

#include <fstream>
#include <iostream>
#include <vector>

#include <cuda_runtime_api.h>
#include <NvInfer.h>

#include <mx/improc/eigenCube.hpp>
#include <mx/improc/eigenImage.hpp>

#include "../../libMagAOX/libMagAOX.hpp"
#include "../../magaox_git_version.h"
using namespace nvinfer1;

// TensorRT logger: only messages at kWARNING severity or above are reported.
class Logger : public ILogger
{
    void log( Severity severity, const char *msg ) noexcept override
    {
        if( severity <= Severity::kWARNING )
        {
            std::cout << msg << std::endl; // assumed: the elided body reports the message
        }
    }
} logger;
/** nnReconstructor: reconstructs modal coefficients from PWFS images with a
  * TensorRT neural-network engine.
  *
  * Class header abridged in this listing; base classes inferred from the standard
  * MagAO-X app pattern (MagAOXApp plus the dev::shmimMonitor CRTP helper).
  */
class nnReconstructor : public MagAOXApp<true>, public dev::shmimMonitor<nnReconstructor>
{
    friend class nnReconstructor_test;

    typedef dev::shmimMonitor<nnReconstructor> shmimMonitorT;

  protected:
    // TensorRT runtime objects.
    IRuntime *runtime{ nullptr };
    ICudaEngine *engine{ nullptr };
    IExecutionContext *context{ nullptr };

    std::vector<char> engineData; ///< Serialized engine bytes read from disk.

    // Engine I/O geometry, filled in by create_engine_context(); the declarations of
    // inputC/inputH/inputW/inputSize/outputSize are elided in this listing.

    // Device (GPU) buffers for the network input and output tensors.
    float *d_input{ nullptr };
    float *d_output{ nullptr };
    unsigned long frame_counter{ 0 };

    // Host-side buffers: reconstructed modal coefficients and the preprocessed image.
    float *modeval{ nullptr };
    float *pp_image{ nullptr };

    size_t m_pwfsWidth{ 0 };  ///< The width of the PWFS image.
    size_t m_pwfsHeight{ 0 }; ///< The height of the PWFS image.

    uint8_t m_pwfsDataType{ 0 }; ///< The ImageStreamIO type code.
    size_t m_pwfsTypeSize{ 0 };  ///< The size of the type, in bytes.

    // ... (configuration members such as dataDirs, engineDirs, engineName, imageNorm,
    //      modalNorm, m_pupPix, and the pupil offsets are declared in lines elided here)

    std::string m_modevalChannel;

    uint32_t m_modevalWidth{ 0 };   ///< The width of the modeval shmim.
    uint32_t m_modevalHeight{ 0 };  ///< The height of the modeval shmim.
    uint8_t m_modevalDataType{ 0 }; ///< The ImageStreamIO type code.
    size_t m_modevalTypeSize{ 0 };  ///< The size of the type, in bytes.

    bool m_modevalOpened{ false };
    bool m_modevalRestart{ false };
  public:
    /// Setup the configuration system.
    virtual void setupConfig();

    /// Implementation of loadConfig logic, separated for testing.
    int loadConfigImpl( mx::app::appConfigurator &_config );

    /// Load the configuration system results.
    virtual void loadConfig();

    /// Startup function.
    virtual int appStartup();

    /// Implementation of the FSM for nnReconstructor.
    virtual int appLogic();

    /// Shutdown the app.
    virtual int appShutdown();
    /// Read the serialized TensorRT engine from disk into engineData.
    void load_engine( const std::string filename );

    /// Create the runtime, deserialize the engine, and create the execution context.
    void create_engine_context();

    /// Allocate the device buffers for the engine's input and output tensors.
    void prepare_engine_memory();

    /// Release the device buffers.
    void cleanup_engine_memory();

    /// Release the execution context, engine, and runtime.
    void cleanup_engine_context();

    // shmimMonitor interface and output helper:
    int allocate( const dev::shmimT &dummy );

    int processImage( void *curr_src, const dev::shmimT &dummy );

    int send_to_shmim();
};
void nnReconstructor::load_engine( const std::string filename )
{
    // Read the serialized engine file into engineData.
    std::ifstream file( filename, std::ios::binary );
    if( !file )
    {
        std::cout << "Error opening " << filename << std::endl;
    }

    file.seekg( 0, std::ios::end );
    engineData = std::vector<char>( file.tellg() );
    file.seekg( 0, std::ios::beg );
    file.read( engineData.data(), engineData.size() );
}
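/* Illustration only (the appStartup() body is not shown in this listing): the expected
 * call order is to read the serialized engine, build the runtime/engine/context, and
 * then allocate the device buffers, e.g.
 *
 *   load_engine( full_filepath );
 *   create_engine_context();
 *   prepare_engine_memory();
 */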
void nnReconstructor::create_engine_context()
{
    // Create the TensorRT runtime and deserialize the engine read by load_engine().
    runtime = createInferRuntime( logger );
    if( !runtime )
    {
        std::cout << "Failed to createInferRuntime\n";
    }

    engine = runtime->deserializeCudaEngine( engineData.data(), engineData.size() );
    if( !engine )
    {
        std::cout << "Failed to deserialize CUDA engine.\n";
    }
    else
    {
        std::cout << "Deserialized CUDA engine.\n";
    }

    context = engine->createExecutionContext();

    // Query the I/O tensor layout. The network is assumed to have one input and one output.
    int numIOTensors = engine->getNbIOTensors();
    std::cout << "Number of IO Tensors: " << numIOTensors << std::endl;

    auto inputName = engine->getIOTensorName( 0 );
    auto outputName = engine->getIOTensorName( 1 );
    std::cout << "Tensor IO names: " << inputName << " " << outputName << std::endl;

    const auto inputDims = engine->getTensorShape( inputName );
    const auto outputDims = engine->getTensorShape( outputName );

    // The input is NCHW; the batch dimension (d[0]) is ignored here.
    inputC = inputDims.d[1];
    inputH = inputDims.d[2];
    inputW = inputDims.d[3];
    inputSize = inputC * inputH * inputW;

    outputSize = outputDims.d[1];

    std::cout << "Tensor input dimensions: " << inputC << "x" << inputH << "x" << inputW << std::endl;
    std::cout << "Tensor output dimensions: " << outputSize << std::endl;
}
void nnReconstructor::prepare_engine_memory()
{
    cudaMalloc( (void **)&d_input, inputSize * sizeof( float ) );
    cudaMalloc( (void **)&d_output, outputSize * sizeof( float ) );
}
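// Illustration only: cudaMalloc returns a cudaError_t that the calls above do not check.
// A small helper of the following form could wrap them; the name is hypothetical.
inline bool check_cuda( cudaError_t err, const char *what )
{
    if( err != cudaSuccess )
    {
        std::cout << what << " failed: " << cudaGetErrorString( err ) << std::endl;
        return false;
    }
    return true;
}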
void nnReconstructor::cleanup_engine_memory()
{
    // ... body elided in this listing.
}

void nnReconstructor::cleanup_engine_context()
{
    // ... body elided in this listing.
}
inline int nnReconstructor::send_to_shmim()
{
    // Publish the modal coefficients following the ImageStreamIO write convention:
    // raise the write flag, copy the data, bump cnt0, clear the flag, post the semaphores.
    m_modevalStream.md[0].write = 1;
    memcpy( m_modevalStream.array.raw, modeval, outputSize * m_modevalTypeSize );
    m_modevalStream.md[0].cnt0++;
    m_modevalStream.md[0].write = 0;

    ImageStreamIO_sempost( &m_modevalStream, -1 );

    return 0;
}
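/* Illustration only (not from the original): a consumer of the modeval stream would wait
 * on one of the semaphores posted above and then copy out outputSize floats. The helper
 * name and the semaphore index are hypothetical.
 */
inline void read_modeval_once( IMAGE *stream, float *dest, size_t nModes )
{
    ImageStreamIO_semwait( stream, 0 );                        // block until the next post
    memcpy( dest, stream->array.F, nModes * sizeof( float ) ); // copy the modal coefficients
}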
inline nnReconstructor::nnReconstructor() : MagAOXApp( MAGAOX_CURRENT_SHA1, MAGAOX_REPO_MODIFIED )
{
    // ... body elided in this listing.
}
inline void nnReconstructor::setupConfig()
{
    std::cout << "setupConfig()" << std::endl;

    // Only the option name, long-option name, and help text of each config.add() call
    // survive in this listing; the intermediate arguments are elided ( /*...*/ ).
    config.add( "parameters.dataDirs", /*...*/ "parameters.dataDirs", /*...*/
                "The path to the directory with the onnx model." );

    config.add( "parameters.engineDirs", /*...*/ "parameters.engineDirs", /*...*/
                "The path to the directory with the TRT engine." );

    config.add( "parameters.onnxFileName", /*...*/ "parameters.onnxFileName", /*...*/
                "Name of the neural net ONNX file." );

    config.add( "parameters.engineName", /*...*/ "parameters.engineName", /*...*/
                "Name of the TRT engine." );

    config.add( "parameters.rebuildEngine", /*...*/ "parameters.rebuildEngine", /*...*/
                "If true, the engine will be rebuilt." );

    config.add( "parameters.imageNorm", /*...*/ "parameters.imageNorm", /*...*/
                "Normalization term for the preprocessed images." );

    config.add( "parameters.modalNorm", /*...*/ "parameters.modalNorm", /*...*/
                "Normalization term for the modal coefficients." );

    config.add( "parameters.channel", /*...*/ "parameters.channel", /*...*/
                "The output channel." );

    config.add( "parameters.m_pupPix", /*...*/ "parameters.m_pupPix", /*...*/
                "Number of pixels across a PWFS pupil." );

    config.add( "parameters.pup_offset1_x", /*...*/ "parameters.pup_offset1_x", /*...*/
                "Horizontal offset to the top left of the closest set of PWFS pupils." );

    config.add( "parameters.pup_offset1_y", /*...*/ "parameters.pup_offset1_y", /*...*/
                "Vertical offset to the top left of the closest set of PWFS pupils." );

    config.add( "parameters.pup_offset2_x", /*...*/ "parameters.pup_offset2_x", /*...*/
                "Horizontal offset to the top left of the furthest set of PWFS pupils." );

    config.add( "parameters.pup_offset2_y", /*...*/ "parameters.pup_offset2_y", /*...*/
                "Vertical offset to the top left of the furthest set of PWFS pupils." );
}
inline int nnReconstructor::loadConfigImpl( mx::app::appConfigurator &_config )
{
    std::cout << "loadConfigImpl()" << std::endl;

    _config( dataDirs, "parameters.dataDirs" );
    _config( engineDirs, "parameters.engineDirs" );
    _config( engineName, "parameters.engineName" );
    _config( imageNorm, "parameters.imageNorm" );
    _config( modalNorm, "parameters.modalNorm" );
    _config( m_pupPix, "parameters.m_pupPix" );
    // ... (the remaining _config() reads are elided in this listing)

    std::cout << "Debug configuration loading: " << std::endl;
    // ...
}
/* The framework overrides ( loadConfig(), appStartup(), appLogic(), appShutdown() ) and
 * allocate() are abridged in this listing; only isolated statements survive, among them:
 *
 *   std::cout << "file: " << full_filepath << std::endl;
 *   log<software_error>( { __FILE__, __LINE__ } );
 *   static_cast<void>( dummy );
 *   std::cout << "Close shmims" << std::endl;
 */
inline int nnReconstructor::processImage( void *curr_src, const dev::shmimT &dummy )
{
    static_cast<void>( dummy );

    // Interpret the incoming shmim buffer as an unsigned-short PWFS image.
    Eigen::Map<eigenImage<unsigned short>> pwfsIm( static_cast<unsigned short *>( curr_src ),
                                                   m_pwfsHeight,
                                                   m_pwfsWidth );

    // Preprocess the frame pixel by pixel across the pupil (loop body elided in this listing).
    for( int col_i = 0; col_i < m_pupPix; ++col_i )
    {
        for( int row_i = 0; row_i < m_pupPix; ++row_i )
        {
            // ...
        }
    }

    // ...
}
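/* The rest of processImage() is not shown. A minimal per-frame sketch, assuming the
 * preprocessed frame sits in pp_image, the tensor addresses are already bound to
 * d_input/d_output, and modalNorm rescales the network output before publication;
 * the helper name and the direction of the normalization are assumptions.
 */
inline int run_inference_sketch( IExecutionContext *context,
                                 float *pp_image, float *d_input,
                                 float *modeval, float *d_output,
                                 size_t inputSize, size_t outputSize, float modalNorm )
{
    // Copy the preprocessed image to the device and run the engine on the default stream.
    cudaMemcpy( d_input, pp_image, inputSize * sizeof( float ), cudaMemcpyHostToDevice );
    if( !context->enqueueV3( 0 ) )
    {
        return -1;
    }
    cudaStreamSynchronize( 0 );

    // Copy the modal coefficients back and rescale them (assumed normalization step).
    cudaMemcpy( modeval, d_output, outputSize * sizeof( float ), cudaMemcpyDeviceToHost );
    for( size_t i = 0; i < outputSize; ++i )
    {
        modeval[i] *= modalNorm;
    }

    return 0;
}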
#endif // nnReconstructor_hpp