#include <iostream>
#include <vector>

#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/enable_shared_from_this.hpp>

#include "Time.h"
#include "Converter.h"

#include "HeadersFAD.h"

using namespace std;
using namespace FAD;

namespace ba    = boost::asio;
namespace bs    = boost::system;
namespace dummy = ba::placeholders;

using boost::lexical_cast;
using ba::ip::tcp;

int Port = 0;

// ------------------------------------------------------------------------
// ------------------------------------------------------------------------

class tcp_connection : public ba::ip::tcp::socket, public boost::enable_shared_from_this<tcp_connection>
{
private:
    double   fStartTime;
    uint32_t fRunNumber;

    void AsyncRead(ba::mutable_buffers_1 buffers)
    {
        ba::async_read(*this, buffers,
                       boost::bind(&tcp_connection::HandleReceivedData, shared_from_this(),
                                   dummy::error, dummy::bytes_transferred));
    }

    void AsyncWrite(const ba::const_buffers_1 &buffers)
    {
        ba::async_write(*this, buffers,
                        boost::bind(&tcp_connection::HandleSentData, shared_from_this(),
                                    dummy::error, dummy::bytes_transferred));
    }

    void AsyncWait(ba::deadline_timer &timer, int seconds,
                   void (tcp_connection::*handler)(const bs::error_code&))// const
    {
        timer.expires_from_now(boost::posix_time::seconds(seconds));
        timer.async_wait(boost::bind(handler, shared_from_this(), dummy::error));
    }

    // The constructor is private to force the obtained pointer to be shared
    tcp_connection(ba::io_service& ioservice) : ba::ip::tcp::socket(ioservice),
        fTriggerSendData(ioservice)
    {
    }

    // Callback when writing was successful or failed
    void HandleSentData(const boost::system::error_code& error, size_t bytes_transferred)
    {
        cout << "Data sent: (transmitted=" << bytes_transferred << ") rc="
             << error.message() << " (" << error << ")" << endl;
    }

    vector<uint16_t> fBufCommand;   // buffer for incoming command words
    vector<uint16_t> fBuffer;       // buffer for the outgoing event data

    vector<uint16_t> fCommand;      // command currently waiting for its data word

    FAD::EventHeader   fHeader;
    FAD::ChannelHeader fChHeader[kNumChannels];

    ba::deadline_timer fTriggerSendData;
    bool fTriggerEnabled;

    void SendData()
    {
        if (!fTriggerEnabled)
            return;

        fHeader.fPackageLength = sizeof(EventHeader)/2+1;
        fHeader.fEventCounter++;
        fHeader.fTimeStamp = uint32_t((Time(Time::utc).UnixTime()-fStartTime)*10000);

        fBuffer.resize(0);

        for (int i=0; i<kNumChannels; i++)
        {
            const vector<uint16_t> buf = fChHeader[i].HtoN();

            fBuffer.insert(fBuffer.end(), buf.begin(), buf.end());
            fBuffer.insert(fBuffer.end(), fChHeader[i].fRegionOfInterest, 0x42);

            fHeader.fPackageLength += sizeof(ChannelHeader)/2;
            fHeader.fPackageLength += fChHeader[i].fRegionOfInterest;
        }

        fBuffer.push_back(htons(FAD::kDelimiterEnd));

        const vector<uint16_t> h = fHeader.HtoN();

        fBuffer.insert(fBuffer.begin(), h.begin(), h.end());

        AsyncWrite(ba::buffer(ba::const_buffer(fBuffer.data(), fBuffer.size()*2)));
    }

    void TriggerSendData(const boost::system::error_code &ec)
    {
        if (!is_open())
        {
            // For example: Here we could schedule a new accept if we
            // would not want to allow two connections at the same time.
            return;
        }

        if (ec==ba::error::basic_errors::operation_aborted)
            return;

        // Check whether the deadline has passed. We compare the deadline
        // against the current time since a new asynchronous operation
        // may have moved the deadline before this actor had a chance
        // to run.
        if (fTriggerSendData.expires_at() > ba::deadline_timer::traits_type::now())
            return;

        // The deadline has passed.
        SendData();

        AsyncWait(fTriggerSendData, 1, &tcp_connection::TriggerSendData);
    }

    void HandleReceivedData(const boost::system::error_code& error, size_t bytes_received)
    {
        // Do not schedule a new read if the connection failed.
        if (bytes_received==0)
        {
            // Close the connection
            close();
            return;
        }

        // No command received yet
        if (fCommand.size()==0)
        {
            transform(fBufCommand.begin(), fBufCommand.begin()+bytes_received/2,
                      fBufCommand.begin(), ntohs);

            switch (fBufCommand[0])
            {
            case kCmdDrsEnable:
            case kCmdDrsEnable+0x100:
                fHeader.Enable(FAD::EventHeader::kDenable, fBufCommand[0]==kCmdDrsEnable);
                cout << "-> DrsEnable" << endl;
                break;

            case kCmdDwrite:
            case kCmdDwrite+0x100:
                fHeader.Enable(FAD::EventHeader::kDwrite, fBufCommand[0]==kCmdDwrite);
                cout << "-> Dwrite" << endl;
                break;

            case kCmdTriggerLine:
            case kCmdTriggerLine+0x100:
                cout << "-> Trigger line" << endl;
                fTriggerEnabled = fBufCommand[0]==kCmdTriggerLine;
                break;

            case kCmdSclk:
            case kCmdSclk+0x100:
                cout << "-> Sclk" << endl;
                fHeader.Enable(FAD::EventHeader::kSpiSclk, fBufCommand[0]==kCmdSclk);
                break;

            case kCmdSrclk:
            case kCmdSrclk+0x100:
                cout << "-> Srclk" << endl;
                break;

            case kCmdRun:
            case kCmdRun+0x100:
                cout << "-> Run" << endl;
                break;

            case kCmdContTriggerOn:
            case kCmdContTriggerOff:
                if (fBufCommand[0]==kCmdContTriggerOn)
                    AsyncWait(fTriggerSendData, 1, &tcp_connection::TriggerSendData);
                else
                    fTriggerSendData.cancel();
                cout << "-> ContTrig" << endl;
                break;

            case kCmdResetTriggerId:
                cout << "-> Reset" << endl;
                fHeader.fEventCounter = 0;
                break;

            case kCmdSingleTrigger:
                cout << "-> Trigger" << endl;
                SendData();
                break;

            default:
                // A write command is followed by one data word: remember the
                // command and its target address and wait for the data.
                if (fBufCommand[0]>=kCmdWriteRoi && fBufCommand[0]<kCmdWriteRoi+kNumChannels)
                {
                    fCommand.resize(2);
                    fCommand[0] = kCmdWriteRoi;
                    fCommand[1] = fBufCommand[0]-kCmdWriteRoi;

                    fBufCommand.resize(1);
                    AsyncRead(ba::buffer(fBufCommand));
                    return;
                }

                if (fBufCommand[0]>= kCmdWriteDac && fBufCommand[0]<kCmdWriteDac+kNumDac) // upper bound assumed
                {
                    fCommand.resize(2);
                    fCommand[0] = kCmdWriteDac;
                    fCommand[1] = fBufCommand[0]-kCmdWriteDac;

                    fBufCommand.resize(1);
                    AsyncRead(ba::buffer(fBufCommand));
                    return;
                }

                cout << Converter::GetHex<uint16_t>(&fBufCommand[0], bytes_received) << endl;
                return;
            }

            fBufCommand.resize(1);
            AsyncRead(ba::buffer(fBufCommand));
            return;
        }

        // The data word belonging to a previously received write command
        transform(fBufCommand.begin(), fBufCommand.begin()+bytes_received/2,
                  fBufCommand.begin(), ntohs);

        switch (fCommand[0])
        {
        case kCmdWriteRoi:
            cout << "-> Set Roi[" << fCommand[1] << "]=" << fBufCommand[0] << endl;
            fChHeader[fCommand[1]].fRegionOfInterest = fBufCommand[0];
            break;

        case kCmdWriteDac:
            cout << "-> Set Dac[" << fCommand[1] << "]=" << fBufCommand[0] << endl;
            fHeader.fDac[fCommand[1]] = fBufCommand[0];
            break;
        }

        fCommand.resize(0);

        fBufCommand.resize(1);
        AsyncRead(ba::buffer(fBufCommand));
    }

public:
    typedef boost::shared_ptr<tcp_connection> shared_ptr;

    static shared_ptr create(ba::io_service& io_service)
    {
        return shared_ptr(new tcp_connection(io_service));
    }

    void start()
    {
        // Ownership of buffer must be valid until Handler is called.

        fHeader.fStartDelimiter = FAD::kDelimiterStart;
        fHeader.fVersion        = 0x104;
        fHeader.fStatus         = 0xf<<12 |
            FAD::EventHeader::kDenable   |
            FAD::EventHeader::kDwrite    |
            FAD::EventHeader::kDcmLocked |
            FAD::EventHeader::kDcmReady  |
            FAD::EventHeader::kSpiSclk;

        fStartTime = Time(Time::utc).UnixTime();

        fHeader.fEventCounter = 0;
        fTriggerEnabled       = false;

        for (int i=0; i<kNumChannels; i++)
            fChHeader[i].fRegionOfInterest = 0;

        // Schedule the first read; each handler re-arms the next one.
        fBufCommand.resize(1);
        AsyncRead(ba::buffer(fBufCommand));
    }
};

// ------------------------------------------------------------------------

// The server side: accepts connections and hands each one to a
// tcp_connection object (standard Boost.Asio acceptor pattern).
class tcp_server : public tcp::acceptor
{
public:
    tcp_server(ba::io_service& ioservice, int port) :
        tcp::acceptor(ioservice, tcp::endpoint(tcp::v4(), port))
    {
        start_accept();
    }

private:
    void start_accept()
    {
        tcp_connection::shared_ptr connection = tcp_connection::create(get_io_service());

        // This will accept a connection without blocking
        async_accept(*connection,
                     boost::bind(&tcp_server::handle_accept, this,
                                 connection, dummy::error));
    }

    void handle_accept(tcp_connection::shared_ptr connection, const bs::error_code &ec)
    {
        cout << "Handle accept..." << flush;

        if (!ec)
        {
            connection->start();

            // There is now an open connection/server (tcp_connection);
            // we immediately schedule another accept.
            // This allows two client connections at the same time.
            start_accept();
        }

        cout << "handle-done." << endl;
    }
};

// ------------------------------------------------------------------------

int main(int argc, const char **argv)
{
    try
    {
        ba::io_service io_service;

        Port = argc==2 ? lexical_cast<int>(argv[1]) : 5000;

        tcp_server server(io_service, Port);
        //  ba::add_service(io_service, &server);
        //  server.add_service(...);

        cout << "Run..." << flush;

        // Calling run() from a single thread ensures that the handlers
        // are never executed concurrently.
        io_service.run();

        cout << "end." << endl;
    }
    catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }

    return 0;
}

/* ====================== Buffers ===========================

   char d1[128];
   ba::buffer(d1);

   std::vector<char> d2(128);
   ba::buffer(d2);

   boost::array<char, 128> d3;
   ba::buffer(d3);

   // --------------------------------

   char d1[128];
   std::vector<char> d2(128);
   boost::array<char, 128> d3;

   boost::array<ba::mutable_buffer, 3> bufs1 =
   {
       ba::buffer(d1),
       ba::buffer(d2),
       ba::buffer(d3)
   };
   sock.read(bufs1);

   std::vector<ba::const_buffer> bufs2;
   bufs2.push_back(boost::asio::buffer(d1));
   bufs2.push_back(boost::asio::buffer(d2));
   bufs2.push_back(boost::asio::buffer(d3));
   sock.write(bufs2);

   // ======================= Read functions =========================

   ba::async_read_until --> delimiter

   streambuf buf;    // Ensure validity until handler!
   ba::async_read(s, buf, ....);

   ba::async_read(s, ba::buffer(data, size), handler);

   // Single buffer
   boost::asio::async_read(s, ba::buffer(data, size),
                           compl-func --> ba::transfer_at_least(32),
                           handler);

   // Multiple buffers
   boost::asio::async_read(s, buffers,
                           compl-func --> boost::asio::transfer_all(),
                           handler);
*/

// ================= Others ===============================

/*
 strand     Provides serialised handler execution.
 work       Class to inform the io_service when it has work to do.

 io_service::
   dispatch   Request the io_service to invoke the given handler.
   poll       Run the io_service's event processing loop to execute ready handlers.
   poll_one   Run the io_service's event processing loop to execute one ready handler.
   post       Request the io_service to invoke the given handler and return immediately.
   reset      Reset the io_service in preparation for a subsequent run() invocation.
   run        Run the io_service's event processing loop.
   run_one    Run the io_service's event processing loop to execute at most one handler.
   stop       Stop the io_service's event processing loop.
   wrap       Create a new handler that automatically dispatches the wrapped handler
              on the io_service.

 strand::   The io_service::strand class provides the ability to post and dispatch
            handlers with the guarantee that none of those handlers will execute
            concurrently.
   dispatch        Request the strand to invoke the given handler.
   get_io_service  Get the io_service associated with the strand.
   post            Request the strand to invoke the given handler and return immediately.
   wrap            Create a new handler that automatically dispatches the wrapped
                   handler on the strand.

 work::     The work class is used to inform the io_service when work starts and
            finishes. This ensures that the io_service's run() function will not exit
            while work is underway, and that it does exit when there is no unfinished
            work remaining.
   get_io_service  Get the io_service associated with the work.
   work            Constructor notifies the io_service that work is starting.
*/
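// ============ Example: async_read_until with a delimiter ============
//
// A minimal, self-contained sketch of the delimiter-based read mentioned in
// the reference block above ("ba::async_read_until --> delimiter"). It is not
// meant for talking to the FAD simulator (which is not line-based); the
// address 127.0.0.1:5000 and the '\n' delimiter are illustrative assumptions.
/*
#include <iostream>
#include <string>
#include <boost/asio.hpp>

namespace ba = boost::asio;

ba::streambuf buf;   // must stay valid until the handler is called!

void handle_line(const boost::system::error_code &ec, size_t)
{
    if (ec)
        return;

    // Everything up to and including the delimiter is now in buf
    std::istream is(&buf);
    std::string line;
    std::getline(is, line);

    std::cout << "line: " << line << std::endl;
}

int main()
{
    ba::io_service io_service;

    // Connect to some line-based service (address and port are just examples)
    ba::ip::tcp::socket sock(io_service);
    sock.connect(ba::ip::tcp::endpoint(ba::ip::address::from_string("127.0.0.1"), 5000));

    // Completes once a '\n' has been received (or an error occurred)
    ba::async_read_until(sock, buf, '\n', handle_line);

    io_service.run();

    return 0;
}
*/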
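// ============ Example: post() versus dispatch() ============
//
// A minimal sketch, not taken from the code above, contrasting io_service::post
// and io_service::dispatch as listed in the reference block: dispatch may invoke
// the handler immediately when called from inside run(), post always queues it.
// The handler names are illustrative assumptions.
/*
#include <iostream>
#include <boost/bind.hpp>
#include <boost/asio.hpp>

namespace ba = boost::asio;

void handler(const char *txt)
{
    std::cout << txt << std::endl;
}

void parent(ba::io_service *io_service)
{
    // We are inside a handler, i.e. inside run(): dispatch() may execute the
    // handler right here, post() only queues it for a later iteration.
    io_service->dispatch(boost::bind(handler, "dispatched (runs immediately)"));
    io_service->post(boost::bind(handler, "posted (runs after parent returns)"));

    std::cout << "parent done" << std::endl;
}

int main()
{
    ba::io_service io_service;

    io_service.post(boost::bind(parent, &io_service));

    // Single-threaded run(): handlers are executed one after another.
    io_service.run();

    return 0;
}
*/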
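// ============ Example: serialised handlers with a strand ============
//
// A minimal sketch, not part of the simulator above, of the io_service::strand
// described in the reference block: even when run() is called from several
// threads, handlers wrapped in the same strand never execute concurrently.
// The helper names (run_service, tick), the counter and the one-second period
// are illustrative assumptions.
/*
#include <iostream>
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>

namespace ba = boost::asio;

void run_service(ba::io_service *io_service)
{
    io_service->run();
}

void tick(const boost::system::error_code&, ba::deadline_timer *timer,
          ba::io_service::strand *strand, int *count)
{
    if (++*count > 5)
        return;

    std::cout << "tick " << *count << std::endl;

    timer->expires_from_now(boost::posix_time::seconds(1));

    // Wrapping the handler in the strand guarantees that no two wrapped
    // handlers run concurrently, even with several run() threads.
    timer->async_wait(strand->wrap(boost::bind(tick, ba::placeholders::error,
                                               timer, strand, count)));
}

int main()
{
    ba::io_service io_service;
    ba::io_service::strand strand(io_service);
    ba::deadline_timer timer(io_service, boost::posix_time::seconds(1));

    int count = 0;
    timer.async_wait(strand.wrap(boost::bind(tick, ba::placeholders::error,
                                             &timer, &strand, &count)));

    // Two threads execute handlers, but the strand keeps them serialised.
    boost::thread t(boost::bind(run_service, &io_service));
    io_service.run();
    t.join();

    return 0;
}
*/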
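// ============ Example: keeping run() alive with io_service::work ============
//
// A minimal sketch, again not part of the simulator, of the work class described
// above: run() does not return while a work object exists, even if no handler is
// currently pending. The helper names (run_service, print) and the one-second
// sleep are illustrative assumptions.
/*
#include <iostream>
#include <memory>
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>

namespace ba = boost::asio;

void run_service(ba::io_service *io_service)
{
    io_service->run();
}

void print(const char *txt)
{
    std::cout << txt << std::endl;
}

int main()
{
    ba::io_service io_service;

    // As long as this object exists, io_service::run() will not return,
    // even if there is currently nothing to do.
    std::auto_ptr<ba::io_service::work> work(new ba::io_service::work(io_service));

    boost::thread runner(boost::bind(run_service, &io_service));

    // Handlers posted from any thread are executed inside run()
    io_service.post(boost::bind(print, "first"));
    io_service.post(boost::bind(print, "second"));

    boost::this_thread::sleep(boost::posix_time::seconds(1));

    // Destroying the work object lets run() return once all
    // outstanding handlers have been executed.
    work.reset();
    runner.join();

    return 0;
}
*/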