#include <iostream>
#include <vector>
#include <algorithm>

#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/enable_shared_from_this.hpp>

#include "Time.h"
#include "Converter.h"

#include "HeadersFAD.h"

using namespace std;
using namespace FAD;

namespace ba    = boost::asio;
namespace bs    = boost::system;
namespace dummy = ba::placeholders;

using boost::lexical_cast;
using ba::ip::tcp;

// ------------------------------------------------------------------------

class tcp_connection : public ba::ip::tcp::socket, public boost::enable_shared_from_this<tcp_connection>
{
private:
    const int fBoardId;

    double   fStartTime;
    uint32_t fRunNumber;

    void AsyncRead(ba::mutable_buffers_1 buffers)
    {
        ba::async_read(*this, buffers,
                       boost::bind(&tcp_connection::HandleReceivedData, shared_from_this(),
                                   dummy::error, dummy::bytes_transferred));
    }

    void AsyncWrite(ba::ip::tcp::socket *socket, const ba::const_buffers_1 &buffers)
    {
        ba::async_write(*socket, buffers,
                        boost::bind(&tcp_connection::HandleSentData, shared_from_this(),
                                    dummy::error, dummy::bytes_transferred));
    }

    void AsyncWait(ba::deadline_timer &timer, int seconds,
                   void (tcp_connection::*handler)(const bs::error_code&))// const
    {
        timer.expires_from_now(boost::posix_time::seconds(seconds));
        timer.async_wait(boost::bind(handler, shared_from_this(), dummy::error));
    }

    // The constructor is private to force the obtained pointer to be shared
    tcp_connection(ba::io_service& ioservice, int boardid) : ba::ip::tcp::socket(ioservice),
        fBoardId(boardid), fTriggerSendData(ioservice)
    {
    }

    // Callback when writing was successful or failed
    void HandleSentData(const boost::system::error_code& error, size_t bytes_transferred)
    {
        cout << "Data sent: (transmitted=" << bytes_transferred << ") rc=" << error.message() << " (" << error << ")" << endl;
    }

    vector<uint16_t> fBufCommand;   // buffer for the incoming command word(s)
    vector<uint16_t> fBuffer;       // buffer for the outgoing event data
    vector<uint16_t> fCommand;      // command waiting for its data word

    FAD::EventHeader   fHeader;
    FAD::ChannelHeader fChHeader[kNumChannels];

    ba::deadline_timer fTriggerSendData;

    bool fTriggerEnabled;
    bool fCommandSocket;

    int fSocket;

    void SendData()
    {
        if (!fTriggerEnabled)
            return;

        fHeader.fPackageLength = sizeof(EventHeader)/2+1;
        fHeader.fEventCounter++;
        fHeader.fTimeStamp = uint32_t((Time(Time::utc).UnixTime()-fStartTime)*10000);
        fHeader.fRunNumber = fRunNumber;

        fBuffer.resize(0);

        for (int i=0; i<kNumChannels; i++)
        {
            const vector<uint16_t> buf = fChHeader[i].HtoN();

            fBuffer.insert(fBuffer.end(), buf.begin(), buf.end());
            fBuffer.insert(fBuffer.end(), fChHeader[i].fRegionOfInterest, 0x42);

            fHeader.fPackageLength += sizeof(ChannelHeader)/2;
            fHeader.fPackageLength += fChHeader[i].fRegionOfInterest;
        }

        fBuffer.push_back(htons(FAD::kDelimiterEnd));

        const vector<uint16_t> h = fHeader.HtoN();

        fBuffer.insert(fBuffer.begin(), h.begin(), h.end());

        if (fCommandSocket)
            AsyncWrite(this, ba::buffer(ba::const_buffer(fBuffer.data(), fBuffer.size()*2)));
        else
        {
            if (fSockets.size()==0)
                return;

            fSocket++;
            fSocket %= fSockets.size();

            AsyncWrite(fSockets[fSocket].get(), ba::buffer(ba::const_buffer(fBuffer.data(), fBuffer.size()*2)));
        }
    }

    void TriggerSendData(const boost::system::error_code &ec)
    {
        if (!is_open())
        {
            // For example: Here we could schedule a new accept if we
            // would not want to allow two connections at the same time.
            return;
        }

        if (ec==ba::error::basic_errors::operation_aborted)
            return;

        // Check whether the deadline has passed. We compare the deadline
        // against the current time since a new asynchronous operation
        // may have moved the deadline before this actor had a chance
        // to run.
        if (fTriggerSendData.expires_at() > ba::deadline_timer::traits_type::now())
            return;

        // The deadline has passed.
        SendData();

        AsyncWait(fTriggerSendData, 1, &tcp_connection::TriggerSendData);
    }

    void HandleReceivedData(const boost::system::error_code& error, size_t bytes_received)
    {
        // Do not schedule a new read if the connection failed.
        if (bytes_received==0)
        {
            // Close the connection
            close();
            return;
        }

        // No command received yet
        if (fCommand.size()==0)
        {
            transform(fBufCommand.begin(), fBufCommand.begin()+bytes_received/2,
                      fBufCommand.begin(), ntohs);

            switch (fBufCommand[0])
            {
            case kCmdDrsEnable:
            case kCmdDrsEnable+0x100:
                fHeader.Enable(FAD::EventHeader::kDenable, fBufCommand[0]==kCmdDrsEnable);
                cout << "-> DrsEnable" << endl;
                break;

            case kCmdDwrite:
            case kCmdDwrite+0x100:
                fHeader.Enable(FAD::EventHeader::kDwrite, fBufCommand[0]==kCmdDwrite);
                cout << "-> Dwrite" << endl;
                break;

            case kCmdTriggerLine:
            case kCmdTriggerLine+0x100:
                cout << "-> Trigger line" << endl;
                fTriggerEnabled = fBufCommand[0]==kCmdTriggerLine;
                break;

            case kCmdSclk:
            case kCmdSclk+0x100:
                cout << "-> Sclk" << endl;
                fHeader.Enable(FAD::EventHeader::kSpiSclk, fBufCommand[0]==kCmdSclk);
                break;

            case kCmdSrclk:
            case kCmdSrclk+0x100:
                cout << "-> Srclk" << endl;
                break;

            case kCmdRun:
            case kCmdRun+0x100:
                fStartTime = Time(Time::utc).UnixTime();
                cout << "-> Run" << endl;
                break;

            case kCmdSocket:
            case kCmdSocket+0x100:
                cout << "-> Socket" << endl;
                fCommandSocket = fBufCommand[0]==kCmdSocket;
                break;

            case kCmdContTriggerOn:
            case kCmdContTriggerOff:
                if (fBufCommand[0]==kCmdContTriggerOn)
                    AsyncWait(fTriggerSendData, 1, &tcp_connection::TriggerSendData);
                else
                    fTriggerSendData.cancel();
                cout << "-> ContTrig" << endl;
                break;

            case kCmdResetTriggerId:
                cout << "-> Reset" << endl;
                fHeader.fEventCounter = 0;
                break;

            case kCmdSingleTrigger:
                cout << "-> Trigger" << endl;
                SendData();
                break;

            case kCmdWriteRunNumberMSW:
                fCommand = fBufCommand;
                break;

            case kCmdWriteRunNumberLSW:
                fCommand = fBufCommand;
                break;

            default:
                // Commands carrying a channel or DAC index: remember the command
                // and the index, the data word arrives with the next read.
                if (fBufCommand[0]>=kCmdWriteRoi && fBufCommand[0]<kCmdWriteRoi+kNumChannels)
                {
                    fCommand.resize(2);
                    fCommand[0] = kCmdWriteRoi;
                    fCommand[1] = fBufCommand[0]-kCmdWriteRoi;
                    break;
                }
                if (fBufCommand[0]>=kCmdWriteDac && fBufCommand[0]<kCmdWriteDac+kNumDac)
                {
                    fCommand.resize(2);
                    fCommand[0] = kCmdWriteDac;
                    fCommand[1] = fBufCommand[0]-kCmdWriteDac;
                    break;
                }

                cout << "Unknown command: " << Converter::GetHex<uint16_t>(&fBufCommand[0], bytes_received) << endl;
                return;
            }

            fBufCommand.resize(1);
            AsyncRead(ba::buffer(fBufCommand));
            return;
        }

        transform(fBufCommand.begin(), fBufCommand.begin()+bytes_received/2,
                  fBufCommand.begin(), ntohs);

        switch (fCommand[0])
        {
        case kCmdWriteRunNumberMSW:
            fRunNumber &= 0xffff;
            fRunNumber |= fBufCommand[0]<<16;
            cout << "-> Set RunNumber MSW" << endl;
            break;

        case kCmdWriteRunNumberLSW:
            fRunNumber &= 0xffff0000;
            fRunNumber |= fBufCommand[0];
            cout << "-> Set RunNumber LSW" << endl;
            break;

        case kCmdWriteRoi:
            cout << "-> Set Roi[" << fCommand[1] << "]=" << fBufCommand[0] << endl;
            fChHeader[fCommand[1]].fRegionOfInterest = fBufCommand[0];
            break;

        case kCmdWriteDac:
            cout << "-> Set Dac[" << fCommand[1] << "]=" << fBufCommand[0] << endl;
            fHeader.fDac[fCommand[1]] = fBufCommand[0];
            break;
        }

        fCommand.resize(0);

        fBufCommand.resize(1);
        AsyncRead(ba::buffer(fBufCommand));
    }

public:
    typedef boost::shared_ptr<tcp_connection> shared_ptr;

    static shared_ptr create(ba::io_service& io_service, int boardid)
    {
        return shared_ptr(new tcp_connection(io_service, boardid));
    }

    void start()
    {
        // Ownership of buffer must be valid until Handler is called.
        fTriggerEnabled = false;
        fCommandSocket  = true;

        fRunNumber = 1;
        fSocket    = 0;

        fHeader.fStartDelimiter = FAD::kDelimiterStart;
        fHeader.fVersion = 0x104;
        fHeader.fBoardId = (fBoardId%10) | ((fBoardId/10)<<8);
        fHeader.fStatus  =
            0xf<<12                      |
            FAD::EventHeader::kDenable   |
            FAD::EventHeader::kDwrite    |
            FAD::EventHeader::kDcmLocked |
            FAD::EventHeader::kDcmReady  |
            FAD::EventHeader::kSpiSclk;

        fStartTime = Time(Time::utc).UnixTime();

        for (int i=0; i<kNumChannels; i++)
            fChHeader[i].fRegionOfInterest = 0;

        // Schedule the first read of a command word
        fBufCommand.resize(1);
        AsyncRead(ba::buffer(fBufCommand));
    }

    vector<boost::shared_ptr<ba::ip::tcp::socket>> fSockets;

    ~tcp_connection()
    {
        fSockets.clear();
    }

    void handle_accept(boost::shared_ptr<ba::ip::tcp::socket> socket, int port, const boost::system::error_code&/* error*/)
    {
        cout << "Added one socket " << socket->remote_endpoint().address().to_v4().to_string();
        cout << ":" << port << endl;

        fSockets.push_back(socket);
    }
};

class tcp_server
{
    tcp::acceptor acc0;
    tcp::acceptor acc1;
    tcp::acceptor acc2;
    tcp::acceptor acc3;
    tcp::acceptor acc4;
    tcp::acceptor acc5;
    tcp::acceptor acc6;
    tcp::acceptor acc7;

    int fBoardId;

public:
    tcp_server(ba::io_service& ioservice, int port, int board) :
        acc0(ioservice, tcp::endpoint(tcp::v4(), port)),
        acc1(ioservice, tcp::endpoint(tcp::v4(), port+1)),
        acc2(ioservice, tcp::endpoint(tcp::v4(), port+2)),
        acc3(ioservice, tcp::endpoint(tcp::v4(), port+3)),
        acc4(ioservice, tcp::endpoint(tcp::v4(), port+4)),
        acc5(ioservice, tcp::endpoint(tcp::v4(), port+5)),
        acc6(ioservice, tcp::endpoint(tcp::v4(), port+6)),
        acc7(ioservice, tcp::endpoint(tcp::v4(), port+7)),
        fBoardId(board)
    {
        // We could start listening for more than one connection here,
        // but since only one handler is executed at a time it would not
        // make sense: before one handle_accept has finished, no new
        // handle_accept will be called.
        // Workaround: Start a new thread in handle_accept
        start_accept();
    }

private:
    void start_accept(tcp_connection::shared_ptr dest, tcp::acceptor &acc)
    {
        boost::shared_ptr<ba::ip::tcp::socket> connection =
            boost::shared_ptr<ba::ip::tcp::socket>(new ba::ip::tcp::socket(acc.io_service()));

        acc.async_accept(*connection,
                         boost::bind(&tcp_connection::handle_accept, dest, connection,
                                     acc.local_endpoint().port(),
                                     ba::placeholders::error));
    }

    void start_accept()
    {
        cout << "Start accept " << acc0.local_endpoint().port() << "..." << flush;

        tcp_connection::shared_ptr new_connection = tcp_connection::create(/*acceptor_.*/acc0.io_service(), fBoardId);

        // This will accept a connection without blocking
        acc0.async_accept(*new_connection,
                          boost::bind(&tcp_server::handle_accept, this, new_connection,
                                      ba::placeholders::error));

        start_accept(new_connection, acc1);
        start_accept(new_connection, acc2);
        start_accept(new_connection, acc3);
        start_accept(new_connection, acc4);
        start_accept(new_connection, acc5);
        start_accept(new_connection, acc6);
        start_accept(new_connection, acc7);

        cout << "start-done." << endl;
    }

    void handle_accept(tcp_connection::shared_ptr new_connection, const boost::system::error_code& error)
    {
        // The connection has been accepted and is now ready to use.
        // Not installing a new handler will stop run().
        cout << "Handle accept..." << flush;

        if (!error)
        {
            new_connection->start();

            // There is now an open connection/server (tcp_connection),
            // so we immediately schedule another accept.
            // This allows two client connections at the same time.
            start_accept();
        }

        cout << "handle-done." << endl;
    }
};

int main(int argc, const char **argv)
{
    //try
    {
        ba::io_service io_service;

        int port = argc>=2 ? lexical_cast<int>(argv[1]) : 5000;
        int n    = argc==3 ?
            lexical_cast<int>(argv[2]) : 1;

        vector<boost::shared_ptr<tcp_server>> servers;

        for (int i=0; i<n; i++)
        {
            boost::shared_ptr<tcp_server> server(new tcp_server(io_service, port, i));
            servers.push_back(server);

            port += 8;
        }

        // ba::add_service(io_service, &server);
        // server.add_service(...);

        //cout << "Run..." << flush;

        // Calling run() from a single thread ensures no concurrent access
        // of the handlers which are called!!!
        io_service.run();

        //cout << "end." << endl;
    }
    /*catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }*/

    return 0;
}

/* ====================== Buffers ===========================

   char d1[128];
   ba::buffer(d1);

   std::vector<char> d2(128);
   ba::buffer(d2);

   boost::array<char, 128> d3;
   ba::buffer(d3);

   // --------------------------------

   char d1[128];
   std::vector<char> d2(128);
   boost::array<char, 128> d3;

   boost::array<ba::mutable_buffer, 3> bufs1 =
   {
       ba::buffer(d1),
       ba::buffer(d2),
       ba::buffer(d3)
   };
   sock.read(bufs1);

   std::vector<ba::const_buffer> bufs2;
   bufs2.push_back(boost::asio::buffer(d1));
   bufs2.push_back(boost::asio::buffer(d2));
   bufs2.push_back(boost::asio::buffer(d3));
   sock.write(bufs2);

   // ======================= Read functions =========================

   ba::async_read_until --> delimiter

   streambuf buf;                  // Ensure validity until handler!
   ba::async_read(s, buf, ....);

   ba::async_read(s, ba::buffer(data, size), handler);   // Single buffer

   // Single buffer
   boost::asio::async_read(s, ba::buffer(data, size),
                           compl-func --> ba::transfer_at_least(32),
                           handler);

   // Multiple buffers
   boost::asio::async_read(s, buffers,
                           compl-func --> boost::asio::transfer_all(),
                           handler);
*/

// ================= Others ===============================

/*
   strand     Provides serialised handler execution.
   work       Class to inform the io_service when it has work to do.

   io_service::
     dispatch    Request the io_service to invoke the given handler.
     poll        Run the io_service's event processing loop to execute ready handlers.
     poll_one    Run the io_service's event processing loop to execute one ready handler.
     post        Request the io_service to invoke the given handler and return immediately.
     reset       Reset the io_service in preparation for a subsequent run() invocation.
     run         Run the io_service's event processing loop.
     run_one     Run the io_service's event processing loop to execute at most one handler.
     stop        Stop the io_service's event processing loop.
     wrap        Create a new handler that automatically dispatches the wrapped handler on the io_service.

   strand::    The io_service::strand class provides the ability to post and dispatch handlers
               with the guarantee that none of those handlers will execute concurrently.
     dispatch        Request the strand to invoke the given handler.
     get_io_service  Get the io_service associated with the strand.
     post            Request the strand to invoke the given handler and return immediately.
     wrap            Create a new handler that automatically dispatches the wrapped handler on the strand.

   work::      The work class is used to inform the io_service when work starts and finishes.
               This ensures that the io_service's run() function will not exit while work is
               underway, and that it does exit when there is no unfinished work remaining.
     get_io_service  Get the io_service associated with the work.
     work            Constructor notifies the io_service that work is starting.
*/
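
// A minimal, self-contained sketch of the scatter/gather buffers described in
// the notes above; it is not part of the simulator. The function name
// ReadEventInto and the fixed header size of 16 bytes are illustrative
// assumptions only.
/*
   #include <vector>
   #include <boost/array.hpp>
   #include <boost/asio.hpp>

   namespace ba = boost::asio;

   // Fill a fixed-size header and a caller-sized payload with one gather-read.
   // Both buffers must stay valid until the call returns.
   void ReadEventInto(ba::ip::tcp::socket &sock,
                      boost::array<char, 16> &header, std::vector<char> &payload)
   {
       boost::array<ba::mutable_buffer, 2> bufs =
       {{
           ba::buffer(header),    // filled first
           ba::buffer(payload)    // filled with whatever follows the header
       }};

       // Blocks until both buffers are completely filled
       // (transfer_all is the default completion condition).
       ba::read(sock, bufs);
   }
*/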
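
// A minimal, self-contained sketch of the work and strand classes summarised
// above; it is not part of the simulator. 'work' keeps run() from returning
// while handlers are still expected, and the strand guarantees that the posted
// handlers never execute concurrently even though run() is driven by two
// threads. All names (print_id, the loop count) are illustrative assumptions.
/*
   #include <iostream>
   #include <boost/bind.hpp>
   #include <boost/thread.hpp>
   #include <boost/shared_ptr.hpp>
   #include <boost/asio.hpp>

   namespace ba = boost::asio;

   static void print_id(int i)
   {
       std::cout << "handler " << i << std::endl;
   }

   int main()
   {
       ba::io_service         io;
       ba::io_service::strand strand(io);

       // As long as this object exists, io.run() will not return.
       boost::shared_ptr<ba::io_service::work> work(new ba::io_service::work(io));

       // Two threads execute handlers concurrently in general ...
       boost::thread t1(boost::bind(&ba::io_service::run, &io));
       boost::thread t2(boost::bind(&ba::io_service::run, &io));

       // ... but handlers posted through the same strand are serialised.
       for (int i=0; i<10; i++)
           strand.post(boost::bind(&print_id, i));

       work.reset();   // no more work: run() returns once the queue is drained
       t1.join();
       t2.join();

       return 0;
   }
*/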