Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:18

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @brief RTEMS monitor server
0007  *
0008  * This handles requests for info from RTEMS monitors running on
0009  * other nodes.
0010  */
0011 
0012 /*
0013  * COPYRIGHT (c) 1989-2022. On-Line Applications Research Corporation (OAR).
0014  *
0015  * Redistribution and use in source and binary forms, with or without
0016  * modification, are permitted provided that the following conditions
0017  * are met:
0018  * 1. Redistributions of source code must retain the above copyright
0019  *    notice, this list of conditions and the following disclaimer.
0020  * 2. Redistributions in binary form must reproduce the above copyright
0021  *    notice, this list of conditions and the following disclaimer in the
0022  *    documentation and/or other materials provided with the distribution.
0023  *
0024  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0025  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0026  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0027  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0028  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0029  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0030  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0031  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0032  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0033  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0034  * POSSIBILITY OF SUCH DAMAGE.
0035  */
0036 
0037 #ifdef HAVE_CONFIG_H
0038 #include "config.h"
0039 #endif
0040 
0041 #include <rtems.h>
0042 
0043 #include <stdio.h>
0044 #include <string.h>
0045 #include <stdlib.h>
0046 #include <unistd.h>
0047 
0048 #include <rtems/monitor.h>
0049 #include <rtems/score/sysstate.h>
0050 
/*
 * Various ids for the server.
 * These are bookkeeping globals shared by init, kill, the server task,
 * and the request sender below.
 */

rtems_id  rtems_monitor_server_task_id;
rtems_id  rtems_monitor_server_request_queue_id;    /* our server */
rtems_id *rtems_monitor_server_request_queue_ids;       /* all servers */
rtems_id  rtems_monitor_server_response_queue_id;       /* our server */
0059 
0060 
0061 /*
0062  * Send a request to a server task
0063  */
0064 
0065 rtems_status_code
0066 rtems_monitor_server_request(
0067     uint32_t                         server_node,
0068     rtems_monitor_server_request_t  *request,
0069     rtems_monitor_server_response_t *response
0070 )
0071 {
0072     rtems_id          server_id;
0073     rtems_status_code status;
0074     size_t            size;
0075 
0076     /*
0077      * What is id of monitor on target node?
0078      * Look it up if we don't know it yet.
0079      */
0080 
0081     server_id = rtems_monitor_server_request_queue_ids[server_node];
0082     if (server_id == 0)
0083     {
0084         status = rtems_message_queue_ident(RTEMS_MONITOR_QUEUE_NAME,
0085                                            server_node,
0086                                            &server_id);
0087         if (status != RTEMS_SUCCESSFUL)
0088         {
0089             rtems_error(status, "ident of remote server failed");
0090             goto done;
0091         }
0092 
0093         rtems_monitor_server_request_queue_ids[server_node] = server_id;
0094     }
0095 
0096     request->return_id = rtems_monitor_server_response_queue_id;
0097 
0098     status = rtems_message_queue_send(server_id, request, sizeof(*request));
0099     if (status != RTEMS_SUCCESSFUL)
0100     {
0101         rtems_error(status, "monitor server request send failed");
0102         goto done;
0103     }
0104 
0105     /*
0106      * Await response, if requested
0107      */
0108 
0109     if (response)
0110     {
0111         status = rtems_message_queue_receive(rtems_monitor_server_response_queue_id,
0112                                              response,
0113                                              &size,
0114                                              RTEMS_WAIT,
0115                                              100);
0116         if (status != RTEMS_SUCCESSFUL)
0117         {
0118             rtems_error(status, "server did not respond");
0119 
0120             /* maybe server task was restarted; look it up again next time */
0121             rtems_monitor_server_request_queue_ids[server_node] = 0;
0122 
0123             goto done;
0124         }
0125 
0126         if (response->command != RTEMS_MONITOR_SERVER_RESPONSE)
0127         {
0128             status = RTEMS_INCORRECT_STATE;
0129             goto done;
0130         }
0131     }
0132 
0133 done:
0134     return status;
0135 }
0136 
0137 
0138 
/*
 * monitor server task
 *
 * Loops forever servicing requests from monitors on other nodes.
 * Each canonical request names an object type and id; the object's
 * canonical form is looked up and sent back on the queue named by
 * request.return_id.  Any receive error, short message, or unknown
 * command makes the task exit.
 */

void
rtems_monitor_server_task(
    rtems_task_argument monitor_flags RTEMS_UNUSED
)
{
    rtems_monitor_server_request_t  request;
    rtems_monitor_server_response_t response;
    rtems_status_code               status;
    size_t                          size;

    for (;;)
    {
        /* Block indefinitely (interval 0 with RTEMS_WAIT) for a request. */
        status = rtems_message_queue_receive(
                        rtems_monitor_server_request_queue_id,
                        &request,
                        &size,
                        RTEMS_WAIT,
                        (rtems_interval) 0);

        if (status != RTEMS_SUCCESSFUL)
        {
            rtems_error(status, "monitor server msg queue receive error");
            goto failed;
        }

        /* Reject messages that are not exactly one request in size. */
        if (size != sizeof(request))
        {
            rtems_error(0, "monitor server bad size on receive");
            goto failed;
        }

        switch (request.command)
        {
            case RTEMS_MONITOR_SERVER_CANONICAL:
            {
                rtems_monitor_object_type_t object_type;
                rtems_id            id;
                rtems_id            next_id;

                /* argument0/argument1 carry the object type and object id. */
                object_type = (rtems_monitor_object_type_t) request.argument0;
                id          = (rtems_id)            request.argument1;
                /* NOTE: 'size' is reused here -- on return it holds the
                   number of payload bytes written into response.payload. */
                next_id = rtems_monitor_object_canonical_get(object_type,
                                                             id,
                                                             &response.payload,
                                                             &size);

                response.command = RTEMS_MONITOR_SERVER_RESPONSE;
                response.result0 = next_id;
                response.result1 = size;

/* Bytes of response header preceding the payload in a response message. */
#define SERVER_OVERHEAD  (offsetof(rtems_monitor_server_response_t, \
                                         payload))

                /* Send only the header plus the valid payload bytes. */
                status = rtems_message_queue_send(request.return_id,
                                                  &response,
                                                  size + SERVER_OVERHEAD);
                if (status != RTEMS_SUCCESSFUL)
                {
                    rtems_error(status, "response send failed");
                    goto failed;
                }
                break;
            }

            default:
            {
                rtems_error(0, "invalid command to monitor server: %d", request.command);
                goto failed;
            }
        }
    }

failed:
    rtems_task_exit();
}
0218 
0219 
0220 /*
0221  * Kill off any old server
0222  * Not sure if this is useful, but it doesn't hurt
0223  */
0224 
0225 void
0226 rtems_monitor_server_kill(void)
0227 {
0228     if (rtems_monitor_server_task_id)
0229         rtems_task_delete(rtems_monitor_server_task_id);
0230     rtems_monitor_server_task_id = 0;
0231 
0232     if (rtems_monitor_server_request_queue_id)
0233         rtems_message_queue_delete(rtems_monitor_server_request_queue_id);
0234     rtems_monitor_server_request_queue_ids = 0;
0235 
0236     if (rtems_monitor_server_response_queue_id)
0237         rtems_message_queue_delete(rtems_monitor_server_response_queue_id);
0238     rtems_monitor_server_response_queue_id = 0;
0239 
0240     if (rtems_monitor_server_request_queue_ids)
0241         free(rtems_monitor_server_request_queue_ids);
0242     rtems_monitor_server_request_queue_ids = 0;
0243 }
0244 
0245 
0246 void
0247 rtems_monitor_server_init(
0248     uint32_t   monitor_flags RTEMS_UNUSED
0249 )
0250 {
0251     #if defined(RTEMS_MULTIPROCESSING)
0252     rtems_status_code status;
0253 
0254     if (_System_state_Is_multiprocessing    &&
0255         (_MPCI_Configuration.maximum_nodes > 1))
0256     {
0257         uint32_t   maximum_nodes = _MPCI_Configuration.maximum_nodes;
0258 
0259         /*
0260          * create the msg que our server will listen
0261          * Since we only get msgs from other RTEMS monitors, we just
0262          * need reserve space for 1 msg from each node.
0263          */
0264 
0265         status = rtems_message_queue_create(
0266                        RTEMS_MONITOR_QUEUE_NAME,
0267                        maximum_nodes,
0268                        sizeof(rtems_monitor_server_request_t),
0269                        RTEMS_GLOBAL,
0270                        &rtems_monitor_server_request_queue_id);
0271 
0272         if (status != RTEMS_SUCCESSFUL)
0273         {
0274             rtems_error(status, "could not create monitor server message queue");
0275             goto done;
0276         }
0277 
0278         /*
0279          * create the msg que our responses will come on
0280          * Since monitor just does one thing at a time, we only need 1 item
0281          * message queue.
0282          */
0283 
0284         status = rtems_message_queue_create(
0285                        RTEMS_MONITOR_RESPONSE_QUEUE_NAME,
0286                        1, /* depth */
0287                        sizeof(rtems_monitor_server_response_t),
0288                        RTEMS_GLOBAL,
0289                        &rtems_monitor_server_response_queue_id);
0290 
0291         if (status != RTEMS_SUCCESSFUL)
0292         {
0293             rtems_error(status, "could not create monitor response message queue");
0294             goto done;
0295         }
0296 
0297         /* need an id for queue of each other server we might talk to */
0298         /* indexed by node, so add 1 to maximum_nodes */
0299         rtems_monitor_server_request_queue_ids =
0300                    (rtems_id *) malloc((maximum_nodes + 1) * sizeof(rtems_id));
0301         (void) memset(rtems_monitor_server_request_queue_ids,
0302                       0,
0303                       (maximum_nodes + 1) * sizeof(rtems_id));
0304 
0305         rtems_monitor_server_request_queue_ids[rtems_monitor_node] =
0306                    rtems_monitor_server_request_queue_id;
0307 
0308         /*
0309          * create the server task
0310          */
0311         status = rtems_task_create(RTEMS_MONITOR_SERVER_NAME,
0312                                    1,
0313                                    0 /* default stack */,
0314                                    RTEMS_INTERRUPT_LEVEL(0),
0315                                    RTEMS_DEFAULT_ATTRIBUTES,
0316                                    &rtems_monitor_server_task_id);
0317         if (status != RTEMS_SUCCESSFUL)
0318         {
0319             rtems_error(status, "could not create monitor server task");
0320             goto done;
0321         }
0322 
0323         /*
0324          * Start the server task
0325          */
0326         status = rtems_task_start(rtems_monitor_server_task_id,
0327                                   rtems_monitor_server_task,
0328                                   monitor_flags);
0329         if (status != RTEMS_SUCCESSFUL)
0330         {
0331             rtems_error(status, "could not start monitor server");
0332             goto done;
0333         }
0334     }
0335 
0336 done:
0337     #endif
0338     return;
0339 }