一种基于队列的调度器的实现
软件开发中,经常会遇到下边几种情况:中断中处理部分任务,后续耗时操作需要转移到中断外执行,否则导致中断不能及时响应后续任务;回调函数中处理部分任务,然后由其他模块处理其他任务。在有OS支持时,可通过信号量、事件、消息队列等方法去实现。在无OS支持时,常见的方法有通过设置状态标志、由主循环的任务去接着执行,这种方法在基于状态机的框架比较常见,缺点也是显而易见的,代码可读性较差,中断中设置了一堆状态,然后再去找相关的状态机后续操作,有时候跳转几次就晕了,后续业务逻辑变化,升级也比较困难。
本文提出一种基于队列的调度器,分割前后台任务平面,配合分层、封装等方法,实现复杂的操作。主要参考了Nordic公司的SDK。
基本原理
初始化一个任务队列,任务队列每一项包含参数、任务处理函数句柄;中断/回调等后台任务,负责保存数据,同时构造后续操作的任务结构体入队列,通过级联的方法实现复杂的逻辑。调度执行程序按顺序取队列中的元素,执行任务处理函数。
代码
头文件: m_scheduler.h
#ifndef M_SCHEDULER_H__
#define M_SCHEDULER_H__
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#define M_SCHED_EVENT_HEADER_SIZE 8 /**< Size of m_scheduler.event_header_t (only for use inside M_SCHED_BUF_SIZE()). */
/** @defgroup ERRORS_BASE Error Codes Base number definitions
* @{ */
#define ERROR_BASE_NUM (0x0) ///< Global error base
#define ERROR_SDM_BASE_NUM (0x1000) ///< SDM error base
#define ERROR_SOC_BASE_NUM (0x2000) ///< SoC error base
#define ERROR_STK_BASE_NUM (0x3000) ///< STK error base
/** @} */
#define SUCCESS (ERROR_BASE_NUM + 0) ///< Successful command
#define ERROR_SVC_HANDLER_MISSING (ERROR_BASE_NUM + 1) ///< SVC handler is missing
#define ERROR_SOFTDEVICE_NOT_ENABLED (ERROR_BASE_NUM + 2) ///< SoftDevice has not been enabled
#define ERROR_INTERNAL (ERROR_BASE_NUM + 3) ///< Internal Error
#define ERROR_NO_MEM (ERROR_BASE_NUM + 4) ///< No Memory for operation
#define ERROR_NOT_FOUND (ERROR_BASE_NUM + 5) ///< Not found
#define ERROR_NOT_SUPPORTED (ERROR_BASE_NUM + 6) ///< Not supported
#define ERROR_INVALID_PARAM (ERROR_BASE_NUM + 7) ///< Invalid Parameter
#define ERROR_INVALID_STATE (ERROR_BASE_NUM + 8) ///< Invalid state, operation disallowed in this state
#define ERROR_INVALID_LENGTH (ERROR_BASE_NUM + 9) ///< Invalid Length
#define ERROR_INVALID_FLAGS (ERROR_BASE_NUM + 10) ///< Invalid Flags
#define ERROR_INVALID_DATA (ERROR_BASE_NUM + 11) ///< Invalid Data
#define ERROR_DATA_SIZE (ERROR_BASE_NUM + 12) ///< Invalid Data size
#define ERROR_TIMEOUT (ERROR_BASE_NUM + 13) ///< Operation timed out
#define ERROR_NULL (ERROR_BASE_NUM + 14) ///< Null Pointer
#define ERROR_FORBIDDEN (ERROR_BASE_NUM + 15) ///< Forbidden Operation
#define ERROR_INVALID_ADDR (ERROR_BASE_NUM + 16) ///< Bad Memory Address
#define ERROR_BUSY (ERROR_BASE_NUM + 17) ///< Busy
#define ERROR_CONN_COUNT (ERROR_BASE_NUM + 18) ///< Maximum connection count exceeded.
#define ERROR_RESOURCES (ERROR_BASE_NUM + 19) ///< Not enough resources for operation
/**@brief Stub error-check macro: evaluates ERR_CODE exactly once (silencing
 * unused-variable warnings) but takes no action. The do/while(0) wrapper makes
 * the macro behave as a single statement, so `if (x) M_ERROR_CHECK(e); else ...`
 * parses correctly (the previous `{}` expansion plus the caller's `;` broke
 * if/else pairing). Replace with a real assert/log hook in production. */
#define M_ERROR_CHECK(ERR_CODE) do { (void)(ERR_CODE); } while (0)
/**@brief Critical-section stubs: no-ops here; map to interrupt disable/enable
 * (or PRIMASK save/restore) on the target platform. */
#define CRITICAL_REGION_ENTER()
#define CRITICAL_REGION_EXIT()
/**@brief Check whether a pointer lies on a 32-bit word boundary.
 *
 * @param[in] p Pointer to test.
 *
 * @return true when the address is a multiple of 4 bytes.
 */
static inline bool is_word_aligned(void const* p)
{
    uintptr_t addr = (uintptr_t)p;
    return (addr % 4u) == 0u;
}
/**@brief Integer ceiling division: smallest integer >= A/B, for positive operands.
 * NOTE: B is expanded twice — do not pass arguments with side effects. */
#define CEIL_DIV(A, B) \
(((A) + (B) - 1) / (B))
/************ Memory distribution ******************************************************
------------------------- offset type size
m_sched_event_handler_t 0 (*f)() 4
event_data_size 4 int16 2 align event_header_t
m_sched_event_handler_t 8 (*f)() 4
event_data_size 12 int16 2 align event_header_t
...
...
event_data[event_data_size] (queue_size+1)* M_SCHED_EVENT_HEADER_SIZE(8)
event_data[event_data_size]
...
... total queue_size+1
(queue_size+1)*event_data_size
***************************************************************************************/
/* NOTE(fix): the comment above was previously unterminated, which made the
 * preprocessor swallow the M_SCHED_BUF_SIZE definition below. */
/**@brief Total buffer size in bytes for a scheduler holding QUEUE_SIZE events of
 * up to EVENT_SIZE bytes each. One extra slot is reserved because the ring
 * buffer keeps one entry unused to distinguish full from empty. */
#define M_SCHED_BUF_SIZE(EVENT_SIZE, QUEUE_SIZE) \
(((EVENT_SIZE) + M_SCHED_EVENT_HEADER_SIZE) * ((QUEUE_SIZE) + 1))
/**@brief Scheduler event handler type. */
typedef void (*m_sched_event_handler_t)(void * p_event_data, uint16_t event_size);
/**@brief Statically allocate the scheduler buffer and initialize the scheduler.
 *
 * The buffer is declared as a static uint32_t array, which guarantees the word
 * alignment that m_sched_init() requires. Must be invoked in function scope.
 *
 * @param[in] EVENT_SIZE Maximum event payload size in bytes.
 * @param[in] QUEUE_SIZE Number of queue entries (one extra slot added internally).
 */
#define M_SCHED_INIT(EVENT_SIZE, QUEUE_SIZE) \
do \
{ \
static uint32_t M_SCHED_BUF[CEIL_DIV(M_SCHED_BUF_SIZE((EVENT_SIZE), (QUEUE_SIZE)), \
sizeof(uint32_t))]; \
uint32_t ERR_CODE = m_sched_init((EVENT_SIZE), (QUEUE_SIZE), M_SCHED_BUF); \
M_ERROR_CHECK(ERR_CODE); \
} while (0)
/**@brief Initialize the event scheduler.
 *
 * @param[in] max_event_size Maximum event payload size in bytes.
 * @param[in] queue_size     Number of usable queue entries.
 * @param[in] p_evt_buffer   Word-aligned buffer of at least
 *                           M_SCHED_BUF_SIZE(max_event_size, queue_size) bytes.
 *
 * @return SUCCESS on success, otherwise an error code.
 */
uint32_t m_sched_init(uint16_t max_event_size, uint16_t queue_size, void * p_evt_buffer);
/**@brief Execute all queued events in FIFO order; call repeatedly from the main loop. */
void m_sched_execute(void);
/**@brief Schedule an event: copies the payload into the queue and records the handler.
 * Intended to be callable from interrupt/callback context.
 *
 * @return SUCCESS, ERROR_INVALID_LENGTH if the payload exceeds max_event_size,
 *         or ERROR_NO_MEM if the queue is full.
 */
uint32_t m_sched_event_put(void const * p_event_data,
uint16_t event_size,
m_sched_event_handler_t handler);
/**@brief Maximum queue utilization observed so far (requires M_SCHEDULER_WITH_PROFILER). */
uint16_t m_sched_queue_utilization_get(void);
/**@brief Current number of free queue entries. */
uint16_t m_sched_queue_space_get(void);
/**@brief Pause event processing; pauses nest (requires M_SCHEDULER_WITH_PAUSE). */
void m_sched_pause(void);
/**@brief Resume event processing after a matching m_sched_pause(). */
void m_sched_resume(void);
#ifdef __cplusplus
}
#endif
#endif // M_SCHEDULER_H__
源文件: m_scheduler.c
/**
******************************************************************************
* @file ../middlewares/src/m_scheduler.c
* @author yhangzzz
* @version V1.0.0
* @date 2018.10.22
* @brief m_scheduler.c
******************************************************************************
*/
#include "m_common.h"
#if (defined(M_SCHEDULER_ENABLED) && M_SCHEDULER_ENABLED) ? 1 : 0
#include "m_scheduler.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
/**@brief Structure for holding a scheduled event header; one precedes each
 * payload slot in the queue buffer. */
typedef struct
{
m_sched_event_handler_t handler; /**< Pointer to event handler to receive the event. */
uint16_t event_data_size; /**< Size of event data. */
} event_header_t;
// The buffer layout reserves M_SCHED_EVENT_HEADER_SIZE bytes per header, so the
// real struct size (including any padding) must not exceed it.
STATIC_ASSERT(sizeof(event_header_t) <= M_SCHED_EVENT_HEADER_SIZE);
static event_header_t * m_queue_event_headers; /**< Array for holding the queue event headers. */
static uint8_t * m_queue_event_data; /**< Array for holding the queue event data. */
// The indices are volatile because the end index is advanced from producer
// (interrupt/callback) context while the start index is advanced from the
// main-loop consumer in m_sched_execute().
static volatile uint8_t m_queue_start_index; /**< Index of queue entry at the start of the queue. */
static volatile uint8_t m_queue_end_index; /**< Index of queue entry at the end of the queue. */
static uint16_t m_queue_event_size; /**< Maximum event size in queue. */
static uint16_t m_queue_size; /**< Number of queue entries. */
#if M_SCHEDULER_WITH_PROFILER
static uint16_t m_max_queue_utilization; /**< Maximum observed queue utilization. */
#endif
#if M_SCHEDULER_WITH_PAUSE
static uint32_t m_scheduler_paused_counter = 0; /**< Counter storing the difference between pausing
and resuming the scheduler. */
#endif
/**@brief Advance a ring-buffer index by one slot, wrapping past the last entry.
 *
 * @param[in] index Old index.
 *
 * @return Incremented index, or 0 when index was at the final slot.
 */
static inline uint8_t next_index(uint8_t index)
{
    if (index >= m_queue_size)
    {
        return 0;
    }
    return (uint8_t)(index + 1);
}
/**@brief Test whether the queue is full (one slot is always kept free so that
 * a full queue is distinguishable from an empty one). */
static inline uint8_t m_sched_queue_full()
{
    uint8_t start_snapshot = m_queue_start_index;
    return next_index(m_queue_end_index) == start_snapshot;
}
/**@brief Macro for checking if a queue is full. */
#define M_SCHED_QUEUE_FULL() m_sched_queue_full()
/**@brief Test whether the queue holds no pending events. */
static inline uint8_t m_sched_queue_empty()
{
    uint8_t start_snapshot = m_queue_start_index;
    return start_snapshot == m_queue_end_index;
}
/**@brief Macro for checking if a queue is empty. */
#define M_SCHED_QUEUE_EMPTY() m_sched_queue_empty()
/**@brief Initialize the scheduler state over a caller-supplied buffer.
 *
 * Buffer layout: (queue_size + 1) event_header_t entries, followed by
 * (queue_size + 1) payload slots of event_size bytes each.
 *
 * @param[in] event_size     Maximum event payload size in bytes.
 * @param[in] queue_size     Number of usable queue entries; must be <= 255
 *                           because the ring indices are uint8_t.
 * @param[in] p_event_buffer Word-aligned buffer of at least
 *                           M_SCHED_BUF_SIZE(event_size, queue_size) bytes.
 *
 * @return SUCCESS, or ERROR_INVALID_PARAM for a NULL/misaligned buffer or an
 *         oversized queue_size.
 */
uint32_t m_sched_init(uint16_t event_size, uint16_t queue_size, void * p_event_buffer)
{
    // NULL would pass the alignment test ((uintptr_t)NULL is word aligned),
    // so reject it explicitly.
    if (p_event_buffer == NULL)
    {
        return ERROR_INVALID_PARAM;
    }
    // Queue indices are uint8_t; a larger queue_size would silently corrupt
    // the wrap-around arithmetic in next_index().
    if (queue_size > 255)
    {
        return ERROR_INVALID_PARAM;
    }
    // Check that buffer is correctly aligned
    if (!is_word_aligned(p_event_buffer))
    {
        return ERROR_INVALID_PARAM;
    }
    // Payload area starts right after the header array.
    uint16_t data_start_index = (uint16_t)((queue_size + 1) * sizeof(event_header_t));
    // Initialize event scheduler
    m_queue_event_headers = p_event_buffer;
    m_queue_event_data = &((uint8_t *)p_event_buffer)[data_start_index];
    m_queue_end_index = 0;
    m_queue_start_index = 0;
    m_queue_event_size = event_size;
    m_queue_size = queue_size;
#if M_SCHEDULER_WITH_PROFILER
    m_max_queue_utilization = 0;
#endif
    return SUCCESS;
}
/**@brief Get the number of queue entries currently free.
 *
 * @return Free entry count (0..m_queue_size).
 */
uint16_t m_sched_queue_space_get()
{
    uint16_t start = m_queue_start_index;
    uint16_t end = m_queue_end_index;
    uint16_t used;
    if (end >= start)
    {
        used = (uint16_t)(end - start);
    }
    else
    {
        // Wrapped: occupied span crosses the end of the (m_queue_size + 1)-slot ring.
        used = (uint16_t)(m_queue_size + 1 - start + end);
    }
    return (uint16_t)(m_queue_size - used);
}
#if M_SCHEDULER_WITH_PROFILER
/**@brief Update the utilization high-water mark from the current fill level.
 *
 * Must be called from within a critical region because it read-modify-writes
 * m_max_queue_utilization.
 */
static void queue_utilization_check(void)
{
    uint16_t start = m_queue_start_index;
    uint16_t end = m_queue_end_index;
    uint16_t used = (end >= start) ? (uint16_t)(end - start)
                                   : (uint16_t)(m_queue_size + 1 - start + end);
    if (used > m_max_queue_utilization)
    {
        m_max_queue_utilization = used;
    }
}
/**@brief Get the maximum queue utilization observed since initialization. */
uint16_t m_sched_queue_utilization_get(void)
{
    return m_max_queue_utilization;
}
#endif // M_SCHEDULER_WITH_PROFILER
/**@brief Put an event into the scheduler queue; safe from interrupt context.
 *
 * The payload is copied into the queue slot, so the caller's buffer may be
 * reused as soon as this function returns. Only the slot reservation (index
 * advance) is done inside the critical region; the copy happens afterwards,
 * which is safe because the consumer runs only in the main loop.
 *
 * @param[in] p_event_data    Pointer to event payload; may be NULL when
 *                            event_data_size is 0.
 * @param[in] event_data_size Payload size in bytes; must not exceed the
 *                            max_event_size given to m_sched_init().
 * @param[in] handler         Handler that m_sched_execute() will call.
 *
 * @return SUCCESS, ERROR_INVALID_LENGTH if the payload is too large, or
 *         ERROR_NO_MEM if the queue is full.
 */
uint32_t m_sched_event_put(void const * p_event_data,
uint16_t event_data_size,
m_sched_event_handler_t handler)
{
uint32_t err_code;
if (event_data_size <= m_queue_event_size)
{
// 0xFFFF acts as a "no slot reserved" sentinel; valid indices are <= 255.
uint16_t event_index = 0xFFFF;
CRITICAL_REGION_ENTER();
if (!M_SCHED_QUEUE_FULL())
{
// Reserve the slot by advancing the end index before leaving the
// critical region; the slot contents are filled in below.
event_index = m_queue_end_index;
m_queue_end_index = next_index(m_queue_end_index);
#if M_SCHEDULER_WITH_PROFILER
// This function call must be protected with critical region because
// it modifies 'm_max_queue_utilization'.
queue_utilization_check();
#endif
}
CRITICAL_REGION_EXIT();
if (event_index != 0xFFFF)
{
// NOTE: This can be done outside the critical region since the event consumer will
// always be called from the main loop, and will thus never interrupt this code.
m_queue_event_headers[event_index].handler = handler;
if ((p_event_data != NULL) && (event_data_size > 0))
{
memcpy(&m_queue_event_data[event_index * m_queue_event_size],
p_event_data,
event_data_size);
m_queue_event_headers[event_index].event_data_size = event_data_size;
}
else
{
// No payload: handler will receive a pointer to the (unused) slot and size 0.
m_queue_event_headers[event_index].event_data_size = 0;
}
err_code = SUCCESS;
}
else
{
err_code = ERROR_NO_MEM;
}
}
else
{
err_code = ERROR_INVALID_LENGTH;
}
return err_code;
}
#if M_SCHEDULER_WITH_PAUSE
/**@brief Pause scheduler event processing. Pauses nest; the counter saturates
 * at UINT32_MAX instead of wrapping. */
void m_sched_pause(void)
{
    CRITICAL_REGION_ENTER();
    if (m_scheduler_paused_counter != UINT32_MAX)
    {
        m_scheduler_paused_counter++;
    }
    CRITICAL_REGION_EXIT();
}
/**@brief Undo one m_sched_pause(); processing resumes when the counter
 * reaches zero. Extra resumes are ignored. */
void m_sched_resume(void)
{
    CRITICAL_REGION_ENTER();
    if (m_scheduler_paused_counter != 0)
    {
        m_scheduler_paused_counter--;
    }
    CRITICAL_REGION_EXIT();
}
#endif //M_SCHEDULER_WITH_PAUSE
/**@brief Tell whether event processing is currently paused.
 *
 * @return true if the scheduler is paused; always false when pause support is
 *         compiled out.
 */
static inline bool is_m_sched_paused(void)
{
#if M_SCHEDULER_WITH_PAUSE
    return m_scheduler_paused_counter != 0;
#else
    return false;
#endif
}
/**@brief Drain the event queue, calling each stored handler in FIFO order.
 *
 * Must be called only from the main loop (the single consumer). Stops early if
 * the scheduler is paused. The start index is advanced only AFTER the handler
 * returns, so a producer interrupt can never reuse a slot that is still being
 * processed.
 */
void m_sched_execute(void)
{
while (!is_m_sched_paused() && !M_SCHED_QUEUE_EMPTY())
{
// Since this function is only called from the main loop, there is no
// need for a critical region here, however a special care must be taken
// regarding update of the queue start index (see the end of the loop).
uint16_t event_index = m_queue_start_index;
void * p_event_data;
uint16_t event_data_size;
m_sched_event_handler_t event_handler;
p_event_data = &m_queue_event_data[event_index * m_queue_event_size];
event_data_size = m_queue_event_headers[event_index].event_data_size;
event_handler = m_queue_event_headers[event_index].handler;
event_handler(p_event_data, event_data_size);
// Event processed, now it is safe to move the queue start index,
// so the queue entry occupied by this event can be used to store
// a next one.
m_queue_start_index = next_index(m_queue_start_index);
}
}
#endif //#if (defined(M_SCHEDULER_ENABLED) && M_SCHEDULER_ENABLED)?1:0