Implemented Furi from Flipper Zero

added cmsis_core, furi, mlib and nanobake
implemented basic app structure from furi
implemented basic placeholder apps
This commit is contained in:
Ken Van Hoeylandt 2023-12-26 21:47:27 +01:00
parent 0cf7829a2d
commit 5dc2599e55
114 changed files with 53069 additions and 297 deletions

View File

@ -1,5 +1,7 @@
# Top-level ESP-IDF project file.
cmake_minimum_required(VERSION 3.16)
# Define FURI_DEBUG for every translation unit — presumably enables furi
# debug checks/logging; confirm against furi sources.
add_definitions(-DFURI_DEBUG)
# Trim the build to the "main" component (plus its dependencies).
set(COMPONENTS main)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)

View File

@ -0,0 +1,3 @@
# Header-only component: exposes the current directory on the include path;
# no source files are compiled.
idf_component_register(
    INCLUDE_DIRS "."
)

View File

@ -0,0 +1,894 @@
/**************************************************************************//**
* @file cmsis_armcc.h
* @brief CMSIS compiler ARMCC (Arm Compiler 5) header file
* @version V5.4.0
* @date 20. January 2023
******************************************************************************/
/*
* Copyright (c) 2009-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_ARMCC_H
#define __CMSIS_ARMCC_H
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677)
#error "Please use Arm Compiler Toolchain V4.0.677 or later!"
#endif
/* CMSIS compiler control architecture macros */
#if ((defined (__TARGET_ARCH_6_M ) && (__TARGET_ARCH_6_M == 1)) || \
(defined (__TARGET_ARCH_6S_M ) && (__TARGET_ARCH_6S_M == 1)) )
#define __ARM_ARCH_6M__ 1
#endif
#if (defined (__TARGET_ARCH_7_M ) && (__TARGET_ARCH_7_M == 1))
#define __ARM_ARCH_7M__ 1
#endif
#if (defined (__TARGET_ARCH_7E_M) && (__TARGET_ARCH_7E_M == 1))
#define __ARM_ARCH_7EM__ 1
#endif
/* __ARM_ARCH_8M_BASE__ not applicable */
/* __ARM_ARCH_8M_MAIN__ not applicable */
/* __ARM_ARCH_8_1M_MAIN__ not applicable */
/* CMSIS compiler control DSP macros */
#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __ARM_FEATURE_DSP 1
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE __inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE static __forceinline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __declspec(noreturn)
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT __packed struct
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION __packed union
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
#define __UNALIGNED_UINT32(x) (*((__packed uint32_t *)(x)))
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr)))
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr)))
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __COMPILER_BARRIER
#define __COMPILER_BARRIER() __memory_changed()
#endif
#ifndef __NO_INIT
#define __NO_INIT __attribute__ ((section (".bss.noinit"), zero_init))
#endif
#ifndef __ALIAS
#define __ALIAS(x) __attribute__ ((alias(x)))
#endif
/* ######################### Startup and Lowlevel Init ######################## */
#ifndef __PROGRAM_START
#define __PROGRAM_START __main
#endif
#ifndef __INITIAL_SP
#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit
#endif
#ifndef __STACK_LIMIT
#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base
#endif
#ifndef __VECTOR_TABLE
#define __VECTOR_TABLE __Vectors
#endif
#ifndef __VECTOR_TABLE_ATTRIBUTE
#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET")))
#endif
/* ########################## Core Instruction Access ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
Access to dedicated instructions
@{
*/
/**
\brief No Operation
\details No Operation does nothing. This instruction can be used for code alignment purposes.
*/
#define __NOP __nop
/**
\brief Wait For Interrupt
\details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
*/
#define __WFI __wfi
/**
\brief Wait For Event
\details Wait For Event is a hint instruction that permits the processor to enter
a low-power state until one of a number of events occurs.
*/
#define __WFE __wfe
/**
\brief Send Event
\details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
*/
#define __SEV __sev
/**
\brief Instruction Synchronization Barrier
\details Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or memory,
after the instruction has been completed.
*/
#define __ISB() __isb(0xF)
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
#define __DSB() __dsb(0xF)
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
#define __DMB() __dmb(0xF)
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REV __rev
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
/* Arm Compiler 5 embedded-assembler (__ASM) function: argument and result
   live in r0 per the AAPCS, so a single REV16 reverses the bytes within
   each halfword. Placed in its own section so the linker can discard it. */
__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
{
  rev16 r0, r0
  bx lr
}
#endif
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
/* Arm Compiler 5 embedded-assembler (__ASM) function: REVSH reverses the
   bytes of the low halfword in r0 and sign-extends the result to 32 bits. */
__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value)
{
  revsh r0, r0
  bx lr
}
#endif
/**
\brief Rotate Right in unsigned value (32 bit)
\details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] op1 Value to rotate
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
#define __ROR __ror
/**
\brief Breakpoint
\details Causes the processor to enter Debug state.
Debug tools can use this to investigate system state when the instruction at a particular address is reached.
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __breakpoint(value)
/**
\brief Reverse bit order of value
\details Reverses the bit order of the given value.
\param [in] value Value to reverse
\return Reversed value
*/
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __RBIT __rbit
#else
/* Software fallback for cores without the RBIT instruction (Armv6-M).
   Returns `value` with its 32 bits in reversed order.
   (__STATIC_INLINE is written in its expanded form `static __inline`.) */
__attribute__((always_inline)) static __inline uint32_t __RBIT(uint32_t value)
{
  uint32_t reversed = 0U;

  /* Consume the input LSB-first while emitting the output MSB-first. */
  for (uint32_t bit = 0U; bit < 32U; bit++)
  {
    reversed <<= 1U;
    reversed  |= (value & 1U);
    value    >>= 1U;
  }
  return reversed;
}
#endif
/**
\brief Count leading zeros
\details Counts the number of leading zeros of a data value.
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ __clz
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
/**
\brief LDR Exclusive (8 bit)
\details Executes a exclusive LDR instruction for 8 bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr))
#else
#define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief LDR Exclusive (16 bit)
\details Executes a exclusive LDR instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXH(ptr) ((uint16_t) __ldrex(ptr))
#else
#define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief LDR Exclusive (32 bit)
\details Executes a exclusive LDR instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr))
#else
#define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief STR Exclusive (8 bit)
\details Executes a exclusive STR instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXB(value, ptr) __strex(value, ptr)
#else
#define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief STR Exclusive (16 bit)
\details Executes a exclusive STR instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXH(value, ptr) __strex(value, ptr)
#else
#define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief STR Exclusive (32 bit)
\details Executes a exclusive STR instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXW(value, ptr) __strex(value, ptr)
#else
#define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief Remove the exclusive lock
\details Removes the exclusive lock which is created by LDREX.
*/
#define __CLREX __clrex
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT __ssat
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT __usat
/**
\brief Rotate Right with Extend (32 bit)
\details Moves each bit of a bitstring right by one bit.
The carry input is shifted in at the left end of the bitstring.
\param [in] value Value to rotate
\return Rotated value
*/
#ifndef __NO_EMBEDDED_ASM
/* Arm Compiler 5 embedded-assembler (__ASM) function: RRX shifts r0 right
   by one bit, shifting the carry flag into bit 31 of the result. */
__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value)
{
  rrx r0, r0
  bx lr
}
#endif
/**
\brief LDRT Unprivileged (8 bit)
\details Executes a Unprivileged LDRT instruction for 8 bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr))
/**
\brief LDRT Unprivileged (16 bit)
\details Executes a Unprivileged LDRT instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr))
/**
\brief LDRT Unprivileged (32 bit)
\details Executes a Unprivileged LDRT instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr))
/**
\brief STRT Unprivileged (8 bit)
\details Executes a Unprivileged STRT instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRBT(value, ptr) __strt(value, ptr)
/**
\brief STRT Unprivileged (16 bit)
\details Executes a Unprivileged STRT instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRHT(value, ptr) __strt(value, ptr)
/**
\brief STRT Unprivileged (32 bit)
\details Executes a Unprivileged STRT instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRT(value, ptr) __strt(value, ptr)
#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
/* Software fallback for cores without the SSAT instruction: clamp `val`
   to the range of a signed `sat`-bit integer.
   \param val  Value to be saturated
   \param sat  Bit position to saturate to (1..32); out-of-range `sat`
               passes the value through unchanged.
   \return     Saturated value
   (__STATIC_INLINE is written in its expanded form `static __inline`.) */
__attribute__((always_inline)) static __inline int32_t __SSAT(int32_t val, uint32_t sat)
{
  /* Saturation is only defined for bit positions 1..32. */
  if ((sat == 0U) || (sat > 32U))
  {
    return val;
  }

  const int32_t upper = (int32_t)((1U << (sat - 1U)) - 1U); /* 2^(sat-1) - 1 */
  const int32_t lower = -upper - 1;                         /* -2^(sat-1)    */

  if (val > upper)
  {
    return upper;
  }
  if (val < lower)
  {
    return lower;
  }
  return val;
}
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
/* Software fallback for cores without the USAT instruction: clamp `val`
   to the range of an unsigned `sat`-bit integer.
   \param val  Value to be saturated
   \param sat  Bit position to saturate to (0..31); larger `sat` passes the
               value through (reinterpreted as unsigned).
   \return     Saturated value
   (__STATIC_INLINE is written in its expanded form `static __inline`.) */
__attribute__((always_inline)) static __inline uint32_t __USAT(int32_t val, uint32_t sat)
{
  if (sat > 31U)
  {
    return (uint32_t)val; /* no saturation defined beyond 31 bits */
  }
  if (val < 0)
  {
    return 0U;
  }

  const uint32_t limit = (1U << sat) - 1U; /* 2^sat - 1 */
  return ((uint32_t)val > limit) ? limit : (uint32_t)val;
}
#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
/* ########################### Core Function Access ########################### */
/** \ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
@{
*/
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing special-purpose register PRIMASK.
Can only be executed in Privileged modes.
*/
/* intrinsic void __enable_irq(); */
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting special-purpose register PRIMASK.
Can only be executed in Privileged modes.
*/
/* intrinsic void __disable_irq(); */
/**
\brief Get Control Register
\details Returns the content of the Control Register.
\return Control Register value
*/
__STATIC_INLINE uint32_t __get_CONTROL(void)
{
  /* Arm Compiler 5 named-register variable: binds to the CONTROL special register. */
  register uint32_t __regControl         __ASM("control");
  return(__regControl);
}
/**
\brief Set Control Register
\details Writes the given value to the Control Register.
\param [in] control Control Register value to set
*/
__STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  /* Arm Compiler 5 named-register variable: writes the CONTROL special register. */
  register uint32_t __regControl         __ASM("control");
  __regControl = control;
  __ISB(); /* barrier so the CONTROL update takes effect before subsequent instructions */
}
/**
\brief Get IPSR Register
\details Returns the content of the IPSR Register.
\return IPSR Register value
*/
__STATIC_INLINE uint32_t __get_IPSR(void)
{
  /* Named-register read of IPSR (current exception number). */
  register uint32_t __regIPSR          __ASM("ipsr");
  return(__regIPSR);
}
/**
\brief Get APSR Register
\details Returns the content of the APSR Register.
\return APSR Register value
*/
__STATIC_INLINE uint32_t __get_APSR(void)
{
  /* Named-register read of APSR (application condition flags). */
  register uint32_t __regAPSR          __ASM("apsr");
  return(__regAPSR);
}
/**
\brief Get xPSR Register
\details Returns the content of the xPSR Register.
\return xPSR Register value
*/
__STATIC_INLINE uint32_t __get_xPSR(void)
{
  /* Named-register read of xPSR (combined APSR/IPSR/EPSR view). */
  register uint32_t __regXPSR          __ASM("xpsr");
  return(__regXPSR);
}
/**
\brief Get Process Stack Pointer
\details Returns the current value of the Process Stack Pointer (PSP).
\return PSP Register value
*/
__STATIC_INLINE uint32_t __get_PSP(void)
{
  /* Named-register read of the Process Stack Pointer. */
  register uint32_t __regProcessStackPointer  __ASM("psp");
  return(__regProcessStackPointer);
}
/**
\brief Set Process Stack Pointer
\details Assigns the given value to the Process Stack Pointer (PSP).
\param [in] topOfProcStack Process Stack Pointer value to set
*/
__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
  /* Named-register write of the Process Stack Pointer. */
  register uint32_t __regProcessStackPointer  __ASM("psp");
  __regProcessStackPointer = topOfProcStack;
}
/**
\brief Get Main Stack Pointer
\details Returns the current value of the Main Stack Pointer (MSP).
\return MSP Register value
*/
__STATIC_INLINE uint32_t __get_MSP(void)
{
  /* Named-register read of the Main Stack Pointer. */
  register uint32_t __regMainStackPointer     __ASM("msp");
  return(__regMainStackPointer);
}
/**
\brief Set Main Stack Pointer
\details Assigns the given value to the Main Stack Pointer (MSP).
\param [in] topOfMainStack Main Stack Pointer value to set
*/
__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
  /* Named-register write of the Main Stack Pointer. */
  register uint32_t __regMainStackPointer     __ASM("msp");
  __regMainStackPointer = topOfMainStack;
}
/**
\brief Get Priority Mask
\details Returns the current state of the priority mask bit from the Priority Mask Register.
\return Priority Mask value
*/
__STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  /* Named-register read of PRIMASK (global interrupt-disable bit). */
  register uint32_t __regPriMask         __ASM("primask");
  return(__regPriMask);
}
/**
\brief Set Priority Mask
\details Assigns the given value to the Priority Mask Register.
\param [in] priMask Priority Mask
*/
__STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  /* Named-register write of PRIMASK; value is written as-is (not masked). */
  register uint32_t __regPriMask         __ASM("primask");
  __regPriMask = (priMask);
}
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing special-purpose register FAULTMASK.
Can only be executed in Privileged modes.
*/
#define __enable_fault_irq __enable_fiq
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting special-purpose register FAULTMASK.
Can only be executed in Privileged modes.
*/
#define __disable_fault_irq __disable_fiq
/**
\brief Get Base Priority
\details Returns the current value of the Base Priority register.
\return Base Priority register value
*/
__STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  /* Named-register read of BASEPRI (priority-based interrupt mask). */
  register uint32_t __regBasePri         __ASM("basepri");
  return(__regBasePri);
}
/**
\brief Set Base Priority
\details Assigns the given value to the Base Priority register.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI(uint32_t basePri)
{
  /* BASEPRI is an 8-bit field, so the value is masked to 0xFF. */
  register uint32_t __regBasePri         __ASM("basepri");
  __regBasePri = (basePri & 0xFFU);
}
/**
\brief Set Base Priority with condition
\details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
or the new value increases the BASEPRI priority level.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
  /* Writes via the BASEPRI_MAX alias: the hardware only accepts the write
     when it disables masking or raises the masking priority level. */
  register uint32_t __regBasePriMax      __ASM("basepri_max");
  __regBasePriMax = (basePri & 0xFFU); /* BASEPRI is an 8-bit field */
}
/**
\brief Get Fault Mask
\details Returns the current value of the Fault Mask register.
\return Fault Mask register value
*/
__STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  /* Named-register read of FAULTMASK. */
  register uint32_t __regFaultMask       __ASM("faultmask");
  return(__regFaultMask);
}
/**
\brief Set Fault Mask
\details Assigns the given value to the Fault Mask register.
\param [in] faultMask Fault Mask value to set
*/
__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  /* FAULTMASK is a single bit, so only bit 0 of the argument is written. */
  register uint32_t __regFaultMask       __ASM("faultmask");
  __regFaultMask = (faultMask & (uint32_t)1U);
}
#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
  /* FPSCR only exists when an FPU is present AND enabled for this build. */
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
  register uint32_t __regfpscr         __ASM("fpscr");
  return(__regfpscr);
#else
  return(0U); /* no FPU configured: read as zero */
#endif
}
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
  /* FPSCR only exists when an FPU is present AND enabled for this build. */
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
  register uint32_t __regfpscr         __ASM("fpscr");
  __regfpscr = (fpscr);
#else
  (void)fpscr; /* no FPU configured: write is discarded */
#endif
}
/*@} end of CMSIS_Core_RegAccFunctions */
/* ################### Compiler specific Intrinsics ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
Access to dedicated SIMD instructions
@{
*/
#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __SADD8 __sadd8
#define __QADD8 __qadd8
#define __SHADD8 __shadd8
#define __UADD8 __uadd8
#define __UQADD8 __uqadd8
#define __UHADD8 __uhadd8
#define __SSUB8 __ssub8
#define __QSUB8 __qsub8
#define __SHSUB8 __shsub8
#define __USUB8 __usub8
#define __UQSUB8 __uqsub8
#define __UHSUB8 __uhsub8
#define __SADD16 __sadd16
#define __QADD16 __qadd16
#define __SHADD16 __shadd16
#define __UADD16 __uadd16
#define __UQADD16 __uqadd16
#define __UHADD16 __uhadd16
#define __SSUB16 __ssub16
#define __QSUB16 __qsub16
#define __SHSUB16 __shsub16
#define __USUB16 __usub16
#define __UQSUB16 __uqsub16
#define __UHSUB16 __uhsub16
#define __SASX __sasx
#define __QASX __qasx
#define __SHASX __shasx
#define __UASX __uasx
#define __UQASX __uqasx
#define __UHASX __uhasx
#define __SSAX __ssax
#define __QSAX __qsax
#define __SHSAX __shsax
#define __USAX __usax
#define __UQSAX __uqsax
#define __UHSAX __uhsax
#define __USAD8 __usad8
#define __USADA8 __usada8
#define __SSAT16 __ssat16
#define __USAT16 __usat16
#define __UXTB16 __uxtb16
#define __UXTAB16 __uxtab16
#define __SXTB16 __sxtb16
#define __SXTAB16 __sxtab16
#define __SMUAD __smuad
#define __SMUADX __smuadx
#define __SMLAD __smlad
#define __SMLADX __smladx
#define __SMLALD __smlald
#define __SMLALDX __smlaldx
#define __SMUSD __smusd
#define __SMUSDX __smusdx
#define __SMLSD __smlsd
#define __SMLSDX __smlsdx
#define __SMLSLD __smlsld
#define __SMLSLDX __smlsldx
#define __SEL __sel
#define __QADD __qadd
#define __QSUB __qsub
#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
((int64_t)(ARG3) << 32U) ) >> 32U))
#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/*@} end of group CMSIS_SIMD_intrinsics */
#endif /* __CMSIS_ARMCC_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,304 @@
/**************************************************************************//**
* @file cmsis_compiler.h
* @brief CMSIS compiler generic header file
* @version V5.3.0
* @date 04. April 2023
******************************************************************************/
/*
* Copyright (c) 2009-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_COMPILER_H
#define __CMSIS_COMPILER_H
#include <stdint.h>
/*
* Arm Compiler 4/5
*/
#if defined ( __CC_ARM )
#include "cmsis_armcc.h"
/*
* Arm Compiler 6.6 LTM (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100)
#include "cmsis_armclang_ltm.h"
/*
* Arm Compiler above 6.10.1 (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100)
#include "cmsis_armclang.h"
/*
* TI Arm Clang Compiler (tiarmclang)
*/
#elif defined (__ti__)
#include "cmsis_tiarmclang.h"
/*
* GNU Compiler
*/
#elif defined ( __GNUC__ )
#include "cmsis_gcc_esp32.h"
// #include "cmsis_gcc.h"
/*
* IAR Compiler
*/
#elif defined ( __ICCARM__ )
#include <cmsis_iccarm.h>
/*
* TI Arm Compiler (armcl)
*/
#elif defined ( __TI_ARM__ )
#include <cmsis_ccs.h>
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed))
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION union __attribute__((packed))
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
struct __attribute__((packed)) T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
#ifndef __NO_INIT
#define __NO_INIT __attribute__ ((section (".bss.noinit")))
#endif
#ifndef __ALIAS
#define __ALIAS(x) __attribute__ ((alias(x)))
#endif
/*
* TASKING Compiler
*/
#elif defined ( __TASKING__ )
/*
* The CMSIS functions have been implemented as intrinsics in the compiler.
* Please use "carm -?i" to get an up to date list of all intrinsics,
* Including the CMSIS ones.
*/
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __packed__
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __packed__
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION union __packed__
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
struct __packed__ T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __align(x)
#endif
#ifndef __RESTRICT
#warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
#define __RESTRICT
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
#ifndef __NO_INIT
#define __NO_INIT __attribute__ ((section (".bss.noinit")))
#endif
#ifndef __ALIAS
#define __ALIAS(x) __attribute__ ((alias(x)))
#endif
/*
* COSMIC Compiler
*/
#elif defined ( __CSMC__ )
#include <cmsis_csm.h>
#ifndef __ASM
#define __ASM _asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
// NO RETURN is automatically detected hence no warning here
#define __NO_RETURN
#endif
#ifndef __USED
#warning No compiler specific solution for __USED. __USED is ignored.
#define __USED
#endif
#ifndef __WEAK
#define __WEAK __weak
#endif
#ifndef __PACKED
#define __PACKED @packed
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT @packed struct
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION @packed union
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
@packed struct T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
#define __ALIGNED(x)
#endif
#ifndef __RESTRICT
#warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
#define __RESTRICT
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
#ifndef __NO_INIT
#define __NO_INIT __attribute__ ((section (".bss.noinit")))
#endif
#ifndef __ALIAS
#define __ALIAS(x) __attribute__ ((alias(x)))
#endif
#else
#error Unknown compiler.
#endif
#endif /* __CMSIS_COMPILER_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,39 @@
/**************************************************************************//**
* @file cmsis_version.h
* @brief CMSIS Core(M) Version definitions
* @version V5.0.5
* @date 02. February 2022
******************************************************************************/
/*
* Copyright (c) 2009-2022 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
  #pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
  #pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CMSIS_VERSION_H
#define __CMSIS_VERSION_H
/* CMSIS Version definitions */
/* The combined version is encoded as 0xMMMMSSSS: main version in bits [31:16],
 * sub version in bits [15:0], allowing simple numeric >= comparisons. */
#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */
#define __CM_CMSIS_VERSION_SUB ( 6U) /*!< [15:0] CMSIS Core(M) sub version */
#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \
 __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,275 @@
/******************************************************************************
* @file mpu_armv7.h
* @brief CMSIS MPU API for Armv7-M MPU
* @version V5.1.2
* @date 25. May 2020
******************************************************************************/
/*
* Copyright (c) 2017-2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef ARM_MPU_ARMV7_H
#define ARM_MPU_ARMV7_H
/* MPU_RASR.SIZE field encodings. The region size in bytes is 2^(value + 1),
 * so 0x04 selects 32 bytes and 0x1F selects 4 GBytes. */
#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes
#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes
#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes
#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes
#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes
#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte
#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes
#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes
#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes
#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes
#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes
#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes
#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes
#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes
#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes
#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte
#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes
#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes
#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes
#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes
#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes
#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes
#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes
#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes
#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes
#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte
#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes
#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes
/* MPU_RASR.AP (access permission) field values, combining privileged and
 * unprivileged access rights for the region. */
#define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access
#define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only
#define ARM_MPU_AP_URO 2U ///!< MPU Access Permission unprivileged access read-only
#define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access
#define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access read-only
#define ARM_MPU_AP_RO 6U ///!< MPU Access Permission read-only access
/** MPU Region Base Address Register Value
*
* The VALID bit is always set, so the region number encoded in the returned
* value is used directly by the MPU and MPU_RNR does not have to be
* programmed beforehand (see ARM_MPU_SetRegion).
*
* \param Region The region to be configured, number 0 to 15.
* \param BaseAddress The base address for the region.
*/
#define ARM_MPU_RBAR(Region, BaseAddress) \
  (((BaseAddress) & MPU_RBAR_ADDR_Msk) | \
  ((Region) & MPU_RBAR_REGION_Msk) | \
  (MPU_RBAR_VALID_Msk))
/**
* MPU Memory Access Attributes
*
* Packs the TEX/S/C/B bits of MPU_RASR; the result is meant to be passed as
* the AccessAttributes argument of \ref ARM_MPU_RASR_EX.
*
* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
* \param IsShareable Region is shareable between multiple bus masters.
* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
*/
#define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \
  ((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \
  (((IsShareable) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \
  (((IsCacheable) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \
  (((IsBufferable) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk))
/**
* MPU Region Attribute and Size Register Value
*
* The ENABLE bit is set unconditionally, so a region written with this value
* becomes active as soon as the register write completes.
*
* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
* \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_.
* \param SubRegionDisable Sub-region disable field.
* \param Size Region size of the region to be configured, for example 4K, 8K.
*/
#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \
  ((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \
  (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \
  (((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \
  (((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \
  (((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \
  (((MPU_RASR_ENABLE_Msk))))
/**
* MPU Region Attribute and Size Register Value
*
* Convenience wrapper around \ref ARM_MPU_RASR_EX that builds the access
* attributes from the individual TEX/S/C/B arguments via \ref ARM_MPU_ACCESS_.
*
* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
* \param IsShareable Region is shareable between multiple bus masters.
* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
* \param SubRegionDisable Sub-region disable field.
* \param Size Region size of the region to be configured, for example 4K, 8K.
*/
#define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \
  ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size)
/**
* MPU Memory Access Attribute for strongly ordered memory.
*  - TEX: 000b
*  - Shareable
*  - Non-cacheable
*  - Non-bufferable
*/
#define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U)
/**
* MPU Memory Access Attribute for device memory.
*  - TEX: 000b (if shareable) or 010b (if non-shareable)
*  - Shareable or non-shareable
*  - Non-cacheable
*  - Bufferable (if shareable) or non-bufferable (if non-shareable)
*
* \param IsShareable Configures the device memory as shareable or non-shareable.
*/
#define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U))
/**
* MPU Memory Access Attribute for normal memory.
*  - TEX: 1BBb (reflecting outer cacheability rules)
*  - Shareable or non-shareable
*  - Cacheable or non-cacheable (reflecting inner cacheability rules)
*  - Bufferable or non-bufferable (reflecting inner cacheability rules)
*
* \param OuterCp Configures the outer cache policy.
* \param InnerCp Configures the inner cache policy.
* \param IsShareable Configures the memory as shareable or non-shareable.
*/
#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) >> 1U), ((InnerCp) & 1U))
/* 2-bit inner/outer cache policy encodings, used as the OuterCp/InnerCp
 * arguments of ARM_MPU_ACCESS_NORMAL. */
/**
* MPU Memory Access Attribute non-cacheable policy.
*/
#define ARM_MPU_CACHEP_NOCACHE 0U
/**
* MPU Memory Access Attribute write-back, write and read allocate policy.
*/
#define ARM_MPU_CACHEP_WB_WRA 1U
/**
* MPU Memory Access Attribute write-through, no write allocate policy.
*/
#define ARM_MPU_CACHEP_WT_NWA 2U
/**
* MPU Memory Access Attribute write-back, no write allocate policy.
*/
#define ARM_MPU_CACHEP_WB_NWA 3U
/**
* Struct for a single MPU Region
*
* One RBAR/RASR register pair; tables of these rows are consumed by
* ARM_MPU_Load(). NOTE(review): when used with ARM_MPU_Load(), RBAR should
* carry the VALID bit and region number (as produced by ARM_MPU_RBAR) — confirm.
*/
typedef struct {
  uint32_t RBAR; //!< The region base address register value (RBAR)
  uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR
} ARM_MPU_Region_t;
/** Enable the MPU.
*
* Writes MPU_CTRL with the ENABLE bit set; the barrier sequence around the
* write is order-critical and must not be rearranged.
*
* \param MPU_Control Default access permissions for unconfigured regions
*        (OR-ed into MPU_CTRL together with the ENABLE bit).
*/
__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
{
  __DMB(); /* complete outstanding memory accesses before the MPU switches on */
  MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
  /* On cores with a MemManage fault, enable it so MPU violations are
   * reported there rather than escalating to HardFault. */
  SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif
  __DSB(); /* ensure the MPU configuration writes have taken effect */
  __ISB(); /* flush the pipeline so following instructions run under the new map */
}
/** Disable the MPU.
*
* Reverse of ARM_MPU_Enable(): the MemManage fault enable is cleared first,
* then the MPU ENABLE bit; the surrounding barriers are order-critical.
*/
__STATIC_INLINE void ARM_MPU_Disable(void)
{
  __DMB(); /* complete outstanding memory accesses before reconfiguring */
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
  /* Stop routing MPU violations to the MemManage fault handler. */
  SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif
  MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk;
  __DSB(); /* ensure the disable has taken effect */
  __ISB(); /* flush the pipeline so following instructions run with the MPU off */
}
/** Clear and disable the given MPU region.
*
* \param rnr Region number to be cleared.
*/
__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
{
  MPU->RNR = rnr;  /* select the region; must precede the RASR write */
  MPU->RASR = 0U;  /* writing 0 clears the ENABLE bit, disabling the region */
}
/** Configure an MPU region.
*
* MPU_RNR is not written here, so rbar is expected to carry the region
* number and VALID bit (as produced by \ref ARM_MPU_RBAR).
*
* \param rbar Value for RBAR register.
* \param rasr Value for RASR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
{
  MPU->RBAR = rbar;
  MPU->RASR = rasr;
}
/** Configure the given MPU region.
*
* Unlike ARM_MPU_SetRegion(), the region is selected explicitly through
* MPU_RNR before the RBAR/RASR writes.
*
* \param rnr Region number to be configured.
* \param rbar Value for RBAR register.
* \param rasr Value for RASR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr)
{
  MPU->RNR = rnr;  /* select the region; must precede the RBAR/RASR writes */
  MPU->RBAR = rbar;
  MPU->RASR = rasr;
}
/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_Load().
*
* The destination is written through a volatile pointer, one 32-bit word at
* a time and strictly in program order — unlike memcpy, which the compiler
* may reorder or widen.
*
* \param dst Destination data is copied to.
* \param src Source data is copied from.
* \param len Amount of data words to be copied.
*/
__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
{
  const uint32_t* const srcEnd = src + len;
  while (src != srcEnd)
  {
    *dst = *src;
    ++dst;
    ++src;
  }
}
/** Load the given number of MPU regions from a table.
*
* Regions are written in bursts of up to MPU_TYPE_RALIASES rows through the
* consecutive RBAR/RASR alias registers. NOTE(review): since MPU_RNR is not
* programmed here, each table RBAR value is expected to carry the VALID bit
* and region number (as produced by ARM_MPU_RBAR) — confirm against callers.
*
* \param table Pointer to the MPU configuration table.
* \param cnt Amount of regions to be configured.
*/
__STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt)
{
  /* Words per table row: one RBAR plus one RASR value. */
  const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
  while (cnt > MPU_TYPE_RALIASES) {
    /* Full burst through all alias registers, then advance the table. */
    ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize);
    table += MPU_TYPE_RALIASES;
    cnt -= MPU_TYPE_RALIASES;
  }
  /* Remaining (<= MPU_TYPE_RALIASES) regions. */
  ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize);
}
#endif

View File

@ -0,0 +1,5 @@
# Register the furi component with the ESP-IDF build system:
# all sources and public headers live under src/, and the component
# depends on the mlib and cmsis_core components.
idf_component_register(
    SRC_DIRS "src"
    INCLUDE_DIRS "src"
    REQUIRES mlib cmsis_core
)

636
components/furi/LICENSE.md Normal file
View File

@ -0,0 +1,636 @@
# GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 [Free Software Foundation, Inc.](http://fsf.org/)
Everyone is permitted to copy and distribute verbatim copies of this license
document, but changing it is not allowed.
## Preamble
The GNU General Public License is a free, copyleft license for software and
other kinds of works.
The licenses for most software and other practical works are designed to take
away your freedom to share and change the works. By contrast, the GNU General
Public License is intended to guarantee your freedom to share and change all
versions of a program--to make sure it remains free software for all its users.
We, the Free Software Foundation, use the GNU General Public License for most
of our software; it applies also to any other work released this way by its
authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our
General Public Licenses are designed to make sure that you have the freedom to
distribute copies of free software (and charge for them if you wish), that you
receive source code or can get it if you want it, that you can change the
software or use pieces of it in new free programs, and that you know you can do
these things.
To protect your rights, we need to prevent others from denying you these rights
or asking you to surrender the rights. Therefore, you have certain
responsibilities if you distribute copies of the software, or if you modify it:
responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for
a fee, you must pass on to the recipients the same freedoms that you received.
You must make sure that they, too, receive or can get the source code. And you
must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps:
1. assert copyright on the software, and
2. offer you this License giving you legal permission to copy, distribute
and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that
there is no warranty for this free software. For both users' and authors' sake,
the GPL requires that modified versions be marked as changed, so that their
problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified
versions of the software inside them, although the manufacturer can do so. This
is fundamentally incompatible with the aim of protecting users' freedom to
change the software. The systematic pattern of such abuse occurs in the area of
products for individuals to use, which is precisely where it is most
unacceptable. Therefore, we have designed this version of the GPL to prohibit
the practice for those products. If such problems arise substantially in other
domains, we stand ready to extend this provision to those domains in future
versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States
should not allow patents to restrict development and use of software on
general-purpose computers, but in those that do, we wish to avoid the special
danger that patents applied to a free program could make it effectively
proprietary. To prevent this, the GPL assures that patents cannot be used to
render the program non-free.
The precise terms and conditions for copying, distribution and modification
follow.
## TERMS AND CONDITIONS
### 0. Definitions.
*This License* refers to version 3 of the GNU General Public License.
*Copyright* also means copyright-like laws that apply to other kinds of works,
such as semiconductor masks.
*The Program* refers to any copyrightable work licensed under this License.
Each licensee is addressed as *you*. *Licensees* and *recipients* may be
individuals or organizations.
To *modify* a work means to copy from or adapt all or part of the work in a
fashion requiring copyright permission, other than the making of an exact copy.
The resulting work is called a *modified version* of the earlier work or a work
*based on* the earlier work.
A *covered work* means either the unmodified Program or a work based on the
Program.
To *propagate* a work means to do anything with it that, without permission,
would make you directly or secondarily liable for infringement under applicable
copyright law, except executing it on a computer or modifying a private copy.
Propagation includes copying, distribution (with or without modification),
making available to the public, and in some countries other activities as well.
To *convey* a work means any kind of propagation that enables other parties to
make or receive copies. Mere interaction with a user through a computer
network, with no transfer of a copy, is not conveying.
An interactive user interface displays *Appropriate Legal Notices* to the
extent that it includes a convenient and prominently visible feature that
1. displays an appropriate copyright notice, and
2. tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the work
under this License, and how to view a copy of this License.
If the interface presents a list of user commands or options, such as a menu, a
prominent item in the list meets this criterion.
### 1. Source Code.
The *source code* for a work means the preferred form of the work for making
modifications to it. *Object code* means any non-source form of a work.
A *Standard Interface* means an interface that either is an official standard
defined by a recognized standards body, or, in the case of interfaces specified
for a particular programming language, one that is widely used among developers
working in that language.
The *System Libraries* of an executable work include anything, other than the
work as a whole, that (a) is included in the normal form of packaging a Major
Component, but which is not part of that Major Component, and (b) serves only
to enable use of the work with that Major Component, or to implement a Standard
Interface for which an implementation is available to the public in source code
form. A *Major Component*, in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system (if any) on
which the executable work runs, or a compiler used to produce the work, or an
object code interpreter used to run it.
The *Corresponding Source* for a work in object code form means all the source
code needed to generate, install, and (for an executable work) run the object
code and to modify the work, including scripts to control those activities.
However, it does not include the work's System Libraries, or general-purpose
tools or generally available free programs which are used unmodified in
performing those activities but which are not part of the work. For example,
Corresponding Source includes interface definition files associated with source
files for the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require, such as
by intimate data communication or control flow between those subprograms and
other parts of the work.
The Corresponding Source need not include anything that users can regenerate
automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
### 2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on
the Program, and are irrevocable provided the stated conditions are met. This
License explicitly affirms your unlimited permission to run the unmodified
Program. The output from running a covered work is covered by this License only
if the output, given its content, constitutes a covered work. This License
acknowledges your rights of fair use or other equivalent, as provided by
copyright law.
You may make, run and propagate covered works that you do not convey, without
conditions so long as your license otherwise remains in force. You may convey
covered works to others for the sole purpose of having them make modifications
exclusively for you, or provide you with facilities for running those works,
provided that you comply with the terms of this License in conveying all
material for which you do not control copyright. Those thus making or running
the covered works for you must do so exclusively on your behalf, under your
direction and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the
conditions stated below. Sublicensing is not allowed; section 10 makes it
unnecessary.
### 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure
under any applicable law fulfilling obligations under article 11 of the WIPO
copyright treaty adopted on 20 December 1996, or similar laws prohibiting or
restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention is
effected by exercising rights under this License with respect to the covered
work, and you disclaim any intention to limit operation or modification of the
work as a means of enforcing, against the work's users, your or third parties'
legal rights to forbid circumvention of technological measures.
### 4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it,
in any medium, provided that you conspicuously and appropriately publish on
each copy an appropriate copyright notice; keep intact all notices stating that
this License and any non-permissive terms added in accord with section 7 apply
to the code; keep intact all notices of the absence of any warranty; and give
all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may
offer support or warranty protection for a fee.
### 5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it
from the Program, in the form of source code under the terms of section 4,
provided that you also meet all of these conditions:
- a) The work must carry prominent notices stating that you modified it, and
giving a relevant date.
- b) The work must carry prominent notices stating that it is released under
this License and any conditions added under section 7. This requirement
modifies the requirement in section 4 to *keep intact all notices*.
- c) You must license the entire work, as a whole, under this License to
anyone who comes into possession of a copy. This License will therefore
apply, along with any applicable section 7 additional terms, to the whole
of the work, and all its parts, regardless of how they are packaged. This
License gives no permission to license the work in any other way, but it
does not invalidate such permission if you have separately received it.
- d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your work need
not make them do so.
A compilation of a covered work with other separate and independent works,
which are not by their nature extensions of the covered work, and which are not
combined with it such as to form a larger program, in or on a volume of a
storage or distribution medium, is called an *aggregate* if the compilation and
its resulting copyright are not used to limit the access or legal rights of the
compilation's users beyond what the individual works permit. Inclusion of a
covered work in an aggregate does not cause this License to apply to the other
parts of the aggregate.
### 6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4
and 5, provided that you also convey the machine-readable Corresponding Source
under the terms of this License, in one of these ways:
- a) Convey the object code in, or embodied in, a physical product (including
a physical distribution medium), accompanied by the Corresponding Source
fixed on a durable physical medium customarily used for software
interchange.
- b) Convey the object code in, or embodied in, a physical product (including
a physical distribution medium), accompanied by a written offer, valid for
at least three years and valid for as long as you offer spare parts or
customer support for that product model, to give anyone who possesses the
object code either
1. a copy of the Corresponding Source for all the software in the product
that is covered by this License, on a durable physical medium
customarily used for software interchange, for a price no more than your
reasonable cost of physically performing this conveying of source, or
2. access to copy the Corresponding Source from a network server at no
charge.
- c) Convey individual copies of the object code with a copy of the written
offer to provide the Corresponding Source. This alternative is allowed only
occasionally and noncommercially, and only if you received the object code
with such an offer, in accord with subsection 6b.
- d) Convey the object code by offering access from a designated place
(gratis or for a charge), and offer equivalent access to the Corresponding
Source in the same way through the same place at no further charge. You
need not require recipients to copy the Corresponding Source along with the
object code. If the place to copy the object code is a network server, the
Corresponding Source may be on a different server (operated by you or a
third party) that supports equivalent copying facilities, provided you
maintain clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the Corresponding
Source, you remain obligated to ensure that it is available for as long as
needed to satisfy these requirements.
- e) Convey the object code using peer-to-peer transmission, provided you
inform other peers where the object code and Corresponding Source of the
work are being offered to the general public at no charge under subsection
6d.
A separable portion of the object code, whose source code is excluded from the
Corresponding Source as a System Library, need not be included in conveying the
object code work.
A *User Product* is either
1. a *consumer product*, which means any tangible personal property which is
normally used for personal, family, or household purposes, or
2. anything designed or sold for incorporation into a dwelling.
In determining whether a product is a consumer product, doubtful cases shall be
resolved in favor of coverage. For a particular product received by a
particular user, *normally used* refers to a typical or common use of that
class of product, regardless of the status of the particular user or of the way
in which the particular user actually uses, or expects or is expected to use,
the product. A product is a consumer product regardless of whether the product
has substantial commercial, industrial or non-consumer uses, unless such uses
represent the only significant mode of use of the product.
*Installation Information* for a User Product means any methods, procedures,
authorization keys, or other information required to install and execute
modified versions of a covered work in that User Product from a modified
version of its Corresponding Source. The information must suffice to ensure
that the continued functioning of the modified object code is in no case
prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as part of a
transaction in which the right of possession and use of the User Product is
transferred to the recipient in perpetuity or for a fixed term (regardless of
how the transaction is characterized), the Corresponding Source conveyed under
this section must be accompanied by the Installation Information. But this
requirement does not apply if neither you nor any third party retains the
ability to install modified object code on the User Product (for example, the
work has been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates for a
work that has been modified or installed by the recipient, or for the User
Product in which it has been modified or installed. Access to a network may be
denied when the modification itself materially and adversely affects the
operation of the network or violates the rules and protocols for communication
across the network.
Corresponding Source conveyed, and Installation Information provided, in accord
with this section must be in a format that is publicly documented (and with an
implementation available to the public in source code form), and must require
no special password or key for unpacking, reading or copying.
### 7. Additional Terms.
*Additional permissions* are terms that supplement the terms of this License by
making exceptions from one or more of its conditions. Additional permissions
that are applicable to the entire Program shall be treated as though they were
included in this License, to the extent that they are valid under applicable
law. If additional permissions apply only to part of the Program, that part may
be used separately under those permissions, but the entire Program remains
governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any
additional permissions from that copy, or from any part of it. (Additional
permissions may be written to require their own removal in certain cases when
you modify the work.) You may place additional permissions on material, added
by you to a covered work, for which you have or can give appropriate copyright
permission.
Notwithstanding any other provision of this License, for material you add to a
covered work, you may (if authorized by the copyright holders of that material)
supplement the terms of this License with terms:
- a) Disclaiming warranty or limiting liability differently from the terms of
sections 15 and 16 of this License; or
- b) Requiring preservation of specified reasonable legal notices or author
attributions in that material or in the Appropriate Legal Notices displayed
by works containing it; or
- c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in reasonable
ways as different from the original version; or
- d) Limiting the use for publicity purposes of names of licensors or authors
of the material; or
- e) Declining to grant rights under trademark law for use of some trade
names, trademarks, or service marks; or
- f) Requiring indemnification of licensors and authors of that material by
anyone who conveys the material (or modified versions of it) with
contractual assumptions of liability to the recipient, for any liability
that these contractual assumptions directly impose on those licensors and
authors.
All other non-permissive additional terms are considered *further restrictions*
within the meaning of section 10. If the Program as you received it, or any
part of it, contains a notice stating that it is governed by this License along
with a term that is a further restriction, you may remove that term. If a
license document contains a further restriction but permits relicensing or
conveying under this License, you may add to a covered work material governed
by the terms of that license document, provided that the further restriction
does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place,
in the relevant source files, a statement of the additional terms that apply to
those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a
separately written license, or stated as exceptions; the above requirements
apply either way.
### 8. Termination.
You may not propagate or modify a covered work except as expressly provided
under this License. Any attempt otherwise to propagate or modify it is void,
and will automatically terminate your rights under this License (including any
patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a
particular copyright holder is reinstated
- a) provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and
- b) permanently, if the copyright holder fails to notify you of the
violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated
permanently if the copyright holder notifies you of the violation by some
reasonable means, this is the first time you have received notice of violation
of this License (for any work) from that copyright holder, and you cure the
violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses
of parties who have received copies or rights from you under this License. If
your rights have been terminated and not permanently reinstated, you do not
qualify to receive new licenses for the same material under section 10.
### 9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy
of the Program. Ancillary propagation of a covered work occurring solely as a
consequence of using peer-to-peer transmission to receive a copy likewise does
not require acceptance. However, nothing other than this License grants you
permission to propagate or modify any covered work. These actions infringe
copyright if you do not accept this License. Therefore, by modifying or
propagating a covered work, you indicate your acceptance of this License to do
so.
### 10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a
license from the original licensors, to run, modify and propagate that work,
subject to this License. You are not responsible for enforcing compliance by
third parties with this License.
An *entity transaction* is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered work
results from an entity transaction, each party to that transaction who receives
a copy of the work also receives whatever licenses to the work the party's
predecessor in interest had or could give under the previous paragraph, plus a
right to possession of the Corresponding Source of the work from the
predecessor in interest, if the predecessor has it or can get it with
reasonable efforts.
You may not impose any further restrictions on the exercise of the rights
granted or affirmed under this License. For example, you may not impose a
license fee, royalty, or other charge for exercise of rights granted under this
License, and you may not initiate litigation (including a cross-claim or
counterclaim in a lawsuit) alleging that any patent claim is infringed by
making, using, selling, offering for sale, or importing the Program or any
portion of it.
### 11. Patents.
A *contributor* is a copyright holder who authorizes use under this License of
the Program or a work on which the Program is based. The work thus licensed is
called the contributor's *contributor version*.
A contributor's *essential patent claims* are all patent claims owned or
controlled by the contributor, whether already acquired or hereafter acquired,
that would be infringed by some manner, permitted by this License, of making,
using, or selling its contributor version, but do not include claims that would
be infringed only as a consequence of further modification of the contributor
version. For purposes of this definition, *control* includes the right to grant
patent sublicenses in a manner consistent with the requirements of this
License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent
license under the contributor's essential patent claims, to make, use, sell,
offer for sale, import and otherwise run, modify and propagate the contents of
its contributor version.
In the following three paragraphs, a *patent license* is any express agreement
or commitment, however denominated, not to enforce a patent (such as an express
permission to practice a patent or covenant not to sue for patent
infringement). To *grant* such a patent license to a party means to make such
an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the
Corresponding Source of the work is not available for anyone to copy, free of
charge and under the terms of this License, through a publicly available
network server or other readily accessible means, then you must either
1. cause the Corresponding Source to be so available, or
2. arrange to deprive yourself of the benefit of the patent license for this
particular work, or
3. arrange, in a manner consistent with the requirements of this License, to
extend the patent license to downstream recipients.
*Knowingly relying* means you have actual knowledge that, but for the patent
license, your conveying the covered work in a country, or your recipient's use
of the covered work in a country, would infringe one or more identifiable
patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you
convey, or propagate by procuring conveyance of, a covered work, and grant a
patent license to some of the parties receiving the covered work authorizing
them to use, propagate, modify or convey a specific copy of the covered work,
then the patent license you grant is automatically extended to all recipients
of the covered work and works based on it.
A patent license is *discriminatory* if it does not include within the scope of
its coverage, prohibits the exercise of, or is conditioned on the non-exercise
of one or more of the rights that are specifically granted under this License.
You may not convey a covered work if you are a party to an arrangement with a
third party that is in the business of distributing software, under which you
make payment to the third party based on the extent of your activity of
conveying the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory patent
license
- a) in connection with copies of the covered work conveyed by you (or copies
made from those copies), or
- b) primarily for and in connection with specific products or compilations
that contain the covered work, unless you entered into that arrangement, or
that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied
license or other defenses to infringement that may otherwise be available to
you under applicable patent law.
### 12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not excuse
you from the conditions of this License. If you cannot convey a covered work so
as to satisfy simultaneously your obligations under this License and any other
pertinent obligations, then as a consequence you may not convey it at all. For
example, if you agree to terms that obligate you to collect a royalty for
further conveying from those to whom you convey the Program, the only way you
could satisfy both those terms and this License would be to refrain entirely
from conveying the Program.
### 13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to
link or combine any covered work with a work licensed under version 3 of the
GNU Affero General Public License into a single combined work, and to convey
the resulting work. The terms of this License will continue to apply to the
part which is the covered work, but the special requirements of the GNU Affero
General Public License, section 13, concerning interaction through a network
will apply to the combination as such.
### 14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU
General Public License from time to time. Such new versions will be similar in
spirit to the present version, but may differ in detail to address new problems
or concerns.
Each version is given a distinguishing version number. If the Program specifies
that a certain numbered version of the GNU General Public License *or any later
version* applies to it, you have the option of following the terms and
conditions either of that numbered version or of any later version published by
the Free Software Foundation. If the Program does not specify a version number
of the GNU General Public License, you may choose any version ever published by
the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the
GNU General Public License can be used, that proxy's public statement of
acceptance of a version permanently authorizes you to choose that version for
the Program.
Later license versions may give you additional or different permissions.
However, no additional obligations are imposed on any author or copyright
holder as a result of your choosing to follow a later version.
### 15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE
LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER
PARTIES PROVIDE THE PROGRAM *AS IS* WITHOUT WARRANTY OF ANY KIND, EITHER
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE
QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
CORRECTION.
### 16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY
COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS
PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE
THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE
PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY
HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
### 17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot
be given local legal effect according to their terms, reviewing courts shall
apply local law that most closely approximates an absolute waiver of all civil
liability in connection with the Program, unless a warranty or assumption of
liability accompanies a copy of the Program in return for a fee.
## END OF TERMS AND CONDITIONS
### How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest possible
use to the public, the best way to achieve this is to make it free software
which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest to attach
them to the start of each source file to most effectively state the exclusion
of warranty; and each file should have at least the *copyright* line and a
pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short notice like
this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w` and `show c` should show the appropriate
parts of the General Public License. Of course, your program's commands might
be different; for a GUI interface, you would use an *about box*.
You should also get your employer (if you work as a programmer) or school, if
any, to sign a *copyright disclaimer* for the program, if necessary. For more
information on this, and how to apply and follow the GNU GPL, see
[http://www.gnu.org/licenses/](http://www.gnu.org/licenses/).
The GNU General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may consider
it more useful to permit linking proprietary applications with the library. If
this is what you want to do, use the GNU Lesser General Public License instead
of this License. But first, please read
[http://www.gnu.org/philosophy/why-not-lgpl.html](http://www.gnu.org/philosophy/why-not-lgpl.html).

View File

@ -0,0 +1,44 @@
/**
 * Core kernel types shared by all furi primitives: wait timeouts,
 * event-flag options/errors, and the common status-code enum.
 */
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include <furi_config.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Timeout values accepted by blocking kernel calls. */
typedef enum {
    FuriWaitForever = 0xFFFFFFFFU, ///< Block indefinitely (no timeout).
} FuriWait;

/**
 * Option bits and error results for event-flag wait operations.
 *
 * Error values are the FuriStatus codes below reinterpreted as unsigned:
 * a returned value with FuriFlagError set signals failure.
 */
typedef enum {
    FuriFlagWaitAny = 0x00000000U, ///< Wait for any flag (default).
    FuriFlagWaitAll = 0x00000001U, ///< Wait for all flags.
    FuriFlagNoClear = 0x00000002U, ///< Do not clear flags which have been specified to wait for.

    FuriFlagError = 0x80000000U, ///< Error indicator.
    FuriFlagErrorUnknown = 0xFFFFFFFFU, ///< FuriStatusError (-1).
    FuriFlagErrorTimeout = 0xFFFFFFFEU, ///< FuriStatusErrorTimeout (-2).
    FuriFlagErrorResource = 0xFFFFFFFDU, ///< FuriStatusErrorResource (-3).
    FuriFlagErrorParameter = 0xFFFFFFFCU, ///< FuriStatusErrorParameter (-4).
    FuriFlagErrorISR = 0xFFFFFFFAU, ///< FuriStatusErrorISR (-6).
} FuriFlag;

/** Status codes returned by furi kernel primitives. */
typedef enum {
    FuriStatusOk = 0, ///< Operation completed successfully.
    FuriStatusError =
        -1, ///< Unspecified RTOS error: run-time error but no other error message fits.
    FuriStatusErrorTimeout = -2, ///< Operation not completed within the timeout period.
    FuriStatusErrorResource = -3, ///< Resource not available.
    FuriStatusErrorParameter = -4, ///< Parameter error.
    FuriStatusErrorNoMemory =
        -5, ///< System is out of memory: it was impossible to allocate or reserve memory for the operation.
    FuriStatusErrorISR =
        -6, ///< Not allowed in ISR context: the function cannot be called from interrupt service routines.
    FuriStatusReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization.
} FuriStatus;

#ifdef __cplusplus
}
#endif

194
components/furi/src/check.c Normal file
View File

@ -0,0 +1,194 @@
#include "check.h"

#include "common_defines.h"
#include "furi_hal_console.h"

#include <freertos/FreeRTOS.h>
#include <freertos/task.h>

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
// Message slot read by the crash/halt handlers below. May hold NULL, one of
// the __FURI_*_MESSAGE_FLAG sentinel values, or a real string pointer.
// NOTE(review): the "MB_MEM2" section name comes from the STM32WB mailbox
// layout — confirm the ESP-IDF linker script actually defines it.
PLACE_IN_SECTION("MB_MEM2") const char* __furi_check_message = NULL;
// Register snapshot taken on crash: r0-r11 plus lr (13 slots). Only filled
// by the (currently disabled) capture shim below.
PLACE_IN_SECTION("MB_MEM2") uint32_t __furi_check_registers[13] = {0};

/** Load r12 value to __furi_check_message and store registers to __furi_check_registers */
// NOTE(review): original ARM/Thumb capture shim, disabled on this port
// (the mnemonics below are not valid for Xtensa/RISC-V ESP32 targets).
/*#define GET_MESSAGE_AND_STORE_REGISTERS() \
    asm volatile("ldr r11, =__furi_check_message \n" \
                 "str r12, [r11] \n" \
                 "ldr r12, =__furi_check_registers \n" \
                 "stm r12, {r0-r11} \n" \
                 "str lr, [r12, #48] \n" \
                 : \
                 : \
                 : "memory");*/

/** Restore registers and halt MCU
 *
 * - Always use it with GET_MESSAGE_AND_STORE_REGISTERS
 * - If debugger is(was) connected this routine will raise bkpt
 * - If debugger is not connected then endless loop
 *
 */
// NOTE(review): also disabled; mixes ARM mnemonics with RISC-V register
// names (a0-a12) — needs a proper port before re-enabling.
/*#define RESTORE_REGISTERS_AND_HALT_MCU(debug) \
    register bool a0 asm("a0") = debug; \
    asm volatile("cbnz a0, with_debugger%= \n" \
                 "ldr a12, =__furi_check_registers\n" \
                 "ldm a12, {a0-a11} \n" \
                 "loop%=: \n" \
                 "wfi \n" \
                 "b loop%= \n" \
                 "with_debugger%=: \n" \
                 "ldr a12, =__furi_check_registers\n" \
                 "ldm a12, {a0-a11} \n" \
                 "debug_loop%=: \n" \
                 "bkpt 0x00 \n" \
                 "wfi \n" \
                 "b debug_loop%= \n" \
                 : \
                 : "a"(a0) \
                 : "memory");*/

// NOTE(review): not part of stock ESP-IDF FreeRTOS; only referenced from
// commented-out code in __furi_print_heap_info — confirm it exists.
extern size_t xPortGetTotalHeapSize(void);
/** Print an unsigned 32-bit value to the console as decimal text. */
static void __furi_put_uint32_as_text(uint32_t data) {
    // Largest value is "4294967295": 10 digits plus the terminator.
    char tmp_str[12];
    // Use snprintf instead of the non-standard itoa(); it is guaranteed by
    // the C standard and always NUL-terminates within the buffer.
    snprintf(tmp_str, sizeof(tmp_str), "%" PRIu32, data);
    furi_hal_console_puts(tmp_str);
}
/** Print an unsigned 32-bit value to the console as lowercase hex.
 *
 * Note: no "0x" prefix is emitted (the old "0xFFFFFFFF" initializer was
 * misleading — itoa overwrote it from the first byte).
 */
static void __furi_put_uint32_as_hex(uint32_t data) {
    // At most 8 hex digits plus the terminator.
    char tmp_str[11];
    // snprintf replaces the non-standard itoa(); %x matches itoa's
    // lowercase digits.
    snprintf(tmp_str, sizeof(tmp_str), "%" PRIx32, data);
    furi_hal_console_puts(tmp_str);
}
/** Dump the captured register snapshot (r0-r11 and lr) to the console. */
static void __furi_print_register_info() {
    // r0-r11 occupy the first twelve slots of the snapshot array.
    for(uint8_t reg = 0; reg < 12; ++reg) {
        furi_hal_console_puts("\r\n\tr");
        __furi_put_uint32_as_text(reg);
        furi_hal_console_puts(" : ");
        __furi_put_uint32_as_hex(__furi_check_registers[reg]);
    }

    // The link register lives in the last slot.
    furi_hal_console_puts("\r\n\tlr : ");
    __furi_put_uint32_as_hex(__furi_check_registers[12]);
}
/** Print the current task's stack high-water mark in bytes. */
static void __furi_print_stack_info() {
    // FreeRTOS reports the watermark in words; convert to bytes.
    uint32_t watermark_bytes = uxTaskGetStackHighWaterMark(NULL) * 4;
    furi_hal_console_puts("\r\n\tstack watermark: ");
    __furi_put_uint32_as_text(watermark_bytes);
}
// Placeholder: on the original hardware this reported the BT core's
// hardfault state. Not available on this port yet, so the body is stubbed.
static void __furi_print_bt_stack_info() {
    // const FuriHalBtHardfaultInfo* fault_info = furi_hal_bt_get_hardfault_info();
    // if(fault_info == NULL) {
    //     furi_hal_console_puts("\r\n\tcore2: not faulted");
    // } else {
    //     furi_hal_console_puts("\r\n\tcore2: hardfaulted.\r\n\tPC: ");
    //     __furi_put_uint32_as_hex(fault_info->source_pc);
    //     furi_hal_console_puts("\r\n\tLR: ");
    //     __furi_put_uint32_as_hex(fault_info->source_lr);
    //     furi_hal_console_puts("\r\n\tSP: ");
    //     __furi_put_uint32_as_hex(fault_info->source_sp);
    // }
}
/** Print free-heap and minimum-ever-free-heap figures to the console. */
static void __furi_print_heap_info() {
    // Total heap size reporting (xPortGetTotalHeapSize) is not wired up yet.
    furi_hal_console_puts("\r\n\t heap free: ");
    __furi_put_uint32_as_text(xPortGetFreeHeapSize());

    furi_hal_console_puts("\r\n\t heap watermark: ");
    __furi_put_uint32_as_text(xPortGetMinimumEverFreeHeapSize());
}
/** Print a "[context] " prefix: ISR number, task name, or "main". */
static void __furi_print_name(bool isr) {
    if(isr) {
        // In interrupt context, print the active exception number instead.
        furi_hal_console_puts("[ISR ");
        __furi_put_uint32_as_text(__get_IPSR());
        furi_hal_console_puts("] ");
        return;
    }

    const char* task_name = pcTaskGetName(NULL);
    if(task_name == NULL) {
        furi_hal_console_puts("[main] ");
    } else {
        furi_hal_console_puts("[");
        furi_hal_console_puts(task_name);
        furi_hal_console_puts("] ");
    }
}
/** Final crash handler: prints a crash report to the console and aborts.
 *
 * Expects __furi_check_message to hold NULL, a sentinel flag, or a message
 * string (see check.h). Never returns.
 */
FURI_NORETURN void __furi_crash_implementation() {
    __disable_irq(); // no further interrupts or scheduling past this point
    // GET_MESSAGE_AND_STORE_REGISTERS();
    bool isr = FURI_IS_IRQ_MODE();
    // Decode the message slot: NULL and the sentinel flag values become
    // canned messages (__FURI_*_MESSAGE_FLAG are defined in check.h).
    if(__furi_check_message == NULL) {
        __furi_check_message = "Fatal Error";
    } else if(__furi_check_message == (void*)__FURI_ASSERT_MESSAGE_FLAG) {
        __furi_check_message = "furi_assert failed";
    } else if(__furi_check_message == (void*)__FURI_CHECK_MESSAGE_FLAG) {
        __furi_check_message = "furi_check failed";
    }
    // Red ANSI color + crash banner, then context name and message.
    furi_hal_console_puts("\r\n\033[0;31m[CRASH]");
    __furi_print_name(isr);
    furi_hal_console_puts(__furi_check_message);
    __furi_print_register_info();
    if(!isr) {
        // The stack watermark is only meaningful in a task context.
        __furi_print_stack_info();
    }
    __furi_print_heap_info();
    __furi_print_bt_stack_info();
    // Check if debug enabled by DAP
    // https://developer.arm.com/documentation/ddi0403/d/Debug-Architecture/ARMv7-M-Debug/Debug-register-support-in-the-SCS/Debug-Halting-Control-and-Status-Register--DHCSR?lang=en
    // bool debug = CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk;
    // NOTE(review): debugger detection is not ported; debug is forced true,
    // so release builds currently never take the reboot path below.
    bool debug = true;
#ifdef FURI_NDEBUG
    if(debug) {
#endif
    furi_hal_console_puts("\r\nSystem halted. Connect debugger for more info\r\n");
    furi_hal_console_puts("\033[0m\r\n"); // reset terminal colors
    // furi_hal_debug_enable();
    esp_system_abort("crash");
#ifdef FURI_NDEBUG
    } else {
        // Release build: stash a fault pointer for the next boot, then reboot.
        uint32_t ptr = (uint32_t)__furi_check_message;
        // If the message is not in flash it will not survive the reboot.
        if(ptr < FLASH_BASE || ptr > (FLASH_BASE + FLASH_SIZE)) {
            ptr = (uint32_t) "Check serial logs";
        }
        furi_hal_rtc_set_fault_data(ptr);
        furi_hal_console_puts("\r\nRebooting system.\r\n");
        furi_hal_console_puts("\033[0m\r\n");
        esp_system_abort("crash");
    }
#endif
    __builtin_unreachable();
}
/** Halt the system: print a halt report and spin forever.
 *
 * Declared FURI_NORETURN, so it must never return. The original register
 * restore/bkpt routine is not ported yet, so an explicit idle loop keeps
 * the noreturn promise.
 */
FURI_NORETURN void __furi_halt_implementation() {
    __disable_irq(); // no further interrupts or scheduling past this point
    // GET_MESSAGE_AND_STORE_REGISTERS();
    bool isr = FURI_IS_IRQ_MODE();
    if(__furi_check_message == NULL) {
        __furi_check_message = "System halt requested.";
    }
    // Red ANSI color + halt banner, then context name and message.
    furi_hal_console_puts("\r\n\033[0;31m[HALT]");
    __furi_print_name(isr);
    furi_hal_console_puts(__furi_check_message);
    furi_hal_console_puts("\r\nSystem halted. Bye-bye!\r\n");
    furi_hal_console_puts("\033[0m\r\n");
    // Check if debug enabled by DAP
    // https://developer.arm.com/documentation/ddi0403/d/Debug-Architecture/ARMv7-M-Debug/Debug-register-support-in-the-SCS/Debug-Halting-Control-and-Status-Register--DHCSR?lang=en
    // bool debug = CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk;
    // RESTORE_REGISTERS_AND_HALT_MCU(true);
    // Bug fix: with the halt routine commented out, control previously fell
    // into __builtin_unreachable(), which is undefined behavior if reached.
    // Spin forever instead — interrupts are disabled, so this truly halts.
    for(;;) {
    }
}

109
components/furi/src/check.h Normal file
View File

@ -0,0 +1,109 @@
/**
 * @file check.h
 *
 * Furi crash and assert functions.
 *
 * The main problem with crashing is that you can't do anything without disturbing registers,
 * and if you disturb registers, you won't be able to see the correct register values in the debugger.
 *
 * Current solution works around it by passing the message through r12 and doing some magic with registers in crash function.
 * r0-r10 are stored in the ram2 on crash routine start and restored at the end.
 * The only register that is going to be lost is r11.
 *
 * NOTE(review): the register-passing scheme described above is the original
 * ARM design; on this port the asm shims in check.c are commented out and
 * the message/location are reported through ESP_LOGE instead.
 */
#pragma once

#include <m-core.h>
#include <esp_log.h>

#ifdef __cplusplus
extern "C" {
#define FURI_NORETURN [[noreturn]]
#else
#include <stdnoreturn.h>
#define FURI_NORETURN noreturn
#endif

// Flags instead of pointers will save ~4 bytes on furi_assert and furi_check calls.
// NOTE(review): when no message is supplied, these small integer sentinels
// flow into the ESP_LOGE "%s" argument of __furi_crash/__furi_halt below,
// where they would be dereferenced as pointers — verify the no-message path.
#define __FURI_ASSERT_MESSAGE_FLAG (0x01)
#define __FURI_CHECK_MESSAGE_FLAG (0x02)

/** Crash system (never returns; implemented in check.c). */
FURI_NORETURN void __furi_crash_implementation();

/** Halt system (never returns; implemented in check.c). */
FURI_NORETURN void __furi_halt_implementation();

/** Crash system with message: logs message and call site, then crashes. */
#define __furi_crash(message) \
    do { \
        ESP_LOGE("crash", "%s\n\tat %s:%d", (message) ? (message) : "", __FILE__, __LINE__); \
        __furi_crash_implementation(); \
    } while(0)

/** Crash system
 *
 * @param optional message (const char*)
 */
#define furi_crash(...) M_APPLY(__furi_crash, M_IF_EMPTY(__VA_ARGS__)((NULL), (__VA_ARGS__)))

/** Halt system with message: logs message and call site, then halts. */
#define __furi_halt(message) \
    do { \
        ESP_LOGE("halt", "%s\n\tat %s:%d", (message) ? (message) : "", __FILE__, __LINE__); \
        __furi_halt_implementation(); \
    } while(0)

/** Halt system
 *
 * @param optional message (const char*)
 */
#define furi_halt(...) M_APPLY(__furi_halt, M_IF_EMPTY(__VA_ARGS__)((NULL), (__VA_ARGS__)))

/** Check condition and crash if check failed (logs the failing expression). */
#define __furi_check(__e, __m) \
    do { \
        if(!(__e)) { \
            ESP_LOGE("check", "%s", #__e); \
            __furi_crash(__m); \
        } \
    } while(0)

/** Check condition and crash if failed
 *
 * Active in all build types, unlike furi_assert.
 *
 * @param condition to check
 * @param optional message (const char*)
 */
#define furi_check(...) \
    M_APPLY(__furi_check, M_DEFAULT_ARGS(2, (__FURI_CHECK_MESSAGE_FLAG), __VA_ARGS__))

/** Only in debug build: Assert condition and crash if assert failed */
#ifdef FURI_DEBUG
#define __furi_assert(__e, __m) \
    do { \
        if(!(__e)) { \
            ESP_LOGE("assert", "%s", #__e); \
            __furi_crash(__m); \
        } \
    } while(0)
#else
// Release build: note that both arguments are still EVALUATED (side effects
// run) — only the check itself is elided.
#define __furi_assert(__e, __m) \
    do { \
        ((void)(__e)); \
        ((void)(__m)); \
    } while(0)
#endif

/** Assert condition and crash if failed
 *
 * @warning only will do check if firmware compiled in debug mode
 *
 * @param condition to check
 * @param optional message (const char*)
 */
#define furi_assert(...) \
    M_APPLY(__furi_assert, M_DEFAULT_ARGS(2, (__FURI_ASSERT_MESSAGE_FLAG), __VA_ARGS__))

#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,60 @@
/** Common compiler attributes plus IRQ-state and critical-section helpers. */
#pragma once

#include "core_defines.h"

#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

// Provides the __get_PRIMASK/__get_IPSR intrinsics used below.
#include <cmsis_compiler.h>

#ifndef FURI_WARN_UNUSED
#define FURI_WARN_UNUSED __attribute__((warn_unused_result))
#endif

#ifndef FURI_WEAK
#define FURI_WEAK __attribute__((weak))
#endif

#ifndef FURI_PACKED
#define FURI_PACKED __attribute__((packed))
#endif

// True when interrupts are globally masked (PRIMASK set).
#ifndef FURI_IS_IRQ_MASKED
#define FURI_IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
#endif

// True when executing in exception/interrupt context (IPSR non-zero).
#ifndef FURI_IS_IRQ_MODE
#define FURI_IS_IRQ_MODE() (__get_IPSR() != 0U)
#endif

// True when blocking kernel APIs must not be used.
#ifndef FURI_IS_ISR
#define FURI_IS_ISR() (FURI_IS_IRQ_MODE() || FURI_IS_IRQ_MASKED())
#endif

/** State captured on critical-section entry; pass it back on exit. */
typedef struct {
    uint32_t isrm; // saved interrupt status from the ISR entry path
    bool from_isr; // entered from interrupt context
    bool kernel_running; // FreeRTOS scheduler was running on entry
} __FuriCriticalInfo;

/** Enter a critical section; returns state for __furi_critical_exit(). */
__FuriCriticalInfo __furi_critical_enter(void);

/** Leave a critical section entered with __furi_critical_enter(). */
void __furi_critical_exit(__FuriCriticalInfo info);

// Declares a local variable, so ENTER and EXIT must share a scope and the
// pair can be used at most once per scope.
#ifndef FURI_CRITICAL_ENTER
#define FURI_CRITICAL_ENTER() __FuriCriticalInfo __furi_critical_info = __furi_critical_enter();
#endif

#ifndef FURI_CRITICAL_EXIT
#define FURI_CRITICAL_EXIT() __furi_critical_exit(__furi_critical_info);
#endif

#ifndef FURI_CHECK_RETURN
#define FURI_CHECK_RETURN __attribute__((__warn_unused_result__))
#endif

#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,116 @@
/** Generic helper macros (min/max, bit ops, attributes) shared across furi. */
#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#define FURI_RETURNS_NONNULL __attribute__((returns_nonnull))

// Single-evaluation MAX via GCC/Clang statement expressions.
#ifndef MAX
#define MAX(a, b) \
    ({ \
        __typeof__(a) _a = (a); \
        __typeof__(b) _b = (b); \
        _a > _b ? _a : _b; \
    })
#endif

// Single-evaluation MIN.
#ifndef MIN
#define MIN(a, b) \
    ({ \
        __typeof__(a) _a = (a); \
        __typeof__(b) _b = (b); \
        _a < _b ? _a : _b; \
    })
#endif

// Bug fix: evaluate the argument exactly once. The previous form expanded
// it three times, so e.g. ABS(i++) incremented i repeatedly — inconsistent
// with the single-evaluation MIN/MAX above.
#ifndef ABS
#define ABS(a) \
    ({ \
        __typeof__(a) _a = (a); \
        _a < 0 ? -_a : _a; \
    })
#endif

// Integer ceiling division: how many b-sized chunks are needed to hold a.
#ifndef ROUND_UP_TO
#define ROUND_UP_TO(a, b) \
    ({ \
        __typeof__(a) _a = (a); \
        __typeof__(b) _b = (b); \
        _a / _b + !!(_a % _b); \
    })
#endif

// Note the argument order: CLAMP(value, upper_bound, lower_bound).
#ifndef CLAMP
#define CLAMP(x, upper, lower) (MIN(upper, MAX(x, lower)))
#endif

// Element count of a true array (invalid on pointers/decayed parameters).
#ifndef COUNT_OF
#define COUNT_OF(x) (sizeof(x) / sizeof(x[0]))
#endif

// Swap two lvalues of the same type.
// Consistency fix: use __typeof__ like the rest of this file (plain typeof
// is rejected under strict -std modes before C23).
#ifndef FURI_SWAP
#define FURI_SWAP(x, y) \
    do { \
        __typeof__(x) SWAP = x; \
        x = y; \
        y = SWAP; \
    } while(0)
#endif

// Place a symbol in a named linker section.
#ifndef PLACE_IN_SECTION
#define PLACE_IN_SECTION(x) __attribute__((section(x)))
#endif

#ifndef ALIGN
#define ALIGN(n) __attribute__((aligned(n)))
#endif

#ifndef __weak
#define __weak __attribute__((weak))
#endif

#ifndef UNUSED
#define UNUSED(X) (void)(X)
#endif

#ifndef STRINGIFY
#define STRINGIFY(x) #x
#endif

#ifndef TOSTRING
#define TOSTRING(x) STRINGIFY(x)
#endif

#ifndef CONCATENATE
#define CONCATENATE(a, b) CONCATENATE_(a, b)
#define CONCATENATE_(a, b) a##b
#endif

// Byte-reverse a 32-bit value. Kept as a plain expression so it remains
// usable in constant initializers; beware: x is evaluated four times.
#ifndef REVERSE_BYTES_U32
#define REVERSE_BYTES_U32(x) \
    ((((x)&0x000000FF) << 24) | (((x)&0x0000FF00) << 8) | (((x)&0x00FF0000) >> 8) | \
     (((x)&0xFF000000) >> 24))
#endif

// Extract bit n of x (0 or 1).
#ifndef FURI_BIT
#define FURI_BIT(x, n) (((x) >> (n)) & 1)
#endif

// Set bit n of x in place; the temporary keeps the shift in x's type so
// wide types (e.g. uint64_t) shift correctly.
#ifndef FURI_BIT_SET
#define FURI_BIT_SET(x, n) \
    ({ \
        __typeof__(x) _x = (1); \
        (x) |= (_x << (n)); \
    })
#endif

// Clear bit n of x in place.
#ifndef FURI_BIT_CLEAR
#define FURI_BIT_CLEAR(x, n) \
    ({ \
        __typeof__(x) _x = (1); \
        (x) &= ~(_x << (n)); \
    })
#endif

// Compiler-only barrier: prevents instruction reordering across this point
// (no hardware memory fence is emitted).
#define FURI_SW_MEMBARRIER() asm volatile("" : : : "memory")

#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,34 @@
#include "common_defines.h"
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>

/* Spinlock guarding task-context critical sections.
 * Fixed: a portMUX_TYPE must be statically initialized with
 * portMUX_INITIALIZER_UNLOCKED -- the ESP-IDF port validates the mux and a
 * zero-initialized one is invalid. */
static portMUX_TYPE prv_critical_mutex = portMUX_INITIALIZER_UNLOCKED;

/** Enter a critical section using the primitive appropriate for the
 * current context: ISR, scheduler running, or pre-scheduler bare metal.
 *
 * @return state that must be passed unchanged to __furi_critical_exit
 *
 * NOTE(review): ESP-IDF's native ISR API is taskENTER_CRITICAL_ISR(&mux);
 * confirm taskENTER_CRITICAL_FROM_ISR() resolves on this port. */
__FuriCriticalInfo __furi_critical_enter(void) {
    __FuriCriticalInfo info;
    info.isrm = 0;
    info.from_isr = FURI_IS_ISR();
    info.kernel_running = (xTaskGetSchedulerState() == taskSCHEDULER_RUNNING);
    if(info.from_isr) {
        info.isrm = taskENTER_CRITICAL_FROM_ISR();
    } else if(info.kernel_running) {
        taskENTER_CRITICAL(&prv_critical_mutex);
    } else {
        // Scheduler not started yet: masking interrupts is sufficient
        __disable_irq();
    }
    return info;
}

/** Leave a critical section opened by __furi_critical_enter, restoring
 * exactly the mechanism that was used on entry. */
void __furi_critical_exit(__FuriCriticalInfo info) {
    if(info.from_isr) {
        taskEXIT_CRITICAL_FROM_ISR(info.isrm);
    } else if(info.kernel_running) {
        taskEXIT_CRITICAL(&prv_critical_mutex);
    } else {
        __enable_irq();
    }
}

View File

@ -0,0 +1,140 @@
#include "event_flag.h"
#include "common_defines.h"
#include "check.h"
#include <freertos/FreeRTOS.h>
#include <freertos/event_groups.h>
#define FURI_EVENT_FLAG_MAX_BITS_EVENT_GROUPS 24U
#define FURI_EVENT_FLAG_INVALID_BITS (~((1UL << FURI_EVENT_FLAG_MAX_BITS_EVENT_GROUPS) - 1U))
FuriEventFlag* furi_event_flag_alloc() {
furi_assert(!FURI_IS_IRQ_MODE());
EventGroupHandle_t handle = xEventGroupCreate();
furi_check(handle);
return ((FuriEventFlag*)handle);
}
/** Destroy an event flag created by furi_event_flag_alloc. */
void furi_event_flag_free(FuriEventFlag* instance) {
    furi_assert(!FURI_IS_IRQ_MODE()); // deletion is not ISR-safe
    EventGroupHandle_t group = (EventGroupHandle_t)instance;
    vEventGroupDelete(group);
}
/** Set event flags; callable from both task and ISR context.
 *
 * @param instance event flag instance (non-NULL)
 * @param[in] flags bits to set; only the low 24 event-group bits are valid
 * @return flags after setting, or an error code cast to uint32_t
 */
uint32_t furi_event_flag_set(FuriEventFlag* instance, uint32_t flags) {
    furi_assert(instance);
    furi_assert((flags & FURI_EVENT_FLAG_INVALID_BITS) == 0U);
    EventGroupHandle_t hEventGroup = (EventGroupHandle_t)instance;
    uint32_t rflags;
    BaseType_t yield;
    if(FURI_IS_IRQ_MODE()) {
        yield = pdFALSE;
        // pdFAIL here means the deferred-set request could not be queued
        if(xEventGroupSetBitsFromISR(hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
            // NOTE(review): furi_event_flag_clear returns FuriStatusErrorResource
            // for the analogous failure -- confirm both enums map to the same value.
            rflags = (uint32_t)FuriFlagErrorResource;
        } else {
            rflags = flags;
            portYIELD_FROM_ISR(yield);
        }
    } else {
        rflags = xEventGroupSetBits(hEventGroup, (EventBits_t)flags);
    }
    /* Return event flags after setting */
    return (rflags);
}
/** Clear event flags; callable from both task and ISR context.
 *
 * @param instance event flag instance (non-NULL)
 * @param[in] flags bits to clear
 * @return event flags BEFORE clearing, or an error code cast to uint32_t
 */
uint32_t furi_event_flag_clear(FuriEventFlag* instance, uint32_t flags) {
    furi_assert(instance);
    furi_assert((flags & FURI_EVENT_FLAG_INVALID_BITS) == 0U);
    EventGroupHandle_t hEventGroup = (EventGroupHandle_t)instance;
    uint32_t rflags;
    if(FURI_IS_IRQ_MODE()) {
        // Snapshot current bits first: the ISR clear is deferred, so the
        // return value is the pre-clear state by construction.
        rflags = xEventGroupGetBitsFromISR(hEventGroup);
        if(xEventGroupClearBitsFromISR(hEventGroup, (EventBits_t)flags) == pdFAIL) {
            // NOTE(review): furi_event_flag_set uses FuriFlagErrorResource for
            // the analogous failure -- confirm this difference is intended.
            rflags = (uint32_t)FuriStatusErrorResource;
        } else {
            /* xEventGroupClearBitsFromISR only registers clear operation in the timer command queue. */
            /* Yield is required here otherwise clear operation might not execute in the right order. */
            /* See https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/93 for more info. */
            portYIELD_FROM_ISR(pdTRUE);
        }
    } else {
        rflags = xEventGroupClearBits(hEventGroup, (EventBits_t)flags);
    }
    /* Return event flags before clearing */
    return (rflags);
}
/** Read the current event flags without modifying them.
 * Safe in both task and ISR context. */
uint32_t furi_event_flag_get(FuriEventFlag* instance) {
    furi_assert(instance);
    EventGroupHandle_t group = (EventGroupHandle_t)instance;
    uint32_t current_flags;
    if(FURI_IS_IRQ_MODE()) {
        current_flags = (uint32_t)xEventGroupGetBitsFromISR(group);
    } else {
        current_flags = (uint32_t)xEventGroupGetBits(group);
    }
    return current_flags;
}
/** Block until the requested flags are set, or the timeout expires.
 * Task context only.
 *
 * @param instance event flag instance (non-NULL)
 * @param[in] flags bits to wait for (low 24 bits only)
 * @param[in] options FuriFlagWaitAll / FuriFlagNoClear combination
 * @param[in] timeout wait limit in ticks
 * @return flags observed before clearing, or an error code cast to uint32_t
 */
uint32_t furi_event_flag_wait(
    FuriEventFlag* instance,
    uint32_t flags,
    uint32_t options,
    uint32_t timeout) {
    furi_assert(!FURI_IS_IRQ_MODE());
    furi_assert(instance);
    furi_assert((flags & FURI_EVENT_FLAG_INVALID_BITS) == 0U);

    EventGroupHandle_t group = (EventGroupHandle_t)instance;
    const BaseType_t wait_all = (options & FuriFlagWaitAll) ? pdTRUE : pdFAIL;
    // Clear-on-exit unless the caller explicitly opts out
    const BaseType_t clear_on_exit = (options & FuriFlagNoClear) ? pdFAIL : pdTRUE;

    uint32_t rflags = xEventGroupWaitBits(
        group, (EventBits_t)flags, clear_on_exit, wait_all, (TickType_t)timeout);

    // Decide whether the wait condition was actually met
    bool satisfied;
    if(options & FuriFlagWaitAll) {
        satisfied = ((rflags & flags) == flags);
    } else {
        satisfied = ((rflags & flags) != 0U);
    }
    if(!satisfied) {
        rflags = (timeout > 0U) ? (uint32_t)FuriStatusErrorTimeout :
                                  (uint32_t)FuriStatusErrorResource;
    }
    /* Return event flags before clearing */
    return rflags;
}

View File

@ -0,0 +1,70 @@
/**
 * @file event_flag.h
 * Furi Event Flag
 */
#pragma once
#include "base.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void FuriEventFlag;
/** Allocate FuriEventFlag
 *
 * Fixed: prototype now declares (void) -- pre-C23, an empty parameter
 * list means "unspecified arguments" and disables argument checking.
 *
 * @return pointer to FuriEventFlag
 */
FuriEventFlag* furi_event_flag_alloc(void);
/** Deallocate FuriEventFlag
 *
 * @param instance pointer to FuriEventFlag
 */
void furi_event_flag_free(FuriEventFlag* instance);
/** Set flags
 *
 * @param instance pointer to FuriEventFlag
 * @param[in] flags The flags
 *
 * @return Resulting flags or error (FuriStatus)
 */
uint32_t furi_event_flag_set(FuriEventFlag* instance, uint32_t flags);
/** Clear flags
 *
 * @param instance pointer to FuriEventFlag
 * @param[in] flags The flags
 *
 * @return Flags before clearing, or error (FuriStatus)
 */
uint32_t furi_event_flag_clear(FuriEventFlag* instance, uint32_t flags);
/** Get flags
 *
 * @param instance pointer to FuriEventFlag
 *
 * @return Resulting flags
 */
uint32_t furi_event_flag_get(FuriEventFlag* instance);
/** Wait flags
 *
 * @param instance pointer to FuriEventFlag
 * @param[in] flags The flags
 * @param[in] options The option flags (FuriFlagWaitAll / FuriFlagNoClear)
 * @param[in] timeout The timeout in ticks
 *
 * @return Resulting flags or error (FuriStatus)
 */
uint32_t furi_event_flag_wait(
    FuriEventFlag* instance,
    uint32_t flags,
    uint32_t options,
    uint32_t timeout);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,3 @@
#pragma once
/* Number of thread priority levels available to the Furi kernel.
 * NOTE(review): must not exceed FreeRTOS configMAX_PRIORITIES -- confirm. */
#define FURI_CONFIG_THREAD_MAX_PRIORITIES (32)

View File

@ -0,0 +1,109 @@
#include "furi_hal_console.h"
#include "common_defines.h"
#include "furi_string.h"
#include <stdbool.h>
#include <esp_log.h>
#include <memory.h>
#define TAG "FuriHalConsole"
/* Baudrates carried over from the Flipper Zero implementation; currently
 * unused because all output goes through esp_log (UART code is stubbed). */
#ifdef HEAP_PRINT_DEBUG
#define CONSOLE_BAUDRATE 1843200
#else
#define CONSOLE_BAUDRATE 230400
#endif
/* Console singleton state. */
typedef struct {
    bool alive; /* when false, tx functions silently drop data */
    FuriHalConsoleTxCallback tx_callback; /* optional mirror of transmitted data */
    void* tx_callback_context;
} FuriHalConsole;
FuriHalConsole furi_hal_console = {
    .alive = false,
    .tx_callback = NULL,
    .tx_callback_context = NULL,
};
/** Initialize the console and mark it alive.
 * UART bring-up from the original Flipper port is stubbed out; output is
 * routed through esp_log instead (see furi_hal_console_tx). */
void furi_hal_console_init(void) {
    // furi_hal_uart_init(FuriHalUartIdUSART1, CONSOLE_BAUDRATE);
    furi_hal_console.alive = true;
}

/** Re-enable console output after furi_hal_console_disable. */
void furi_hal_console_enable(void) {
    // furi_hal_uart_set_irq_cb(FuriHalUartIdUSART1, NULL, NULL);
    // while(!LL_USART_IsActiveFlag_TC(USART1))
    //     ;
    // furi_hal_uart_set_br(FuriHalUartIdUSART1, CONSOLE_BAUDRATE);
    furi_hal_console.alive = true;
}

/** Disable console output; subsequent tx calls become no-ops. */
void furi_hal_console_disable(void) {
    // while(!LL_USART_IsActiveFlag_TC(USART1))
    //     ;
    furi_hal_console.alive = false;
}
/* Install (or clear, with NULL) a callback that mirrors every transmitted
 * buffer. The critical section makes the callback/context pair update
 * atomically with respect to furi_hal_console_tx. */
void furi_hal_console_set_tx_callback(FuriHalConsoleTxCallback callback, void* context) {
    FURI_CRITICAL_ENTER();
    furi_hal_console.tx_callback = callback;
    furi_hal_console.tx_callback_context = context;
    FURI_CRITICAL_EXIT();
}
/** Transmit a buffer to the console (esp_log backend).
 *
 * @param buffer data to send (need not be NUL-terminated)
 * @param buffer_size number of bytes to send
 */
void furi_hal_console_tx(const uint8_t* buffer, size_t buffer_size) {
    if(!furi_hal_console.alive) return;
    FURI_CRITICAL_ENTER();
    // Mirror data to the registered callback, if any
    if(furi_hal_console.tx_callback) {
        furi_hal_console.tx_callback(buffer, buffer_size, furi_hal_console.tx_callback_context);
    }
    // Fixed: use "%.*s" to bound the read instead of copying through a VLA
    // (char safe_buffer[buffer_size + 1] could overflow the task stack for
    // large payloads).
    // NOTE(review): ESP_LOGI inside a critical section may block on the log
    // lock/UART; consider moving it after FURI_CRITICAL_EXIT -- verify.
    ESP_LOGI(TAG, "%.*s", (int)buffer_size, (const char*)buffer);
    // furi_hal_uart_tx(FuriHalUartIdUSART1, (uint8_t*)buffer, buffer_size);
    //// Wait for TC flag to be raised for last char
    // while(!LL_USART_IsActiveFlag_TC(USART1))
    //     ;
    FURI_CRITICAL_EXIT();
}
/** Transmit a buffer followed by a line break.
 * esp_log already appends a newline per entry, so only the payload is logged.
 *
 * @param buffer data to send (need not be NUL-terminated)
 * @param buffer_size number of bytes to send
 */
void furi_hal_console_tx_with_new_line(const uint8_t* buffer, size_t buffer_size) {
    if(!furi_hal_console.alive) return;
    FURI_CRITICAL_ENTER();
    // Fixed: bounded "%.*s" replaces the unbounded VLA copy that could
    // overflow the task stack for large payloads.
    ESP_LOGI(TAG, "%.*s", (int)buffer_size, (const char*)buffer);
    // Transmit data
    // furi_hal_uart_tx(FuriHalUartIdUSART1, (uint8_t*)buffer, buffer_size);
    // Transmit new line symbols
    // furi_hal_uart_tx(FuriHalUartIdUSART1, (uint8_t*)"\r\n", 2);
    // Wait for TC flag to be raised for last char
    // while(!LL_USART_IsActiveFlag_TC(USART1))
    //     ;
    FURI_CRITICAL_EXIT();
}
/** printf-style console output: format into a temporary FuriString,
 * transmit it, then release the buffer. */
void furi_hal_console_printf(const char format[], ...) {
    va_list args;
    va_start(args, format);
    FuriString* message = furi_string_alloc_vprintf(format, args);
    va_end(args);
    furi_hal_console_tx(
        (const uint8_t*)furi_string_get_cstr(message), furi_string_size(message));
    furi_string_free(message);
}
/** Transmit a NUL-terminated string to the console. */
void furi_hal_console_puts(const char* data) {
    size_t length = strlen(data);
    furi_hal_console_tx((const uint8_t*)data, length);
}

View File

@ -0,0 +1,37 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Callback invoked with every buffer passed to furi_hal_console_tx. */
typedef void (*FuriHalConsoleTxCallback)(const uint8_t* buffer, size_t size, void* context);
/* Fixed: prototypes now declare (void) -- pre-C23, an empty parameter list
 * means "unspecified arguments" and disables argument checking. */
void furi_hal_console_init(void);
void furi_hal_console_enable(void);
void furi_hal_console_disable(void);
/** Install (or clear, with NULL) a mirror callback for transmitted data. */
void furi_hal_console_set_tx_callback(FuriHalConsoleTxCallback callback, void* context);
void furi_hal_console_tx(const uint8_t* buffer, size_t buffer_size);
void furi_hal_console_tx_with_new_line(const uint8_t* buffer, size_t buffer_size);
/**
 * Printf-like plain uart interface
 * @warning Will not work in ISR context
 * @param format
 * @param ...
 */
void furi_hal_console_printf(const char format[], ...) _ATTRIBUTE((__format__(__printf__, 1, 2)));
void furi_hal_console_puts(const char* data);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,304 @@
#include "furi_string.h"
#include <m-string.h>
/* FuriString is an opaque wrapper around an mlib string_t; the functions in
 * this file are thin forwarding shims. */
struct FuriString {
    string_t string;
};
/* furi_string.h maps several furi_string_* names onto _Generic selection
 * macros; undef them so this translation unit can define plain functions
 * with the same names. */
#undef furi_string_alloc_set
#undef furi_string_set
#undef furi_string_cmp
#undef furi_string_cmpi
#undef furi_string_search
#undef furi_string_search_str
#undef furi_string_equal
#undef furi_string_replace
#undef furi_string_replace_str
#undef furi_string_replace_all
#undef furi_string_start_with
#undef furi_string_end_with
#undef furi_string_search_char
#undef furi_string_search_rchar
#undef furi_string_trim
#undef furi_string_cat
/* Allocate an empty FuriString.
 * NOTE(review): malloc results are used unchecked throughout this file;
 * presumably the furi allocator aborts on OOM -- confirm. */
FuriString* furi_string_alloc() {
    FuriString* string = malloc(sizeof(FuriString));
    string_init(string->string);
    return string;
}
/* Allocate a FuriString initialized as a copy of s. */
FuriString* furi_string_alloc_set(const FuriString* s) {
    FuriString* string = malloc(sizeof(FuriString)); //-V799
    string_init_set(string->string, s->string);
    return string;
} //-V773
/* Allocate a FuriString initialized as a copy of the C string cstr. */
FuriString* furi_string_alloc_set_str(const char cstr[]) {
    FuriString* string = malloc(sizeof(FuriString)); //-V799
    string_init_set(string->string, cstr);
    return string;
} //-V773
/* Allocate a FuriString formatted from a printf-style argument list. */
FuriString* furi_string_alloc_printf(const char format[], ...) {
    va_list args;
    va_start(args, format);
    FuriString* string = furi_string_alloc_vprintf(format, args);
    va_end(args);
    return string;
}
/* va_list variant of furi_string_alloc_printf. */
FuriString* furi_string_alloc_vprintf(const char format[], va_list args) {
    FuriString* string = malloc(sizeof(FuriString));
    string_init_vprintf(string->string, format, args);
    return string;
}
/* Take ownership of s's contents and free its container; s is invalid
 * after this call. */
FuriString* furi_string_alloc_move(FuriString* s) {
    FuriString* string = malloc(sizeof(FuriString));
    string_init_move(string->string, s->string);
    free(s);
    return string;
}
/* Release the string's buffer and its container. */
void furi_string_free(FuriString* s) {
    string_clear(s->string);
    free(s);
}
/* Grow capacity to at least alloc characters (including terminator). */
void furi_string_reserve(FuriString* s, size_t alloc) {
    string_reserve(s->string, alloc);
}
/* Empty the string without releasing its buffer. */
void furi_string_reset(FuriString* s) {
    string_reset(s->string);
}
/* Exchange the contents of two strings. */
void furi_string_swap(FuriString* v1, FuriString* v2) {
    string_swap(v1->string, v2->string);
}
/* Move v2's contents into v1 (discarding v1's old contents) and free
 * v2's container; v2 is invalid after this call. */
void furi_string_move(FuriString* v1, FuriString* v2) {
    string_clear(v1->string);
    string_init_move(v1->string, v2->string);
    free(v2);
}
/* Hash of the string contents. */
size_t furi_string_hash(const FuriString* v) {
    return string_hash(v->string);
}
/* Character at byte index (no UTF-8 awareness). */
char furi_string_get_char(const FuriString* v, size_t index) {
    return string_get_char(v->string, index);
}
/* Borrowed NUL-terminated view; valid until the string is modified. */
const char* furi_string_get_cstr(const FuriString* s) {
    return string_get_cstr(s->string);
}
/* Copy source's contents into s. */
void furi_string_set(FuriString* s, FuriString* source) {
    string_set(s->string, source->string);
}
/* Copy a C string into s. */
void furi_string_set_str(FuriString* s, const char cstr[]) {
    string_set(s->string, cstr);
}
/* Copy at most n characters of a C string into s. */
void furi_string_set_strn(FuriString* s, const char str[], size_t n) {
    string_set_strn(s->string, str, n);
}
/* Overwrite the character at byte index. */
void furi_string_set_char(FuriString* s, size_t index, const char c) {
    string_set_char(s->string, index, c);
}
/* Lexicographic comparison (strcmp semantics). */
int furi_string_cmp(const FuriString* s1, const FuriString* s2) {
    return string_cmp(s1->string, s2->string);
}
int furi_string_cmp_str(const FuriString* s1, const char str[]) {
    return string_cmp(s1->string, str);
}
/* Case-insensitive comparison; not UTF-8 aware. */
int furi_string_cmpi(const FuriString* v1, const FuriString* v2) {
    return string_cmpi(v1->string, v2->string);
}
int furi_string_cmpi_str(const FuriString* v1, const char p2[]) {
    return string_cmpi_str(v1->string, p2);
}
/* Find needle at or after start; FURI_STRING_FAILURE if absent. */
size_t furi_string_search(const FuriString* v, const FuriString* needle, size_t start) {
    return string_search(v->string, needle->string, start);
}
size_t furi_string_search_str(const FuriString* v, const char needle[], size_t start) {
    return string_search(v->string, needle, start);
}
/* Content equality. */
bool furi_string_equal(const FuriString* v1, const FuriString* v2) {
    return string_equal_p(v1->string, v2->string);
}
bool furi_string_equal_str(const FuriString* v1, const char v2[]) {
    return string_equal_p(v1->string, v2);
}
/* Append one character. */
void furi_string_push_back(FuriString* v, char c) {
    string_push_back(v->string, c);
}
/* Size in bytes (not UTF-8 length). */
size_t furi_string_size(const FuriString* s) {
    return string_size(s->string);
}
/* Replace contents with printf-formatted text; returns printf result. */
int furi_string_printf(FuriString* v, const char format[], ...) {
    va_list args;
    va_start(args, format);
    int result = furi_string_vprintf(v, format, args);
    va_end(args);
    return result;
}
int furi_string_vprintf(FuriString* v, const char format[], va_list args) {
    return string_vprintf(v->string, format, args);
}
/* Append printf-formatted text; returns printf result. */
int furi_string_cat_printf(FuriString* v, const char format[], ...) {
    va_list args;
    va_start(args, format);
    int result = furi_string_cat_vprintf(v, format, args);
    va_end(args);
    return result;
}
/* Formats into a temporary string, then appends it to v. */
int furi_string_cat_vprintf(FuriString* v, const char format[], va_list args) {
    FuriString* string = furi_string_alloc();
    int ret = furi_string_vprintf(string, format, args);
    furi_string_cat(v, string);
    furi_string_free(string);
    return ret;
}
/* True when the string has zero length. */
bool furi_string_empty(const FuriString* v) {
    return string_empty_p(v->string);
}
/* Replace len bytes at pos with the C string str2. */
void furi_string_replace_at(FuriString* v, size_t pos, size_t len, const char str2[]) {
    string_replace_at(v->string, pos, len, str2);
}
/* Replace first occurrence of needle at or after start. */
size_t
    furi_string_replace(FuriString* string, FuriString* needle, FuriString* replace, size_t start) {
    return string_replace(string->string, needle->string, replace->string, start);
}
size_t furi_string_replace_str(FuriString* v, const char str1[], const char str2[], size_t start) {
    return string_replace_str(v->string, str1, str2, start);
}
/* Replace every occurrence of str1 with str2. */
void furi_string_replace_all_str(FuriString* v, const char str1[], const char str2[]) {
    string_replace_all_str(v->string, str1, str2);
}
void furi_string_replace_all(FuriString* v, const FuriString* str1, const FuriString* str2) {
    string_replace_all(v->string, str1->string, str2->string);
}
/* Prefix / suffix tests. */
bool furi_string_start_with(const FuriString* v, const FuriString* v2) {
    return string_start_with_string_p(v->string, v2->string);
}
bool furi_string_start_with_str(const FuriString* v, const char str[]) {
    return string_start_with_str_p(v->string, str);
}
bool furi_string_end_with(const FuriString* v, const FuriString* v2) {
    return string_end_with_string_p(v->string, v2->string);
}
bool furi_string_end_with_str(const FuriString* v, const char str[]) {
    return string_end_with_str_p(v->string, str);
}
/* Forward / reverse character search from start. */
size_t furi_string_search_char(const FuriString* v, char c, size_t start) {
    return string_search_char(v->string, c, start);
}
size_t furi_string_search_rchar(const FuriString* v, char c, size_t start) {
    return string_search_rchar(v->string, c, start);
}
/* Keep only the first index bytes. */
void furi_string_left(FuriString* v, size_t index) {
    string_left(v->string, index);
}
/* Drop everything before index. */
void furi_string_right(FuriString* v, size_t index) {
    string_right(v->string, index);
}
/* Keep size bytes starting at index. */
void furi_string_mid(FuriString* v, size_t index, size_t size) {
    string_mid(v->string, index, size);
}
/* Strip the given characters from both ends (mlib calls this "strim"). */
void furi_string_trim(FuriString* v, const char charac[]) {
    string_strim(v->string, charac);
}
/* Append another string / C string. */
void furi_string_cat(FuriString* v, const FuriString* v2) {
    string_cat(v->string, v2->string);
}
void furi_string_cat_str(FuriString* v, const char str[]) {
    string_cat(v->string, str);
}
/* Set v to a length-bounded slice of ref. */
void furi_string_set_n(FuriString* v, const FuriString* ref, size_t offset, size_t length) {
    string_set_n(v->string, ref->string, offset, length);
}
/* Length in UTF-8 code points (not bytes). */
size_t furi_string_utf8_length(FuriString* str) {
    return string_length_u(str->string);
}
/* Append a code point, UTF-8 encoded. */
void furi_string_utf8_push(FuriString* str, FuriStringUnicodeValue u) {
    string_push_u(str->string, u);
}
/* Map the public UTF-8 decoder state onto mlib's internal enum.
 * Unknown values (including the error state) map to M_STR1NG_UTF8_ERROR. */
static m_str1ng_utf8_state_e furi_state_to_state(FuriStringUTF8State state) {
    if(state == FuriStringUTF8StateStarting) return M_STR1NG_UTF8_STARTING;
    if(state == FuriStringUTF8StateDecoding1) return M_STR1NG_UTF8_DECODING_1;
    if(state == FuriStringUTF8StateDecoding2) return M_STR1NG_UTF8_DECODING_2;
    if(state == FuriStringUTF8StateDecoding3) return M_STR1NG_UTF8_DECODING_3;
    return M_STR1NG_UTF8_ERROR;
}
/* Map mlib's internal UTF-8 decoder state back to the public enum.
 * Unknown values map to FuriStringUTF8StateError. */
static FuriStringUTF8State state_to_furi_state(m_str1ng_utf8_state_e state) {
    if(state == M_STR1NG_UTF8_STARTING) return FuriStringUTF8StateStarting;
    if(state == M_STR1NG_UTF8_DECODING_1) return FuriStringUTF8StateDecoding1;
    if(state == M_STR1NG_UTF8_DECODING_2) return FuriStringUTF8StateDecoding2;
    if(state == M_STR1NG_UTF8_DECODING_3) return FuriStringUTF8StateDecoding3;
    return FuriStringUTF8StateError;
}
/* Feed one byte into mlib's UTF-8 decoder, translating the caller's
 * state/value types to mlib's and back. */
void furi_string_utf8_decode(char c, FuriStringUTF8State* state, FuriStringUnicodeValue* unicode) {
    m_str1ng_utf8_state_e internal_state = furi_state_to_state(*state);
    string_unicode_t internal_value = *unicode;
    m_str1ng_utf8_decode(c, &internal_state, &internal_value);
    *state = state_to_furi_state(internal_state);
    *unicode = internal_value;
}

View File

@ -0,0 +1,738 @@
/**
* @file string.h
* Furi string primitive
*/
#pragma once
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <m-core.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Furi string failure constant.
*/
#define FURI_STRING_FAILURE ((size_t)-1)
/**
* @brief Furi string primitive.
*/
typedef struct FuriString FuriString;
//---------------------------------------------------------------------------
// Constructors
//---------------------------------------------------------------------------
/**
 * @brief Allocate a new, empty FuriString.
 * Fixed: declared (void) -- pre-C23, an empty parameter list means
 * "unspecified arguments" and disables argument checking.
 * @return FuriString* owned by the caller; release with furi_string_free.
 */
FuriString* furi_string_alloc(void);
/**
* @brief Allocate new FuriString and set it to string.
 * Allocate a new string and initialize it with a copy of the source string.
* @param source
* @return FuriString*
*/
FuriString* furi_string_alloc_set(const FuriString* source);
/**
* @brief Allocate new FuriString and set it to C string.
 * Allocate a new string and initialize it with a copy of the source C string.
* @param cstr_source
* @return FuriString*
*/
FuriString* furi_string_alloc_set_str(const char cstr_source[]);
/**
* @brief Allocate new FuriString and printf to it.
* Initialize and set a string to the given formatted value.
* @param format
* @param ...
* @return FuriString*
*/
FuriString* furi_string_alloc_printf(const char format[], ...)
_ATTRIBUTE((__format__(__printf__, 1, 2)));
/**
* @brief Allocate new FuriString and printf to it.
* Initialize and set a string to the given formatted value.
* @param format
* @param args
* @return FuriString*
*/
FuriString* furi_string_alloc_vprintf(const char format[], va_list args);
/**
* @brief Allocate new FuriString and move source string content to it.
* Allocate the string, set it to the other one, and destroy the other one.
* @param source
* @return FuriString*
*/
FuriString* furi_string_alloc_move(FuriString* source);
//---------------------------------------------------------------------------
// Destructors
//---------------------------------------------------------------------------
/**
* @brief Free FuriString.
* @param string
*/
void furi_string_free(FuriString* string);
//---------------------------------------------------------------------------
// String memory management
//---------------------------------------------------------------------------
/**
* @brief Reserve memory for string.
* Modify the string capacity to be able to handle at least 'alloc' characters (including final null char).
* @param string
* @param size
*/
void furi_string_reserve(FuriString* string, size_t size);
/**
* @brief Reset string.
* Make the string empty.
 * @param string
*/
void furi_string_reset(FuriString* string);
/**
* @brief Swap two strings.
* Swap the two strings string_1 and string_2.
* @param string_1
* @param string_2
*/
void furi_string_swap(FuriString* string_1, FuriString* string_2);
/**
* @brief Move string_2 content to string_1.
* Set the string to the other one, and destroy the other one.
* @param string_1
* @param string_2
*/
void furi_string_move(FuriString* string_1, FuriString* string_2);
/**
* @brief Compute a hash for the string.
* @param string
* @return size_t
*/
size_t furi_string_hash(const FuriString* string);
/**
* @brief Get string size (usually length, but not for UTF-8)
* @param string
* @return size_t
*/
size_t furi_string_size(const FuriString* string);
/**
* @brief Check that string is empty or not
* @param string
* @return bool
*/
bool furi_string_empty(const FuriString* string);
//---------------------------------------------------------------------------
// Getters
//---------------------------------------------------------------------------
/**
* @brief Get the character at the given index.
* Return the selected character of the string.
* @param string
* @param index
* @return char
*/
char furi_string_get_char(const FuriString* string, size_t index);
/**
 * @brief Return a read-only view of the string as a classic C string.
* @param string
* @return const char*
*/
const char* furi_string_get_cstr(const FuriString* string);
//---------------------------------------------------------------------------
// Setters
//---------------------------------------------------------------------------
/**
* @brief Set the string to the other string.
* Set the string to the source string.
* @param string
* @param source
*/
void furi_string_set(FuriString* string, FuriString* source);
/**
* @brief Set the string to the other C string.
* Set the string to the source C string.
* @param string
* @param source
*/
void furi_string_set_str(FuriString* string, const char source[]);
/**
* @brief Set the string to the n first characters of the C string.
* @param string
* @param source
* @param length
*/
void furi_string_set_strn(FuriString* string, const char source[], size_t length);
/**
* @brief Set the character at the given index.
* @param string
* @param index
* @param c
*/
void furi_string_set_char(FuriString* string, size_t index, const char c);
/**
* @brief Set the string to the n first characters of other one.
* @param string
* @param source
* @param offset
* @param length
*/
void furi_string_set_n(FuriString* string, const FuriString* source, size_t offset, size_t length);
/**
* @brief Format in the string the given printf format
* @param string
* @param format
* @param ...
* @return int
*/
int furi_string_printf(FuriString* string, const char format[], ...)
_ATTRIBUTE((__format__(__printf__, 2, 3)));
/**
* @brief Format in the string the given printf format
* @param string
* @param format
* @param args
* @return int
*/
int furi_string_vprintf(FuriString* string, const char format[], va_list args);
//---------------------------------------------------------------------------
// Appending
//---------------------------------------------------------------------------
/**
* @brief Append a character to the string.
* @param string
* @param c
*/
void furi_string_push_back(FuriString* string, char c);
/**
* @brief Append a string to the string.
* Concatenate the string with the other string.
* @param string_1
* @param string_2
*/
void furi_string_cat(FuriString* string_1, const FuriString* string_2);
/**
* @brief Append a C string to the string.
* Concatenate the string with the C string.
* @param string_1
* @param cstring_2
*/
void furi_string_cat_str(FuriString* string_1, const char cstring_2[]);
/**
* @brief Append to the string the formatted string of the given printf format.
* @param string
* @param format
* @param ...
* @return int
*/
int furi_string_cat_printf(FuriString* string, const char format[], ...)
_ATTRIBUTE((__format__(__printf__, 2, 3)));
/**
* @brief Append to the string the formatted string of the given printf format.
* @param string
* @param format
* @param args
* @return int
*/
int furi_string_cat_vprintf(FuriString* string, const char format[], va_list args);
//---------------------------------------------------------------------------
// Comparators
//---------------------------------------------------------------------------
/**
* @brief Compare two strings and return the sort order.
* @param string_1
* @param string_2
* @return int
*/
int furi_string_cmp(const FuriString* string_1, const FuriString* string_2);
/**
* @brief Compare string with C string and return the sort order.
* @param string_1
* @param cstring_2
* @return int
*/
int furi_string_cmp_str(const FuriString* string_1, const char cstring_2[]);
/**
* @brief Compare two strings (case insensitive according to the current locale) and return the sort order.
* Note: doesn't work with UTF-8 strings.
* @param string_1
* @param string_2
* @return int
*/
int furi_string_cmpi(const FuriString* string_1, const FuriString* string_2);
/**
* @brief Compare string with C string (case insensitive according to the current locale) and return the sort order.
* Note: doesn't work with UTF-8 strings.
* @param string_1
* @param cstring_2
* @return int
*/
int furi_string_cmpi_str(const FuriString* string_1, const char cstring_2[]);
//---------------------------------------------------------------------------
// Search
//---------------------------------------------------------------------------
/**
* @brief Search the first occurrence of the needle in the string from the position start.
 * Return FURI_STRING_FAILURE if not found.
* By default, start is zero.
* @param string
* @param needle
* @param start
* @return size_t
*/
size_t furi_string_search(const FuriString* string, const FuriString* needle, size_t start);
/**
* @brief Search the first occurrence of the needle in the string from the position start.
 * Return FURI_STRING_FAILURE if not found.
* @param string
* @param needle
* @param start
* @return size_t
*/
size_t furi_string_search_str(const FuriString* string, const char needle[], size_t start);
/**
* @brief Search for the position of the character c from the position start (include) in the string.
 * Return FURI_STRING_FAILURE if not found.
* By default, start is zero.
* @param string
* @param c
* @param start
* @return size_t
*/
size_t furi_string_search_char(const FuriString* string, char c, size_t start);
/**
* @brief Reverse search for the position of the character c from the position start (include) in the string.
 * Return FURI_STRING_FAILURE if not found.
* By default, start is zero.
* @param string
* @param c
* @param start
* @return size_t
*/
size_t furi_string_search_rchar(const FuriString* string, char c, size_t start);
//---------------------------------------------------------------------------
// Equality
//---------------------------------------------------------------------------
/**
* @brief Test if two strings are equal.
* @param string_1
* @param string_2
* @return bool
*/
bool furi_string_equal(const FuriString* string_1, const FuriString* string_2);
/**
* @brief Test if the string is equal to the C string.
* @param string_1
* @param cstring_2
* @return bool
*/
bool furi_string_equal_str(const FuriString* string_1, const char cstring_2[]);
//---------------------------------------------------------------------------
// Replace
//---------------------------------------------------------------------------
/**
* @brief Replace in the string the sub-string at position 'pos' for 'len' bytes into the C string 'replace'.
* @param string
* @param pos
* @param len
* @param replace
*/
void furi_string_replace_at(FuriString* string, size_t pos, size_t len, const char replace[]);
/**
* @brief Replace a string 'needle' to string 'replace' in a string from 'start' position.
* By default, start is zero.
 * Return FURI_STRING_FAILURE if 'needle' was not found, otherwise the position of the replacement.
* @param string
* @param needle
* @param replace
* @param start
* @return size_t
*/
size_t
furi_string_replace(FuriString* string, FuriString* needle, FuriString* replace, size_t start);
/**
* @brief Replace a C string 'needle' to C string 'replace' in a string from 'start' position.
* By default, start is zero.
 * Return FURI_STRING_FAILURE if 'needle' was not found, otherwise the position of the replacement.
* @param string
* @param needle
* @param replace
* @param start
* @return size_t
*/
size_t furi_string_replace_str(
FuriString* string,
const char needle[],
const char replace[],
size_t start);
/**
* @brief Replace all occurrences of 'needle' string into 'replace' string.
* @param string
* @param needle
* @param replace
*/
void furi_string_replace_all(
FuriString* string,
const FuriString* needle,
const FuriString* replace);
/**
* @brief Replace all occurrences of 'needle' C string into 'replace' C string.
* @param string
* @param needle
* @param replace
*/
void furi_string_replace_all_str(FuriString* string, const char needle[], const char replace[]);
//---------------------------------------------------------------------------
// Start / End tests
//---------------------------------------------------------------------------
/**
* @brief Test if the string starts with the given string.
* @param string
* @param start
* @return bool
*/
bool furi_string_start_with(const FuriString* string, const FuriString* start);
/**
* @brief Test if the string starts with the given C string.
* @param string
* @param start
* @return bool
*/
bool furi_string_start_with_str(const FuriString* string, const char start[]);
/**
* @brief Test if the string ends with the given string.
* @param string
* @param end
* @return bool
*/
bool furi_string_end_with(const FuriString* string, const FuriString* end);
/**
* @brief Test if the string ends with the given C string.
* @param string
* @param end
* @return bool
*/
bool furi_string_end_with_str(const FuriString* string, const char end[]);
//---------------------------------------------------------------------------
// Trim
//---------------------------------------------------------------------------
/**
* @brief Trim the string left to the first 'index' bytes.
* @param string
* @param index
*/
void furi_string_left(FuriString* string, size_t index);
/**
* @brief Trim the string right from the 'index' position to the last position.
* @param string
* @param index
*/
void furi_string_right(FuriString* string, size_t index);
/**
* @brief Trim the string from position index to size bytes.
* See also furi_string_set_n.
* @param string
* @param index
* @param size
*/
void furi_string_mid(FuriString* string, size_t index, size_t size);
/**
* @brief Trim a string from the given set of characters (default is " \n\r\t").
* @param string
* @param chars
*/
void furi_string_trim(FuriString* string, const char chars[]);
//---------------------------------------------------------------------------
// UTF8
//---------------------------------------------------------------------------
/**
 * @brief A Unicode code point value.
*/
typedef unsigned int FuriStringUnicodeValue;
/**
* @brief Compute the length in UTF8 characters in the string.
* @param string
* @return size_t
*/
size_t furi_string_utf8_length(FuriString* string);
/**
* @brief Push unicode into string, encoding it in UTF8.
* @param string
* @param unicode
*/
void furi_string_utf8_push(FuriString* string, FuriStringUnicodeValue unicode);
/**
* @brief State of the UTF8 decoding machine state.
*/
typedef enum {
FuriStringUTF8StateStarting,
FuriStringUTF8StateDecoding1,
FuriStringUTF8StateDecoding2,
FuriStringUTF8StateDecoding3,
FuriStringUTF8StateError
} FuriStringUTF8State;
/**
* @brief Main generic UTF8 decoder.
* It takes a character, and the previous state and the previous value of the unicode value.
* It updates the state and the decoded unicode value.
* A decoded unicode encoded value is valid only when the state is FuriStringUTF8StateStarting.
* @param c
* @param state
* @param unicode
*/
void furi_string_utf8_decode(char c, FuriStringUTF8State* state, FuriStringUnicodeValue* unicode);
//---------------------------------------------------------------------------
// Lasciate ogne speranza, voi ch'entrate ("Abandon all hope, ye who enter")
//---------------------------------------------------------------------------
/**
*
* Select either the string function or the str function depending on
* the b operand to the function.
* func1 is the string function / func2 is the str function.
*/
/**
* @brief Select for 1 argument
*/
#define FURI_STRING_SELECT1(func1, func2, a) \
_Generic((a), char* : func2, const char* : func2, FuriString* : func1, const FuriString* : func1)(a)
/**
* @brief Select for 2 arguments
*/
#define FURI_STRING_SELECT2(func1, func2, a, b) \
_Generic((b), char* : func2, const char* : func2, FuriString* : func1, const FuriString* : func1)(a, b)
/**
* @brief Select for 3 arguments
*/
#define FURI_STRING_SELECT3(func1, func2, a, b, c) \
_Generic((b), char* : func2, const char* : func2, FuriString* : func1, const FuriString* : func1)(a, b, c)
/**
* @brief Select for 4 arguments
*/
#define FURI_STRING_SELECT4(func1, func2, a, b, c, d) \
_Generic((b), char* : func2, const char* : func2, FuriString* : func1, const FuriString* : func1)(a, b, c, d)
/**
* @brief Allocate new FuriString and set it content to string (or C string).
* ([c]string)
*/
#define furi_string_alloc_set(a) \
FURI_STRING_SELECT1(furi_string_alloc_set, furi_string_alloc_set_str, a)
/**
* @brief Set the string content to string (or C string).
* (string, [c]string)
*/
#define furi_string_set(a, b) FURI_STRING_SELECT2(furi_string_set, furi_string_set_str, a, b)
/**
* @brief Compare string with string (or C string) and return the sort order.
* Note: doesn't work with UTF-8 strings.
* (string, [c]string)
*/
#define furi_string_cmp(a, b) FURI_STRING_SELECT2(furi_string_cmp, furi_string_cmp_str, a, b)
/**
* @brief Compare string with string (or C string) (case insensitive according to the current locale) and return the sort order.
* Note: doesn't work with UTF-8 strings.
* (string, [c]string)
*/
#define furi_string_cmpi(a, b) FURI_STRING_SELECT2(furi_string_cmpi, furi_string_cmpi_str, a, b)
/**
* @brief Test if the string is equal to the string (or C string).
* (string, [c]string)
*/
#define furi_string_equal(a, b) FURI_STRING_SELECT2(furi_string_equal, furi_string_equal_str, a, b)
/**
* @brief Replace all occurrences of string into string (or C string to another C string) in a string.
* (string, [c]string, [c]string)
*/
#define furi_string_replace_all(a, b, c) \
FURI_STRING_SELECT3(furi_string_replace_all, furi_string_replace_all_str, a, b, c)
/**
* @brief Search for a string (or C string) in a string
* (string, [c]string[, start=0])
*/
#define furi_string_search(...) \
M_APPLY( \
FURI_STRING_SELECT3, \
furi_string_search, \
furi_string_search_str, \
M_DEFAULT_ARGS(3, (0), __VA_ARGS__))
/**
* @brief Search for a C string in a string
* (string, cstring[, start=0])
*/
#define furi_string_search_str(...) furi_string_search_str(M_DEFAULT_ARGS(3, (0), __VA_ARGS__))
/**
* @brief Test if the string starts with the given string (or C string).
* (string, [c]string)
*/
#define furi_string_start_with(a, b) \
FURI_STRING_SELECT2(furi_string_start_with, furi_string_start_with_str, a, b)
/**
* @brief Test if the string ends with the given string (or C string).
* (string, [c]string)
*/
#define furi_string_end_with(a, b) \
FURI_STRING_SELECT2(furi_string_end_with, furi_string_end_with_str, a, b)
/**
* @brief Append a string (or C string) to the string.
* (string, [c]string)
*/
#define furi_string_cat(a, b) FURI_STRING_SELECT2(furi_string_cat, furi_string_cat_str, a, b)
/**
* @brief Trim a string from the given set of characters (default is " \n\r\t").
* (string[, set=" \n\r\t"])
*/
#define furi_string_trim(...) furi_string_trim(M_DEFAULT_ARGS(2, (" \n\r\t"), __VA_ARGS__))
/**
* @brief Search for a character in a string.
* (string, character[, start=0])
*/
#define furi_string_search_char(...) furi_string_search_char(M_DEFAULT_ARGS(3, (0), __VA_ARGS__))
/**
* @brief Reverse Search for a character in a string.
* (string, character[, start=0])
*/
#define furi_string_search_rchar(...) furi_string_search_rchar(M_DEFAULT_ARGS(3, (0), __VA_ARGS__))
/**
* @brief Replace a string to another string (or C string to another C string) in a string.
* (string, [c]string, [c]string[, start=0])
*/
#define furi_string_replace(...) \
M_APPLY( \
FURI_STRING_SELECT4, \
furi_string_replace, \
furi_string_replace_str, \
M_DEFAULT_ARGS(4, (0), __VA_ARGS__))
/**
* @brief Replace a C string to another C string in a string.
* (string, cstring, cstring[, start=0])
*/
#define furi_string_replace_str(...) furi_string_replace_str(M_DEFAULT_ARGS(4, (0), __VA_ARGS__))
/**
* @brief INIT OPLIST for FuriString.
*/
#define F_STR_INIT(a) ((a) = furi_string_alloc())
/**
* @brief INIT SET OPLIST for FuriString.
*/
#define F_STR_INIT_SET(a, b) ((a) = furi_string_alloc_set(b))
/**
* @brief INIT MOVE OPLIST for FuriString.
*/
#define F_STR_INIT_MOVE(a, b) ((a) = furi_string_alloc_move(b))
/**
* @brief OPLIST for FuriString.
*/
#define FURI_STRING_OPLIST \
(INIT(F_STR_INIT), \
INIT_SET(F_STR_INIT_SET), \
SET(furi_string_set), \
INIT_MOVE(F_STR_INIT_MOVE), \
MOVE(furi_string_move), \
SWAP(furi_string_swap), \
RESET(furi_string_reset), \
EMPTY_P(furi_string_empty), \
CLEAR(furi_string_free), \
HASH(furi_string_hash), \
EQUAL(furi_string_equal), \
CMP(furi_string_cmp), \
TYPE(FuriString*))
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,202 @@
#include "kernel.h"
#include "base.h"
#include "check.h"
#include "common_defines.h"
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <rom/ets_sys.h>
/** Check whether we are in interrupt context, or (with the scheduler
 * started) running with interrupts masked.
 * Returns true for IRQ context, false for plain thread context. */
bool furi_kernel_is_irq_or_masked() {
    /* Interrupt context: answer immediately. */
    if(FURI_IS_IRQ_MODE()) {
        return true;
    }
    /* Thread context: the mask check is only meaningful once the
     * FreeRTOS scheduler has been started. */
    if(xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
        if(FURI_IS_IRQ_MASKED()) {
            return true;
        }
    }
    /* Thread context with interrupts enabled. */
    return false;
}
/** Check if the kernel is running.
 *
 * Bug fix: the original returned `!= taskSCHEDULER_RUNNING`, i.e. true
 * when the scheduler was NOT running (not started or suspended), which
 * is the exact inverse of what the function name promises and of the
 * upstream Flipper Zero implementation.
 *
 * @return true when the FreeRTOS scheduler has been started and is not
 *         suspended, false otherwise.
 */
bool furi_kernel_is_running() {
    return xTaskGetSchedulerState() == taskSCHEDULER_RUNNING;
}
int32_t furi_kernel_lock() {
furi_assert(!furi_kernel_is_irq_or_masked());
int32_t lock;
switch(xTaskGetSchedulerState()) {
case taskSCHEDULER_SUSPENDED:
lock = 1;
break;
case taskSCHEDULER_RUNNING:
vTaskSuspendAll();
lock = 0;
break;
case taskSCHEDULER_NOT_STARTED:
default:
lock = (int32_t)FuriStatusError;
break;
}
/* Return previous lock state */
return (lock);
}
/** Resume process scheduling.
 * Must not be called from IRQ context.
 * Returns the previous lock state (0 unlocked, 1 locked) or
 * FuriStatusError. */
int32_t furi_kernel_unlock() {
    furi_assert(!furi_kernel_is_irq_or_masked());
    int32_t lock;
    switch(xTaskGetSchedulerState()) {
    case taskSCHEDULER_SUSPENDED:
        lock = 1;
        /* xTaskResumeAll() returns pdTRUE only when a context switch
         * happened; re-check the state to tell "still suspended"
         * (NOTE(review): presumably a nested suspension - confirm)
         * apart from a normal resume without a switch. */
        if(xTaskResumeAll() != pdTRUE) {
            if(xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
                lock = (int32_t)FuriStatusError;
            }
        }
        break;
    case taskSCHEDULER_RUNNING:
        /* Was not locked. */
        lock = 0;
        break;
    case taskSCHEDULER_NOT_STARTED:
    default:
        lock = (int32_t)FuriStatusError;
        break;
    }
    /* Return previous lock state */
    return (lock);
}
/** Re-apply a lock state previously returned by furi_kernel_lock() /
 * furi_kernel_unlock(). Must not be called from IRQ context.
 * Returns the new lock state or FuriStatusError. */
int32_t furi_kernel_restore_lock(int32_t lock) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    switch(xTaskGetSchedulerState()) {
    case taskSCHEDULER_SUSPENDED:
    case taskSCHEDULER_RUNNING:
        if(lock == 1) {
            /* Restore "locked": suspending twice is harmless. */
            vTaskSuspendAll();
        } else {
            if(lock != 0) {
                /* Only 0 and 1 are valid lock states. */
                lock = (int32_t)FuriStatusError;
            } else {
                /* Restore "unlocked"; verify the scheduler actually
                 * resumed (see furi_kernel_unlock for the same check). */
                if(xTaskResumeAll() != pdTRUE) {
                    if(xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
                        lock = (int32_t)FuriStatusError;
                    }
                }
            }
        }
        break;
    case taskSCHEDULER_NOT_STARTED:
    default:
        lock = (int32_t)FuriStatusError;
        break;
    }
    /* Return new lock state */
    return (lock);
}
/** Get the scheduler tick rate.
 * @return ticks per second (Hz). */
uint32_t furi_kernel_get_tick_frequency() {
    return (uint32_t)configTICK_RATE_HZ_RAW;
}
/** Block the calling task for the given number of scheduler ticks.
 * A zero tick count degenerates into a simple yield.
 * Must not be called from IRQ context. */
void furi_delay_tick(uint32_t ticks) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    if(ticks != 0U) {
        vTaskDelay(ticks);
    } else {
        /* Give other ready tasks of the same priority a chance to run. */
        taskYIELD();
    }
}
/** Delay the calling task until an absolute tick value.
 * Must not be called from IRQ context.
 * Returns FuriStatusOk, FuriStatusError if the kernel did not delay,
 * or FuriStatusErrorParameter when the target tick already passed. */
FuriStatus furi_delay_until_tick(uint32_t tick) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    TickType_t tcnt, delay;
    FuriStatus stat;
    stat = FuriStatusOk;
    tcnt = xTaskGetTickCount();
    /* Determine remaining number of tick to delay */
    /* Unsigned subtraction handles tick-counter wraparound. */
    delay = (TickType_t)tick - tcnt;
    /* Check if target tick has not expired */
    /* The delta must be non-zero and its top bit clear, i.e. the target
     * lies in the future under modular unsigned arithmetic. */
    if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
        if(xTaskDelayUntil(&tcnt, delay) == pdFALSE) {
            /* Did not delay */
            stat = FuriStatusError;
        }
    } else {
        /* No delay or already expired */
        stat = FuriStatusErrorParameter;
    }
    /* Return execution status */
    return (stat);
}
/** Get the current scheduler tick count (system uptime, may wrap).
 * Safe to call from thread or interrupt context. */
uint32_t furi_get_tick() {
    /* Pick the ISR-safe accessor when called from interrupt context or
     * with interrupts masked. */
    if(furi_kernel_is_irq_or_masked()) {
        return xTaskGetTickCountFromISR();
    }
    return xTaskGetTickCount();
}
/** Convert milliseconds to scheduler ticks. */
uint32_t furi_ms_to_ticks(uint32_t milliseconds) {
/* With a 1000 Hz tick (the current configTICK_RATE_HZ_RAW value set in
 * kernel.h) one tick is one millisecond, so no conversion is needed. */
#if configTICK_RATE_HZ_RAW == 1000
    return milliseconds;
#else
    /* Generic conversion via float; the result is truncated toward
     * zero. NOTE(review): this branch is dead code while
     * configTICK_RATE_HZ_RAW == 1000 - confirm if other rates are
     * ever configured. */
    return (uint32_t)((float)configTICK_RATE_HZ_RAW) / 1000.0f * (float)milliseconds;
#endif
}
/** Delay for the given number of milliseconds.
 * Uses the scheduler-based tick delay from thread context with the
 * scheduler running; otherwise falls back to the busy-wait
 * furi_delay_us(). */
void furi_delay_ms(uint32_t milliseconds) {
    if(!FURI_IS_ISR() && xTaskGetSchedulerState() == taskSCHEDULER_RUNNING) {
        /* Add one tick so the real delay is at least the requested
         * time (the current tick period may be almost elapsed). */
        if(milliseconds > 0 && milliseconds < portMAX_DELAY - 1) {
            milliseconds += 1;
        }
#if configTICK_RATE_HZ_RAW == 1000
        /* 1 tick == 1 ms: no conversion needed. */
        furi_delay_tick(milliseconds);
#else
        furi_delay_tick(furi_ms_to_ticks(milliseconds));
#endif
    } else if(milliseconds > 0) {
        /* Busy-wait fallback for ISR context / scheduler not running.
         * NOTE(review): `milliseconds * 1000` overflows uint32_t for
         * values above ~4,294,967 ms - confirm callers stay well below
         * that. */
        furi_delay_us(milliseconds * 1000);
    }
}
/** Busy-wait for the given number of microseconds using the ESP ROM
 * delay routine (see the rom/ets_sys.h include above). Blocking and
 * not aliased to the scheduler tick. */
void furi_delay_us(uint32_t microseconds) {
    ets_delay_us(microseconds);
}

View File

@ -0,0 +1,128 @@
/**
* @file kernel.h
* Furi Kernel primitives
*/
#pragma once
#include "base.h"
#define configTICK_RATE_HZ_RAW 1000
#ifdef __cplusplus
extern "C" {
#endif
/** Check if CPU is in IRQ or kernel running and IRQ is masked
*
* Originally this primitive was born as a workaround for FreeRTOS kernel primitives shenanigans with PRIMASK.
*
* Meaningful use cases are:
*
* - When kernel is started and you want to ensure that you are not in IRQ or IRQ is not masked(like in critical section)
* - When kernel is not started and you want to make sure that you are not in IRQ mode, ignoring PRIMASK.
*
 * As you can see, there is an edge case when the kernel is not started and PRIMASK is not 0, which may cause some funky behavior.
* Most likely it will happen after kernel primitives being used, but control not yet passed to kernel.
* It's up to you to figure out if it is safe for your code or not.
*
* @return true if CPU is in IRQ or kernel running and IRQ is masked
*/
bool furi_kernel_is_irq_or_masked();
/** Check if kernel is running
*
* @return true if running, false otherwise
*/
bool furi_kernel_is_running();
/** Lock kernel, pause process scheduling
*
* @warning This should never be called in interrupt request context.
*
* @return previous lock state(0 - unlocked, 1 - locked)
*/
int32_t furi_kernel_lock();
/** Unlock kernel, resume process scheduling
*
* @warning This should never be called in interrupt request context.
*
* @return previous lock state(0 - unlocked, 1 - locked)
*/
int32_t furi_kernel_unlock();
/** Restore kernel lock state
*
* @warning This should never be called in interrupt request context.
*
* @param[in] lock The lock state
*
* @return new lock state or error
*/
int32_t furi_kernel_restore_lock(int32_t lock);
/** Get kernel systick frequency
*
* @return systick counts per second
*/
uint32_t furi_kernel_get_tick_frequency();
/** Delay execution
*
* @warning This should never be called in interrupt request context.
*
* Also keep in mind delay is aliased to scheduler timer intervals.
*
* @param[in] ticks The ticks count to pause
*/
void furi_delay_tick(uint32_t ticks);
/** Delay until tick
*
* @warning This should never be called in interrupt request context.
*
 * @param[in] tick The tick until which the kernel should delay task execution
*
* @return The furi status.
*/
FuriStatus furi_delay_until_tick(uint32_t tick);
/** Get current tick counter
*
* System uptime, may overflow.
*
* @return Current ticks in milliseconds
*/
uint32_t furi_get_tick(void);
/** Convert milliseconds to ticks
*
* @param[in] milliseconds time in milliseconds
* @return time in ticks
*/
uint32_t furi_ms_to_ticks(uint32_t milliseconds);
/** Delay in milliseconds
*
* This method uses kernel ticks on the inside, which causes delay to be aliased to scheduler timer intervals.
* Real wait time will be between X+ milliseconds.
* Special value: 0, will cause task yield.
* Also if used when kernel is not running will fall back to `furi_delay_us`.
*
* @warning Cannot be used from ISR
*
* @param[in] milliseconds milliseconds to wait
*/
void furi_delay_ms(uint32_t milliseconds);
/** Delay in microseconds
*
 * Blocking busy-wait, not aliased to the scheduler tick. On this ESP32 port it is backed by the ROM ets_delay_us routine rather than a Cortex DWT counter.
*
* @param[in] microseconds microseconds to wait
*/
void furi_delay_us(uint32_t microseconds);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,26 @@
// File originated from Flipper Zero / Furi
#pragma once
#include <m-core.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Oplist helpers that store plain C strings by duplication (strdup)
 * and release them with free(). All macro arguments are fully
 * parenthesized to avoid precedence surprises when expanded with
 * non-trivial lvalue expressions (standard macro hygiene - the
 * original cast `(void*)a` bound only to the first token of `a`). */
#define M_INIT_DUP(a) ((a) = strdup(""))
#define M_INIT_SET_DUP(a, b) ((a) = strdup(b))
/* Free the previous value before duplicating the new one. */
#define M_SET_DUP(a, b) (free((void*)(a)), (a) = strdup(b))
#define M_CLEAR_DUP(a) (free((void*)(a)))
/* M*LIB oplist mapping the operations above onto `const char*` keys. */
#define M_CSTR_DUP_OPLIST \
    (INIT(M_INIT_DUP), \
     INIT_SET(M_INIT_SET_DUP), \
     SET(M_SET_DUP), \
     CLEAR(M_CLEAR_DUP), \
     HASH(m_core_cstr_hash), \
     EQUAL(M_CSTR_EQUAL), \
     CMP(strcmp), \
     TYPE(const char*))
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,182 @@
#include "kernel.h"
#include "message_queue.h"
#include "check.h"
#include <freertos/FreeRTOS.h>
#include <freertos/queue.h>
/** Allocate a message queue holding up to msg_count messages of
 * msg_size bytes each. Crashes on allocation failure.
 * Must not be called from IRQ context. */
FuriMessageQueue* furi_message_queue_alloc(uint32_t msg_count, uint32_t msg_size) {
    /* Both dimensions must be non-zero, and we must be in thread context. */
    furi_assert((furi_kernel_is_irq_or_masked() == 0U) && (msg_count > 0U) && (msg_size > 0U));
    QueueHandle_t queue = xQueueCreate(msg_count, msg_size);
    furi_check(queue);
    return (FuriMessageQueue*)queue;
}
/** Delete a message queue previously created with
 * furi_message_queue_alloc(). Must not be called from IRQ context. */
void furi_message_queue_free(FuriMessageQueue* instance) {
    furi_assert(furi_kernel_is_irq_or_masked() == 0U);
    furi_assert(instance);
    QueueHandle_t queue = (QueueHandle_t)instance;
    vQueueDelete(queue);
}
/* Put a message into the queue, copying it from msg_ptr.
 * From ISR context only a zero timeout is allowed; from thread context
 * the call may block up to `timeout` ticks when the queue is full. */
FuriStatus
furi_message_queue_put(FuriMessageQueue* instance, const void* msg_ptr, uint32_t timeout) {
    QueueHandle_t hQueue = (QueueHandle_t)instance;
    FuriStatus stat;
    BaseType_t yield;
    stat = FuriStatusOk;
    if(furi_kernel_is_irq_or_masked() != 0U) {
        /* ISR path: blocking is impossible, so a non-zero timeout is a
         * parameter error. */
        if((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
            stat = FuriStatusErrorParameter;
        } else {
            yield = pdFALSE;
            if(xQueueSendToBackFromISR(hQueue, msg_ptr, &yield) != pdTRUE) {
                /* Queue full */
                stat = FuriStatusErrorResource;
            } else {
                /* Request a context switch if a higher-priority task
                 * was woken by the send. */
                portYIELD_FROM_ISR(yield);
            }
        }
    } else {
        if((hQueue == NULL) || (msg_ptr == NULL)) {
            stat = FuriStatusErrorParameter;
        } else {
            if(xQueueSendToBack(hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
                /* Distinguish a timed-out wait from an immediate
                 * queue-full failure. */
                if(timeout != 0U) {
                    stat = FuriStatusErrorTimeout;
                } else {
                    stat = FuriStatusErrorResource;
                }
            }
        }
    }
    /* Return execution status */
    return (stat);
}
/* Fetch the oldest message from the queue into msg_ptr.
 * From ISR context only a zero timeout is allowed; from thread context
 * the call may block up to `timeout` ticks while the queue is empty. */
FuriStatus furi_message_queue_get(FuriMessageQueue* instance, void* msg_ptr, uint32_t timeout) {
    QueueHandle_t hQueue = (QueueHandle_t)instance;
    FuriStatus stat;
    BaseType_t yield;
    stat = FuriStatusOk;
    if(furi_kernel_is_irq_or_masked() != 0U) {
        /* ISR path: blocking is impossible, so a non-zero timeout is a
         * parameter error. */
        if((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
            stat = FuriStatusErrorParameter;
        } else {
            yield = pdFALSE;
            if(xQueueReceiveFromISR(hQueue, msg_ptr, &yield) != pdPASS) {
                /* Queue empty */
                stat = FuriStatusErrorResource;
            } else {
                /* Request a context switch if a higher-priority task
                 * was unblocked by the receive. */
                portYIELD_FROM_ISR(yield);
            }
        }
    } else {
        if((hQueue == NULL) || (msg_ptr == NULL)) {
            stat = FuriStatusErrorParameter;
        } else {
            if(xQueueReceive(hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
                /* Distinguish a timed-out wait from an immediate
                 * queue-empty failure. */
                if(timeout != 0U) {
                    stat = FuriStatusErrorTimeout;
                } else {
                    stat = FuriStatusErrorResource;
                }
            }
        }
    }
    /* Return execution status */
    return (stat);
}
/* Maximum number of messages the queue can hold.
 * NOTE(review): reaches into the private FreeRTOS queue object through
 * its StaticQueue_t mirror (uxDummy4[1] shadows pxQueue->uxLength) -
 * verify this offset against the FreeRTOS version in use. */
uint32_t furi_message_queue_get_capacity(FuriMessageQueue* instance) {
    StaticQueue_t* mq = (StaticQueue_t*)instance;
    uint32_t capacity;
    if(mq == NULL) {
        capacity = 0U;
    } else {
        /* capacity = pxQueue->uxLength */
        capacity = mq->uxDummy4[1];
    }
    /* Return maximum number of messages */
    return (capacity);
}
/* Size in bytes of one message slot.
 * NOTE(review): reaches into the private FreeRTOS queue object through
 * its StaticQueue_t mirror (uxDummy4[2] shadows pxQueue->uxItemSize) -
 * verify this offset against the FreeRTOS version in use. */
uint32_t furi_message_queue_get_message_size(FuriMessageQueue* instance) {
    StaticQueue_t* mq = (StaticQueue_t*)instance;
    uint32_t size;
    if(mq == NULL) {
        size = 0U;
    } else {
        /* size = pxQueue->uxItemSize */
        size = mq->uxDummy4[2];
    }
    /* Return maximum message size */
    return (size);
}
/** Number of messages currently stored in the queue.
 * Safe to call from thread or interrupt context; returns 0 for a NULL
 * instance. */
uint32_t furi_message_queue_get_count(FuriMessageQueue* instance) {
    QueueHandle_t queue = (QueueHandle_t)instance;
    if(queue == NULL) {
        return 0U;
    }
    /* Use the ISR-safe query in interrupt context. */
    if(furi_kernel_is_irq_or_masked() != 0U) {
        return (uint32_t)uxQueueMessagesWaitingFromISR(queue);
    }
    return (uint32_t)uxQueueMessagesWaiting(queue);
}
/* Number of free message slots in the queue.
 * NOTE(review): the ISR path reads the private FreeRTOS queue fields
 * through StaticQueue_t (uxDummy4[1] = uxLength, uxDummy4[0] =
 * uxMessagesWaiting) - verify against the FreeRTOS version in use. */
uint32_t furi_message_queue_get_space(FuriMessageQueue* instance) {
    StaticQueue_t* mq = (StaticQueue_t*)instance;
    uint32_t space;
    uint32_t isrm;
    if(mq == NULL) {
        space = 0U;
    } else if(furi_kernel_is_irq_or_masked() != 0U) {
        /* Read both counters inside a critical section so the
         * subtraction is consistent. */
        isrm = taskENTER_CRITICAL_FROM_ISR();
        /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
        space = mq->uxDummy4[1] - mq->uxDummy4[0];
        taskEXIT_CRITICAL_FROM_ISR(isrm);
    } else {
        space = (uint32_t)uxQueueSpacesAvailable((QueueHandle_t)mq);
    }
    /* Return number of available slots */
    return (space);
}
/** Discard all messages in the queue.
 * Not permitted from interrupt context. */
FuriStatus furi_message_queue_reset(FuriMessageQueue* instance) {
    QueueHandle_t queue = (QueueHandle_t)instance;
    FuriStatus status;
    if(furi_kernel_is_irq_or_masked() != 0U) {
        status = FuriStatusErrorISR;
    } else if(queue == NULL) {
        status = FuriStatusErrorParameter;
    } else {
        /* Return value intentionally ignored. */
        (void)xQueueReset(queue);
        status = FuriStatusOk;
    }
    /* Return execution status */
    return status;
}

View File

@ -0,0 +1,95 @@
/**
* @file message_queue.h
* FuriMessageQueue
*/
#pragma once
#include "base.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void FuriMessageQueue;
/** Allocate furi message queue
*
* @param[in] msg_count The message count
* @param[in] msg_size The message size
*
* @return pointer to FuriMessageQueue instance
*/
FuriMessageQueue* furi_message_queue_alloc(uint32_t msg_count, uint32_t msg_size);
/** Free queue
*
* @param instance pointer to FuriMessageQueue instance
*/
void furi_message_queue_free(FuriMessageQueue* instance);
/** Put message into queue
*
* @param instance pointer to FuriMessageQueue instance
* @param[in] msg_ptr The message pointer
* @param[in] timeout The timeout
 * @note Message priorities are not supported by this implementation.
*
* @return The furi status.
*/
FuriStatus
furi_message_queue_put(FuriMessageQueue* instance, const void* msg_ptr, uint32_t timeout);
/** Get message from queue
*
* @param instance pointer to FuriMessageQueue instance
* @param msg_ptr The message pointer
 * @note Message priorities are not supported by this implementation.
* @param[in] timeout The timeout
*
* @return The furi status.
*/
FuriStatus furi_message_queue_get(FuriMessageQueue* instance, void* msg_ptr, uint32_t timeout);
/** Get queue capacity
*
* @param instance pointer to FuriMessageQueue instance
*
* @return capacity in object count
*/
uint32_t furi_message_queue_get_capacity(FuriMessageQueue* instance);
/** Get message size
*
* @param instance pointer to FuriMessageQueue instance
*
* @return Message size in bytes
*/
uint32_t furi_message_queue_get_message_size(FuriMessageQueue* instance);
/** Get message count in queue
*
* @param instance pointer to FuriMessageQueue instance
*
* @return Message count
*/
uint32_t furi_message_queue_get_count(FuriMessageQueue* instance);
/** Get queue available space
*
* @param instance pointer to FuriMessageQueue instance
*
* @return Message count
*/
uint32_t furi_message_queue_get_space(FuriMessageQueue* instance);
/** Reset queue
*
* @param instance pointer to FuriMessageQueue instance
*
* @return The furi status.
*/
FuriStatus furi_message_queue_reset(FuriMessageQueue* instance);
#ifdef __cplusplus
}
#endif

125
components/furi/src/mutex.c Normal file
View File

@ -0,0 +1,125 @@
#include "mutex.h"

#include <stdint.h>

#include <freertos/FreeRTOS.h>
#include <freertos/semphr.h>

#include "check.h"
#include "common_defines.h"
/** Allocate a mutex of the given type. Crashes on allocation failure
 * or invalid type. Must not be called from IRQ context.
 *
 * Recursive mutexes are tagged by setting the handle's least
 * significant bit (the handle is a pointer, so that bit is otherwise
 * always zero). Fix: the tag round-trip now goes through uintptr_t
 * instead of uint32_t, so the pointer cannot be truncated on 64-bit
 * targets.
 *
 * @param[in] type FuriMutexTypeNormal or FuriMutexTypeRecursive
 * @return tagged FuriMutex handle
 */
FuriMutex* furi_mutex_alloc(FuriMutexType type) {
    furi_assert(!FURI_IS_IRQ_MODE());

    SemaphoreHandle_t hMutex = NULL;

    if(type == FuriMutexTypeNormal) {
        hMutex = xSemaphoreCreateMutex();
    } else if(type == FuriMutexTypeRecursive) {
        hMutex = xSemaphoreCreateRecursiveMutex();
    } else {
        furi_crash("Programming error");
    }

    furi_check(hMutex != NULL);

    if(type == FuriMutexTypeRecursive) {
        /* Set LSB as 'recursive mutex flag' */
        hMutex = (SemaphoreHandle_t)((uintptr_t)hMutex | 1U);
    }

    /* Return mutex ID */
    return ((FuriMutex*)hMutex);
}
/** Delete a mutex. Must not be called from IRQ context.
 * Fix: strip the recursive-mutex tag bit via uintptr_t (not uint32_t)
 * so the handle is not truncated on 64-bit targets. */
void furi_mutex_free(FuriMutex* instance) {
    furi_assert(!FURI_IS_IRQ_MODE());
    furi_assert(instance);
    vSemaphoreDelete((SemaphoreHandle_t)((uintptr_t)instance & ~(uintptr_t)1U));
}
/** Acquire a mutex, waiting up to `timeout` ticks.
 *
 * The recursive flag is packed into the handle's LSB by
 * furi_mutex_alloc(); fix: unpacking now uses uintptr_t instead of
 * uint32_t so the pointer is not truncated on 64-bit targets.
 *
 * @param instance tagged mutex handle
 * @param[in] timeout ticks to wait (0 = try once)
 * @return FuriStatusOk, FuriStatusErrorISR (called from IRQ),
 *         FuriStatusErrorParameter (NULL handle),
 *         FuriStatusErrorTimeout (timed wait expired) or
 *         FuriStatusErrorResource (immediate acquire failed)
 */
FuriStatus furi_mutex_acquire(FuriMutex* instance, uint32_t timeout) {
    /* Recover the real FreeRTOS handle and the recursive flag. */
    SemaphoreHandle_t hMutex = (SemaphoreHandle_t)((uintptr_t)instance & ~(uintptr_t)1U);
    uintptr_t rmtx = (uintptr_t)instance & 1U;
    FuriStatus stat = FuriStatusOk;

    if(FURI_IS_IRQ_MODE()) {
        stat = FuriStatusErrorISR;
    } else if(hMutex == NULL) {
        stat = FuriStatusErrorParameter;
    } else if(rmtx != 0U) {
        if(xSemaphoreTakeRecursive(hMutex, timeout) != pdPASS) {
            /* Distinguish a timed-out wait from an immediate failure. */
            stat = (timeout != 0U) ? FuriStatusErrorTimeout : FuriStatusErrorResource;
        }
    } else {
        if(xSemaphoreTake(hMutex, timeout) != pdPASS) {
            stat = (timeout != 0U) ? FuriStatusErrorTimeout : FuriStatusErrorResource;
        }
    }

    /* Return execution status */
    return stat;
}
/** Release a previously acquired mutex.
 *
 * Fix: the recursive-flag unpacking uses uintptr_t instead of uint32_t
 * so the pointer is not truncated on 64-bit targets.
 *
 * @param instance tagged mutex handle
 * @return FuriStatusOk, FuriStatusErrorISR, FuriStatusErrorParameter
 *         or FuriStatusErrorResource (give failed, e.g. not owner)
 */
FuriStatus furi_mutex_release(FuriMutex* instance) {
    /* Recover the real FreeRTOS handle and the recursive flag. */
    SemaphoreHandle_t hMutex = (SemaphoreHandle_t)((uintptr_t)instance & ~(uintptr_t)1U);
    uintptr_t rmtx = (uintptr_t)instance & 1U;
    FuriStatus stat = FuriStatusOk;

    if(FURI_IS_IRQ_MODE()) {
        stat = FuriStatusErrorISR;
    } else if(hMutex == NULL) {
        stat = FuriStatusErrorParameter;
    } else if(rmtx != 0U) {
        if(xSemaphoreGiveRecursive(hMutex) != pdPASS) {
            stat = FuriStatusErrorResource;
        }
    } else {
        if(xSemaphoreGive(hMutex) != pdPASS) {
            stat = FuriStatusErrorResource;
        }
    }

    /* Return execution status */
    return stat;
}
/** Get the thread currently holding the mutex.
 *
 * Fix: the tag-bit mask uses uintptr_t instead of uint32_t so the
 * pointer is not truncated on 64-bit targets.
 *
 * @param instance tagged mutex handle
 * @return owner thread id, or 0 when called from IRQ context, for a
 *         NULL handle, or when the mutex is not held
 */
FuriThreadId furi_mutex_get_owner(FuriMutex* instance) {
    SemaphoreHandle_t hMutex = (SemaphoreHandle_t)((uintptr_t)instance & ~(uintptr_t)1U);

    if(FURI_IS_IRQ_MODE() || (hMutex == NULL)) {
        return 0;
    }
    /* Return owner thread ID */
    return (FuriThreadId)xSemaphoreGetMutexHolder(hMutex);
}

View File

@ -0,0 +1,62 @@
/**
* @file mutex.h
* FuriMutex
*/
#pragma once
#include "base.h"
#include "thread.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
FuriMutexTypeNormal,
FuriMutexTypeRecursive,
} FuriMutexType;
typedef void FuriMutex;
/** Allocate FuriMutex
*
* @param[in] type The mutex type
*
* @return pointer to FuriMutex instance
*/
FuriMutex* furi_mutex_alloc(FuriMutexType type);
/** Free FuriMutex
*
* @param instance The pointer to FuriMutex instance
*/
void furi_mutex_free(FuriMutex* instance);
/** Acquire mutex
*
* @param instance The pointer to FuriMutex instance
* @param[in] timeout The timeout
*
* @return The furi status.
*/
FuriStatus furi_mutex_acquire(FuriMutex* instance, uint32_t timeout);
/** Release mutex
*
* @param instance The pointer to FuriMutex instance
*
* @return The furi status.
*/
FuriStatus furi_mutex_release(FuriMutex* instance);
/** Get mutex owner thread id
*
* @param instance The pointer to FuriMutex instance
*
* @return The furi thread identifier.
*/
FuriThreadId furi_mutex_get_owner(FuriMutex* instance);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,94 @@
#include "pubsub.h"
#include "check.h"
#include "mutex.h"
#include <m-list.h>
/* A single subscriber: the callback plus its opaque context pointer. */
struct FuriPubSubSubscription {
    FuriPubSubCallback callback;
    void* callback_context;
};
/* m-list container of subscriptions (plain-old-data elements). */
LIST_DEF(FuriPubSubSubscriptionList, FuriPubSubSubscription, M_POD_OPLIST);
struct FuriPubSub {
    FuriPubSubSubscriptionList_t items; /* subscriber list */
    FuriMutex* mutex; /* guards items against concurrent access */
};
/** Create an empty pub/sub instance with its guarding mutex. */
FuriPubSub* furi_pubsub_alloc() {
    FuriPubSub* instance = malloc(sizeof(FuriPubSub));
    instance->mutex = furi_mutex_alloc(FuriMutexTypeNormal);
    furi_assert(instance->mutex);
    /* Start with no subscribers. */
    FuriPubSubSubscriptionList_init(instance->items);
    return instance;
}
/** Destroy a pub/sub instance. All subscriptions must have been
 * cancelled first (enforced by the size check below). */
void furi_pubsub_free(FuriPubSub* pubsub) {
    furi_assert(pubsub);
    furi_check(FuriPubSubSubscriptionList_size(pubsub->items) == 0);
    FuriPubSubSubscriptionList_clear(pubsub->items);
    furi_mutex_free(pubsub->mutex);
    free(pubsub);
}
/** Register a callback to be invoked on every publish.
 *
 * Fix: added the argument asserts that the sibling
 * furi_pubsub_unsubscribe() already performs, so a NULL pubsub or
 * callback is caught here instead of inside the mutex/list code.
 *
 * @param pubsub pubsub instance
 * @param[in] callback invoked with (message, callback_context)
 * @param callback_context opaque pointer passed back to the callback
 * @return subscription token; pass it to furi_pubsub_unsubscribe()
 */
FuriPubSubSubscription*
furi_pubsub_subscribe(FuriPubSub* pubsub, FuriPubSubCallback callback, void* callback_context) {
    furi_assert(pubsub);
    furi_assert(callback);
    furi_check(furi_mutex_acquire(pubsub->mutex, FuriWaitForever) == FuriStatusOk);
    /* Append an uninitialized node, then fill it in place. */
    FuriPubSubSubscription* item = FuriPubSubSubscriptionList_push_raw(pubsub->items);
    item->callback = callback;
    item->callback_context = callback_context;
    furi_check(furi_mutex_release(pubsub->mutex) == FuriStatusOk);
    return item;
}
/** Cancel a subscription. Crashes (furi_check) when the token does not
 * belong to this pubsub. The token must not be used afterwards. */
void furi_pubsub_unsubscribe(FuriPubSub* pubsub, FuriPubSubSubscription* pubsub_subscription) {
    furi_assert(pubsub);
    furi_assert(pubsub_subscription);
    furi_check(furi_mutex_acquire(pubsub->mutex, FuriWaitForever) == FuriStatusOk);
    bool result = false;
    // iterate over items
    FuriPubSubSubscriptionList_it_t it;
    for(FuriPubSubSubscriptionList_it(it, pubsub->items); !FuriPubSubSubscriptionList_end_p(it);
        FuriPubSubSubscriptionList_next(it)) {
        const FuriPubSubSubscription* item = FuriPubSubSubscriptionList_cref(it);
        // if the iterator is equal to our element
        /* Compare by address: the token is the list node itself, as
         * returned by furi_pubsub_subscribe(). */
        if(item == pubsub_subscription) {
            FuriPubSubSubscriptionList_remove(pubsub->items, it);
            result = true;
            break;
        }
    }
    furi_check(furi_mutex_release(pubsub->mutex) == FuriStatusOk);
    furi_check(result);
}
/** Deliver a message to every subscriber, in subscription order.
 *
 * Fix: added furi_assert(pubsub) for consistency with
 * furi_pubsub_unsubscribe()/furi_pubsub_free().
 *
 * The (non-recursive, see furi_pubsub_alloc) mutex is held while the
 * callbacks run, so callbacks must not call back into this pubsub.
 *
 * @param pubsub pubsub instance
 * @param message pointer handed to every callback
 */
void furi_pubsub_publish(FuriPubSub* pubsub, void* message) {
    furi_assert(pubsub);
    furi_check(furi_mutex_acquire(pubsub->mutex, FuriWaitForever) == FuriStatusOk);
    // iterate over subscribers
    FuriPubSubSubscriptionList_it_t it;
    for(FuriPubSubSubscriptionList_it(it, pubsub->items); !FuriPubSubSubscriptionList_end_p(it);
        FuriPubSubSubscriptionList_next(it)) {
        const FuriPubSubSubscription* item = FuriPubSubSubscriptionList_cref(it);
        item->callback(message, item->callback_context);
    }
    furi_check(furi_mutex_release(pubsub->mutex) == FuriStatusOk);
}

View File

@ -0,0 +1,68 @@
/**
* @file pubsub.h
* FuriPubSub
*/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/** FuriPubSub Callback type */
typedef void (*FuriPubSubCallback)(const void* message, void* context);
/** FuriPubSub type */
typedef struct FuriPubSub FuriPubSub;
/** FuriPubSubSubscription type */
typedef struct FuriPubSubSubscription FuriPubSubSubscription;
/** Allocate FuriPubSub
*
 * Reentrant, not thread-safe, single owner
*
* @return pointer to FuriPubSub instance
*/
FuriPubSub* furi_pubsub_alloc();
/** Free FuriPubSub
*
* @param pubsub FuriPubSub instance
*/
void furi_pubsub_free(FuriPubSub* pubsub);
/** Subscribe to FuriPubSub
*
 * Thread-safe, reentrant
*
* @param pubsub pointer to FuriPubSub instance
* @param[in] callback The callback
* @param callback_context The callback context
*
* @return pointer to FuriPubSubSubscription instance
*/
FuriPubSubSubscription*
furi_pubsub_subscribe(FuriPubSub* pubsub, FuriPubSubCallback callback, void* callback_context);
/** Unsubscribe from FuriPubSub
*
* No use of `pubsub_subscription` allowed after call of this method
 * Thread-safe, reentrant.
*
* @param pubsub pointer to FuriPubSub instance
* @param pubsub_subscription pointer to FuriPubSubSubscription instance
*/
void furi_pubsub_unsubscribe(FuriPubSub* pubsub, FuriPubSubSubscription* pubsub_subscription);
/** Publish message to FuriPubSub
*
 * Thread-safe, reentrant.
*
* @param pubsub pointer to FuriPubSub instance
* @param message message pointer to publish
*/
void furi_pubsub_publish(FuriPubSub* pubsub, void* message);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,145 @@
#include "record.h"
#include "check.h"
#include "mutex.h"
#include "event_flag.h"
#include <m-dict.h>
#include "m_cstr_dup.h"
/* Event flag set on a record once its data pointer is available. */
#define FURI_RECORD_FLAG_READY (0x1)
typedef struct {
    FuriEventFlag* flags; // signals readiness to blocked openers
    void* data; // payload registered via furi_record_create()
    size_t holders_count; // opens not yet matched by a close
} FuriRecordData;
/* name -> FuriRecordData dictionary; keys are duplicated C strings. */
DICT_DEF2(FuriRecordDataDict, const char*, M_CSTR_DUP_OPLIST, FuriRecordData, M_POD_OPLIST)
typedef struct {
    FuriMutex* mutex; // guards the records dictionary
    FuriRecordDataDict_t records;
} FuriRecord;
// Process-wide singleton, created by furi_record_init().
static FuriRecord* furi_record = NULL;
// Look up a record by name; NULL when absent. Callers hold the mutex.
static FuriRecordData* furi_record_get(const char* name) {
    return FuriRecordDataDict_get(furi_record->records, name);
}
// Insert/overwrite a record entry (the dict stores a copy of *record_data).
static void furi_record_put(const char* name, FuriRecordData* record_data) {
    FuriRecordDataDict_set_at(furi_record->records, name, *record_data);
}
// Free the record's event flag and drop its dictionary entry.
static void furi_record_erase(const char* name, FuriRecordData* record_data) {
    furi_event_flag_free(record_data->flags);
    FuriRecordDataDict_erase(furi_record->records, name);
}
/** Create the process-wide record registry and its lock.
 * For internal use only; must run before any other record API. */
void furi_record_init() {
    furi_record = malloc(sizeof(FuriRecord));
    furi_record->mutex = furi_mutex_alloc(FuriMutexTypeNormal);
    furi_check(furi_record->mutex);
    FuriRecordDataDict_init(furi_record->records);
}
// Return the entry for `name`, creating an empty (not-yet-ready) one on
// first use. Callers hold the registry lock.
static FuriRecordData* furi_record_data_get_or_create(const char* name) {
    furi_assert(furi_record);
    FuriRecordData* record_data = furi_record_get(name);
    if(!record_data) {
        /* Placeholder: READY stays unset until furi_record_create()
         * supplies the data pointer. */
        FuriRecordData new_record;
        new_record.flags = furi_event_flag_alloc();
        new_record.data = NULL;
        new_record.holders_count = 0;
        furi_record_put(name, &new_record);
        /* The dict stored a copy - fetch the stable in-dict pointer. */
        record_data = furi_record_get(name);
    }
    return record_data;
}
// Acquire the registry mutex (waits forever; crashes on failure).
static void furi_record_lock() {
    furi_check(furi_mutex_acquire(furi_record->mutex, FuriWaitForever) == FuriStatusOk);
}
// Release the registry mutex (crashes on failure).
static void furi_record_unlock() {
    furi_check(furi_mutex_release(furi_record->mutex) == FuriStatusOk);
}
/** Check whether a record with the given name exists.
 * Thread-safe: probes the dictionary under the registry lock. */
bool furi_record_exists(const char* name) {
    furi_assert(furi_record);
    furi_assert(name);
    furi_record_lock();
    const bool found = (furi_record_get(name) != NULL);
    furi_record_unlock();
    return found;
}
/** Publish a data pointer under the given name.
 * A record may be created only once per name (asserted below). */
void furi_record_create(const char* name, void* data) {
    furi_assert(furi_record);
    furi_record_lock();
    // Get record data and fill it
    FuriRecordData* record_data = furi_record_data_get_or_create(name);
    furi_assert(record_data->data == NULL);
    record_data->data = data;
    /* Wake any threads blocked in furi_record_open() on this name. */
    furi_event_flag_set(record_data->flags, FURI_RECORD_FLAG_READY);
    furi_record_unlock();
}
/** Remove a record. Refuses (returns false) while any opener still
 * holds it. Asserts that the record exists. */
bool furi_record_destroy(const char* name) {
    furi_assert(furi_record);
    bool ret = false;
    furi_record_lock();
    FuriRecordData* record_data = furi_record_get(name);
    furi_assert(record_data);
    /* Only destroy when no furi_record_open() is outstanding. */
    if(record_data->holders_count == 0) {
        furi_record_erase(name, record_data);
        ret = true;
    }
    furi_record_unlock();
    return ret;
}
/** Open a record, blocking until it has been created.
 * Increments the holder count, then waits (outside the lock) for the
 * READY flag. Returns the data pointer registered by
 * furi_record_create(). */
void* furi_record_open(const char* name) {
    furi_assert(furi_record);
    furi_record_lock();
    /* Register interest first so furi_record_destroy() cannot remove
     * the entry while we wait. */
    FuriRecordData* record_data = furi_record_data_get_or_create(name);
    record_data->holders_count++;
    furi_record_unlock();
    // Wait for record to become ready
    /* NoClear leaves the flag set so other waiters wake up too. */
    furi_check(
        furi_event_flag_wait(
            record_data->flags,
            FURI_RECORD_FLAG_READY,
            FuriFlagWaitAny | FuriFlagNoClear,
            FuriWaitForever) == FURI_RECORD_FLAG_READY);
    return record_data->data;
}
/** Close a record previously opened with furi_record_open().
 *
 * Fix: assert the holder count is non-zero before decrementing. An
 * unbalanced close would wrap the unsigned size_t counter to a huge
 * value and permanently prevent furi_record_destroy() from succeeding;
 * now the misuse is caught at the call site (debug builds).
 *
 * @param name record name
 */
void furi_record_close(const char* name) {
    furi_assert(furi_record);
    furi_record_lock();
    FuriRecordData* record_data = furi_record_get(name);
    furi_assert(record_data);
    furi_assert(record_data->holders_count > 0);
    record_data->holders_count--;
    furi_record_unlock();
}

View File

@ -0,0 +1,67 @@
/**
* @file record.h
* Furi: record API
*/
#pragma once
#include <stdbool.h>
#include "core_defines.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Initialize record storage For internal use only.
*/
void furi_record_init();
/** Check if record exists
*
* @param name record name
* @note Thread safe. Create and destroy must be executed from the same
* thread.
*/
bool furi_record_exists(const char* name);
/** Create record
*
* @param name record name
* @param data data pointer
* @note Thread safe. Create and destroy must be executed from the same
* thread.
*/
void furi_record_create(const char* name, void* data);
/** Destroy record
*
* @param name record name
*
* @return true if successful, false if still have holders or thread is not
* owner.
* @note Thread safe. Create and destroy must be executed from the same
* thread.
*/
bool furi_record_destroy(const char* name);
/** Open record
*
* @param name record name
*
* @return pointer to the record
* @note Thread safe. Open and close must be executed from the same
* thread. Suspends caller thread till record is available
*/
FURI_RETURNS_NONNULL void* furi_record_open(const char* name);
/** Close record
*
* @param name record name
* @note Thread safe. Open and close must be executed from the same
* thread.
*/
void furi_record_close(const char* name);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,116 @@
#include "semaphore.h"
#include "check.h"
#include "common_defines.h"
#include <freertos/FreeRTOS.h>
#include <freertos/semphr.h>
/** Allocate a binary (max_count == 1) or counting semaphore.
 * Must be called from task context. Crashes (furi_check) on failure. */
FuriSemaphore* furi_semaphore_alloc(uint32_t max_count, uint32_t initial_count) {
    furi_assert(!FURI_IS_IRQ_MODE());
    furi_assert((max_count > 0U) && (initial_count <= max_count));
    SemaphoreHandle_t hSemaphore = NULL;
    if(max_count == 1U) {
        // FreeRTOS binary semaphores start empty; a non-zero initial count is
        // applied by giving once. Roll the allocation back if the give fails.
        hSemaphore = xSemaphoreCreateBinary();
        if((hSemaphore != NULL) && (initial_count != 0U)) {
            if(xSemaphoreGive(hSemaphore) != pdPASS) {
                vSemaphoreDelete(hSemaphore);
                hSemaphore = NULL;
            }
        }
    } else {
        hSemaphore = xSemaphoreCreateCounting(max_count, initial_count);
    }
    // Allocation failure (or rollback above) is fatal.
    furi_check(hSemaphore);
    /* Return semaphore ID */
    return ((FuriSemaphore*)hSemaphore);
}
/** Delete a semaphore. Must be called from task context. */
void furi_semaphore_free(FuriSemaphore* instance) {
    furi_assert(instance);
    furi_assert(!FURI_IS_IRQ_MODE());
    vSemaphoreDelete((SemaphoreHandle_t)instance);
}
/** Take the semaphore.
 * From an ISR only timeout == 0 is allowed; otherwise blocks up to `timeout`
 * ticks. Returns FuriStatusOk, ...ErrorParameter, ...ErrorTimeout or
 * ...ErrorResource. */
FuriStatus furi_semaphore_acquire(FuriSemaphore* instance, uint32_t timeout) {
    furi_assert(instance);
    SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)instance;
    FuriStatus stat;
    BaseType_t yield;
    stat = FuriStatusOk;
    if(FURI_IS_IRQ_MODE()) {
        // Blocking is impossible in an ISR: any non-zero timeout is an error.
        if(timeout != 0U) {
            stat = FuriStatusErrorParameter;
        } else {
            yield = pdFALSE;
            if(xSemaphoreTakeFromISR(hSemaphore, &yield) != pdPASS) {
                stat = FuriStatusErrorResource;
            } else {
                // Request a context switch if the take unblocked a higher-prio task.
                portYIELD_FROM_ISR(yield);
            }
        }
    } else {
        if(xSemaphoreTake(hSemaphore, (TickType_t)timeout) != pdPASS) {
            // Distinguish "waited and timed out" from "not available right now".
            if(timeout != 0U) {
                stat = FuriStatusErrorTimeout;
            } else {
                stat = FuriStatusErrorResource;
            }
        }
    }
    /* Return execution status */
    return (stat);
}
/** Give the semaphore back. ISR-safe.
 * Returns FuriStatusOk or FuriStatusErrorResource (already at max count). */
FuriStatus furi_semaphore_release(FuriSemaphore* instance) {
    furi_assert(instance);
    SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)instance;
    FuriStatus stat;
    BaseType_t yield;
    stat = FuriStatusOk;
    if(FURI_IS_IRQ_MODE()) {
        yield = pdFALSE;
        if(xSemaphoreGiveFromISR(hSemaphore, &yield) != pdTRUE) {
            stat = FuriStatusErrorResource;
        } else {
            portYIELD_FROM_ISR(yield);
        }
    } else {
        if(xSemaphoreGive(hSemaphore) != pdPASS) {
            stat = FuriStatusErrorResource;
        }
    }
    /* Return execution status */
    return (stat);
}
//uint32_t furi_semaphore_get_count(FuriSemaphore* instance) {
// furi_assert(instance);
//
// SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)instance;
// uint32_t count;
//
// if(FURI_IS_IRQ_MODE()) {
// count = (uint32_t)uxSemaphoreGetCountFromISR(hSemaphore);
// } else {
// count = (uint32_t)uxSemaphoreGetCount(hSemaphore);
// }
//
// /* Return number of tokens */
// return (count);
//}

View File

@ -0,0 +1,58 @@
/**
* @file semaphore.h
* FuriSemaphore
*/
#pragma once
#include "base.h"
#include "thread.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void FuriSemaphore;
/** Allocate semaphore
*
* @param[in] max_count The maximum count
* @param[in] initial_count The initial count
*
* @return pointer to FuriSemaphore instance
*/
FuriSemaphore* furi_semaphore_alloc(uint32_t max_count, uint32_t initial_count);
/** Free semaphore
*
* @param instance The pointer to FuriSemaphore instance
*/
void furi_semaphore_free(FuriSemaphore* instance);
/** Acquire semaphore
*
* @param instance The pointer to FuriSemaphore instance
* @param[in] timeout The timeout
*
* @return The furi status.
*/
FuriStatus furi_semaphore_acquire(FuriSemaphore* instance, uint32_t timeout);
/** Release semaphore
*
* @param instance The pointer to FuriSemaphore instance
*
* @return The furi status.
*/
FuriStatus furi_semaphore_release(FuriSemaphore* instance);
///** Get semaphore count
// *
// * @param instance The pointer to FuriSemaphore instance
// *
// * @return Semaphore count
// */
//uint32_t furi_semaphore_get_count(FuriSemaphore* instance);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,86 @@
#include "base.h"
#include "check.h"
#include "stream_buffer.h"
#include "common_defines.h"
#include <freertos/FreeRTOS.h>
#include <freertos/stream_buffer.h>
/** Allocate a stream buffer. Crashes (furi_check) on allocation failure. */
FuriStreamBuffer* furi_stream_buffer_alloc(size_t size, size_t trigger_level) {
    furi_assert(size != 0);
    StreamBufferHandle_t handle = xStreamBufferCreate(size, trigger_level);
    furi_check(handle);
    return handle;
}

/** Delete a stream buffer. */
void furi_stream_buffer_free(FuriStreamBuffer* stream_buffer) {
    furi_assert(stream_buffer);
    vStreamBufferDelete(stream_buffer);
}

/** Update the trigger level; returns false if it exceeds the buffer length. */
bool furi_stream_set_trigger_level(FuriStreamBuffer* stream_buffer, size_t trigger_level) {
    furi_assert(stream_buffer);
    return xStreamBufferSetTriggerLevel(stream_buffer, trigger_level) == pdTRUE;
}

/** Copy up to `length` bytes into the buffer; `timeout` is ignored in ISR
 * context. Returns the number of bytes actually written. */
size_t furi_stream_buffer_send(
    FuriStreamBuffer* stream_buffer,
    const void* data,
    size_t length,
    uint32_t timeout) {
    furi_assert(stream_buffer);
    size_t ret;
    if(FURI_IS_IRQ_MODE()) {
        BaseType_t yield;
        ret = xStreamBufferSendFromISR(stream_buffer, data, length, &yield);
        // Switch to a woken reader task on ISR exit if needed.
        portYIELD_FROM_ISR(yield);
    } else {
        ret = xStreamBufferSend(stream_buffer, data, length, timeout);
    }
    return ret;
}

/** Read up to `length` bytes from the buffer; `timeout` is ignored in ISR
 * context. Returns the number of bytes actually read. */
size_t furi_stream_buffer_receive(
    FuriStreamBuffer* stream_buffer,
    void* data,
    size_t length,
    uint32_t timeout) {
    furi_assert(stream_buffer);
    size_t ret;
    if(FURI_IS_IRQ_MODE()) {
        BaseType_t yield;
        ret = xStreamBufferReceiveFromISR(stream_buffer, data, length, &yield);
        portYIELD_FROM_ISR(yield);
    } else {
        ret = xStreamBufferReceive(stream_buffer, data, length, timeout);
    }
    return ret;
}

/** Number of bytes currently stored (readable without blocking). */
size_t furi_stream_buffer_bytes_available(FuriStreamBuffer* stream_buffer) {
    furi_assert(stream_buffer);
    return xStreamBufferBytesAvailable(stream_buffer);
}

/** Number of free bytes (writable without blocking). */
size_t furi_stream_buffer_spaces_available(FuriStreamBuffer* stream_buffer) {
    furi_assert(stream_buffer);
    return xStreamBufferSpacesAvailable(stream_buffer);
}

bool furi_stream_buffer_is_full(FuriStreamBuffer* stream_buffer) {
    furi_assert(stream_buffer);
    return xStreamBufferIsFull(stream_buffer) == pdTRUE;
}

bool furi_stream_buffer_is_empty(FuriStreamBuffer* stream_buffer) {
    furi_assert(stream_buffer);
    return xStreamBufferIsEmpty(stream_buffer) == pdTRUE;
}

/** Reset to empty. Fails if any task is blocked on the buffer. */
FuriStatus furi_stream_buffer_reset(FuriStreamBuffer* stream_buffer) {
    furi_assert(stream_buffer);
    if(xStreamBufferReset(stream_buffer) == pdPASS) {
        return FuriStatusOk;
    } else {
        return FuriStatusError;
    }
}

View File

@ -0,0 +1,152 @@
/**
* @file stream_buffer.h
* Furi stream buffer primitive.
*
* Stream buffers are used to send a continuous stream of data from one task or
* interrupt to another. Their implementation is light weight, making them
* particularly suited for interrupt to task and core to core communication
* scenarios.
*
* ***NOTE***: Stream buffer implementation assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader).
*/
#pragma once
#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef void FuriStreamBuffer;
/**
* @brief Allocate stream buffer instance.
* Stream buffer implementation assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader).
*
* @param size The total number of bytes the stream buffer will be able to hold at any one time.
* @param trigger_level The number of bytes that must be in the stream buffer
* before a task that is blocked on the stream buffer to wait for data is moved out of the blocked state.
* @return The stream buffer instance.
*/
FuriStreamBuffer* furi_stream_buffer_alloc(size_t size, size_t trigger_level);
/**
* @brief Free stream buffer instance
*
* @param stream_buffer The stream buffer instance.
*/
void furi_stream_buffer_free(FuriStreamBuffer* stream_buffer);
/**
* @brief Set trigger level for stream buffer.
* A stream buffer's trigger level is the number of bytes that must be in the
* stream buffer before a task that is blocked on the stream buffer to
* wait for data is moved out of the blocked state.
*
* @param stream_buffer The stream buffer instance
* @param trigger_level The new trigger level for the stream buffer.
 * @return true if trigger level can be updated (new trigger level was less than or equal to the stream buffer's length).
 * @return false if trigger level can't be updated (new trigger level was greater than the stream buffer's length).
*/
bool furi_stream_set_trigger_level(FuriStreamBuffer* stream_buffer, size_t trigger_level);
/**
* @brief Sends bytes to a stream buffer. The bytes are copied into the stream buffer.
* Wakes up task waiting for data to become available if called from ISR.
*
* @param stream_buffer The stream buffer instance.
* @param data A pointer to the data that is to be copied into the stream buffer.
* @param length The maximum number of bytes to copy from data into the stream buffer.
* @param timeout The maximum amount of time the task should remain in the
* Blocked state to wait for space to become available if the stream buffer is full.
* Will return immediately if timeout is zero.
* Setting timeout to FuriWaitForever will cause the task to wait indefinitely.
* Ignored if called from ISR.
* @return The number of bytes actually written to the stream buffer.
*/
size_t furi_stream_buffer_send(
FuriStreamBuffer* stream_buffer,
const void* data,
size_t length,
uint32_t timeout);
/**
* @brief Receives bytes from a stream buffer.
* Wakes up task waiting for space to become available if called from ISR.
*
* @param stream_buffer The stream buffer instance.
* @param data A pointer to the buffer into which the received bytes will be
* copied.
* @param length The length of the buffer pointed to by the data parameter.
* @param timeout The maximum amount of time the task should remain in the
* Blocked state to wait for data to become available if the stream buffer is empty.
* Will return immediately if timeout is zero.
* Setting timeout to FuriWaitForever will cause the task to wait indefinitely.
* Ignored if called from ISR.
* @return The number of bytes read from the stream buffer, if any.
*/
size_t furi_stream_buffer_receive(
FuriStreamBuffer* stream_buffer,
void* data,
size_t length,
uint32_t timeout);
/**
* @brief Queries a stream buffer to see how much data it contains, which is equal to
* the number of bytes that can be read from the stream buffer before the stream
* buffer would be empty.
*
* @param stream_buffer The stream buffer instance.
* @return The number of bytes that can be read from the stream buffer before
* the stream buffer would be empty.
*/
size_t furi_stream_buffer_bytes_available(FuriStreamBuffer* stream_buffer);
/**
* @brief Queries a stream buffer to see how much free space it contains, which is
* equal to the amount of data that can be sent to the stream buffer before it
* is full.
*
* @param stream_buffer The stream buffer instance.
* @return The number of bytes that can be written to the stream buffer before
* the stream buffer would be full.
*/
size_t furi_stream_buffer_spaces_available(FuriStreamBuffer* stream_buffer);
/**
* @brief Queries a stream buffer to see if it is full.
*
* @param stream_buffer stream buffer instance.
* @return true if the stream buffer is full.
* @return false if the stream buffer is not full.
*/
bool furi_stream_buffer_is_full(FuriStreamBuffer* stream_buffer);
/**
* @brief Queries a stream buffer to see if it is empty.
*
* @param stream_buffer The stream buffer instance.
* @return true if the stream buffer is empty.
* @return false if the stream buffer is not empty.
*/
bool furi_stream_buffer_is_empty(FuriStreamBuffer* stream_buffer);
/**
* @brief Resets a stream buffer to its initial, empty, state. Any data that was
* in the stream buffer is discarded. A stream buffer can only be reset if there
* are no tasks blocked waiting to either send to or receive from the stream buffer.
*
* @param stream_buffer The stream buffer instance.
* @return FuriStatusOk if the stream buffer is reset.
* @return FuriStatusError if there was a task blocked waiting to send to or read
* from the stream buffer then the stream buffer is not reset.
*/
FuriStatus furi_stream_buffer_reset(FuriStreamBuffer* stream_buffer);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,655 @@
#include "thread.h"
#include "kernel.h"
#include "check.h"
#include "common_defines.h"
#include "furi_string.h"
#include <esp_log.h>
#include <furi_hal_console.h>
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#define TAG "FuriThread"
#define THREAD_NOTIFY_INDEX 1 // Index 0 is used for stream buffers
typedef struct FuriThreadStdout FuriThreadStdout;
// Per-thread stdout state: optional sink callback and a line buffer.
struct FuriThreadStdout {
    FuriThreadStdoutWriteCallback write_callback; // NULL -> HAL console output
    FuriString* buffer; // accumulates partial lines until '\n' or flush
};
// Thread control block. Stored in FreeRTOS thread-local storage slot 0.
struct FuriThread {
    FuriThreadState state;
    int32_t ret; // callback return code, valid once state == Stopped
    FuriThreadCallback callback;
    void* context;
    FuriThreadStateCallback state_callback;
    void* state_context;
    char* name; // owned, strdup'ed
    char* appid; // owned, strdup'ed; inherited from parent thread
    FuriThreadPriority priority;
    TaskHandle_t task_handle; // NULL while stopped
    size_t heap_size;
    FuriThreadStdout output;
    // Keep all non-alignable byte types in one place,
    // this ensures that the size of this structure is minimal
    bool is_service;
    bool heap_trace_enabled;
    configSTACK_DEPTH_TYPE stack_size; // in bytes (converted to words on start)
};
// NOTE(review): double-underscore identifiers are reserved for the
// implementation; consider renaming these helpers.
static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size);
static int32_t __furi_thread_stdout_flush(FuriThread* thread);
/** Catch threads that are trying to exit wrong way */
__attribute__((__noreturn__)) void furi_thread_catch() { //-V1082
    // If you're here it means you're probably doing something wrong
    // with critical sections or with scheduler state
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong"); //-V779
    __builtin_unreachable();
}
// Transition the thread state and notify the registered observer, if any.
static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
    furi_assert(thread);
    thread->state = state;
    if(thread->state_callback) {
        thread->state_callback(state, thread->state_context);
    }
}
// FreeRTOS task entry point: runs the user callback and manages the
// Starting -> Running -> Stopped lifecycle for the FuriThread wrapper.
static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;
    // store thread instance to thread local storage (slot 0) so that
    // furi_thread_get_current() works from within the callback
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) == NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, thread);
    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);
    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    // Heap tracing is not ported yet; hooks kept for reference.
    // if(thread->heap_trace_enabled == true) {
    //     memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    // }
    thread->ret = thread->callback(thread->context);
    // if(thread->heap_trace_enabled == true) {
    //     furi_delay_ms(33);
    //     thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
    //     furi_log_print_format(
    //         thread->heap_size ? FuriLogLevelError : FuriLogLevelInfo,
    //         TAG,
    //         "%s allocation balance: %zu",
    //         thread->name ? thread->name : "Thread",
    //         thread->heap_size);
    //     memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    // }
    furi_assert(thread->state == FuriThreadStateRunning);
    if(thread->is_service) {
        // Service stacks/TCBs are heap blocks passed to xTaskCreateStatic and
        // are deliberately leaked — services are never expected to exit.
        ESP_LOGI(
            TAG,
            "%s service thread TCB memory will not be reclaimed",
            thread->name ? thread->name : "<unnamed service>");
    }
    // flush stdout before the task disappears
    __furi_thread_stdout_flush(thread);
    furi_thread_set_state(thread, FuriThreadStateStopped);
    // Self-delete; the TCB cleanup hook clears task_handle, unblocking join().
    vTaskDelete(NULL);
    furi_thread_catch();
}
/** Allocate a thread control block in the Stopped state.
 * The appid is inherited from the spawning thread when available. */
FuriThread* furi_thread_alloc(void) {
    // calloc yields a fully zeroed block (state = FuriThreadStateStopped,
    // no name/appid/callbacks) — replaces malloc + memset.
    FuriThread* thread = calloc(1, sizeof(FuriThread));
    furi_check(thread);
    thread->output.buffer = furi_string_alloc();
    thread->is_service = false;
    FuriThread* parent = NULL;
    if(xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
        // TLS is not available if we are called outside of thread context;
        // inherit the appid from the spawning thread when possible.
        parent = pvTaskGetThreadLocalStoragePointer(NULL, 0);
        if(parent && parent->appid) {
            furi_thread_set_appid(thread, parent->appid);
        } else {
            furi_thread_set_appid(thread, "unknown");
        }
    } else {
        // if scheduler is not started, we are starting a driver thread
        furi_thread_set_appid(thread, "driver");
    }
    // Heap tracking is not ported to this platform yet; keep it disabled.
    thread->heap_trace_enabled = false;
    return thread;
}
/** Convenience constructor: allocate a thread and configure it in one call. */
FuriThread* furi_thread_alloc_ex(
    const char* name,
    uint32_t stack_size,
    FuriThreadCallback callback,
    void* context) {
    FuriThread* instance = furi_thread_alloc();
    furi_thread_set_callback(instance, callback);
    furi_thread_set_context(instance, context);
    furi_thread_set_name(instance, name);
    furi_thread_set_stack_size(instance, stack_size);
    return instance;
}
/** Release a thread control block.
 * The thread must be joined (stopped, TCB reclaimed) before calling this. */
void furi_thread_free(FuriThread* thread) {
    furi_assert(thread);
    // Ensure that use join before free
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->task_handle == NULL);
    // free(NULL) is a no-op — the previous `if` guards were redundant.
    free(thread->name);
    free(thread->appid);
    furi_string_free(thread->output.buffer);
    free(thread);
}

/** Set the thread name (copied). Only valid while stopped. */
void furi_thread_set_name(FuriThread* thread, const char* name) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    free(thread->name);
    thread->name = name ? strdup(name) : NULL;
}

/** Set the thread appid (copied). Only valid while stopped. */
void furi_thread_set_appid(FuriThread* thread, const char* appid) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    free(thread->appid);
    thread->appid = appid ? strdup(appid) : NULL;
}
/** Mark the thread as a service: it must never exit and its TCB/stack are
 * never reclaimed. */
void furi_thread_mark_as_service(FuriThread* thread) {
    // Consistency fix: validate the handle like every other setter does.
    furi_assert(thread);
    thread->is_service = true;
}

/** Check whether the task behind `thread_id` is a service thread. */
bool furi_thread_mark_is_service(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    // Use furi_assert for consistency with the rest of this module
    // (previously used bare assert()).
    furi_assert(!FURI_IS_IRQ_MODE() && (hTask != NULL));
    FuriThread* thread = (FuriThread*)pvTaskGetThreadLocalStoragePointer(hTask, 0);
    furi_assert(thread != NULL);
    return thread->is_service;
}
// --- Configuration setters: all require the thread to be stopped. ---
/** Set the stack size in bytes; must be word-aligned. */
void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(stack_size % 4 == 0);
    thread->stack_size = stack_size;
}
void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->callback = callback;
}
void furi_thread_set_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->context = context;
}
void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
    thread->priority = priority;
}
/** Change the priority of the calling task; 0 (None) means Normal. */
void furi_thread_set_current_priority(FuriThreadPriority priority) {
    UBaseType_t new_priority = priority ? priority : FuriThreadPriorityNormal;
    vTaskPrioritySet(NULL, new_priority);
}
FuriThreadPriority furi_thread_get_current_priority() {
    return (FuriThreadPriority)uxTaskPriorityGet(NULL);
}
void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_callback = callback;
}
void furi_thread_set_state_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_context = context;
}
FuriThreadState furi_thread_get_state(FuriThread* thread) {
    furi_assert(thread);
    return thread->state;
}
/** Start a configured thread.
 * Service threads are created statically from heap blocks that are never
 * reclaimed; regular threads use the dynamic FreeRTOS allocator. */
void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->stack_size > 0 && thread->stack_size < (UINT16_MAX * sizeof(StackType_t)));
    furi_thread_set_state(thread, FuriThreadStateStarting);
    // FreeRTOS measures stack depth in words, not bytes.
    uint32_t stack_depth = thread->stack_size / sizeof(StackType_t);
    UBaseType_t priority = thread->priority ? thread->priority : FuriThreadPriorityNormal;
    if(thread->is_service) {
        // Bug fix: check the stack/TCB allocations instead of passing
        // potentially-NULL pointers straight to xTaskCreateStatic.
        StackType_t* stack_buffer = malloc(sizeof(StackType_t) * stack_depth);
        StaticTask_t* task_buffer = malloc(sizeof(StaticTask_t));
        furi_check(stack_buffer && task_buffer);
        thread->task_handle = xTaskCreateStatic(
            furi_thread_body,
            thread->name,
            stack_depth,
            thread,
            priority,
            stack_buffer,
            task_buffer);
    } else {
        BaseType_t ret = xTaskCreate(
            furi_thread_body, thread->name, stack_depth, thread, priority, &thread->task_handle);
        furi_check(ret == pdPASS);
    }
    furi_check(thread->task_handle);
}
// Called from the FreeRTOS TCB cleanup hook when a task is deleted:
// unlink the FuriThread from the dying task so that join() can complete.
void furi_thread_cleanup_tcb_event(TaskHandle_t task) {
    FuriThread* thread = pvTaskGetThreadLocalStoragePointer(task, 0);
    if(thread) {
        // clear thread local storage
        vTaskSetThreadLocalStoragePointer(task, 0, NULL);
        furi_assert(thread->task_handle == task);
        thread->task_handle = NULL;
    }
}
/** Block until the thread has fully stopped. A thread cannot join itself. */
bool furi_thread_join(FuriThread* thread) {
    furi_assert(thread);
    furi_check(furi_thread_get_current() != thread);
    // !!! IMPORTANT NOTICE !!!
    //
    // If your thread exited, but your app stuck here: some other thread uses
    // all cpu time, which delays kernel from releasing task handle
    while(thread->task_handle) {
        furi_delay_ms(10);
    }
    return true;
}
/** Underlying FreeRTOS task handle; NULL while the thread is stopped. */
FuriThreadId furi_thread_get_id(FuriThread* thread) {
    furi_assert(thread);
    return thread->task_handle;
}
// NOTE(review): heap tracing hooks are currently commented out in
// furi_thread_body, so heap_size is never updated on this platform.
void furi_thread_enable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->heap_trace_enabled = true;
}
void furi_thread_disable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->heap_trace_enabled = false;
}
size_t furi_thread_get_heap_size(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->heap_trace_enabled == true);
    return thread->heap_size;
}
/** Callback return code; only meaningful once the thread has stopped. */
int32_t furi_thread_get_return_code(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    return thread->ret;
}
FuriThreadId furi_thread_get_current_id() {
    return xTaskGetCurrentTaskHandle();
}
/** FuriThread of the calling task, or NULL for non-Furi tasks. */
FuriThread* furi_thread_get_current() {
    FuriThread* thread = pvTaskGetThreadLocalStoragePointer(NULL, 0);
    return thread;
}
/** Cooperatively yield the CPU. Not callable from an ISR. */
void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}
/* Limits */
#define MAX_BITS_TASK_NOTIFY 31U
#define MAX_BITS_EVENT_GROUPS 24U
// Masks of bits that callers are not allowed to use in flag values.
#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
/** OR `flags` into the target thread's notification value (CMSIS-RTOS2 style).
 * ISR-safe. Returns the flags after setting, or a FuriStatus error code cast
 * to uint32_t on bad parameters. */
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;
    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        rflags = (uint32_t)FuriStatusError;
        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;
            // Set the bits, then query the resulting value with a no-op notify.
            (void)xTaskNotifyIndexedFromISR(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryIndexedFromISR(
                hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);
            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits);
            (void)xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags);
        }
    }
    /* Return flags after setting */
    return (rflags);
}
/** Clear `flags` in the calling thread's notification value.
 * Not callable from an ISR. Returns the flags *before* clearing. */
uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;
    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();
        // Read-modify-write of the notification value via query + overwrite.
        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &cflags) ==
           pdPASS) {
            rflags = cflags;
            cflags &= ~flags;
            if(xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) !=
               pdPASS) {
                rflags = (uint32_t)FuriStatusError;
            }
        } else {
            rflags = (uint32_t)FuriStatusError;
        }
    }
    /* Return flags before clearing */
    return (rflags);
}
/** Current notification flags of the calling thread. Not ISR-callable. */
uint32_t furi_thread_flags_get(void) {
    TaskHandle_t hTask;
    uint32_t rflags;
    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else {
        hTask = xTaskGetCurrentTaskHandle();
        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags) !=
           pdPASS) {
            rflags = (uint32_t)FuriStatusError;
        }
    }
    return (rflags);
}
/** Wait until the requested flags are set on the calling thread.
 * `options` combines FuriFlagWaitAll / FuriFlagWaitAny and FuriFlagNoClear;
 * `timeout` is in ticks. Returns accumulated flags on success, or a FuriStatus
 * error code cast to uint32_t. Not callable from an ISR. */
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;
    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        // With NoClear the notification value is left untouched on exit.
        if((options & FuriFlagNoClear) == FuriFlagNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }
        rflags = 0U;
        tout = timeout;
        t0 = xTaskGetTickCount();
        do {
            rval = xTaskNotifyWaitIndexed(THREAD_NOTIFY_INDEX, 0, clear, &nval, tout);
            if(rval == pdPASS) {
                // Accumulate newly reported flags across iterations.
                rflags &= flags;
                rflags |= nval;
                if((options & FuriFlagWaitAll) == FuriFlagWaitAll) {
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                } else {
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                }
                /* Update timeout (charge elapsed ticks to the remaining wait) */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                // Wait itself failed/expired.
                if(timeout == 0) {
                    rflags = (uint32_t)FuriStatusErrorResource;
                } else {
                    rflags = (uint32_t)FuriStatusErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }
    /* Return flags before clearing */
    return (rflags);
}
/** Fill `thread_array` with up to `array_items` task handles.
 * Returns the number of entries written; 0 on bad arguments, in ISR context,
 * or when the snapshot buffer cannot be allocated. */
uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
    uint32_t count = 0U;
    if(!FURI_IS_IRQ_MODE() && (thread_array != NULL) && (array_items != 0U)) {
        vTaskSuspendAll();
        uint32_t task_count = uxTaskGetNumberOfTasks();
        TaskStatus_t* task = pvPortMalloc(task_count * sizeof(TaskStatus_t));
        if(task != NULL) {
            uint32_t i;
            task_count = uxTaskGetSystemState(task, task_count, NULL);
            for(i = 0U; (i < task_count) && (i < array_items); i++) {
                thread_array[i] = (FuriThreadId)task[i].xHandle;
            }
            count = i;
        }
        // Bug fix: previously, on allocation failure the raw task count was
        // returned even though thread_array was never filled.
        (void)xTaskResumeAll();
        vPortFree(task);
    }
    return count;
}
/** Name of the task behind `thread_id`; NULL in ISR context or for NULL ids. */
const char* furi_thread_get_name(FuriThreadId thread_id) {
    TaskHandle_t task = (TaskHandle_t)thread_id;
    if(FURI_IS_IRQ_MODE() || task == NULL) {
        return NULL;
    }
    return pcTaskGetName(task);
}
/** Appid of the task behind `thread_id`; "system" for non-Furi tasks,
 * NULL ids, or ISR context. */
const char* furi_thread_get_appid(FuriThreadId thread_id) {
    TaskHandle_t task = (TaskHandle_t)thread_id;
    const char* appid = "system";
    if(!FURI_IS_IRQ_MODE() && task != NULL) {
        FuriThread* thread = (FuriThread*)pvTaskGetThreadLocalStoragePointer(task, 0);
        if(thread) {
            appid = thread->appid;
        }
    }
    return appid;
}
/** Minimum free stack (bytes) observed for the task; 0 in ISR/NULL cases. */
uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
    TaskHandle_t task = (TaskHandle_t)thread_id;
    if(FURI_IS_IRQ_MODE() || task == NULL) {
        return 0U;
    }
    return (uint32_t)(uxTaskGetStackHighWaterMark(task) * sizeof(StackType_t));
}
// Route data to the thread's custom stdout callback or, by default, to the
// HAL console. Returns the number of bytes consumed (always `size`).
static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size) {
    if(thread->output.write_callback != NULL) {
        thread->output.write_callback(data, size);
    } else {
        furi_hal_console_tx((const uint8_t*)data, size);
    }
    return size;
}
// Drain the thread's stdout line buffer through __furi_thread_stdout_write().
static int32_t __furi_thread_stdout_flush(FuriThread* thread) {
    FuriString* buffer = thread->output.buffer;
    size_t size = furi_string_size(buffer);
    if(size > 0) {
        __furi_thread_stdout_write(thread, furi_string_get_cstr(buffer), size);
        furi_string_reset(buffer);
    }
    return 0;
}
/** Install a stdout sink for the *calling* thread; pending output is flushed
 * through the previous sink first. */
void furi_thread_set_stdout_callback(FuriThreadStdoutWriteCallback callback) {
    FuriThread* thread = furi_thread_get_current();
    furi_assert(thread);
    __furi_thread_stdout_flush(thread);
    thread->output.write_callback = callback;
}
FuriThreadStdoutWriteCallback furi_thread_get_stdout_callback() {
    FuriThread* thread = furi_thread_get_current();
    furi_assert(thread);
    return thread->output.write_callback;
}
/** Buffered stdout write for the calling thread. Complete lines are forwarded
 * immediately; partial lines accumulate in the thread's buffer. */
size_t furi_thread_stdout_write(const char* data, size_t size) {
    FuriThread* thread = furi_thread_get_current();
    furi_assert(thread);
    if(size == 0 || data == NULL) {
        // An empty write acts as a flush request (flush always returns 0).
        return __furi_thread_stdout_flush(thread);
    } else {
        if(data[size - 1] == '\n') {
            // if the last character is a newline, we can flush the buffer and
            // write the data directly, without buffering
            __furi_thread_stdout_flush(thread);
            __furi_thread_stdout_write(thread, data, size);
        } else {
            // string_cat doesn't work here because we need to write the exact size data
            for(size_t i = 0; i < size; i++) {
                furi_string_push_back(thread->output.buffer, data[i]);
                if(data[i] == '\n') {
                    __furi_thread_stdout_flush(thread);
                }
            }
        }
    }
    return size;
}
/** Flush the calling thread's buffered stdout. */
int32_t furi_thread_stdout_flush() {
    FuriThread* thread = furi_thread_get_current();
    furi_assert(thread);
    return __furi_thread_stdout_flush(thread);
}
/** Suspend the task behind `thread_id`. */
void furi_thread_suspend(FuriThreadId thread_id) {
    vTaskSuspend((TaskHandle_t)thread_id);
}
/** Resume the task behind `thread_id`. ISR-safe. */
void furi_thread_resume(FuriThreadId thread_id) {
    TaskHandle_t task = (TaskHandle_t)thread_id;
    // Resuming from an ISR requires the dedicated FreeRTOS entry point.
    if(FURI_IS_IRQ_MODE()) {
        xTaskResumeFromISR(task);
    } else {
        vTaskResume(task);
    }
}
/** True when the task behind `thread_id` is currently suspended. */
bool furi_thread_is_suspended(FuriThreadId thread_id) {
    return eTaskGetState((TaskHandle_t)thread_id) == eSuspended;
}

View File

@ -0,0 +1,338 @@
/**
* @file thread.h
* Furi: Furi Thread API
*/
#pragma once
#include "base.h"
#include "common_defines.h"
#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/** FuriThreadState */
typedef enum {
FuriThreadStateStopped,
FuriThreadStateStarting,
FuriThreadStateRunning,
} FuriThreadState;
/** FuriThreadPriority */
typedef enum {
FuriThreadPriorityNone = 0, /**< Uninitialized, choose system default */
FuriThreadPriorityIdle = 1, /**< Idle priority */
FuriThreadPriorityLowest = 14, /**< Lowest */
FuriThreadPriorityLow = 15, /**< Low */
FuriThreadPriorityNormal = 16, /**< Normal */
FuriThreadPriorityHigh = 17, /**< High */
FuriThreadPriorityHighest = 18, /**< Highest */
FuriThreadPriorityIsr =
(FURI_CONFIG_THREAD_MAX_PRIORITIES - 1), /**< Deferred ISR (highest possible) */
} FuriThreadPriority;
/** FuriThread anonymous structure */
typedef struct FuriThread FuriThread;

/** FuriThreadId proxy type to OS low level functions */
typedef void* FuriThreadId;

/** FuriThreadCallback Your callback to run in new thread
 *
 * @warning never use osThreadExit in FuriThread
 *
 * @param context context passed via furi_thread_set_context
 * @return thread return code, readable via furi_thread_get_return_code
 */
typedef int32_t (*FuriThreadCallback)(void* context);

/** Write to stdout callback
 *
 * @param data pointer to data
 * @param size data size @warning your handler must consume everything
 */
typedef void (*FuriThreadStdoutWriteCallback)(const char* data, size_t size);

/** FuriThread state change callback called upon thread state change
 *
 * @param state new thread state
 * @param context callback context
 */
typedef void (*FuriThreadStateCallback)(FuriThreadState state, void* context);

/** Allocate FuriThread
 *
 * @return FuriThread instance
 */
FuriThread* furi_thread_alloc(void);

/** Allocate FuriThread, shortcut version
 *
 * @param name thread name
 * @param stack_size stack size in bytes
 * @param callback thread body
 * @param context callback context
 * @return FuriThread*
 */
FuriThread* furi_thread_alloc_ex(
    const char* name,
    uint32_t stack_size,
    FuriThreadCallback callback,
    void* context);

/** Release FuriThread
 *
 * @warning see furi_thread_join
 *
 * @param thread FuriThread instance
 */
void furi_thread_free(FuriThread* thread);

/** Set FuriThread name
 *
 * @param thread FuriThread instance
 * @param name string
 */
void furi_thread_set_name(FuriThread* thread, const char* name);

/**
 * @brief Set FuriThread appid
 * Technically, it is like a "process id", but it is not a system-wide unique identifier.
 * All threads spawned by the same app will have the same appid.
 *
 * @param thread FuriThread instance
 * @param appid application id string
 */
void furi_thread_set_appid(FuriThread* thread, const char* appid);

/** Mark thread as service
 * The service cannot be stopped or removed, and cannot exit from the thread body
 *
 * @param thread FuriThread instance
 */
void furi_thread_mark_as_service(FuriThread* thread);

/** Set FuriThread stack size
 *
 * @param thread FuriThread instance
 * @param stack_size stack size in bytes
 */
void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size);

/** Set FuriThread callback
 *
 * @param thread FuriThread instance
 * @param callback FuriThreadCallback, called upon thread run
 */
void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback);

/** Set FuriThread context
 *
 * @param thread FuriThread instance
 * @param context pointer to context for thread callback
 */
void furi_thread_set_context(FuriThread* thread, void* context);

/** Set FuriThread priority
 *
 * @param thread FuriThread instance
 * @param priority FuriThreadPriority value
 */
void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority);

/** Set current thread priority
 *
 * @param priority FuriThreadPriority value
 */
void furi_thread_set_current_priority(FuriThreadPriority priority);

/** Get current thread priority
 *
 * @return FuriThreadPriority value
 */
FuriThreadPriority furi_thread_get_current_priority(void);

/** Set FuriThread state change callback
 *
 * @param thread FuriThread instance
 * @param callback state change callback
 */
void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback);

/** Set FuriThread state change context
 *
 * @param thread FuriThread instance
 * @param context pointer to context
 */
void furi_thread_set_state_context(FuriThread* thread, void* context);

/** Get FuriThread state
 *
 * @param thread FuriThread instance
 *
 * @return thread state from FuriThreadState
 */
FuriThreadState furi_thread_get_state(FuriThread* thread);

/** Start FuriThread
 *
 * @param thread FuriThread instance
 */
void furi_thread_start(FuriThread* thread);

/** Join FuriThread
 *
 * @warning Use this method only when CPU is not busy (Idle task receives
 *          control), otherwise it will wait forever.
 *
 * @param thread FuriThread instance
 *
 * @return bool
 */
bool furi_thread_join(FuriThread* thread);

/** Get FreeRTOS FuriThreadId for FuriThread instance
 *
 * @param thread FuriThread instance
 *
 * @return FuriThreadId or NULL
 */
FuriThreadId furi_thread_get_id(FuriThread* thread);

/** Enable heap tracing
 *
 * @param thread FuriThread instance
 */
void furi_thread_enable_heap_trace(FuriThread* thread);

/** Disable heap tracing
 *
 * @param thread FuriThread instance
 */
void furi_thread_disable_heap_trace(FuriThread* thread);

/** Get thread heap size
 *
 * @param thread FuriThread instance
 *
 * @return size in bytes
 */
size_t furi_thread_get_heap_size(FuriThread* thread);

/** Get thread return code
 *
 * @param thread FuriThread instance
 *
 * @return return code
 */
int32_t furi_thread_get_return_code(FuriThread* thread);

/* Thread related methods that don't involve FuriThread directly */

/** Get FreeRTOS FuriThreadId for current thread
 *
 * @return FuriThreadId or NULL
 */
FuriThreadId furi_thread_get_current_id(void);

/** Get FuriThread instance for current thread
 *
 * @return pointer to FuriThread or NULL if this thread doesn't belong to Furi
 */
FuriThread* furi_thread_get_current(void);

/** Return control to scheduler */
void furi_thread_yield(void);

/** Set flags on the given thread */
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags);

/** Clear flags of the current thread */
uint32_t furi_thread_flags_clear(uint32_t flags);

/** Get flags of the current thread */
uint32_t furi_thread_flags_get(void);

/** Wait for flags on the current thread */
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout);

/**
 * @brief Enumerate threads
 *
 * @param thread_array array of FuriThreadId, where thread ids will be stored
 * @param array_items array size
 * @return uint32_t threads count
 */
uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items);

/**
 * @brief Get thread name
 *
 * @param thread_id thread id
 * @return const char* name or NULL
 */
const char* furi_thread_get_name(FuriThreadId thread_id);

/**
 * @brief Get thread appid
 *
 * @param thread_id thread id
 * @return const char* appid
 */
const char* furi_thread_get_appid(FuriThreadId thread_id);

/**
 * @brief Get thread stack watermark
 *
 * @param thread_id thread id
 * @return uint32_t
 */
uint32_t furi_thread_get_stack_space(FuriThreadId thread_id);

/** Get STDOUT callback for thread
 *
 * @return STDOUT callback
 */
FuriThreadStdoutWriteCallback furi_thread_get_stdout_callback(void);

/** Set STDOUT callback for thread
 *
 * @param callback callback or NULL to clear
 */
void furi_thread_set_stdout_callback(FuriThreadStdoutWriteCallback callback);

/** Write data to buffered STDOUT
 *
 * @param data input data
 * @param size input data size
 *
 * @return size_t written data size
 */
size_t furi_thread_stdout_write(const char* data, size_t size);

/** Flush data to STDOUT
 *
 * @return int32_t error code
 */
int32_t furi_thread_stdout_flush(void);

/** Suspend thread
 *
 * @param thread_id thread id
 */
void furi_thread_suspend(FuriThreadId thread_id);

/** Resume thread
 *
 * @param thread_id thread id
 */
void furi_thread_resume(FuriThreadId thread_id);

/** Get thread suspended state
 *
 * @param thread_id thread id
 * @return true if thread is suspended
 */
bool furi_thread_is_suspended(FuriThreadId thread_id);

/* NOTE(review): presumably queries whether the thread was marked as a service
 * via furi_thread_mark_as_service — confirm against the implementation. */
bool furi_thread_mark_is_service(FuriThreadId thread_id);
#ifdef __cplusplus
}
#endif

172
components/furi/src/timer.c Normal file
View File

@ -0,0 +1,172 @@
#include "timer.h"

#include <stdint.h>

#include <freertos/FreeRTOS.h>
#include <freertos/timers.h>

#include "check.h"
#include "kernel.h"
/** Pairs a user callback with its context. A pointer to this record is
 *  stored as the FreeRTOS timer ID, with bit 0 used as a "dynamically
 *  allocated" tag (see furi_timer_alloc / furi_timer_free). */
typedef struct {
    FuriTimerCallback func;
    void* context;
} TimerCallback_t;

/** FreeRTOS timer dispatch: recovers the TimerCallback_t from the timer ID
 *  and invokes the user callback with its context.
 *
 * @param hTimer handle of the expired FreeRTOS timer
 */
static void TimerCallback(TimerHandle_t hTimer) {
    /* Retrieve pointer to callback function and context.
     * Mask via uintptr_t, not uint32_t: a uint32_t round-trip truncates
     * pointers on 64-bit targets. */
    TimerCallback_t* callb =
        (TimerCallback_t*)((uintptr_t)pvTimerGetTimerID(hTimer) & ~(uintptr_t)1U);
    if(callb != NULL) {
        callb->func(callb->context);
    }
}
/** Allocate a FuriTimer backed by a FreeRTOS software timer.
 *
 * Must not be called from an ISR. The callback/context pair is stored in a
 * heap record whose pointer (with bit 0 tagged to mark dynamic allocation)
 * becomes the FreeRTOS timer ID.
 *
 * @param func    user callback, must not be NULL
 * @param type    FuriTimerTypeOnce or FuriTimerTypePeriodic
 * @param context user context passed to the callback
 * @return FuriTimer instance (never NULL; crashes on failure)
 */
FuriTimer* furi_timer_alloc(FuriTimerCallback func, FuriTimerType type, void* context) {
    furi_assert((furi_kernel_is_irq_or_masked() == 0U) && (func != NULL));
    /* Allocate the callback record and fail fast on OOM instead of
       dereferencing a NULL pointer below. */
    TimerCallback_t* callb = malloc(sizeof(TimerCallback_t));
    furi_check(callb);
    callb->func = func;
    callb->context = context;
    const UBaseType_t reload = (type == FuriTimerTypeOnce) ? pdFALSE : pdTRUE;
    /* Tag bit 0 to record that the callback record was heap-allocated.
       uintptr_t keeps the round-trip lossless on any pointer width. */
    callb = (TimerCallback_t*)((uintptr_t)callb | 1U);
    // TimerCallback is always the FreeRTOS-level callback; it dispatches to
    // the user function/context stored in callb.
    TimerHandle_t hTimer = xTimerCreate(NULL, portMAX_DELAY, reload, callb, TimerCallback);
    furi_check(hTimer);
    /* Return timer ID */
    return ((FuriTimer*)hTimer);
}
/** Delete the timer and release its callback record.
 *
 * Must not be called from an ISR. Blocks until the timer is no longer
 * active, then frees the callback record if it carries the heap-allocation
 * tag in bit 0.
 *
 * @param instance FuriTimer instance, must not be NULL
 */
void furi_timer_free(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    TimerHandle_t hTimer = (TimerHandle_t)instance;
    /* Read the (possibly tagged) callback record before the handle is deleted. */
    TimerCallback_t* callb = (TimerCallback_t*)pvTimerGetTimerID(hTimer);
    furi_check(xTimerDelete(hTimer, portMAX_DELAY) == pdPASS);
    /* Wait until the timer service has processed the delete request. */
    while(furi_timer_is_running(instance)) furi_delay_tick(2);
    /* uintptr_t, not uint32_t: a 32-bit cast truncates pointers on 64-bit targets. */
    if((uintptr_t)callb & 1U) {
        /* Callback memory was allocated from dynamic pool, clear flag */
        callb = (TimerCallback_t*)((uintptr_t)callb & ~(uintptr_t)1U);
        /* Return allocated memory to dynamic pool */
        free(callb);
    }
}
/** Start (or re-period) the timer.
 *
 * Asynchronous: the real operation happens when the timer service
 * processes the request. Must not be called from an ISR.
 *
 * @param instance FuriTimer instance
 * @param ticks    interval in ticks, must be below portMAX_DELAY
 * @return FuriStatusOk on success, FuriStatusErrorResource otherwise
 */
FuriStatus furi_timer_start(FuriTimer* instance, uint32_t ticks) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    furi_assert(ticks < portMAX_DELAY);
    TimerHandle_t handle = (TimerHandle_t)instance;
    /* Changing the period also (re)starts the timer in FreeRTOS. */
    const BaseType_t queued = xTimerChangePeriod(handle, ticks, portMAX_DELAY);
    return (queued == pdPASS) ? FuriStatusOk : FuriStatusErrorResource;
}
/** Restart the timer with a new interval.
 *
 * Asynchronous: the real operation happens when the timer service
 * processes the request. Must not be called from an ISR.
 *
 * @param instance FuriTimer instance
 * @param ticks    interval in ticks, must be below portMAX_DELAY
 * @return FuriStatusOk on success, FuriStatusErrorResource otherwise
 */
FuriStatus furi_timer_restart(FuriTimer* instance, uint32_t ticks) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    furi_assert(ticks < portMAX_DELAY);
    TimerHandle_t handle = (TimerHandle_t)instance;
    FuriStatus status = FuriStatusErrorResource;
    /* Both the period change and the reset must be queued successfully. */
    if(xTimerChangePeriod(handle, ticks, portMAX_DELAY) == pdPASS &&
       xTimerReset(handle, portMAX_DELAY) == pdPASS) {
        status = FuriStatusOk;
    }
    return status;
}
/** Stop the timer.
 *
 * Asynchronous: the real operation happens when the timer service
 * processes the request. Must not be called from an ISR.
 *
 * @param instance FuriTimer instance
 * @return FuriStatusOk (crashes if the stop request cannot be queued)
 */
FuriStatus furi_timer_stop(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    furi_check(xTimerStop((TimerHandle_t)instance, portMAX_DELAY) == pdPASS);
    return FuriStatusOk;
}
/** Query whether the timer is active.
 *
 * @warning May report a stale state while timer commands are still queued.
 *
 * @param instance FuriTimer instance
 * @return 0: not running, 1: running
 */
uint32_t furi_timer_is_running(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    return (uint32_t)xTimerIsTimerActive((TimerHandle_t)instance);
}
/** Get the tick at which the timer will next expire.
 *
 * @param instance FuriTimer instance
 * @return expiry time in ticks
 */
uint32_t furi_timer_get_expire_time(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    return (uint32_t)xTimerGetExpiryTime((TimerHandle_t)instance);
}
/** Defer a callback to the timer service task.
 *
 * Safe to call from both ISR and task context; the appropriate FreeRTOS
 * pend-function variant is chosen automatically. Crashes if the request
 * cannot be queued.
 *
 * @param callback function to run in the timer service task
 * @param context  first argument for the callback
 * @param arg      second argument for the callback
 */
void furi_timer_pending_callback(FuriTimerPendigCallback callback, void* context, uint32_t arg) {
    const BaseType_t queued =
        furi_kernel_is_irq_or_masked() ?
            xTimerPendFunctionCallFromISR(callback, context, arg, NULL) :
            xTimerPendFunctionCall(callback, context, arg, FuriWaitForever);
    furi_check(queued == pdPASS);
}
/** Adjust the timer service (daemon) task priority.
 *
 * Must not be called from an ISR, and only after the timer task has
 * started. Crashes on an unknown priority value.
 *
 * @param priority FuriTimerThreadPriorityNormal or FuriTimerThreadPriorityElevated
 */
void furi_timer_set_thread_priority(FuriTimerThreadPriority priority) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    TaskHandle_t daemon = xTimerGetTimerDaemonTaskHandle();
    furi_check(daemon); // Don't call this method before timer task start
    switch(priority) {
    case FuriTimerThreadPriorityNormal:
        vTaskPrioritySet(daemon, configTIMER_TASK_PRIORITY);
        break;
    case FuriTimerThreadPriorityElevated:
        vTaskPrioritySet(daemon, configMAX_PRIORITIES - 1);
        break;
    default:
        furi_crash();
    }
}

106
components/furi/src/timer.h Normal file
View File

@ -0,0 +1,106 @@
#pragma once

#include "base.h"

#ifdef __cplusplus
extern "C" {
#endif

/** Timer expiry callback.
 * NOTE(review): runs in the timer service task context (FreeRTOS timer
 * daemon) — see the implementation in timer.c.
 *
 * @param context user context supplied to furi_timer_alloc
 */
typedef void (*FuriTimerCallback)(void* context);

/** Timer behavior on expiry. */
typedef enum {
    FuriTimerTypeOnce = 0, ///< One-shot timer.
    FuriTimerTypePeriodic = 1 ///< Repeating timer.
} FuriTimerType;

/** Opaque timer handle. */
typedef void FuriTimer;

/** Allocate timer
 *
 * @param[in]  func     The callback function
 * @param[in]  type     The timer type
 * @param      context  The callback context
 *
 * @return     The pointer to FuriTimer instance
 */
FuriTimer* furi_timer_alloc(FuriTimerCallback func, FuriTimerType type, void* context);

/** Free timer
 *
 * @param      instance  The pointer to FuriTimer instance
 */
void furi_timer_free(FuriTimer* instance);

/** Start timer
 *
 * @warning    This is asynchronous call, real operation will happen as soon as
 *             timer service process this request.
 *
 * @param      instance  The pointer to FuriTimer instance
 * @param[in]  ticks     The interval in ticks
 *
 * @return     The furi status.
 */
FuriStatus furi_timer_start(FuriTimer* instance, uint32_t ticks);

/** Restart timer with previous timeout value
 *
 * @warning    This is asynchronous call, real operation will happen as soon as
 *             timer service process this request.
 *
 * @param      instance  The pointer to FuriTimer instance
 * @param[in]  ticks     The interval in ticks
 *
 * @return     The furi status.
 */
FuriStatus furi_timer_restart(FuriTimer* instance, uint32_t ticks);

/** Stop timer
 *
 * @warning    This is asynchronous call, real operation will happen as soon as
 *             timer service process this request.
 *
 * @param      instance  The pointer to FuriTimer instance
 *
 * @return     The furi status.
 */
FuriStatus furi_timer_stop(FuriTimer* instance);

/** Is timer running
 *
 * @warning    This call may and will return an obsolete timer state if timer
 *             commands are still in the queue. Please read FreeRTOS timer
 *             documentation first.
 *
 * @param      instance  The pointer to FuriTimer instance
 *
 * @return     0: not running, 1: running
 */
uint32_t furi_timer_is_running(FuriTimer* instance);

/** Get timer expire time
 *
 * @param      instance  The Timer instance
 *
 * @return     expire tick
 */
uint32_t furi_timer_get_expire_time(FuriTimer* instance);

/* NOTE(review): "Pendig" is a misspelling of "Pending"; the identifier is kept
 * as-is because renaming it would break existing callers (e.g. timer.c). */
typedef void (*FuriTimerPendigCallback)(void* context, uint32_t arg);

/** Defer a callback to the timer service task (usable from ISR context). */
void furi_timer_pending_callback(FuriTimerPendigCallback callback, void* context, uint32_t arg);

/** Timer service thread priority level. */
typedef enum {
    FuriTimerThreadPriorityNormal, /**< Lower than other threads */
    FuriTimerThreadPriorityElevated, /**< Same as other threads */
} FuriTimerThreadPriority;

/** Set Timer thread priority
 *
 * @param[in]  priority  The priority
 */
void furi_timer_set_thread_priority(FuriTimerThreadPriority priority);

#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,3 @@
idf_component_register(
INCLUDE_DIRS "."
)

25
components/mlib/LICENSE Normal file
View File

@ -0,0 +1,25 @@
BSD 2-Clause License
Copyright (c) 2017-2023, Patrick Pelissier
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,3 @@
This folder is a partial git clone from:
https://github.com/P-p-H-d/mlib/commit/d9401371a6bc1c0f240161514549976bcdd98999

1240
components/mlib/m-algo.h Normal file

File diff suppressed because it is too large Load Diff

1132
components/mlib/m-array.h Normal file

File diff suppressed because it is too large Load Diff

342
components/mlib/m-atomic.h Normal file
View File

@ -0,0 +1,342 @@
/*
* M*LIB - Thin stdatomic wrapper for C++ compatibility
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_ATOMIC_H
#define MSTARLIB_ATOMIC_H
/* NOTE: Due to C++ not having officially recognized stdatomic.h,
it is hard to use this header directly with a C++ compiler.
See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932
clang++ has no issue with this header but if someone includes
atomic from C++, there is incompatibility between atomic & stdatomic.
Moreover some compilers lack a working stdatomic header.
GCC 4.9 doesn't have a working implementation of 'atomic'.
APPLE Clang defines __GNUC__ to be only 4 despite having full support
for atomic.
*/
#if defined(__cplusplus) && __cplusplus >= 201103L \
&& !(defined(__GNUC__) && __GNUC__ < 5 && !defined(__APPLE__))
/* NOTE: This is what the stdatomic.h header shall do in C++ mode. */
#include <atomic>
using std::memory_order;
using std::atomic_bool;
using std::atomic_char;
using std::atomic_short;
using std::atomic_int;
using std::atomic_long;
using std::atomic_llong;
using std::atomic_uchar;
using std::atomic_schar;
using std::atomic_ushort;
using std::atomic_uint;
using std::atomic_ulong;
using std::atomic_ullong;
using std::atomic_intptr_t;
using std::atomic_uintptr_t;
using std::atomic_size_t;
using std::atomic_ptrdiff_t;
using std::atomic_intmax_t;
using std::atomic_uintmax_t;
using std::atomic_flag;
using std::kill_dependency;
using std::atomic_thread_fence;
using std::atomic_signal_fence;
using std::atomic_is_lock_free;
using std::atomic_store_explicit;
using std::atomic_store;
using std::atomic_load_explicit;
using std::atomic_load;
using std::atomic_exchange_explicit;
using std::atomic_exchange;
using std::atomic_compare_exchange_strong_explicit;
using std::atomic_compare_exchange_strong;
using std::atomic_compare_exchange_weak_explicit;
using std::atomic_compare_exchange_weak;
using std::atomic_fetch_add;
using std::atomic_fetch_add_explicit;
using std::atomic_fetch_sub;
using std::atomic_fetch_sub_explicit;
using std::atomic_fetch_or;
using std::atomic_fetch_or_explicit;
using std::atomic_fetch_xor;
using std::atomic_fetch_xor_explicit;
using std::atomic_fetch_and;
using std::atomic_fetch_and_explicit;
using std::atomic_flag_test_and_set;
using std::atomic_flag_test_and_set_explicit;
using std::atomic_flag_clear;
using std::atomic_flag_clear_explicit;
using std::memory_order_relaxed;
using std::memory_order_consume;
using std::memory_order_acquire;
using std::memory_order_release;
using std::memory_order_acq_rel;
using std::memory_order_seq_cst;
/* CLANG provides a warning on defining _Atomic as it sees it
* as a reserved system macro. It is true. However, the goal of this
* header is to provide stdatomic semantic, so it needs to define
* _Atomic macro.
*
* So, this warning has to be ignored.
*
* It cannot use M_BEGIN_PROTECTED_CODE as this header is normally
* independent of m-core.h
*/
#if defined(__clang__) && __clang_major__ >= 4
_Pragma("clang diagnostic push")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#define _Atomic(T) std::atomic< T >
#if defined(__clang__) && __clang_major__ >= 4
_Pragma("clang diagnostic pop")
#endif
/* C11 with working stdatomic
STDATOMIC doesn't work with C++ except for clang but is incompatible with atomic.
GCC < 4.9 doesn't provide a compliant stdatomic.h
CLANG 3.5 has issues with GCC's stdatomic.h and doesn't provide its own
ICC < 18 doesn't provide a compliant stdatomic.h
*/
#elif (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) ) \
|| (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && !defined(__cplusplus) && (__GNUC__*100 + __GNUC_MINOR__) >= 409) \
|| (defined(__clang__) && (__clang_major__ * 100 + __clang_minor__) >= 308) \
|| (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800)
#include <stdatomic.h>
/* MSYS2 has a conflict between cdefs.h which defines a _Atomic macro (if not C11)
not compatible with the used stdatomic.h (from GCC).
Provide a configurable mechanism to undef it with auto-detection of msys2 / gcc */
#ifndef M_USE_UNDEF_ATOMIC
# if defined(__MSYS__) && defined(__GNUC__) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L)
# define M_USE_UNDEF_ATOMIC 1
# endif
#endif
#if defined(M_USE_UNDEF_ATOMIC) && M_USE_UNDEF_ATOMIC == 1
# undef _Atomic
#endif
/* Non working C++ atomic header, nor working stdatomic.h found.
   Write a compatible layer, using a mutex, that is as thin as possible.
Supports only up to 64-bits atomic (sizeof long long to be more precise).
The locks are never properly cleared and remain active until
the end of the program.
We also assume that the call to the atomic_* interface is "macro clean".
*/
#else
#include "m-thread.h"
#include "m-core.h"
M_BEGIN_PROTECTED_CODE
/* _Atomic qualifier for a type (emulation).
The structure is quite large:
_val : value of the atomic type,
_zero : zero value of the atomic type (constant),
_previous: temporary value used within the mutex lock,
_lock : the mutex lock.
Support up to sizeof (long long) type.
*/
#define _Atomic(T) \
struct { \
T volatile _val; \
T _zero; \
T _previous; \
m_mutex_t _lock; \
}
/* Define the supported memory order.
Even if memory order is defined, only the strongest constraint is used */
typedef enum {
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
typedef _Atomic(bool) atomic_bool;
typedef _Atomic(char) atomic_char;
typedef _Atomic(short) atomic_short;
typedef _Atomic(int) atomic_int;
typedef _Atomic(long) atomic_long;
typedef _Atomic(long long) atomic_llong;
typedef _Atomic(unsigned char) atomic_uchar;
typedef _Atomic(signed char) atomic_schar;
typedef _Atomic(unsigned short) atomic_ushort;
typedef _Atomic(unsigned int) atomic_uint;
typedef _Atomic(unsigned long) atomic_ulong;
typedef _Atomic(unsigned long long) atomic_ullong;
typedef _Atomic(intptr_t) atomic_intptr_t;
typedef _Atomic(uintptr_t) atomic_uintptr_t;
typedef _Atomic(size_t) atomic_size_t;
typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
/* Define the minimum size supported by the architecture
for an atomic read or write.
This can help a lot since it avoids locking for atomic_load and
atomic_store.
*/
#if defined(_M_X64) || defined(_M_AMD64) || defined(__x86_64__)
# define ATOMICI_MIN_RW_SIZE 8
#elif defined(_M_86) || defined (__i386__)
# define ATOMICI_MIN_RW_SIZE 4
#else
# define ATOMICI_MIN_RW_SIZE 0
#endif
/* Detect if stdint.h was included */
#if (defined (INTMAX_C) && defined (UINTMAX_C) && !defined(__cplusplus)) || \
defined (_STDINT_H) || defined (_STDINT_H_) || defined (_STDINT) || \
defined (_SYS_STDINT_H_)
/* Define additional atomic types */
typedef _Atomic(intmax_t) atomic_intmax_t;
typedef _Atomic(uintmax_t) atomic_uintmax_t;
#endif
/* (INTERNAL) Unlock the mutex and return the given value.
   Helper for the emulation macros below: it lets a comma-expression macro
   release the lock and still yield a value as its result. */
M_INLINE long long atomic_fetch_unlock (m_mutex_t *lock, long long val)
{
  m_mutex_unlock (*lock);
  return val;
}
/* (INTERNAL) This is the heart of the wrapper:
lock the atomic value, read it and returns the value.
In order to avoid any compiler extension, we need to transform the
atomic type into 'long long' then convert it back to its value.
This is because _previous can't be read after the lock, and we can't
generate temporary variable within a macro.
The trick is computing _val - _zero within the lock, then
returns retvalue + _zero after the release of the lock.
*/
#define atomic_fetch_op(ptr, val, op) \
(m_mutex_lock((ptr)->_lock), \
(ptr)->_previous = (ptr)->_val, \
(ptr)->_val op (val), \
atomic_fetch_unlock(&(ptr)->_lock, (long long)((ptr)->_previous-(ptr)->_zero))+(ptr)->_zero)
/* Perform an atomic add (EMULATION) */
#define atomic_fetch_add(ptr, val) atomic_fetch_op(ptr, val, +=)
/* Perform an atomic sub (EMULATION) */
#define atomic_fetch_sub(ptr, val) atomic_fetch_op(ptr, val, -=)
/* Perform an atomic or (EMULATION) */
#define atomic_fetch_or(ptr, val) atomic_fetch_op(ptr, val, |=)
/* Perform an atomic xor (EMULATION) */
#define atomic_fetch_xor(ptr, val) atomic_fetch_op(ptr, val, ^=)
/* Perform an atomic and (EMULATION) */
#define atomic_fetch_and(ptr, val) atomic_fetch_op(ptr, val, &=)
/* Perform an atomic exchange (EMULATION) */
#define atomic_exchange(ptr, val) atomic_fetch_op(ptr, val, =)
/* Initialize an atomic GLOBAL variable */
#define ATOMIC_VAR_INIT(val) { val, 0, 0, M_MUTEXI_INIT_VALUE }
/* Initialize an atomic variable */
#define atomic_init(ptr, val) \
(m_mutex_init((ptr)->_lock), (ptr)->_val = val, (ptr)->_zero = 0)
/* (INTERNAL) Load an atomic variable within a lock
(needed for variable greater than CPU atomic size) */
#define atomic_load_lock(ptr) \
(m_mutex_lock((ptr)->_lock), \
(ptr)->_previous = (ptr)->_val, \
atomic_fetch_unlock(&(ptr)->_lock, (long long) ((ptr)->_previous-(ptr)->_zero))+(ptr)->_zero)
/* (INTERNAL) Store an atomic variable within a lock
(needed for variable greater than CPU atomic size) */
#define atomic_store_lock(ptr, val) \
(m_mutex_lock((ptr)->_lock), \
(ptr)->_val = (val), \
m_mutex_unlock((ptr)->_lock))
/* Atomic load of a variable (EMULATION)
If the atomic type size is not greater than the CPU atomic size,
we can perform a direct read of the variable (much faster) */
#define atomic_load(ptr) \
( sizeof ((ptr)->_val) <= ATOMICI_MIN_RW_SIZE \
? (ptr)->_val \
: atomic_load_lock(ptr))
/* Atomic store of a variable (EMULATION)
If the atomic type size is not greater than the CPU atomic size,
we can perform a direct write of the variable (much faster) */
#define atomic_store(ptr, val) do { \
if ( sizeof ((ptr)->_val) <= ATOMICI_MIN_RW_SIZE) { \
(ptr)->_val = (val); \
} else { \
long long _offset = (long long) ((val) - (ptr)->_zero); \
atomic_store_lock(ptr, (ptr)->_zero + _offset); \
} \
} while (0)
/* Perform a CAS (Compare and swap) operation (EMULATION) */
#define atomic_compare_exchange_strong(ptr, exp, val) \
(m_mutex_lock((ptr)->_lock), \
atomic_fetch_unlock(&(ptr)->_lock, \
(ptr)->_val == *(exp) \
? ((ptr)->_val = (val), true) \
: (*(exp) = (ptr)->_val, false)))
#define atomic_fetch_add_explicit(ptr, val, mem) atomic_fetch_op(ptr, val, +=)
#define atomic_fetch_sub_explicit(ptr, val, mem) atomic_fetch_op(ptr, val, -=)
#define atomic_fetch_or_explicit(ptr, val, mem) atomic_fetch_op(ptr, val, |=)
#define atomic_fetch_xor_explicit(ptr, val, mem) atomic_fetch_op(ptr, val, ^=)
#define atomic_fetch_and_explicit(ptr, val, mem) atomic_fetch_op(ptr, val, &=)
#define atomic_exchange_explicit(ptr, val, mem) atomic_fetch_op(ptr, val, =)
#define atomic_load_explicit(ptr, mem) atomic_load(ptr)
#define atomic_store_explicit(ptr, val, mem) atomic_store(ptr, val)
#define kill_dependency(ptr) atomic_load(ptr)
#define atomic_thread_fence(mem) (void) 0
#define atomic_signal_fence(mem) (void) 0
#define atomic_is_lock_free(ptr) false
#define atomic_compare_exchange_strong_explicit(ptr, exp, val, mem1, mem2) atomic_compare_exchange_strong(ptr, exp, val)
#define atomic_compare_exchange_weak_explicit(ptr, exp, val, mem1, mem2) atomic_compare_exchange_strong(ptr, exp, val)
#define atomic_compare_exchange_weak(ptr, exp, val) atomic_compare_exchange_strong(ptr, exp, val)
/* TODO: Missing atomic_flag. Problem: it is supposed to be lock free! */
M_END_PROTECTED_CODE
#endif
// C17 deprecated ATOMIC_VAR_INIT
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L
# define M_ATOMIC_VAR_INIT(x) (x)
#else
# define M_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
#endif
#endif

981
components/mlib/m-bitset.h Normal file
View File

@ -0,0 +1,981 @@
/*
* M*LIB - BITSET module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_BITSET_H
#define MSTARLIB_BITSET_H
#include <stdint.h>
#include "m-core.h"
/********************************** INTERNAL ************************************/
M_BEGIN_PROTECTED_CODE
// Define the basic limb of a bitset
typedef uint64_t m_b1tset_limb_ct;
// And its size in bits
#define M_B1TSET_LIMB_BIT (sizeof(m_b1tset_limb_ct) * CHAR_BIT)
// bitset grow policy. n is limb size
#define M_B1TSET_INC_ALLOC_SIZE(n) ((n) < 4 ? 4 : (n) * 2)
// Compute the number of allocated limbs needed to handle 'n' bits.
#define M_B1TSET_TO_ALLOC(n) (((n) + M_B1TSET_LIMB_BIT - 1) / M_B1TSET_LIMB_BIT)
// Compute the number of bits available from the allocated size in limbs
#define M_B1TSET_FROM_ALLOC(n) ((n) * M_B1TSET_LIMB_BIT)
// Contract of a bitset
#define M_B1TSET_CONTRACT(t) do { \
M_ASSERT (t != NULL); \
M_ASSERT (t->size <= M_B1TSET_FROM_ALLOC (t->alloc)); \
M_ASSERT (t->alloc <= ((size_t)-1) / M_B1TSET_LIMB_BIT); \
M_ASSERT (t->size < ((size_t)-1) - M_B1TSET_LIMB_BIT); \
M_ASSERT (t->size == 0 || t->ptr != NULL); \
M_ASSERT (t->alloc == 0 || t->ptr != NULL); \
M_ASSERT ((t->size % M_B1TSET_LIMB_BIT) == 0 || (t->ptr[ (t->size-1) / M_B1TSET_LIMB_BIT] & ~(((((m_b1tset_limb_ct)1)<<(t->size % M_B1TSET_LIMB_BIT))<<1)-1)) == 0); \
} while (0)
/********************************** EXTERNAL ************************************/
/* Define a type of variable 'bits' or array of packed booleans */
typedef struct m_bitset_s {
size_t size; // Size is the number of bits
size_t alloc; // Alloc is the number of allocated limbs
m_b1tset_limb_ct *ptr; // Pointer to the allocated limbs
} m_bitset_t[1];
/* Pointer to a m_bitset_t */
typedef struct m_bitset_s *m_bitset_ptr;
/* Constant Pointer to a m_bitset_t */
typedef const struct m_bitset_s *m_bitset_srcptr;
/* Iterator on a bitset */
typedef struct m_bitset_it_s {
size_t index; // index to the array of bit
bool value; // value used for _ref & _cref to store the value
struct m_bitset_s *set; // the associated bitset
} m_bitset_it_t[1];
/* Initialize a bitset (CONSTRUCTOR).
   Starts empty with no limb storage; memory is allocated lazily by the
   growth operations. The static assert guarantees the limb bit-width is a
   power of two, which the index arithmetic below relies on. */
M_INLINE void
m_bitset_init(m_bitset_t t)
{
  M_ASSERT (t != NULL);
  M_STATIC_ASSERT (M_POWEROF2_P(M_B1TSET_LIMB_BIT), MLIB_INTERNAL, "M*LIB: BITSET LIMB shall be a power of 2.");
  t->size = 0;
  t->alloc = 0;
  t->ptr = NULL;
  M_B1TSET_CONTRACT(t);
}
/* Clean a bitset: drop all bits but keep the allocated limb storage
   for reuse (size goes to 0, alloc/ptr are untouched). */
M_INLINE void
m_bitset_reset(m_bitset_t t)
{
  M_B1TSET_CONTRACT(t);
  t->size = 0;
}
/* Clear a bitset (DESTRUCTOR): release the limb storage. */
M_INLINE void
m_bitset_clear(m_bitset_t t)
{
  m_bitset_reset(t);
  M_MEMORY_FREE(t->ptr);
  // This is not really needed, but is safer:
  // alloc==1 with ptr==NULL is an invalid representation that will be
  // detected by the contract if the cleared bitset is used again.
  // A C compiler should be able to optimize out these initializations.
  t->alloc = 1;
  t->ptr = NULL;
}
/* Set a bitset to another one (copy assignment).
   Grows the destination's limb storage if needed; self-assignment is a
   no-op. On allocation failure M_MEMORY_FULL is invoked and the
   destination is left unchanged. */
M_INLINE void
m_bitset_set(m_bitset_t d, const m_bitset_t s)
{
  M_B1TSET_CONTRACT(d);
  M_B1TSET_CONTRACT(s);
  if (M_UNLIKELY (d == s)) return;
  const size_t needAlloc = M_B1TSET_TO_ALLOC (s->size);
  if (M_LIKELY (s->size > 0)) {
    // Test if enough space in target
    if (s->size > M_B1TSET_FROM_ALLOC (d->alloc)) {
      m_b1tset_limb_ct *ptr = M_MEMORY_REALLOC (m_b1tset_limb_ct, d->ptr, needAlloc);
      if (M_UNLIKELY_NOMEM (ptr == NULL)) {
        M_MEMORY_FULL(needAlloc);
        return ;
      }
      d->ptr = ptr;
      d->alloc = needAlloc;
    }
    M_ASSERT(d->ptr != NULL);
    M_ASSERT(s->ptr != NULL);
    // Copy whole limbs; trailing bits beyond s->size are zero per the contract.
    memcpy (d->ptr, s->ptr, needAlloc * sizeof(m_b1tset_limb_ct) );
  }
  d->size = s->size;
  M_B1TSET_CONTRACT(d);
}
/* Initialize & set a bitset to another one (copy CONSTRUCTOR).
   d and s must be distinct objects. */
M_INLINE void
m_bitset_init_set(m_bitset_t d, const m_bitset_t s)
{
  M_ASSERT (d != s);
  m_bitset_init(d);
  m_bitset_set(d, s);
}
/* Initialize & move a bitset (CONSTRUCTOR) from another one (DESTRUCTOR).
   Steals s's limb storage into d; s is left in an intentionally invalid
   representation (alloc==1, ptr==NULL) so any further use trips the contract. */
M_INLINE void
m_bitset_init_move(m_bitset_t d, m_bitset_t s)
{
  M_B1TSET_CONTRACT(s);
  d->size = s->size;
  d->alloc = s->alloc;
  d->ptr = s->ptr;
  // Illegal representation of a bitset, to be detectable
  s->alloc = 1;
  s->ptr = NULL;
  M_B1TSET_CONTRACT(d);
}
/* Move a bitset into another one:
 * 'd' is first cleared (DESTRUCTOR of its previous value),
 * then it steals the representation of 's' (DESTRUCTOR of 's'). */
M_INLINE void
m_bitset_move(m_bitset_t d, m_bitset_t s)
{
  m_bitset_clear(d);
  m_bitset_init_move (d, s);
}
/* Set the bit at index 'i' of the bitset to the boolean value 'x' */
M_INLINE void
m_bitset_set_at(m_bitset_t v, size_t i, bool x)
{
  M_B1TSET_CONTRACT(v);
  M_ASSERT (v->ptr != NULL);
  M_ASSERT_INDEX(i, v->size);
  // Locate the limb and the bit within it
  const m_b1tset_limb_ct mask = ((m_b1tset_limb_ct)1) << (i % M_B1TSET_LIMB_BIT);
  m_b1tset_limb_ct *limb = &v->ptr[i / M_B1TSET_LIMB_BIT];
  if (x) {
    *limb |= mask;
  } else {
    *limb &= ~mask;
  }
  M_B1TSET_CONTRACT (v);
}
/* Flip (invert) the bit at index 'i' of the bitset */
M_INLINE void
m_bitset_flip_at(m_bitset_t v, size_t i)
{
  M_B1TSET_CONTRACT(v);
  M_ASSERT (v->ptr != NULL);
  M_ASSERT_INDEX(i, v->size);
  // XOR the single bit within its limb
  const m_b1tset_limb_ct mask = ((m_b1tset_limb_ct)1) << (i % M_B1TSET_LIMB_BIT);
  v->ptr[i / M_B1TSET_LIMB_BIT] ^= mask;
  M_B1TSET_CONTRACT (v);
}
/* Push back the boolean 'x' in the bitset (increasing the bitset size by 1).
 * May reallocate; on allocation failure M_MEMORY_FULL is called. */
M_INLINE void
m_bitset_push_back (m_bitset_t v, bool x)
{
  M_B1TSET_CONTRACT (v);
  if (M_UNLIKELY (v->size >= M_B1TSET_FROM_ALLOC (v->alloc))) {
    // Compute the needed allocation.
    const size_t needAlloc = M_B1TSET_INC_ALLOC_SIZE(v->alloc);
    // Check for integer overflow of the computed allocation
    if (M_UNLIKELY_NOMEM (needAlloc <= v->alloc)) {
      M_MEMORY_FULL(needAlloc * sizeof(m_b1tset_limb_ct));
      return;
    }
    // Alloc memory
    m_b1tset_limb_ct *ptr = M_MEMORY_REALLOC (m_b1tset_limb_ct, v->ptr, needAlloc);
    // Check if success (M_MEMORY_FULL may return or abort)
    if (M_UNLIKELY_NOMEM (ptr == NULL) ) {
      M_MEMORY_FULL(needAlloc * sizeof(m_b1tset_limb_ct));
      return;
    }
    v->ptr = ptr;
    v->alloc = needAlloc;
  }
  M_ASSERT(v->ptr != NULL);
  const size_t i = v->size;
  const size_t offset = i / M_B1TSET_LIMB_BIT;   // limb holding the new bit
  const size_t index = i % M_B1TSET_LIMB_BIT;    // position within the limb
  if (M_UNLIKELY(index == 0)) {
    // A new limb is used. Clear it before using it, so that the bits
    // above the size in the last limb always remain 0.
    v->ptr[offset] = 0;
  }
  // This is a branchless version: x is 0 or 1, so (0 - x) is all-zeros or all-ones.
  const m_b1tset_limb_ct mask = ((m_b1tset_limb_ct)1)<<index;
  v->ptr[offset] = (v->ptr[offset] & ~mask) | (mask & (0-(m_b1tset_limb_ct)x));
  v->size ++;
  M_B1TSET_CONTRACT (v);
}
/* Resize the bitset to have exactly 'size' bits.
 * Bits in [0, min(old size, size)) are preserved; new bits read as 0.
 * May reallocate; on allocation failure M_MEMORY_FULL is called. */
M_INLINE void
m_bitset_resize (m_bitset_t v, size_t size)
{
  M_B1TSET_CONTRACT (v);
  // Check for overflow of the allocation computation below
  if (M_UNLIKELY_NOMEM (size >= ((size_t)-1) - M_B1TSET_LIMB_BIT)) {
    M_MEMORY_FULL((size_t) -1);
    return;
  }
  // Compute the needed allocation.
  size_t newAlloc = M_B1TSET_TO_ALLOC (size);
  if (newAlloc > v->alloc) {
    // Allocate more limbs to store the bitset.
    m_b1tset_limb_ct *ptr = M_MEMORY_REALLOC (m_b1tset_limb_ct, v->ptr, newAlloc);
    if (M_UNLIKELY_NOMEM (ptr == NULL) ) {
      M_MEMORY_FULL(newAlloc * sizeof(m_b1tset_limb_ct));
      return;
    }
    v->ptr = ptr;
    v->alloc = newAlloc;
  }
  // Resize the bitset
  const size_t old_size = v->size;
  const size_t offset = size / M_B1TSET_LIMB_BIT;   // last limb of the new size
  const size_t index = size % M_B1TSET_LIMB_BIT;    // used bits in that limb (0 == full)
  if (size < old_size) {
    // Resize down the bitset: clear the now-unused bits of the last limb
    // (keeps the invariant that bits above the size read as 0).
    if (M_LIKELY(index != 0)) {
      const m_b1tset_limb_ct mask = (((m_b1tset_limb_ct)1)<<index)-1;
      v->ptr[offset] &= mask;
    }
  } else if (size > old_size) {
    // Resize up the bitset: all bits in [old_size, size) shall read as 0.
    // FIX: the previous code unconditionally zeroed the last limb of the
    // new size, destroying the stored bits when the old size already used
    // that limb (e.g. resizing from 1 to 5 bits wiped bit 0).
    const size_t old_offset = old_size / M_B1TSET_LIMB_BIT;
    const size_t old_index  = old_size % M_B1TSET_LIMB_BIT;
    if (old_index != 0) {
      // Clear the stale bits above the old size in its partial last limb
      v->ptr[old_offset] &= (((m_b1tset_limb_ct)1)<<old_index)-1;
    }
    // Clear every fresh limb now covered by the new size
    const size_t first = old_offset + (old_index != 0);
    const size_t last  = offset + (index != 0);
    for(size_t i = first; i < last; i++) {
      v->ptr[i] = 0;
    }
  }
  v->size = size;
  M_B1TSET_CONTRACT (v);
}
/* Reserve allocation in the bitset to accommodate at least 'alloc' bits
 * without further reallocation. May also shrink the allocation, but never
 * below the current size. Reserving 0 on an empty bitset frees everything. */
M_INLINE void
m_bitset_reserve (m_bitset_t v, size_t alloc)
{
  M_B1TSET_CONTRACT (v);
  size_t oldAlloc = M_B1TSET_TO_ALLOC (v->size);
  size_t newAlloc = M_B1TSET_TO_ALLOC (alloc);
  // We refuse to reduce allocation below current size
  if (oldAlloc > newAlloc) {
    newAlloc = oldAlloc;
  }
  if (M_UNLIKELY (newAlloc == 0)) {
    // Free all memory used by the bitset (size is 0 here)
    M_MEMORY_FREE (v->ptr);
    v->size = v->alloc = 0;
    v->ptr = NULL;
  } else {
    // Allocate more memory or reduce memory usage
    m_b1tset_limb_ct *ptr = M_MEMORY_REALLOC (m_b1tset_limb_ct, v->ptr, newAlloc);
    if (M_UNLIKELY_NOMEM (ptr == NULL) ) {
      M_MEMORY_FULL(newAlloc * sizeof(m_b1tset_limb_ct));
      return;
    }
    v->ptr = ptr;
    v->alloc = newAlloc;
  }
  M_B1TSET_CONTRACT (v);
}
/* Return the value of the boolean at index 'i'.
 * NOTE: Interface is a little bit different:
 * It doesn't return a pointer to the data, but the data itself.
 */
M_INLINE bool
m_bitset_get(const m_bitset_t v, size_t i)
{
  M_B1TSET_CONTRACT(v);
  M_ASSERT (v->ptr != NULL);
  M_ASSERT_INDEX(i, v->size);
  // Shift the limb down so the requested bit lands in position 0
  return ((v->ptr[i / M_B1TSET_LIMB_BIT] >> (i % M_B1TSET_LIMB_BIT)) & 1) != 0;
}
/* m_bitset_cget is the exact same service than m_bitset_get */
#define m_bitset_cget m_bitset_get
/* Pop back the last bit of the bitset (decreasing its size by 1).
 * If 'dest' is not NULL, the popped bit is stored in '*dest'. */
M_INLINE void
m_bitset_pop_back(bool *dest, m_bitset_t v)
{
  M_B1TSET_CONTRACT (v);
  M_ASSERT_INDEX (0, v->size);   // the bitset shall not be empty
  // Remove one item from the bitset
  v->size--;
  // Locate the popped bit (now just past the new size)
  const size_t offset = v->size / M_B1TSET_LIMB_BIT;
  const size_t index = v->size % M_B1TSET_LIMB_BIT;
  const m_b1tset_limb_ct mask = ((m_b1tset_limb_ct)1)<<index;
  if (dest) {
    // Read popped bit
    *dest = (v->ptr[offset] & mask) != 0;
  }
  // Clear the popped bit and every bit above it in this limb:
  // mask-1 keeps only the bits strictly below 'index'.
  v->ptr[offset] &= mask-1;
  M_B1TSET_CONTRACT (v);
}
/* Return the value of the first (front) bit of the bitset.
 * The bitset shall not be empty. */
M_INLINE bool
m_bitset_front(m_bitset_t v)
{
  M_B1TSET_CONTRACT (v);
  M_ASSERT_INDEX (0, v->size);
  return m_bitset_get(v, 0);
}
/* Return the value of the last (back) bit of the bitset.
 * The bitset shall not be empty. */
M_INLINE bool
m_bitset_back(m_bitset_t v)
{
  M_B1TSET_CONTRACT (v);
  M_ASSERT_INDEX (0, v->size);
  return m_bitset_get(v, v->size-1);
}
/* Test if the bitset is empty (no bits stored) */
M_INLINE bool
m_bitset_empty_p(m_bitset_t v)
{
  M_B1TSET_CONTRACT (v);
  return v->size == 0;
}
/* Return the number of bits of the bitset */
M_INLINE size_t
m_bitset_size(m_bitset_t v)
{
  M_B1TSET_CONTRACT (v);
  return v->size;
}
/* Return the capacity of the bitset, i.e. how many bits it can store
 * without reallocation */
M_INLINE size_t
m_bitset_capacity(m_bitset_t v)
{
  M_B1TSET_CONTRACT (v);
  return M_B1TSET_FROM_ALLOC (v->alloc);
}
/* Swap the bits at index 'i' and 'j' of the bitset */
M_INLINE void
m_bitset_swap_at (m_bitset_t v, size_t i, size_t j)
{
  // Consistency fix: validate the invariant like every other accessor does
  M_B1TSET_CONTRACT (v);
  M_ASSERT_INDEX(i, v->size);
  M_ASSERT_INDEX(j, v->size);
  const bool i_val = m_bitset_get(v, i);
  const bool j_val = m_bitset_get(v, j);
  m_bitset_set_at (v, i, j_val);
  m_bitset_set_at (v, j, i_val);
  M_B1TSET_CONTRACT (v);
}
/* Swap the two bitsets (O(1): only the representations are exchanged) */
M_INLINE void
m_bitset_swap (m_bitset_t v1, m_bitset_t v2)
{
  M_B1TSET_CONTRACT (v1);
  M_B1TSET_CONTRACT (v2);
  M_SWAP (size_t, v1->size, v2->size);
  M_SWAP (size_t, v1->alloc, v2->alloc);
  M_SWAP (m_b1tset_limb_ct *, v1->ptr, v2->ptr);
  M_B1TSET_CONTRACT (v1);
  M_B1TSET_CONTRACT (v2);
}
/* (INTERNAL) Left shift of the limb array (ptr, n limbs) by 1 bit,
 * injecting 'carry' into the lowest position.
 * Return the bit shifted out of the highest position. */
M_INLINE m_b1tset_limb_ct
m_b1tset_lshift(m_b1tset_limb_ct ptr[], size_t n, m_b1tset_limb_ct carry)
{
  size_t i = 0;
  while (i < n) {
    const m_b1tset_limb_ct cur = ptr[i];
    ptr[i] = (cur << 1) | carry;
    carry = cur >> (M_B1TSET_LIMB_BIT-1);
    i++;
  }
  return carry;
}
/* (INTERNAL) Right shift of the limb array (ptr, n limbs) by 1 bit,
 * injecting 'carry' into the highest position.
 * Return the bit shifted out of the lowest position.
 * The limbs are processed from the highest one downwards. */
M_INLINE m_b1tset_limb_ct
m_b1tset_rshift(m_b1tset_limb_ct ptr[], size_t n, m_b1tset_limb_ct carry)
{
  size_t i = n;
  while (i-- > 0) {
    const m_b1tset_limb_ct cur = ptr[i];
    ptr[i] = (cur >> 1) | (carry << (M_B1TSET_LIMB_BIT-1));
    carry = cur & 1;
  }
  return carry;
}
/* Insert a new bit of value 'value' at position 'key' in the bitset 'set',
 * shifting up by one all the bits from 'key' onwards. */
M_INLINE void
m_bitset_push_at(m_bitset_t set, size_t key, bool value)
{
  M_B1TSET_CONTRACT (set);
  // First push a dummy value to extend the array to the right size
  m_bitset_push_back(set, false);
  M_ASSERT (set->ptr != NULL);
  M_ASSERT_INDEX(key, set->size);
  // Then shift up the bits in the limb holding 'key'
  size_t offset = key / M_B1TSET_LIMB_BIT;
  size_t index = key % M_B1TSET_LIMB_BIT;
  m_b1tset_limb_ct v = set->ptr[offset];
  m_b1tset_limb_ct mask = (((m_b1tset_limb_ct)1)<<index)-1;
  m_b1tset_limb_ct carry = (v >> (M_B1TSET_LIMB_BIT-1) );
  // Keep the bits below 'key', insert the new bit, shift up the bits above.
  // FIX: cast 'value' to the limb type before shifting. The previous
  // '(unsigned int) value << index' was undefined behavior for
  // index >= 32 with 64-bit limbs (shift count >= width of the operand,
  // ISO C 6.5.7).
  v = (v & mask) | (((m_b1tset_limb_ct) value) << index) | ((v & ~mask) << 1);
  set->ptr[offset] = v;
  // Propagate the carry through the remaining upper limbs
  size_t size = (set->size + M_B1TSET_LIMB_BIT - 1) / M_B1TSET_LIMB_BIT;
  M_ASSERT (size >= offset + 1);
  v = m_b1tset_lshift(&set->ptr[offset+1], size - offset - 1, carry);
  // v is unused as it should be zero (the bit shifted out is the dummy
  // pushed above).
  M_ASSERT(v == 0);
  (void) v;
  M_B1TSET_CONTRACT (set);
}
/* Remove the bit at position 'key' in the bitset, shifting down by one all
 * the following bits. If 'dest' is not NULL, the removed bit is stored in
 * '*dest'. */
M_INLINE void
m_bitset_pop_at(bool *dest, m_bitset_t set, size_t key)
{
  M_B1TSET_CONTRACT (set);
  M_ASSERT (set->ptr != NULL);
  M_ASSERT_INDEX(key, set->size);
  if (dest) {
    *dest = m_bitset_get (set, key);
  }
  // Shift right by one bit all the limbs above the one holding 'key'
  size_t offset = key / M_B1TSET_LIMB_BIT;
  size_t index = key % M_B1TSET_LIMB_BIT;
  size_t size = (set->size + M_B1TSET_LIMB_BIT - 1) / M_B1TSET_LIMB_BIT;
  m_b1tset_limb_ct v, mask, carry;
  carry = m_b1tset_rshift(&set->ptr[offset+1], size - offset - 1, false);
  // In the limb of 'key': keep the bits below 'key', shift down the bits
  // above it, and inject the carry coming from the upper limbs at the top.
  v = set->ptr[offset];
  mask = (((m_b1tset_limb_ct)1)<<index)-1;
  v = (v & mask) | ((v>>1) & ~mask) | (carry << (M_B1TSET_LIMB_BIT-1)) ;
  set->ptr[offset] = v;
  // Decrease size
  set->size --;
  M_B1TSET_CONTRACT (set);
}
/* Test if two bitsets are equal (same size, same bits) */
M_INLINE bool
m_bitset_equal_p (const m_bitset_t set1, const m_bitset_t set2)
{
  M_B1TSET_CONTRACT (set1);
  M_B1TSET_CONTRACT (set2);
  if (set1->size != set2->size)
    return false;
  /* Compare limb per limb rather than bit per bit */
  const size_t limbSize = (set1->size + M_B1TSET_LIMB_BIT -1) / M_B1TSET_LIMB_BIT;
  size_t i = 0;
  while (i < limbSize) {
    if (set1->ptr[i] != set2->ptr[i])
      return false;
    i++;
  }
  return true;
}
/* Initialize an iterator to the first bit of the bitset */
M_INLINE void
m_bitset_it(m_bitset_it_t it, m_bitset_t set)
{
  M_B1TSET_CONTRACT (set);
  it->index = 0;
  it->set = set;
}
/* Initialize an iterator to reference the same bit as the given one */
M_INLINE void
m_bitset_it_set(m_bitset_it_t it, const m_bitset_it_t itorg)
{
  M_ASSERT (it != NULL && itorg != NULL);
  it->index = itorg->index;
  it->set = itorg->set;
}
/* Initialize an iterator to reference the last bit of the bitset.
 * NOTE: on an empty bitset, size-1 wraps to SIZE_MAX, which end_p reports
 * as the end of the iteration. */
M_INLINE void
m_bitset_it_last(m_bitset_it_t it, m_bitset_t set)
{
  M_B1TSET_CONTRACT (set);
  it->index = set->size-1;
  it->set = set;
}
/* Initialize an iterator to reference no valid bit of the bitset (end) */
M_INLINE void
m_bitset_it_end(m_bitset_it_t it, m_bitset_t set)
{
  M_B1TSET_CONTRACT (set);
  it->index = set->size;
  it->set = set;
}
/* Test if an iterator references no valid bit of the bitset anymore */
M_INLINE bool
m_bitset_end_p(const m_bitset_it_t it)
{
  M_ASSERT (it != NULL && it->set != NULL);
  return (it->index) >= (it->set->size);
}
/* Test if an iterator references the last (or end) bit of the bitset */
M_INLINE bool
m_bitset_last_p(const m_bitset_it_t it)
{
  M_ASSERT (it != NULL && it->set != NULL);
  /* NOTE: Can not compute 'size-1' due to potential overflow
     if size is 0 */
  return (it->index+1) >= (it->set->size);
}
/* Test if both iterators reference the same bit of the same bitset */
M_INLINE bool
m_bitset_it_equal_p(const m_bitset_it_t it1, const m_bitset_it_t it2)
{
  M_ASSERT (it1 != NULL && it2 != NULL);
  return it1->index == it2->index && it1->set == it2->set;
}
/* Move the iterator to the next bit */
M_INLINE void
m_bitset_next(m_bitset_it_t it)
{
  M_ASSERT (it != NULL && it->set != NULL);
  it->index++;
}
/* Move the iterator to the previous bit.
 * NOTE: moving before the first bit wraps the index to SIZE_MAX,
 * which end_p reports as the end of the iteration. */
M_INLINE void
m_bitset_previous(m_bitset_it_t it)
{
  M_ASSERT (it != NULL && it->set != NULL);
  it->index--;
}
// There is no _ref as it is not possible to modify the value using the IT interface
/* Return a pointer to (a copy of) the bit referenced by the iterator.
 * The copy is stored inside the iterator itself, so only one reference
 * is valid at a time per iterator. */
M_INLINE const bool *
m_bitset_cref(m_bitset_it_t it)
{
  M_ASSERT (it != NULL && it->set != NULL);
  it->value = m_bitset_get(it->set, it->index);
  return &it->value;
}
/* Output the bitset as a formatted text ("[0110...]") in a FILE */
M_INLINE void
m_bitset_out_str(FILE *file, const m_bitset_t set)
{
  M_B1TSET_CONTRACT (set);
  M_ASSERT(file != NULL);
  fputc ('[', file);
  // One character per stored bit
  for(size_t i = 0; i < set->size; i++) {
    fputc (m_bitset_get (set, i) ? '1' : '0', file);
  }
  fputc (']', file);
}
/* Input the bitset from its formatted text representation in a FILE.
 * Return true if a complete "[01...]" representation was read. */
M_INLINE bool
m_bitset_in_str(m_bitset_t set, FILE *file)
{
  M_B1TSET_CONTRACT (set);
  M_ASSERT(file != NULL);
  m_bitset_reset(set);
  int c = fgetc(file);
  if (M_UNLIKELY (c != '[')) return false;
  // Consume the '0' / '1' characters, pushing one bit per character
  for (c = fgetc(file); c == '0' || c == '1'; c = fgetc(file)) {
    m_bitset_push_back (set, c == '1');
  }
  M_B1TSET_CONTRACT (set);
  return c == ']';
}
/* Parse the bitset from its formatted text representation in a C string.
 * On return, '*endptr' (if endptr is not NULL) points just past the last
 * consumed character. Return true on success.
 * FIX: accept the empty bitset "[]" — the previous do/while rejected it,
 * breaking the round-trip with m_bitset_out_str / m_bitset_get_str and
 * diverging from m_bitset_in_str which accepts it. */
M_INLINE bool
m_bitset_parse_str(m_bitset_t set, const char str[], const char **endptr)
{
  M_B1TSET_CONTRACT (set);
  M_ASSERT(str != NULL);
  bool success = false;
  m_bitset_reset(set);
  char c = *str++;
  if (M_UNLIKELY(c != '[')) goto exit;
  c = *str++;
  // Accept any sequence of '0' / '1' characters, including the empty one
  while (c == '0' || c == '1') {
    const bool b = (c == '1');
    m_bitset_push_back (set, b);
    c = *str++;
  }
  M_B1TSET_CONTRACT (set);
  success = (c == ']');
exit:
  if (endptr) *endptr = str;
  return success;
}
/* Set the bitset from its formatted text representation in a C string.
 * Return true if the full representation was successfully parsed. */
M_INLINE bool
m_bitset_set_str(m_bitset_t dest, const char str[])
{
  return m_bitset_parse_str(dest, str, NULL);
}
/* Perform an AND operation between the bitsets,
 * up to the minimum size of both bitsets.
 * 'dest' is truncated to that minimum size. */
M_INLINE void
m_bitset_and(m_bitset_t dest, const m_bitset_t src)
{
  M_B1TSET_CONTRACT(dest);
  M_B1TSET_CONTRACT(src);
  const size_t s = M_MIN(dest->size, src->size);
  const size_t n = (s + M_B1TSET_LIMB_BIT -1) / M_B1TSET_LIMB_BIT;
  size_t i = 0;
  while (i < n) {
    dest->ptr[i] &= src->ptr[i];
    i++;
  }
  // Reduce the dest size to the minimum size between both
  dest->size = s;
  M_B1TSET_CONTRACT(dest);
}
/* Perform an OR operation between the bitsets,
 * up to the minimum size of both bitsets.
 * 'dest' is truncated to that minimum size. */
M_INLINE void
m_bitset_or(m_bitset_t dest, const m_bitset_t src)
{
  M_B1TSET_CONTRACT(dest);
  M_B1TSET_CONTRACT(src);
  const size_t s = M_MIN(dest->size, src->size);
  const size_t n = (s + M_B1TSET_LIMB_BIT - 1) / M_B1TSET_LIMB_BIT;
  size_t i = 0;
  while (i < n) {
    dest->ptr[i] |= src->ptr[i];
    i++;
  }
  // Reduce the dest size to the minimum size between both
  dest->size = s;
  M_B1TSET_CONTRACT(dest);
}
/* Perform an XOR operation between the bitsets,
 * up to the minimum size of both bitsets.
 * 'dest' is truncated to that minimum size. */
M_INLINE void
m_bitset_xor(m_bitset_t dest, const m_bitset_t src)
{
  M_B1TSET_CONTRACT(dest);
  M_B1TSET_CONTRACT(src);
  size_t s = M_MIN(dest->size, src->size);
  size_t n = s / M_B1TSET_LIMB_BIT;   // number of full limbs
  size_t m = s % M_B1TSET_LIMB_BIT;   // remaining bits in the partial last limb
  for(size_t i = 0 ; i < n; i++)
    dest->ptr[i] ^= src->ptr[i];
  if (M_LIKELY(m)) {
    // XOR the partial last limb and mask it, so that the bits of dest
    // above the new size are cleared.
    m_b1tset_limb_ct mask = (((m_b1tset_limb_ct)1) << m) - 1;
    dest->ptr[n] = (dest->ptr[n] ^ src->ptr[n]) & mask;
  }
  // Reduce the dest size to the minimum size between both
  dest->size = s;
  M_B1TSET_CONTRACT(dest);
}
/* Perform a NOT operation of the bitset (flip every stored bit in place) */
M_INLINE void
m_bitset_not(m_bitset_t dest)
{
  M_B1TSET_CONTRACT(dest);
  size_t s = dest->size;
  size_t n = s / M_B1TSET_LIMB_BIT;   // number of full limbs
  size_t m = s % M_B1TSET_LIMB_BIT;   // remaining bits in the partial last limb
  for(size_t i = 0 ; i < n; i++)
    dest->ptr[i] = ~ (dest->ptr[i]);
  if (M_LIKELY(m)) {
    // Invert the partial last limb and mask it, so that the bits above
    // the size remain 0.
    m_b1tset_limb_ct mask = (((m_b1tset_limb_ct)1) << m) - 1;
    dest->ptr[n] = (~ dest->ptr[n]) & mask;
  }
  M_B1TSET_CONTRACT(dest);
}
/* Compute a hash of the bitset */
M_INLINE size_t
m_bitset_hash(const m_bitset_t set)
{
  M_B1TSET_CONTRACT(set);
  size_t s = set->size;
  size_t n = (s + M_B1TSET_LIMB_BIT-1) / M_B1TSET_LIMB_BIT;  // number of used limbs
  M_HASH_DECL(hash);
  // Hash limb per limb rather than bit per bit
  for(size_t i = 0 ; i < n; i++)
    M_HASH_UP(hash, set->ptr[i]);
  return M_HASH_FINAL (hash);
}
/* Count the number of leading zero bits (from the highest bit, at index
 * size-1, downwards). Return 0 for an empty bitset. */
M_INLINE size_t
m_bitset_clz(const m_bitset_t set)
{
  M_B1TSET_CONTRACT(set);
  size_t s = set->size;
  if (M_UNLIKELY (s == 0)) {
    return 0;
  }
  size_t n = (s -1) / M_B1TSET_LIMB_BIT;   // index of the last used limb
  size_t m = s % M_B1TSET_LIMB_BIT;        // used bits in the last limb (0 == full)
  m_b1tset_limb_ct limb = set->ptr[n];
  if (m) {
    // Ignore the bits above the size in the partial last limb
    m_b1tset_limb_ct mask = (((m_b1tset_limb_ct)1) << m) - 1;
    limb &= mask;
  } else {
    m = M_B1TSET_LIMB_BIT;
  }
  s = 0;
  // Walk down the limbs while they are all-zero: the first iteration
  // accounts for the 'm' valid bits of the top limb, then full limbs.
  while (limb == 0 && n > 0) {
    s += m;
    limb = set->ptr[--n];
    m = M_B1TSET_LIMB_BIT;
  }
  // Add the leading zeros of the first non-zero limb, counted within its
  // 'm' valid bits. NOTE(review): the all-zero case relies on
  // m_core_clz64(0) == 64 — confirm against m-core.h.
  s += m_core_clz64(limb) - (M_B1TSET_LIMB_BIT - m);
  return s;
}
/* Count the number of trailing zero bits (starting from index 0 upwards).
 * Return 0 for an empty bitset; return the size for an all-zero bitset. */
M_INLINE size_t
m_bitset_ctz(const m_bitset_t set)
{
  M_B1TSET_CONTRACT(set);
  size_t s = set->size;
  if (M_UNLIKELY (s == 0)) {
    return 0;
  }
  size_t i = 0, n = (s -1) / M_B1TSET_LIMB_BIT;  // n: index of the last used limb
  size_t m = s % M_B1TSET_LIMB_BIT;              // m: used bits in the last limb (0 == full)
  m_b1tset_limb_ct limb = set->ptr[0];
  s = 0;
  // Skip the all-zero limbs before the last one
  while (limb == 0 && i < n) {
    s += M_B1TSET_LIMB_BIT;
    limb = set->ptr[++i];
  }
  if (i == n && m != 0) {
    // Ignore the bits above the size in the partial last limb
    m_b1tset_limb_ct mask = (((m_b1tset_limb_ct)1) << m) - 1;
    limb &= mask;
  }
  const unsigned ctz = m_core_ctz64(limb);
  if (ctz < M_B1TSET_LIMB_BIT) {
    s += ctz;
  } else {
    // The examined limb has no valid bit set: count only its valid bits.
    // FIX: when the size is a multiple of the limb width (m == 0), the
    // last limb holds M_B1TSET_LIMB_BIT valid bits, not 0 — the previous
    // code returned e.g. 0 instead of 64 for a 64-bit all-zero bitset.
    s += (m != 0) ? m : M_B1TSET_LIMB_BIT;
  }
  return s;
}
// For GCC or CLANG or ICC: use the compiler builtin
#if defined(__GNUC__)
/* Return the number of bits set in the limb */
M_INLINE size_t m_b1tset_popcount64(m_b1tset_limb_ct limb)
{
  return (size_t) __builtin_popcountll(limb);
}
#else
// MSVC __popcnt64 may not exist on the target architecture (no emulation layer)
// Use emulation layer: https://en.wikipedia.org/wiki/Hamming_weight
/* Return the number of bits set in the limb (portable SWAR emulation) */
M_INLINE size_t m_b1tset_popcount64(m_b1tset_limb_ct limb)
{
  limb = limb - ((limb >> 1) & 0x5555555555555555ULL);
  limb = (limb & 0x3333333333333333ULL) + ((limb >> 2) & 0x3333333333333333ULL);
  limb = (limb + (limb >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
  return (limb * 0x0101010101010101ULL) >> 56;
}
#endif
/* Count the number of bits set to 1 in the bitset.
 * NOTE: relies on the invariant that the bits above the size in the
 * last limb are kept at 0. */
M_INLINE size_t
m_bitset_popcount(const m_bitset_t set)
{
  M_B1TSET_CONTRACT(set);
  const size_t limbs = (set->size + M_B1TSET_LIMB_BIT - 1) / M_B1TSET_LIMB_BIT;
  size_t total = 0;
  for(size_t i = 0; i < limbs; i++) {
    total += m_b1tset_popcount64(set->ptr[i]);
  }
  return total;
}
/* Oplist for a bitset.
 * FIX: removed a stray trailing ',' after the EMPTY_P() entry which,
 * combined with the leading ',' of the next line, expanded to an empty
 * oplist entry (",,") and broke oplist parsing. */
#define M_BITSET_OPLIST                                                       \
  (INIT(m_bitset_init)                                                       \
  ,INIT_SET(m_bitset_init_set)                                               \
  ,INIT_WITH(API_1(M_INIT_VAI))                                              \
  ,SET(m_bitset_set)                                                         \
  ,CLEAR(m_bitset_clear)                                                     \
  ,INIT_MOVE(m_bitset_init_move)                                             \
  ,MOVE(m_bitset_move)                                                       \
  ,SWAP(m_bitset_swap)                                                       \
  ,TYPE(m_bitset_t)                                                          \
  ,SUBTYPE(bool)                                                             \
  ,EMPTY_P(m_bitset_empty_p)                                                 \
  ,GET_SIZE(m_bitset_size)                                                   \
  ,IT_TYPE(m_bitset_it_t)                                                    \
  ,IT_FIRST(m_bitset_it)                                                     \
  ,IT_SET(m_bitset_it_set)                                                   \
  ,IT_LAST(m_bitset_it_last)                                                 \
  ,IT_END(m_bitset_it_end)                                                   \
  ,IT_END_P(m_bitset_end_p)                                                  \
  ,IT_LAST_P(m_bitset_last_p)                                                \
  ,IT_EQUAL_P(m_bitset_it_equal_p)                                           \
  ,IT_NEXT(m_bitset_next)                                                    \
  ,IT_PREVIOUS(m_bitset_previous)                                            \
  ,IT_CREF(m_bitset_cref)                                                    \
  ,RESET(m_bitset_reset)                                                     \
  ,PUSH(m_bitset_push_back)                                                  \
  ,POP(m_bitset_pop_back)                                                    \
  ,HASH(m_bitset_hash)                                                       \
  ,GET_STR(m_bitset_get_str)                                                 \
  ,OUT_STR(m_bitset_out_str)                                                 \
  ,PARSE_STR(m_bitset_parse_str)                                             \
  ,IN_STR(m_bitset_in_str)                                                   \
  ,EQUAL(m_bitset_equal_p)                                                   \
  )
/* Register the OPLIST as a global one */
#define M_OPL_m_bitset_t() M_BITSET_OPLIST
// TODO: set_at2, insert_v, remove_v
#if M_USE_SMALL_NAME
/* Export the bitset API under the historical short names
 * (without the m_ prefix) */
#define bitset_s m_bitset_s
#define bitset_t m_bitset_t
#define bitset_ptr m_bitset_ptr
#define bitset_srcptr m_bitset_srcptr
#define bitset_it_s m_bitset_it_s
#define bitset_it_t m_bitset_it_t
#define bitset_init m_bitset_init
#define bitset_reset m_bitset_reset
#define bitset_clear m_bitset_clear
#define bitset_set m_bitset_set
#define bitset_init_set m_bitset_init_set
#define bitset_init_move m_bitset_init_move
#define bitset_move m_bitset_move
#define bitset_set_at m_bitset_set_at
#define bitset_flip_at m_bitset_flip_at
#define bitset_push_back m_bitset_push_back
#define bitset_resize m_bitset_resize
#define bitset_reserve m_bitset_reserve
#define bitset_get m_bitset_get
#define bitset_pop_back m_bitset_pop_back
#define bitset_front m_bitset_front
#define bitset_back m_bitset_back
#define bitset_empty_p m_bitset_empty_p
#define bitset_size m_bitset_size
#define bitset_capacity m_bitset_capacity
#define bitset_swap_at m_bitset_swap_at
#define bitset_swap m_bitset_swap
#define bitset_push_at m_bitset_push_at
#define bitset_pop_at m_bitset_pop_at
#define bitset_equal_p m_bitset_equal_p
#define bitset_it m_bitset_it
#define bitset_it_set m_bitset_it_set
#define bitset_it_last m_bitset_it_last
#define bitset_it_end m_bitset_it_end
#define bitset_end_p m_bitset_end_p
#define bitset_last_p m_bitset_last_p
#define bitset_it_equal_p m_bitset_it_equal_p
#define bitset_next m_bitset_next
#define bitset_previous m_bitset_previous
#define bitset_cref m_bitset_cref
#define bitset_out_str m_bitset_out_str
#define bitset_in_str m_bitset_in_str
#define bitset_parse_str m_bitset_parse_str
#define bitset_set_str m_bitset_set_str
#define bitset_and m_bitset_and
#define bitset_or m_bitset_or
#define bitset_xor m_bitset_xor
#define bitset_not m_bitset_not
#define bitset_hash m_bitset_hash
#define bitset_clz m_bitset_clz
#define bitset_ctz m_bitset_ctz
#define bitset_popcount m_bitset_popcount
#define bitset_get_str m_bitset_get_str
#define BITSET_OPLIST M_BITSET_OPLIST
#define M_OPL_bitset_t M_OPL_m_bitset_t
#endif
M_END_PROTECTED_CODE
#endif
// NOTE: Define this function only if m-string has been included
#if !defined(MSTARLIB_BITSET_STRING_H) && defined(MSTARLIB_STRING_H)
#define MSTARLIB_BITSET_STRING_H
M_BEGIN_PROTECTED_CODE
/* Output to a m_string_t 'str' the formatted text representation of the
 * bitset 'set', or append it to the string (append=true) */
M_INLINE void
m_bitset_get_str(m_string_t str, const m_bitset_t set, bool append)
{
  M_B1TSET_CONTRACT (set);
  M_ASSERT(str != NULL);
  // Either start the string with "[" or append "[" to it
  (append ? m_string_cat_cstr : m_string_set_cstr) (str, "[");
  // One character per stored bit
  for(size_t i = 0; i < set->size; i++) {
    const bool b = m_bitset_get (set, i);
    const char c = b ? '1' : '0';
    m_string_push_back (str, c);
  }
  m_string_push_back (str, ']');
}
M_END_PROTECTED_CODE
#endif

1499
components/mlib/m-bptree.h Normal file

File diff suppressed because it is too large Load Diff

1350
components/mlib/m-buffer.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,835 @@
/*
* M*LIB - Concurrent memory pool allocator
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_CONCURRENT_MEMPOOL_H
#define MSTARLIB_CONCURRENT_MEMPOOL_H
#include "m-core.h"
#include "m-atomic.h"
#include "m-genint.h"
M_BEGIN_PROTECTED_CODE
/* Minimum number of nodes per group of nodes */
#define M_CMEMP00L_MIN_NODE_PER_GROUP 16
/* Expand all the layers of a concurrent mempool for the given type:
 * the internal singly-linked list, the lock-free queue of node groups,
 * the per-thread mempool, the system allocator fallback and the
 * lock-free mempool front-end. */
#define M_C_MEMPOOL_DEF(name, type_t) \
 M_BEGIN_PROTECTED_CODE \
 M_CMEMP00L_DEF_SINGLY_LIST(name, type_t) \
 M_CMEMP00L_DEF_LF_QUEUE(name, type_t) \
 M_CMEMP00L_DEF_LFMP_THREAD_MEMPOOL(name, type_t) \
 M_CMEMP00L_DEF_SYSTEM_ALLOC(name, type_t) \
 M_CMEMP00L_DEF_LF_MEMPOOL(name, type_t) \
 M_END_PROTECTED_CODE
/* Classic internal Singly List without allocation:
 * the nodes are provided by the caller; push/pop/move are O(1),
 * clear releases every node with M_MEMORY_DEL. */
#define M_CMEMP00L_DEF_SINGLY_LIST(name, type_t) \
 \
 typedef struct M_F(name, _slist_node_s) { \
 struct M_F(name, _slist_node_s) *next; \
 type_t data; \
 } M_F(name, _slist_node_ct); \
 \
 typedef struct M_F(name, _slist_node_s) *M_F(name, _slist_ct)[1]; \
 \
 M_INLINE void \
 M_F(name, _slist_init)(M_F(name, _slist_ct) list) \
 { \
 *list = NULL; \
 } \
 \
 M_INLINE void \
 M_F(name, _slist_push)(M_F(name, _slist_ct) list, \
 M_F(name, _slist_node_ct) *node) \
 { \
 node->next = *list; \
 *list = node; \
 } \
 \
 M_INLINE M_F(name, _slist_node_ct) * \
 M_F(name, _slist_pop)(M_F(name, _slist_ct) list) \
 { \
 M_ASSERT (*list != NULL); \
 M_F(name, _slist_node_ct) *node = *list; \
 *list = node->next; \
 M_IF_DEBUG(node->next = NULL;) \
 return node; \
 } \
 \
 M_INLINE bool \
 M_F(name, _slist_empty_p)(M_F(name, _slist_ct) list) \
 { \
 return *list == NULL; \
 } \
 \
 M_INLINE void \
 M_F(name, _slist_move)(M_F(name, _slist_ct) list, \
 M_F(name, _slist_ct) src) \
 { \
 *list = *src; \
 *src = NULL; \
 } \
 \
 M_INLINE void \
 M_F(name, _slist_clear)(M_F(name, _slist_ct) list) \
 { \
 M_F(name, _slist_node_ct) *it = *list, *next; \
 while (it) { \
 next = it->next; \
 M_MEMORY_DEL(it); \
 it = next; \
 } \
 *list = NULL; \
 } \
/* Lock Free free queue list (not generic one) of lists without allocation
Based on Michael & Scott Lock Free Queue List algorithm.
Each list is considered empty if there is only one node within.
This LF Queue List doesn't try to prevent the ABA problem. It is up to the
caller to avoid recycling the nodes too fast.
Each list has its own unique NIL ptr in order to avoid issues when
migrating a node from a Q to another: in the following scenario,
- Thread 1 performs a PUSH of N in Q1 with Q1 empty (only node is NA)
NA.next is NIL.
- Thread 1 is interrupted just before the CAS on NA.next
- Thread 2 performs a successful push of NB in Q1. NA.next is set to NB.
- Thread 2 performs a successful pop of NA in Q1
- Thread 2 performs a successful push of NA in Q2. NA.next is set to NIL.
- Thread 1 is restored and will succeed as NA.next is once again NIL.
In order to prevent the last CAS to succeed, each queue uses its own NIL pointer.
It is a derived problem of the ABA problem.
*/
/* TODO: Optimize alignement to reduce memory consumption. NIL object can use []
to reduce memory consumption too (non compatible with C++ ...) */
/* Lock-free queue of node groups (see the algorithm description in the
 * comment block above the macro). */
#define M_CMEMP00L_DEF_LF_QUEUE(name, type_t) \
 \
 typedef struct M_F(name, _lf_node_s) { \
 M_ATTR_EXTENSION _Atomic(struct M_F(name, _lf_node_s) *) next; \
 m_gc_atomic_ticket_ct cpt; \
 M_F(name, _slist_ct) list; \
 } M_F(name, _lf_node_t); \
 \
 typedef struct M_F(name, _lflist_s) { \
 M_ATTR_EXTENSION _Atomic(M_F(name, _lf_node_t) *) head; \
 char align1[M_ALIGN_FOR_CACHELINE_EXCLUSION]; \
 M_ATTR_EXTENSION _Atomic(M_F(name, _lf_node_t) *) tail; \
 char align2[M_ALIGN_FOR_CACHELINE_EXCLUSION]; \
 M_F(name, _lf_node_t) nil; \
 } M_F(name, _lflist_ct)[1]; \
 \
 M_INLINE void \
 M_F(name, _lflist_init)(M_F(name, _lflist_ct) list, \
 M_F(name, _lf_node_t) *node) \
 { \
 atomic_init(&list->head, node); \
 atomic_init(&list->tail, node); \
 atomic_store_explicit(&node->next, &list->nil, memory_order_relaxed); \
 } \
 \
 M_INLINE bool \
 M_F(name, _lflist_empty_p)(M_F(name, _lflist_ct) list) \
 { \
 return atomic_load(&list->tail) == atomic_load(&list->head); \
 } \
 \
 M_INLINE void \
 M_F(name, _lflist_push)(M_F(name, _lflist_ct) list, \
 M_F(name, _lf_node_t) *node, m_core_backoff_ct bkoff) \
 { \
 M_F(name, _lf_node_t) *tail; \
 M_F(name, _lf_node_t) *next; \
 \
 atomic_store_explicit(&node->next, &list->nil, memory_order_relaxed); \
 m_core_backoff_reset(bkoff); \
 while (true) { \
 tail = atomic_load(&list->tail); \
 next = atomic_load_explicit(&tail->next, memory_order_acquire); \
 if (M_UNLIKELY(next != &list->nil)) { \
 /* Tail was not pointing to the last node \
 Try to swing Tail to the next node */ \
 atomic_compare_exchange_weak_explicit(&list->tail, \
 &tail, next, \
 memory_order_release, \
 memory_order_relaxed); \
 } else { \
 /* Try to link node at the end of the linked list */ \
 if (atomic_compare_exchange_strong_explicit(&tail->next, \
 &next, node, \
 memory_order_release, \
 memory_order_relaxed)) \
 break; \
 m_core_backoff_wait(bkoff); \
 } \
 } \
 /* Enqueue is done. Try to swing Tail to the inserted node \
 If it fails, someone else will do it or has already did it. */ \
 atomic_compare_exchange_strong_explicit(&list->tail, &tail, node, \
 memory_order_acq_rel, \
 memory_order_relaxed); \
 } \
 \
 M_INLINE M_F(name, _lf_node_t) * \
 M_F(name, _lflist_pop)(M_F(name, _lflist_ct) list, m_core_backoff_ct bkoff) \
 { \
 M_F(name, _lf_node_t) *head; \
 M_F(name, _lf_node_t) *tail; \
 M_F(name, _lf_node_t) *next; \
 \
 /* Reinitialize backoff */ \
 m_core_backoff_reset(bkoff); \
 while (true) { \
 head = atomic_load(&list->head); \
 tail = atomic_load(&list->tail); \
 next = atomic_load(&head->next); \
 /* Are head, tail, and next consistent?*/ \
 if (M_LIKELY(head == \
 atomic_load_explicit(&list->head, memory_order_relaxed))) \
 { \
 /* Is queue empty or Tail falling behind? */ \
 if (head == tail) { \
 /* Is queue empty? */ \
 if (next == &list->nil) \
 return NULL; \
 /* Tail is falling behind. Try to advance it */ \
 atomic_compare_exchange_strong_explicit(&list->tail, &tail, \
 next, \
 memory_order_release, \
 memory_order_relaxed); \
 } else { \
 /* Try to swing Head to the next node */ \
 if (atomic_compare_exchange_strong_explicit(&list->head, \
 &head, next, \
 memory_order_release, \
 memory_order_relaxed)) { \
 break; \
 } \
 /* Failure: perform a random exponential backoff */ \
 m_core_backoff_wait(bkoff); \
 } \
 } \
 } \
 /* dequeue returns an element that becomes the new dummy element (the new head), \
 and the former dummy element (the former head) is removed: \
 Since we want a link of free list, and we don't care about the content itself, \
 provided that the node we return is older than the one we should return, \
 Therefore, we return the previous dummy head. \
 As such, it is not the original MSqueue algorithm. */ \
 M_IF_DEBUG(atomic_store(&head->next, (M_F(name, _lf_node_t) *) 0);) \
 return head; \
 } \
 \
 /* Dequeue a node if the node is old enough */ \
 M_INLINE M_F(name, _lf_node_t) * \
 M_F(name, _lflist_pop_if)(M_F(name, _lflist_ct) list, \
 m_gc_ticket_ct age, m_core_backoff_ct bkoff) \
 { \
 M_F(name, _lf_node_t) *head; \
 M_F(name, _lf_node_t) *tail; \
 M_F(name, _lf_node_t) *next; \
 \
 m_core_backoff_reset(bkoff); \
 while (true) { \
 head = atomic_load(&list->head); \
 tail = atomic_load(&list->tail); \
 next = atomic_load(&head->next); \
 if (M_LIKELY(head == atomic_load_explicit(&list->head, memory_order_relaxed))) \
 { \
 if (head == tail) { \
 if (next == &list->nil) \
 return NULL; \
 atomic_compare_exchange_strong_explicit(&list->tail, &tail, next, \
 memory_order_release, \
 memory_order_relaxed); \
 } else { \
 /* Test if the node is old enough to be popped */ \
 if (atomic_load_explicit(&next->cpt, memory_order_relaxed) >= age) \
 return NULL; \
 /* Try to swing Head to the next node */ \
 if (atomic_compare_exchange_strong_explicit(&list->head, \
 &head, next, \
 memory_order_release, \
 memory_order_relaxed)) { \
 break; \
 } \
 m_core_backoff_wait(bkoff); \
 } \
 } \
 } \
 M_IF_DEBUG(atomic_store(&head->next, (M_F(name, _lf_node_t) *) 0);) \
 return head; \
 } \
 \
 M_INLINE void \
 M_F(name, _lflist_clear)(M_F(name, _lflist_ct) list) \
 { \
 m_core_backoff_ct bkoff; \
 m_core_backoff_init(bkoff); \
 while (true) { \
 M_F(name, _lf_node_t) *node = M_F(name, _lflist_pop)(list, bkoff); \
 if (node == NULL) break; \
 M_F(name, _lf_node_t) *next = atomic_load_explicit(&node->next, \
 memory_order_relaxed); \
 M_F(name, _slist_clear)(node->list); \
 M_MEMORY_DEL(node); \
 node = next; \
 } \
 /* Dummy node to free too */ \
 M_F(name, _lf_node_t) *dummy; \
 dummy = atomic_load_explicit(&list->head, memory_order_relaxed); \
 M_F(name, _slist_clear)(dummy->list); \
 M_MEMORY_DEL(dummy); \
 } \
/* System node allocator: request memory to the system.
   As such it is a non Lock-Free path. */
#define M_CMEMP00L_DEF_SYSTEM_ALLOC(name, type_t) \
 \
 /* Allocate a new group node, prefilled with 'initial' free list nodes */ \
 M_INLINE M_F(name, _lf_node_t) * \
 M_F(name, _alloc_node)(unsigned int initial) \
 { \
 M_F(name, _lf_node_t) * node; \
 node = M_MEMORY_ALLOC(M_F(name, _lf_node_t)); \
 if (M_UNLIKELY_NOMEM (node == NULL)) { \
 M_MEMORY_FULL(sizeof(M_F(name, _lf_node_t))); \
 return NULL; \
 } \
 atomic_init(&node->next, (M_F(name, _lf_node_t) *) 0); \
 atomic_init(&node->cpt, 0UL); \
 M_F(name, _slist_init)(node->list); \
 for(unsigned i = 0; i < initial; i++) { \
 M_F(name, _slist_node_ct) *n; \
 n = M_MEMORY_ALLOC(M_F(name, _slist_node_ct)); \
 if (M_UNLIKELY_NOMEM (n == NULL)) { \
 /* FIX: report the size of the object whose allocation failed \
 (previously reported sizeof(_lf_node_t) by mistake) */ \
 M_MEMORY_FULL(sizeof(M_F(name, _slist_node_ct))); \
 return NULL; \
 } \
 M_F(name, _slist_push)(node->list, n); \
 } \
 return node; \
 }
/* Concurrent Memory pool
The data structure is the following.
Each thread has its own pool of nodes (local) that only it can
access (it is a singly list). If there is no longer any node in this
pool, it requests a new pool to the lock free queue of pool (group of
nodes). If it fails, it requests a new pool to the system allocator
(and from there it is no longer lock free).
   This memory pool can only be lock free if the initial state is
   sufficiently dimensioned to avoid calling the system allocator during
   the normal processing.
   Then each thread pushes its deleted nodes into another pool of nodes,
   where the node is logically deleted (the content of the node is not destroyed
   at this point and the node can be freely accessed by other threads).
   Once the thread mempool is put to sleep, the age of the pool of logically
   deleted nodes is computed and this pool is moved to the Lock Free Queue
   List of pools to be reclaimed. Then a garbage collection is performed
   on this Lock Free Queue list to reclaim all pools that are sufficiently
aged (taking into account the grace period of the pool) to be moved back
to the Lock Free Queue of the free pools.
Each pool of nodes can be in the following state:
* FREE state if it is present in the Lock Free Queue of free pools.
* EMPTY state if it is present in the Lock Free Queue of empty pools
which means that the nodes present in it has been USED directly by a thread,
* TO_BE_RECLAIMED state if it is present in the Lock Free Queue of TBR pools
A pool of nodes will go to the following state:
FREE --> EMPTY --> TO_BE_RECLAIMED
^ |
+----------------------+
The ABA problem is taken into account as a node cannot be reused in the
same queue without performing a full cycle of its state. Moreover
it can only move from TO_BE_RECLAIMED to FREE if and only if a grace
period is finished (and then we are sure that no thread references any
older node).
Each thread has its own backoff structure (with local pseudo-random
generator).
The grace period is detected through a global age counter (ticket)
that is incremented each time a thread is awaken / sleep.
Each thread has its own age that is set to the global ticket on sleep/awaken.
The age of the pool to be reclaimed is also set to this global age counter.
To ensure that the grace period is finished, it tests if all threads
are younger than the age of the pool to be reclaimed.
From a performance point of view, this puts a bottleneck on the global
age counter that is shared and incremented by all threads. However,
the sleep/awaken operations are much less frequent than other operations.
Thus, it shall not have a huge impact on the performance if the user
code is intelligent with the sleep/awaken operations.
As such it won't support more than ULONG_MAX sleep for all threads.
*/
#define M_CMEMP00L_DEF_LFMP_THREAD_MEMPOOL(name, type_t) \
\
typedef struct M_F(name, _lfmp_thread_s) { \
M_F(name, _slist_ct) free; \
M_F(name, _slist_ct) to_be_reclaimed; \
M_CACHELINE_ALIGN(align1, M_F(name, _slist_ct), M_F(name, _slist_ct)); \
} M_F(name, _lfmp_thread_ct); \
\
M_INLINE void \
M_F(name, _lfmp_thread_init)(M_F(name, _lfmp_thread_ct) *t) \
{ \
M_F(name, _slist_init)(t->free); \
M_F(name, _slist_init)(t->to_be_reclaimed); \
} \
\
M_INLINE void \
M_F(name, _lfmp_thread_clear)(M_F(name, _lfmp_thread_ct) *t) \
{ \
M_ASSERT(M_F(name, _slist_empty_p)(t->to_be_reclaimed)); \
M_F(name, _slist_clear)(t->free); \
M_F(name, _slist_clear)(t->to_be_reclaimed); \
} \
/* NOTE: once a node is deleted, its data are kept readable until the future GC */
#define M_CMEMP00L_DEF_LF_MEMPOOL(name, type_t) \
\
typedef struct M_F(name, _s) { \
unsigned initial; \
M_F(name, _lfmp_thread_ct) *thread_data; \
M_F(name, _lflist_ct) free; \
M_F(name, _lflist_ct) to_be_reclaimed; \
M_F(name, _lflist_ct) empty; \
m_cmemp00l_list_ct mempool_node; \
struct m_gc_s *gc_mem; \
} M_F(name, _t)[1]; \
\
/* Garbage collect of the nodes of the mempool on sleep */ \
M_INLINE void \
M_C3(m_cmemp00l_,name,_gc_on_sleep)(m_gc_t gc_mem, m_cmemp00l_list_ct *data, \
m_gc_tid_t id, m_gc_ticket_ct ticket, m_gc_ticket_ct min_ticket) \
{ \
/* Get back the mempool from the node */ \
struct M_F(name, _s) *mempool = \
M_TYPE_FROM_FIELD(struct M_F(name, _s), data, m_cmemp00l_list_ct, mempool_node); \
\
/* Move the local nodes of the mempool to be reclaimed to the thread into the global pool */ \
if (!M_F(name, _slist_empty_p)(mempool->thread_data[id].to_be_reclaimed)) { \
M_F(name, _lf_node_t) *node; \
/* Get a new empty group of nodes */ \
node = M_F(name, _lflist_pop)(mempool->empty, gc_mem->thread_data[id].bkoff); \
if (M_UNLIKELY (node == NULL)) { \
/* Fail to get an empty group of node. \
Alloc a new one from the system */ \
node = M_F(name, _alloc_node)(0); \
M_ASSERT(node != NULL); \
} \
M_ASSERT(M_F(name, _slist_empty_p)(node->list)); \
M_F(name, _slist_move)(node->list, mempool->thread_data[id].to_be_reclaimed); \
atomic_store_explicit(&node->cpt, ticket, memory_order_relaxed); \
M_F(name, _lflist_push)(mempool->to_be_reclaimed, node, gc_mem->thread_data[id].bkoff); \
} \
\
/* Perform a GC of the freelist of nodes */ \
while (true) { \
M_F(name, _lf_node_t) *node; \
node = M_F(name, _lflist_pop_if)(mempool->to_be_reclaimed, \
min_ticket, gc_mem->thread_data[id].bkoff); \
if (node == NULL) break; \
M_F(name, _lflist_push)(mempool->free, node, gc_mem->thread_data[id].bkoff); \
} \
} \
\
M_INLINE void \
M_F(name, _init)(M_F(name, _t) mem, m_gc_t gc_mem, \
unsigned init_node_count, unsigned init_group_count) \
{ \
const size_t max_thread = gc_mem->max_thread; \
/* Initialize the thread data of the mempool */ \
mem->thread_data = M_MEMORY_REALLOC(M_F(name, _lfmp_thread_ct), NULL, max_thread); \
if (M_UNLIKELY_NOMEM (mem->thread_data == NULL)) { \
M_MEMORY_FULL(max_thread * sizeof(M_F(name, _lfmp_thread_ct))); \
return; \
} \
for(unsigned i = 0; i < max_thread;i++) { \
M_F(name, _lfmp_thread_init)(&mem->thread_data[i]); \
} \
/* Preallocate some group of nodes for the mempool */ \
mem->initial = M_MAX(M_CMEMP00L_MIN_NODE_PER_GROUP, init_node_count); \
M_F(name, _lflist_init)(mem->free, M_F(name, _alloc_node)(init_node_count)); \
M_F(name, _lflist_init)(mem->to_be_reclaimed, M_F(name, _alloc_node)(init_node_count)); \
M_F(name, _lflist_init)(mem->empty, M_F(name, _alloc_node)(0)); \
for(unsigned i = 1; i < init_group_count; i++) { \
M_F(name, _lflist_push)(mem->free, M_F(name, _alloc_node)(init_node_count), \
gc_mem->thread_data[0].bkoff); \
M_F(name, _lflist_push)(mem->empty, M_F(name, _alloc_node)(0), \
gc_mem->thread_data[0].bkoff); \
} \
/* Register the mempool in the GC */ \
mem->mempool_node.gc_on_sleep = M_C3(m_cmemp00l_,name,_gc_on_sleep); \
mem->mempool_node.next = gc_mem->mempool_list; \
gc_mem->mempool_list = &mem->mempool_node; \
mem->gc_mem = gc_mem; \
} \
\
M_INLINE void \
M_F(name, _clear)(M_F(name, _t) mem) \
{ \
const unsigned max_thread = mem->gc_mem->max_thread; \
for(unsigned i = 0; i < max_thread;i++) { \
M_F(name, _lfmp_thread_clear)(&mem->thread_data[i]); \
} \
M_MEMORY_FREE(mem->thread_data); \
mem->thread_data = NULL; \
M_F(name, _lflist_clear)(mem->empty); \
M_F(name, _lflist_clear)(mem->free); \
M_ASSERT(M_F(name, _lflist_empty_p)(mem->to_be_reclaimed)); \
M_F(name, _lflist_clear)(mem->to_be_reclaimed); \
/* TODO: Unregister from the GC? */ \
} \
\
M_INLINE type_t * \
M_F(name, _new)(M_F(name, _t) mem, m_gc_tid_t id) \
{ \
M_F(name, _slist_node_ct) *snode; \
M_F(name, _lf_node_t) *node; \
while (true) { \
/* Fast & likely path where we access the thread pool of nodes */ \
if (M_LIKELY(!M_F(name, _slist_empty_p)(mem->thread_data[id].free))) { \
snode = M_F(name, _slist_pop)(mem->thread_data[id].free); \
return &snode->data; \
} \
/* Request a group node to the freelist of groups */ \
node = M_F(name, _lflist_pop)(mem->free, mem->gc_mem->thread_data[id].bkoff); \
if (M_UNLIKELY (node == NULL)) { \
/* Request a new group to the system. Non Lock Free path */ \
M_ASSERT(mem->initial > 0); \
node = M_F(name, _alloc_node)(mem->initial); \
M_ASSERT(node != NULL); \
M_ASSERT(!M_F(name, _slist_empty_p)(node->list)); \
} \
M_F(name, _slist_move)(mem->thread_data[id].free, node->list); \
/* Push back the empty group */ \
M_ASSERT (M_F(name, _slist_empty_p)(node->list)); \
M_F(name, _lflist_push)(mem->empty, node, mem->gc_mem->thread_data[id].bkoff); \
} \
} \
\
M_INLINE void \
M_F(name, _del)(M_F(name, _t) mem, type_t *d, m_gc_tid_t id) \
{ \
M_F(name, _slist_node_ct) *snode; \
M_ASSERT( d != NULL); \
snode = M_TYPE_FROM_FIELD(M_F(name, _slist_node_ct), d, type_t, data); \
M_F(name, _slist_push)(mem->thread_data[id].to_be_reclaimed, snode); \
} \
/***********************************************************************/
/* Define the ID of a thread */
typedef unsigned int m_gc_tid_t;
/* Define the age of a node.
   ULONG_MAX is reserved as a sentinel for a sleeping / detached thread. */
/* TODO: Compute if sufficient (worst case ULONG_MAX is 32 bits) */
typedef unsigned long m_gc_ticket_ct;
typedef atomic_ulong m_gc_atomic_ticket_ct;
/* Define the Linked List of mempools that are registered in the GC.
   gc_on_sleep is the per-mempool reclaim callback invoked by m_gc_sleep. */
struct m_gc_s;
typedef struct m_cmemp00l_list_s {
  struct m_cmemp00l_list_s *next;
  void (*gc_on_sleep)(struct m_gc_s *gc_mem,
                      struct m_cmemp00l_list_s *data, m_gc_tid_t id,
                      m_gc_ticket_ct ticket, m_gc_ticket_ct min_ticket);
  void *data;
} m_cmemp00l_list_ct;
/* Define the Garbage collector thread data:
   the thread's current age and its backoff state, cacheline padded. */
typedef struct m_gc_lfmp_thread_s {
  m_gc_atomic_ticket_ct ticket;
  m_core_backoff_ct bkoff;
  M_CACHELINE_ALIGN(align1, atomic_ulong, m_core_backoff_ct);
} m_gc_lfmp_thread_ct;
/* Define the Garbage collector coordinator:
   global age counter, thread-id allocator and the registered mempools. */
typedef struct m_gc_s {
  m_gc_atomic_ticket_ct ticket;
  m_gc_tid_t max_thread;
  m_genint_t thread_alloc;
  m_gc_lfmp_thread_ct *thread_data;
  m_cmemp00l_list_ct *mempool_list;
} m_gc_t[1];
/* Initialize the GC coordinator for at most 'max_thread' threads.
   All thread slots start at ULONG_MAX (i.e. "sleeping"). */
M_INLINE void
m_gc_init(m_gc_t gc_mem, size_t max_thread)
{
  M_ASSERT(gc_mem != NULL);
  M_ASSERT(max_thread > 0 && max_thread < INT_MAX);
  atomic_init(&gc_mem->ticket, 0UL);
  m_genint_init(gc_mem->thread_alloc, (unsigned int) max_thread);
  gc_mem->thread_data = M_MEMORY_REALLOC(m_gc_lfmp_thread_ct, NULL, max_thread);
  if (M_UNLIKELY_NOMEM (gc_mem->thread_data == NULL)) {
    /* NOTE(review): thread_alloc stays initialized on this path —
       presumably M_MEMORY_FULL is fatal; confirm to rule out a leak. */
    M_MEMORY_FULL(max_thread * sizeof(m_gc_lfmp_thread_ct));
    return;
  }
  for(unsigned i = 0; i < max_thread;i++) {
    /* ULONG_MAX marks the slot as not attached / sleeping */
    atomic_init(&gc_mem->thread_data[i].ticket, ULONG_MAX);
    m_core_backoff_init(gc_mem->thread_data[i].bkoff);
  }
  gc_mem->max_thread = (unsigned int) max_thread;
  gc_mem->mempool_list = NULL;
}
/* Release all resources owned by the GC coordinator.
   Shall be called once all threads are detached. */
M_INLINE void
m_gc_clear(m_gc_t gc_mem)
{
  M_ASSERT(gc_mem != NULL && gc_mem->max_thread > 0);
  /* Tear down the per-thread backoff states first */
  m_gc_tid_t tid = 0;
  while (tid < gc_mem->max_thread) {
    m_core_backoff_clear(gc_mem->thread_data[tid].bkoff);
    tid++;
  }
  M_MEMORY_FREE(gc_mem->thread_data);
  gc_mem->thread_data = NULL;
  m_genint_clear(gc_mem->thread_alloc);
}
/* Attach the calling thread to the GC and return its thread id.
   The id is obtained from the generic integer-pool allocator. */
M_INLINE m_gc_tid_t
m_gc_attach_thread(m_gc_t gc_mem)
{
  M_ASSERT(gc_mem != NULL && gc_mem->max_thread > 0);
  return M_ASSIGN_CAST(m_gc_tid_t, m_genint_pop(gc_mem->thread_alloc));
}
/* Detach the thread 'id' from the GC and release its id.
   The thread shall be asleep (ticket == ULONG_MAX) when detaching. */
M_INLINE void
m_gc_detach_thread(m_gc_t gc_mem, m_gc_tid_t id)
{
  M_ASSERT(gc_mem != NULL && gc_mem->max_thread > 0);
  M_ASSERT(id < gc_mem->max_thread);
  M_ASSERT(atomic_load(&gc_mem->thread_data[id].ticket) == ULONG_MAX);
  m_genint_push(gc_mem->thread_alloc, id);
}
/* Awake the thread 'id': bump the global age counter and publish the
   new age as this thread's ticket, so that the GC knows which nodes
   this thread may still observe. */
M_INLINE void
m_gc_awake(m_gc_t gc_mem, m_gc_tid_t id)
{
  M_ASSERT(gc_mem != NULL && gc_mem->max_thread > 0);
  M_ASSERT(id < gc_mem->max_thread);
  M_ASSERT(atomic_load(&gc_mem->thread_data[id].ticket) == ULONG_MAX);
  m_gc_ticket_ct t = atomic_fetch_add(&gc_mem->ticket, 1UL) + 1;
  atomic_store(&gc_mem->thread_data[id].ticket, t);
}
/* Return the age of the oldest attached, non-sleeping thread.
   Sleeping / detached slots hold ULONG_MAX (see m_gc_sleep / m_gc_init),
   so seeding with ULONG_MAX yields the same minimum as the original
   first-element seed. */
M_INLINE m_gc_ticket_ct
m_cmemp00l_gc_min_ticket(m_gc_t gc_mem)
{
  m_gc_ticket_ct lowest = ULONG_MAX;
  for(m_gc_tid_t tid = 0; tid < gc_mem->max_thread; tid++) {
    m_gc_ticket_ct cur = atomic_load(&gc_mem->thread_data[tid].ticket);
    lowest = M_MIN(cur, lowest);
  }
  return lowest;
}
/* Put the thread 'id' to sleep:
   1. bump the global age and publish it as this thread's ticket,
   2. run the GC callback of every registered mempool (which recycles
      the groups older than the minimum ticket of all live threads),
   3. mark the thread as sleeping (ULONG_MAX).
   The statement order is part of the grace-period protocol. */
M_INLINE void
m_gc_sleep(m_gc_t gc_mem, m_gc_tid_t id)
{
  /* Increase life time of the thread */
  m_gc_ticket_ct t = atomic_fetch_add(&gc_mem->ticket, 1UL);
  atomic_store(&gc_mem->thread_data[id].ticket, t+1);
  const m_gc_ticket_ct min_ticket = m_cmemp00l_gc_min_ticket(gc_mem);
  /* Iterate over all registered mempools */
  m_cmemp00l_list_ct *it = gc_mem->mempool_list;
  while (it) {
    /* Perform a garbage collect of the mempool */
    it->gc_on_sleep(gc_mem, it, id, t, min_ticket);
    /* Next mempool to scan for GC */
    it = it->next;
  }
  /* Sleep the thread */
  atomic_store(&gc_mem->thread_data[id].ticket, ULONG_MAX);
}
/***********************************************************************/
/*                                                                     */
/*                  Variable Length Array MEMPOOL                      */
/*                                                                     */
/***********************************************************************/
/* Instantiate the building blocks of the VLA pool over raw bytes.
   VLA blocks are never reused, so no system-alloc node refill is needed
   beyond empty groups, and there is no per-thread free list. */
M_CMEMP00L_DEF_SINGLY_LIST(m_vlapool, char)
M_CMEMP00L_DEF_LF_QUEUE(m_vlapool, char)
M_CMEMP00L_DEF_SYSTEM_ALLOC(m_vlapool, char)
/* Per-thread state of the VLA pool: only a list of logically deleted
   blocks (cacheline padded). */
typedef struct m_vlapool_lfmp_thread_s {
  m_vlapool_slist_ct to_be_reclaimed;
  M_CACHELINE_ALIGN(align1, m_vlapool_slist_ct);
} m_vlapool_lfmp_thread_ct;
/* Initialize the per-thread state to an empty list. */
M_INLINE void
m_vlapool_lfmp_thread_init(m_vlapool_lfmp_thread_ct *t)
{
  m_vlapool_slist_init(t->to_be_reclaimed);
}
/* Clear the per-thread state. It shall already have been handed to
   the GC (hence the emptiness assertion). */
M_INLINE void
m_vlapool_lfmp_thread_clear(m_vlapool_lfmp_thread_ct *t)
{
  M_ASSERT(m_vlapool_slist_empty_p(t->to_be_reclaimed));
  m_vlapool_slist_clear(t->to_be_reclaimed);
}
/* The VLA pool itself: shared lists of groups, per-thread data and its
   registration link into the GC. */
typedef struct m_vlapool_s {
  m_vlapool_lflist_ct to_be_reclaimed;
  m_vlapool_lflist_ct empty;
  m_vlapool_lfmp_thread_ct *thread_data;
  m_cmemp00l_list_ct mvla_node;
  struct m_gc_s *gc_mem;
} m_vlapool_t[1];
/* Garbage collect of the nodes of the vla mempool on sleep.
   Unlike the fixed-size pool, reclaimed VLA blocks are freed back to the
   system instead of being recycled into a free list. */
M_INLINE void
m_cmemp00l_vlapool_on_sleep(m_gc_t gc_mem, m_cmemp00l_list_ct *data,
                m_gc_tid_t id, m_gc_ticket_ct ticket, m_gc_ticket_ct min_ticket)
{
  /* Get back the mempool from the node */
  struct m_vlapool_s *vlapool =
    M_TYPE_FROM_FIELD(struct m_vlapool_s, data, m_cmemp00l_list_ct, mvla_node);
  /* Move the local nodes of the vlapool to be reclaimed to the thread into the global pool */
  if (!m_vlapool_slist_empty_p(vlapool->thread_data[id].to_be_reclaimed)) {
    m_vlapool_lf_node_t *node;
    /* Get a new empty group of nodes */
    node = m_vlapool_lflist_pop(vlapool->empty, gc_mem->thread_data[id].bkoff);
    if (M_UNLIKELY (node == NULL)) {
      /* Fail to get an empty group of node.
         Alloc a new one from the system */
      node = m_vlapool_alloc_node(0);
      M_ASSERT(node != NULL);
    }
    M_ASSERT(m_vlapool_slist_empty_p(node->list));
    /* Stamp the group with the current age before publishing it */
    m_vlapool_slist_move(node->list, vlapool->thread_data[id].to_be_reclaimed);
    atomic_store_explicit(&node->cpt, ticket, memory_order_relaxed);
    m_vlapool_lflist_push(vlapool->to_be_reclaimed, node, gc_mem->thread_data[id].bkoff);
  }
  /* Perform a GC of the freelist of nodes */
  while (true) {
    m_vlapool_lf_node_t *node;
    node = m_vlapool_lflist_pop_if(vlapool->to_be_reclaimed,
                     min_ticket, gc_mem->thread_data[id].bkoff);
    if (node == NULL) break;
    // No reuse of VLA nodes. Free physically the node back to the system
    m_vlapool_slist_clear(node->list);
    // Add back the empty group of nodes
    m_vlapool_slist_init(node->list);
    m_vlapool_lflist_push(vlapool->empty, node, gc_mem->thread_data[id].bkoff);
  }
}
/* Initialize the VLA pool and register it to the GC 'gc_mem'. */
M_INLINE void
m_vlapool_init(m_vlapool_t mem, m_gc_t gc_mem)
{
  const size_t max_thread = gc_mem->max_thread;
  /* Initialize the thread data of the vlapool */
  mem->thread_data = M_MEMORY_REALLOC(m_vlapool_lfmp_thread_ct, NULL, max_thread);
  if (M_UNLIKELY_NOMEM (mem->thread_data == NULL)) {
    M_MEMORY_FULL(max_thread * sizeof(m_vlapool_lfmp_thread_ct));
    return;
  }
  for(unsigned i = 0; i < max_thread;i++) {
    m_vlapool_lfmp_thread_init(&mem->thread_data[i]);
  }
  /* Initialize the lists (each lock-free list needs a dummy group node) */
  m_vlapool_lflist_init(mem->to_be_reclaimed, m_vlapool_alloc_node(0));
  m_vlapool_lflist_init(mem->empty, m_vlapool_alloc_node(0));
  /* Register the mempool in the GC */
  mem->mvla_node.gc_on_sleep = m_cmemp00l_vlapool_on_sleep;
  mem->mvla_node.next = gc_mem->mempool_list;
  gc_mem->mempool_list = &mem->mvla_node;
  mem->gc_mem = gc_mem;
}
/* Release all resources owned by the VLA pool.
   Shall be called once no thread uses the pool anymore.
   (TODO kept from the original: the pool is not unregistered from the GC.) */
M_INLINE void
m_vlapool_clear(m_vlapool_t mem)
{
  /* Tear down the per-thread state first */
  const unsigned nb_threads = mem->gc_mem->max_thread;
  unsigned tid = 0;
  while (tid < nb_threads) {
    m_vlapool_lfmp_thread_clear(&mem->thread_data[tid]);
    tid++;
  }
  M_MEMORY_FREE(mem->thread_data);
  mem->thread_data = NULL;
  /* Then the shared lock-free lists */
  m_vlapool_lflist_clear(mem->empty);
  M_ASSERT(m_vlapool_lflist_empty_p(mem->to_be_reclaimed));
  m_vlapool_lflist_clear(mem->to_be_reclaimed);
}
/* Allocate 'size' bytes for the awake thread 'id'.
   The returned pointer is offset past a hidden slist-node header so that
   m_vlapool_del can link the block into a reclaim list without a
   separate allocation. Returns NULL on allocation failure.
   NOTE(review): 'size + offsetof(...)' is not checked for overflow —
   presumably callers pass sane sizes; confirm. */
M_INLINE void *
m_vlapool_new(m_vlapool_t mem, m_gc_tid_t id, size_t size)
{
  M_ASSERT(mem != NULL && mem->gc_mem != NULL);
  M_ASSERT(id < mem->gc_mem->max_thread);
  M_ASSERT( atomic_load(&mem->gc_mem->thread_data[id].ticket) != ULONG_MAX);
  // Nothing to do with these parameters yet (only used by the asserts above)
  (void) mem;
  (void) id;
  // Ensure the size is big enough to also represent a node
  size += offsetof(struct m_vlapool_slist_node_s, data);
  // Simply wrap around a system call to get the memory
  char *ptr = M_MEMORY_REALLOC(char, NULL, size);
  return (ptr == NULL) ? NULL : M_ASSIGN_CAST(void *, ptr + offsetof(struct m_vlapool_slist_node_s, data));
}
/* Logically delete the VLA block 'd' (allocated by m_vlapool_new) for the
   awake thread 'id'. The memory stays readable until the GC reclaims it. */
M_INLINE void
m_vlapool_del(m_vlapool_t mem, void *d, m_gc_tid_t id)
{
  M_ASSERT(mem != NULL && mem->gc_mem != NULL);
  M_ASSERT(id < mem->gc_mem->max_thread);
  M_ASSERT(atomic_load(&mem->gc_mem->thread_data[id].ticket) != ULONG_MAX);
  M_ASSERT(d != NULL);
  // Get back the pointer to a struct m_vlapool_slist_node_s.
  d = M_ASSIGN_CAST(void *, M_ASSIGN_CAST(char *, d) - offsetof(struct m_vlapool_slist_node_s, data));
  m_vlapool_slist_node_ct *snode = M_ASSIGN_CAST(m_vlapool_slist_node_ct *, d);
  // Push the logically freed memory into the list of the nodes to be reclaimed.
  m_vlapool_slist_push(mem->thread_data[id].to_be_reclaimed, snode);
}
M_END_PROTECTED_CODE
/* Export the API under its short (non-prefixed) name when requested */
#if M_USE_SMALL_NAME
#define C_MEMPOOL_DEF M_C_MEMPOOL_DEF
#endif
/* end of include guard — presumably MSTARLIB_C_MEMPOOL_H; opening #ifndef not visible here */
#endif

View File

@ -0,0 +1,925 @@
/*
* M*LIB - Basic Protected Concurrent module over container.
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_CONCURRENT_H
#define MSTARLIB_CONCURRENT_H
#include "m-core.h"
#include "m-thread.h"
#include "m-atomic.h"
/* Define a protected concurrent container and its associated functions
   based on the given container.
   USAGE: CONCURRENT_DEF(name, type [, oplist_of_the_type]) */
#define M_CONCURRENT_DEF(name, ...)                                           \
  M_CONCURRENT_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a protected concurrent container and its associated functions
   based on the given container as the given name name_t
   USAGE: CONCURRENT_DEF_AS(name, name_t, type [, oplist_of_the_type])
   If no oplist is given, the global registered oplist of the type is used. */
#define M_CONCURRENT_DEF_AS(name, name_t, ...)                                \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_C0NCURRENT_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                             \
             ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t ), \
              (name, __VA_ARGS__, name_t )))                                  \
  M_END_PROTECTED_CODE
/* Define a protected concurrent container and its associated functions
   based on its given container. Operations that perform only read of the container
   can be done in parallel.
   USAGE: CONCURRENT_RP_DEF(name, type [, oplist_of_the_type]) */
#define M_CONCURRENT_RP_DEF(name, ...)                                        \
  M_CONCURRENT_RP_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a protected concurrent container and its associated functions
   as the given name name_t
   based on its given container. Operations that perform only read of the container
   can be done in parallel.
   USAGE: CONCURRENT_RP_DEF_AS(name, name_t, type [, oplist_of_the_type]) */
#define M_CONCURRENT_RP_DEF_AS(name, name_t, ...)                             \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_C0NCURRENT_RP_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                          \
             ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t ), \
              (name, __VA_ARGS__, name_t )))                                  \
  M_END_PROTECTED_CODE
/* Define the oplist of a protected concurrent container given its name and its oplist.
   USAGE: CONCURRENT_OPLIST(name[, oplist of the type]) */
#define M_CONCURRENT_OPLIST(...)                                              \
  M_C0NCURRENT_OPLIST_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                          \
                         ((__VA_ARGS__, M_BASIC_OPLIST),                      \
                          (__VA_ARGS__ )))
/*****************************************************************************/
/******************************** INTERNAL ***********************************/
/*****************************************************************************/
/* Deferred evaluation for the oplist definition,
   so that all arguments are evaluated before further expansion */
#define M_C0NCURRENT_OPLIST_P1(arg) M_C0NCURRENT_OPLIST_P2 arg
/* Validation of the given oplist */
#define M_C0NCURRENT_OPLIST_P2(name, oplist)                                  \
  M_IF_OPLIST(oplist)(M_C0NCURRENT_OPLIST_P3, M_C0NCURRENT_OPLIST_FAILURE)(name, oplist)
/* Prepare a clean compilation failure */
#define M_C0NCURRENT_OPLIST_FAILURE(name, oplist)                             \
  ((M_LIB_ERROR(ARGUMENT_OF_CONCURRENT_OPLIST_IS_NOT_AN_OPLIST, name, oplist)))
/* OPLIST definition
   GET_KEY is not present as its interface is not compatible with a concurrent
   container (_get returns a pointer to an internal data, data that may be
   destroyed by another thread).
   Each entry is only exported if the base oplist provides the method. */
#define M_C0NCURRENT_OPLIST_P3(name, oplist)                                  \
  (M_IF_METHOD(INIT, oplist)(INIT(M_F(name, _init)),)                         \
  ,M_IF_METHOD(INIT_SET, oplist)(INIT_SET(M_F(name, _init_set)),)             \
  ,M_IF_METHOD(SET, oplist)(SET(M_F(name, _set)),)                            \
  ,M_IF_METHOD(CLEAR, oplist)(CLEAR(M_F(name, _clear)),)                      \
  ,M_IF_METHOD(INIT_MOVE, oplist)(INIT_MOVE(M_F(name, _init_move)),)          \
  ,M_IF_METHOD(MOVE, oplist)(MOVE(M_F(name, _move)),)                         \
  ,M_IF_METHOD(SWAP,oplist)(SWAP(M_F(name, _swap)),)                          \
  ,NAME(name)                                                                 \
  ,TYPE(M_F(name,_ct))                                                        \
  ,SUBTYPE(M_F(name, _subtype_ct))                                            \
  ,OPLIST(oplist)                                                             \
  ,M_IF_METHOD(EMPTY_P, oplist)(EMPTY_P(M_F(name,_empty_p)),)                 \
  ,M_IF_METHOD(GET_SIZE, oplist)(GET_SIZE(M_F(name,_size)),)                  \
  ,M_IF_METHOD(RESET, oplist)(RESET(M_F(name,_reset)),)                       \
  ,M_IF_METHOD(KEY_TYPE, oplist)(KEY_TYPE(M_GET_KEY_TYPE oplist),)            \
  ,M_IF_METHOD(VALUE_TYPE, oplist)(VALUE_TYPE(M_GET_VALUE_TYPE oplist),)      \
  ,M_IF_METHOD(KEY_TYPE, oplist)(KEY_OPLIST(M_GET_KEY_OPLIST oplist),)        \
  ,M_IF_METHOD(VALUE_TYPE, oplist)(VALUE_OPLIST(M_GET_VALUE_OPLIST oplist), ) \
  ,M_IF_METHOD(SET_KEY, oplist)(SET_KEY(M_F(name, _set_at)),)                 \
  ,M_IF_METHOD(ERASE_KEY, oplist)(ERASE_KEY(M_F(name, _erase)),)              \
  ,M_IF_METHOD(PUSH, oplist)(PUSH(M_F(name,_push)),)                          \
  ,M_IF_METHOD(POP, oplist)(POP(M_F(name,_pop)),)                             \
  ,M_IF_METHOD(PUSH_MOVE, oplist)(PUSH_MOVE(M_F(name,_push_move)),)           \
  ,M_IF_METHOD(POP_MOVE, oplist)(POP_MOVE(M_F(name,_pop_move)),)              \
  ,M_IF_METHOD(GET_STR, oplist)(GET_STR(M_F(name, _get_str)),)                \
  ,M_IF_METHOD(PARSE_STR, oplist)(PARSE_STR(M_F(name, _parse_str)),)          \
  ,M_IF_METHOD(OUT_STR, oplist)(OUT_STR(M_F(name, _out_str)),)                \
  ,M_IF_METHOD(IN_STR, oplist)(IN_STR(M_F(name, _in_str)),)                   \
  ,M_IF_METHOD(OUT_SERIAL, oplist)(OUT_SERIAL(M_F(name, _out_serial)),)       \
  ,M_IF_METHOD(IN_SERIAL, oplist)(IN_SERIAL(M_F(name, _in_serial)),)          \
  ,M_IF_METHOD(EQUAL, oplist)(EQUAL(M_F(name, _equal_p)),)                    \
  ,M_IF_METHOD(HASH, oplist)(HASH(M_F(name, _hash)),)                         \
  )
/******************************** INTERNAL ***********************************/
/* Internal contract
   NOTE: Can't check too much without locking the container itself
*/
#define M_C0NCURRENT_CONTRACT(c) do {                                         \
    M_ASSERT ((c) != NULL);                                                   \
    M_ASSERT ((c)->self == (c));                                              \
  } while (0)
/* Deferred evaluation for the concurrent definition,
   so that all arguments are evaluated before further expansion */
#define M_C0NCURRENT_DEF_P1(arg) M_ID( M_C0NCURRENT_DEF_P2 arg )
/* Validate the value oplist before going further */
#define M_C0NCURRENT_DEF_P2(name, type, oplist, concurrent_t)                 \
  M_IF_OPLIST(oplist)(M_C0NCURRENT_DEF_P3, M_C0NCURRENT_DEF_FAILURE)(name, type, oplist, concurrent_t)
/* Stop processing with a compilation failure */
#define M_C0NCURRENT_DEF_FAILURE(name, type, oplist, concurrent_t)            \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(CONCURRENT_DEF): the given argument is not a valid oplist: " M_AS_STR(oplist))
/* Internal concurrent definition
   - name: prefix to be used
   - type: type of the sub container
   - oplist: oplist of the type of the sub container
   - concurrent_t: alias for M_F(name, _t) [ type of the container ]
   Expands, in order: the type, the oplist-compatibility check, the lock
   primitives and the public services. */
#define M_C0NCURRENT_DEF_P3(name, type, oplist, concurrent_t)                 \
  M_C0NCURRENT_DEF_TYPE(name, type, oplist, concurrent_t)                     \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_C0NCURRENT_DEF_CORE(name, type, oplist, concurrent_t)                     \
  M_C0NCURRENT_DEF_COMMON(name, type, oplist, concurrent_t)
/* Define the type of a concurrent container */
#define M_C0NCURRENT_DEF_TYPE(name, type, oplist, concurrent_t) \
\
/* Define a concurrent container using a lock */ \
typedef struct M_F(name, _s) { \
struct M_F(name, _s) *self; \
m_mutex_t lock; \
m_cond_t there_is_data; /* condition raised when there is data */ \
type data; \
} concurrent_t[1]; \
\
/* Define alias for pointer types */ \
typedef struct M_F(name, _s) *M_F(name, _ptr); \
typedef const struct M_F(name, _s) *M_F(name, _srcptr); \
\
/* Internal types for oplist */ \
typedef concurrent_t M_F(name, _ct); \
typedef type M_F(name, _subtype_ct); \
\
/* Cannot define iterator as it cannot be reliable in a concurrent type */ \
/* Define the internal services used for the lock strategy */
#define M_C0NCURRENT_DEF_CORE(name, type, oplist, concurrent_t) \
\
/* Initial the fields of the concurrent object not associated to the \
sub-container. */ \
M_INLINE void \
M_F(name, _internal_init)(concurrent_t out) \
{ \
m_mutex_init(out->lock); \
m_cond_init(out->there_is_data); \
out->self = out; \
M_C0NCURRENT_CONTRACT(out); \
} \
\
/* Clear the fields of the concurrent object not associated to the \
sub-container. */ \
M_INLINE void \
M_F(name, _internal_clear)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_clear(out->lock); \
m_cond_clear(out->there_is_data); \
out->self = NULL; \
} \
\
/* Get the read lock. Multiple threads can get it, but only for reading. \
write lock is exclusive. \
NOTE: This instance doesn't implement the read/write strategy, \
and only get the lock */ \
M_INLINE void \
M_F(name, _read_lock)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_lock (out->self->lock); \
} \
\
/* Free the read lock. See above. \
NOTE: This instance doesn't implement the read/write strategy, \
and only get the lock */ \
M_INLINE void \
M_F(name, _read_unlock)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_unlock (out->self->lock); \
} \
\
/* Wait for a thread pushing some data in the container. \
CONSTRAINT: the read lock shall be get before calling this service */ \
M_INLINE void \
M_F(name, _read_wait)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_cond_wait(out->self->there_is_data, out->self->lock); \
} \
\
/* Get the write lock. Only one threads can get it, and no other threads \
can get the read lock too. \
NOTE: This instance doesn't implement the read/write strategy, \
and only get the lock */ \
M_INLINE void \
M_F(name, _write_lock)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_lock (out->lock); \
} \
\
/* Free the write lock. \
NOTE: This instance doesn't implement the read/write strategy, \
and only get the lock */ \
M_INLINE void \
M_F(name, _write_unlock)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_unlock (out->lock); \
} \
\
/* Wait for a thread pushing some data in the container. \
CONSTRAINT: the write lock shall be get before calling this service */ \
M_INLINE void \
M_F(name, _write_wait)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_cond_wait(out->self->there_is_data, out->self->lock); \
} \
\
/* Wait to all threads that some data are available in the container. \
CONSTRAINT: the write lock shall be get before calling this service */ \
M_INLINE void \
M_F(name, _write_signal)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
/* We need to signal this to ALL waiting threads as multiple threads \
may wait on a some data of this container. */ \
m_cond_broadcast(out->there_is_data); \
} \
/* Internal definition of the functions commons to concurrent and rp-concurrent
- name: prefix to be used
- type: type of the sub container
- oplist: oplist of the type of the sub container
- concurrent_t: alias for M_F(name, _t) [ type of the container ]
A function is defined only if the underlying container exports the needed
services. It is usually one service declared per service exported.
*/
#define M_C0NCURRENT_DEF_COMMON(name, type, oplist, concurrent_t) \
\
  /* Initialize 'out' as a new empty concurrent container. */                 \
  M_IF_METHOD(INIT, oplist)(                                                  \
  M_INLINE void                                                               \
  M_F(name, _init)(concurrent_t out)                                          \
  {                                                                           \
    M_F(name, _internal_init)(out);                                           \
    M_CALL_INIT(oplist, out->data);                                           \
    M_C0NCURRENT_CONTRACT(out);                                               \
  }                                                                           \
  ,)                                                                          \
                                                                              \
  /* Initialize 'out' as a copy of 'src' (taking the read lock on 'src'). */  \
  M_IF_METHOD(INIT_SET, oplist)(                                              \
  M_INLINE void                                                               \
  M_F(name, _init_set)(concurrent_t out, concurrent_t const src)              \
  {                                                                           \
    M_C0NCURRENT_CONTRACT(src);                                               \
    M_ASSERT (out != src);                                                    \
    M_F(name, _internal_init)(out);                                           \
    M_F(name, _read_lock)(src);                                               \
    M_CALL_INIT_SET(oplist, out->data, src->data);                            \
    M_F(name, _read_unlock)(src);                                             \
    M_C0NCURRENT_CONTRACT(out);                                               \
  }                                                                           \
  ,)                                                                          \
\
M_IF_METHOD(SET, oplist)( \
M_INLINE void \
M_F(name, _set)(concurrent_t out, concurrent_t const src) \
{ \
M_C0NCURRENT_CONTRACT(out); \
if (M_UNLIKELY (out == src)) return; \
/* Need to order the locks in a total way to avoid lock deadlock. \
Indeed, two call to _set can be done in two threads with : \
T1: A := B \
T2: B := A \
If we lock first the mutex of out, then the src, it could be possible \
in the previous scenario that both mutexs are locked: T1 has locked A \
and T2 has locked B, and T1 is waiting for locking B, and T2 is waiting \
for locking A, resulting in a deadlock. \
To avoid this problem, we **always** lock the mutex which address is \
the lowest. */ \
if (out < src) { \
M_F(name, _write_lock)(out); \
M_F(name, _read_lock)(src); \
} else { \
M_F(name, _read_lock)(src); \
M_F(name, _write_lock)(out); \
} \
M_CALL_SET(oplist, out->data, src->data); \
if (out < src) { \
M_F(name, _read_lock)(src); \
M_F(name, _write_unlock)(out); \
} else { \
M_F(name, _write_unlock)(out); \
M_F(name, _read_unlock)(src); \
} \
M_C0NCURRENT_CONTRACT(out); \
} \
,) \
\
M_IF_METHOD(CLEAR, oplist)( \
M_INLINE void \
M_F(name, _clear)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
/* No need to lock. A clear is supposed to be called when all operations \
of the container in other threads are terminated */ \
M_CALL_CLEAR(oplist, out->data); \
M_F(name, _internal_clear)(out); \
} \
,) \
\
M_IF_METHOD(INIT_MOVE, oplist)( \
M_INLINE void \
M_F(name, _init_move)(concurrent_t out, concurrent_t src) \
{ \
M_C0NCURRENT_CONTRACT(src); \
M_ASSERT (out != src); \
/* No need to lock 'src' ? */ \
M_F(name, _internal_init)(out); \
M_CALL_INIT_MOVE(oplist, out->data, src->data); \
M_F(name, _internal_clear)(src); \
M_C0NCURRENT_CONTRACT(out); \
} \
,) \
\
M_IF_METHOD(MOVE, oplist)( \
M_INLINE void \
M_F(name, _move)(concurrent_t out, concurrent_t src) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_C0NCURRENT_CONTRACT(src); \
/* No need to lock 'src' ? */ \
M_F(name, _write_lock)(out); \
M_CALL_MOVE(oplist, out->data, src->data); \
M_F(name, _write_unlock)(out); \
M_F(name, _internal_clear)(src); \
M_C0NCURRENT_CONTRACT(out); \
} \
,) \
\
M_IF_METHOD(SWAP, oplist)( \
M_INLINE void \
M_F(name, _swap)(concurrent_t out, concurrent_t src) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_C0NCURRENT_CONTRACT(src); \
if (M_UNLIKELY (out == src)) return; \
/* See comment above */ \
if (out < src) { \
M_F(name, _write_lock)(out); \
M_F(name, _write_lock)(src); \
} else { \
M_F(name, _write_lock)(src); \
M_F(name, _write_lock)(out); \
} \
M_CALL_SWAP(oplist, out->data, src->data); \
if (out < src) { \
M_F(name, _write_unlock)(src); \
M_F(name, _write_unlock)(out); \
} else { \
M_F(name, _write_unlock)(out); \
M_F(name, _write_unlock)(src); \
} \
} \
,) \
\
M_IF_METHOD(RESET, oplist)( \
M_INLINE void \
M_F(name, _reset)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
M_CALL_RESET(oplist, out->data); \
M_F(name, _write_unlock)(out); \
} \
,) \
\
M_IF_METHOD(EMPTY_P, oplist)( \
M_INLINE bool \
M_F(name, _empty_p)(concurrent_t const out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _read_lock)(out); \
bool b = M_CALL_EMPTY_P(oplist, out->data); \
M_F(name, _read_unlock)(out); \
return b; \
} \
,) \
\
M_IF_METHOD(GET_SIZE, oplist)( \
M_INLINE size_t \
M_F(name, _size)(concurrent_t const out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _read_lock)(out); \
size_t r = M_CALL_GET_SIZE(oplist, out->data); \
M_F(name, _read_unlock)(out); \
return r; \
} \
,) \
\
M_IF_METHOD(SET_KEY, oplist)( \
M_INLINE void \
M_F(name, _set_at)(concurrent_t out, M_GET_KEY_TYPE oplist const key, M_GET_VALUE_TYPE oplist const data) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
M_CALL_SET_KEY(oplist, out->data, key, data); \
M_F(name, _write_signal)(out); \
M_F(name, _write_unlock)(out); \
} \
,) \
\
M_IF_METHOD(GET_KEY, oplist)( \
M_INLINE bool \
M_F(name, _get_copy)(M_GET_VALUE_TYPE oplist *out_data, const concurrent_t out, M_GET_KEY_TYPE oplist const key) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_ASSERT (out_data != NULL); \
M_F(name, _read_lock)(out); \
M_GET_VALUE_TYPE oplist *p = M_CALL_GET_KEY(oplist, out->data, key); \
if (p != NULL) { \
M_CALL_SET(M_GET_VALUE_OPLIST oplist, *out_data, *p); \
} \
M_F(name, _read_unlock)(out); \
return p != NULL; \
} \
,) \
\
M_IF_METHOD(SAFE_GET_KEY, oplist)( \
M_INLINE void \
M_F(name, _safe_get_copy)(M_GET_VALUE_TYPE oplist *out_data, concurrent_t out, M_GET_KEY_TYPE oplist const key) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_ASSERT (out_data != NULL); \
M_F(name, _write_lock)(out); \
M_GET_VALUE_TYPE oplist *p = M_CALL_SAFE_GET_KEY(oplist, out->data, key); \
M_ASSERT (p != NULL); \
M_CALL_SET(M_GET_VALUE_OPLIST oplist, *out_data, *p); \
M_F(name, _write_unlock)(out); \
} \
,) \
\
M_IF_METHOD(ERASE_KEY, oplist)( \
M_INLINE bool \
M_F(name, _erase)(concurrent_t out, M_GET_KEY_TYPE oplist const key) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
bool b = M_CALL_ERASE_KEY(oplist, out->data, key); \
/* We suppose that the container has 'infinite' capacity, so \
we won't signal that a free space has been created */ \
M_F(name, _write_unlock)(out); \
return b; \
} \
,) \
\
M_IF_METHOD(PUSH, oplist)( \
M_INLINE void \
M_F(name, _push)(concurrent_t out, M_GET_SUBTYPE oplist const data) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
M_CALL_PUSH(oplist, out->data, data); \
M_F(name, _write_signal)(out); \
M_F(name, _write_unlock)(out); \
} \
\
M_EMPLACE_QUEUE_DEF(name, concurrent_t, M_F(name, _emplace), M_GET_OPLIST oplist, M_EMPLACE_QUEUE_GENE) \
,) \
\
M_IF_METHOD(POP, oplist)( \
M_INLINE void \
M_F(name, _pop)(M_GET_SUBTYPE oplist *p, concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
M_CALL_POP(oplist, p, out->data); \
/* See comment above */ \
M_F(name, _write_unlock)(out); \
} \
,) \
\
M_IF_METHOD(PUSH_MOVE, oplist)( \
M_INLINE void \
M_F(name, _push_move)(concurrent_t out, M_GET_SUBTYPE oplist *data) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
M_CALL_PUSH_MOVE(oplist, out->data, data); \
M_F(name, _write_signal)(out); \
M_F(name, _write_unlock)(out); \
} \
,) \
\
M_IF_METHOD(POP_MOVE, oplist)( \
M_INLINE void \
M_F(name, _pop_move)(M_GET_SUBTYPE oplist *p, concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
M_CALL_POP_MOVE(oplist, p, out->data); \
/* See comment above */ \
M_F(name, _write_unlock)(out); \
} \
,) \
\
M_IF_METHOD(GET_STR, oplist)( \
M_INLINE void \
M_F(name, _get_str)(m_string_t str, concurrent_t const out, bool a) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _read_lock)(out); \
M_CALL_GET_STR(oplist, str, out->data, a); \
M_F(name, _read_unlock)(out); \
} \
,) \
\
M_IF_METHOD(OUT_STR, oplist)( \
M_INLINE void \
M_F(name, _out_str)(FILE *f, concurrent_t const out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _read_lock)(out); \
M_CALL_OUT_STR(oplist, f, out->data); \
M_F(name, _read_unlock)(out); \
} \
,) \
\
M_IF_METHOD(PARSE_STR, oplist)( \
M_INLINE bool \
M_F(name, _parse_str)(concurrent_t out, const char str[], const char **e) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
bool b = M_CALL_PARSE_STR(oplist, out->data, str, e); \
M_F(name, _write_signal)(out); \
M_F(name, _write_unlock)(out); \
return b; \
} \
,) \
\
M_IF_METHOD(IN_STR, oplist)( \
M_INLINE bool \
M_F(name, _in_str)(concurrent_t out, FILE *f) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
bool b = M_CALL_IN_STR(oplist, out->data, f); \
M_F(name, _write_signal)(out); \
M_F(name, _write_unlock)(out); \
return b; \
} \
,) \
\
M_IF_METHOD(OUT_SERIAL, oplist)( \
M_INLINE m_serial_return_code_t \
M_F(name, _out_serial)(m_serial_write_t f, concurrent_t const out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _read_lock)(out); \
m_serial_return_code_t r = M_CALL_OUT_SERIAL(oplist, f, out->data); \
M_F(name, _read_unlock)(out); \
return r; \
} \
,) \
\
M_IF_METHOD(IN_SERIAL, oplist)( \
M_INLINE m_serial_return_code_t \
M_F(name, _in_serial)(concurrent_t out, m_serial_read_t f) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _write_lock)(out); \
m_serial_return_code_t r = M_CALL_IN_SERIAL(oplist, out->data, f); \
M_F(name, _write_signal)(out); \
M_F(name, _write_unlock)(out); \
return r; \
} \
,) \
\
M_IF_METHOD(EQUAL, oplist)( \
M_INLINE bool \
M_F(name, _equal_p)(concurrent_t const out1, concurrent_t const out2) \
{ \
M_C0NCURRENT_CONTRACT(out1); \
M_C0NCURRENT_CONTRACT(out2); \
if (M_UNLIKELY (out1 == out2)) return true; \
/* See comment above on mutal mutexs */ \
if (out1 < out2) { \
M_F(name, _read_lock)(out1); \
M_F(name, _read_lock)(out2); \
} else { \
M_F(name, _read_lock)(out2); \
M_F(name, _read_lock)(out1); \
} \
bool b = M_CALL_EQUAL(oplist, out1->data, out2->data); \
if (out1 < out2) { \
M_F(name, _read_unlock)(out2); \
M_F(name, _read_unlock)(out1); \
} else { \
M_F(name, _read_unlock)(out1); \
M_F(name, _read_unlock)(out2); \
} \
return b; \
} \
,) \
\
M_IF_METHOD(GET_KEY, oplist)( \
M_INLINE bool \
M_F(name, _get_blocking)(M_GET_VALUE_TYPE oplist *out_data, const concurrent_t out, M_GET_KEY_TYPE oplist const key, bool blocking) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_ASSERT (out_data != NULL); \
bool ret = false; \
M_F(name, _read_lock)(out); \
while (true) { \
M_GET_VALUE_TYPE oplist *p = M_CALL_GET_KEY(oplist, out->data, key); \
if (p != NULL) { \
M_CALL_SET(M_GET_VALUE_OPLIST oplist, *out_data, *p); \
ret = true; \
break; \
} \
if (blocking == false) break; \
/* No data: wait for a write to signal some data */ \
M_F(name, _read_wait)(out); \
} \
M_F(name, _read_unlock)(out); \
return ret; \
} \
,) \
\
M_IF_METHOD2(POP, EMPTY_P, oplist)( \
M_INLINE bool \
M_F(name, _pop_blocking)(M_GET_SUBTYPE oplist *p, concurrent_t out, bool blocking) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_ASSERT (p != NULL); \
bool ret = false; \
M_F(name, _write_lock)(out); \
while (true) { \
if (!M_CALL_EMPTY_P(oplist, out->data)) { \
M_CALL_POP(oplist, p, out->data); \
ret = true; \
break; \
} \
if (blocking == false) break; \
/* No data: wait for a write to signal some data */ \
M_F(name, _write_wait)(out); \
} \
M_F(name, _write_unlock)(out); \
return ret; \
} \
,) \
\
M_IF_METHOD2(POP_MOVE, EMPTY_P, oplist)( \
M_INLINE bool \
M_F(name, _pop_move_blocking)(M_GET_SUBTYPE oplist *p, concurrent_t out, bool blocking) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_ASSERT (p != NULL); \
bool ret = false; \
M_F(name, _write_lock)(out); \
while (true) { \
if (!M_CALL_EMPTY_P(oplist, out->data)) { \
M_CALL_POP_MOVE(oplist, p, out->data); \
ret = true; \
break; \
} \
if (blocking == false) break; \
/* No data: wait for a write to signal some data */ \
M_F(name, _write_wait)(out); \
} \
M_F(name, _write_unlock)(out); \
return ret; \
} \
,) \
\
M_IF_METHOD(HASH, oplist)( \
M_INLINE size_t \
M_F(name, _hash)(concurrent_t const out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
M_F(name, _read_lock)(out); \
size_t h = M_CALL_HASH(oplist, out->data); \
M_F(name, _read_unlock)(out); \
/* The hash is unchanged by the concurrent container */ \
return h; \
} \
,) \
/******************************** INTERNAL ***********************************/
/* Deferred evaluation for the RP concurrent definition,
   so that all arguments are evaluated once before further expansion */
#define M_C0NCURRENT_RP_DEF_P1(arg) M_ID( M_C0NCURRENT_RP_DEF_P2 arg )
/* Validate the value oplist before going further */
#define M_C0NCURRENT_RP_DEF_P2(name, type, oplist, concurrent_t)              \
  M_IF_OPLIST(oplist)(M_C0NCURRENT_RP_DEF_P3, M_C0NCURRENT_RP_DEF_FAILURE)(name, type, oplist, concurrent_t)
/* Stop processing with a compilation failure */
#define M_C0NCURRENT_RP_DEF_FAILURE(name, type, oplist, concurrent_t)         \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(CONCURRENT_RP_DEF): the given argument is not a valid oplist: " M_AS_STR(oplist))
/* Internal RP concurrent definition
   - name: prefix to be used
   - type: type of the sub container
   - oplist: oplist of the type of the sub container
   - concurrent_t: alias for M_F(name, _t) [ type of the container ]
   Expands the container type, the RP lock services and the common services. */
#define M_C0NCURRENT_RP_DEF_P3(name, type, oplist, concurrent_t)              \
  M_C0NCURRENT_RP_DEF_TYPE(name, type, oplist, concurrent_t)                  \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_C0NCURRENT_RP_DEF_CORE(name, type, oplist, concurrent_t)                  \
  M_C0NCURRENT_DEF_COMMON(name, type, oplist, concurrent_t)
/* Define the type of a RP concurrent container */
#define M_C0NCURRENT_RP_DEF_TYPE(name, type, oplist, concurrent_t)            \
                                                                              \
  typedef struct M_F(name, _s) {                                              \
    struct M_F(name, _s) *self;  /* points to itself; used to recover a non-const pointer in the const read services */ \
    m_mutex_t lock;              /* protects the fields below */              \
    m_cond_t rw_done;            /* signaled when a reader or a writer is done */ \
    size_t read_count;           /* number of readers inside the critical section */ \
    bool writer_waiting;         /* a writer has requested (or owns) exclusive access */ \
    m_cond_t there_is_data; /* condition raised when there is data */         \
    type data;                   /* the protected sub container */            \
  } concurrent_t[1];                                                          \
                                                                              \
  typedef struct M_F(name, _s) *M_F(name, _ptr);                              \
  typedef const struct M_F(name, _s) *M_F(name, _srcptr);                     \
                                                                              \
  typedef type M_F(name, _subtype_ct);                                        \
/* Define the internal services for the lock strategy of a RP container */
#define M_C0NCURRENT_RP_DEF_CORE(name, type, oplist, concurrent_t) \
\
M_INLINE void \
M_F(name, _internal_init)(concurrent_t out) \
{ \
m_mutex_init(out->lock); \
m_cond_init(out->rw_done); \
m_cond_init(out->there_is_data); \
out->self = out; \
out->read_count = 0; \
out->writer_waiting = false; \
M_C0NCURRENT_CONTRACT(out); \
} \
\
M_INLINE void \
M_F(name, _internal_clear)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_clear(out->lock); \
m_cond_clear(out->rw_done); \
m_cond_clear(out->there_is_data); \
out->self = NULL; \
} \
\
M_INLINE void \
M_F(name, _read_lock)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
struct M_F(name, _s) *self = out->self; \
m_mutex_lock (self->lock); \
while (self->writer_waiting == true) { \
m_cond_wait(self->rw_done, self->lock); \
} \
self->read_count ++; \
m_mutex_unlock (self->lock); \
} \
\
M_INLINE void \
M_F(name, _read_unlock)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
struct M_F(name, _s) *self = out->self; \
m_mutex_lock (self->lock); \
self->read_count --; \
if (self->read_count == 0) { \
m_cond_broadcast (self->rw_done); \
} \
m_mutex_unlock (self->lock); \
} \
\
M_INLINE void \
M_F(name, _write_lock)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_lock (out->lock); \
while (out->writer_waiting == true) { \
m_cond_wait(out->rw_done, out->lock); \
} \
out->writer_waiting = true; \
while (out->read_count > 0) { \
m_cond_wait(out->rw_done, out->lock); \
} \
m_mutex_unlock (out->lock); \
} \
\
M_INLINE void \
M_F(name, _write_unlock)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_lock (out->lock); \
out->writer_waiting = false; \
m_cond_broadcast (out->rw_done); \
m_mutex_unlock (out->lock); \
} \
\
M_INLINE void \
M_F(name, _read_wait)(const concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
struct M_F(name, _s) *self = out->self; \
M_ASSERT (self == out); \
m_mutex_lock (out->self->lock); \
self->read_count --; \
if (self->read_count == 0) { \
m_cond_broadcast (self->rw_done); \
} \
m_cond_wait(self->there_is_data, self->lock); \
while (self->writer_waiting == true) { \
m_cond_wait(self->rw_done, self->lock); \
} \
self->read_count ++; \
m_mutex_unlock (out->self->lock); \
} \
\
M_INLINE void \
M_F(name, _write_wait)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_lock (out->lock); \
out->writer_waiting = false; \
m_cond_broadcast (out->rw_done); \
m_cond_wait(out->there_is_data, out->lock); \
while (out->writer_waiting == true) { \
m_cond_wait(out->rw_done, out->lock); \
} \
out->writer_waiting = true; \
while (out->read_count > 0) { \
m_cond_wait(out->rw_done, out->lock); \
} \
m_mutex_unlock (out->lock); \
} \
\
M_INLINE void \
M_F(name, _write_signal)(concurrent_t out) \
{ \
M_C0NCURRENT_CONTRACT(out); \
m_mutex_lock (out->lock); \
m_cond_broadcast(out->there_is_data); \
m_mutex_unlock (out->lock); \
} \
/******************************** INTERNAL ***********************************/
/* Define the short (unprefixed) names if requested by the user */
#if M_USE_SMALL_NAME
#define CONCURRENT_DEF M_CONCURRENT_DEF
#define CONCURRENT_DEF_AS M_CONCURRENT_DEF_AS
#define CONCURRENT_RP_DEF M_CONCURRENT_RP_DEF
#define CONCURRENT_RP_DEF_AS M_CONCURRENT_RP_DEF_AS
#define CONCURRENT_OPLIST M_CONCURRENT_OPLIST
#endif
#endif /* end of include guard */

5297
components/mlib/m-core.h Normal file

File diff suppressed because it is too large Load Diff

1147
components/mlib/m-deque.h Normal file

File diff suppressed because it is too large Load Diff

1988
components/mlib/m-dict.h Normal file

File diff suppressed because it is too large Load Diff

415
components/mlib/m-funcobj.h Normal file
View File

@ -0,0 +1,415 @@
/*
* M*LIB - Function Object module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_FUNCOBJ_H
#define MSTARLIB_FUNCOBJ_H
#include "m-core.h"
/* Define a function object interface of name 'name'
* with a function like retcode, type of param1, type of param 2, ...
* USAGE:
* FUNC_OBJ_ITF_DEF(name, retcode type, type of param1, type of param 2, ...)
*/
#define M_FUNC_OBJ_ITF_DEF(name, ...) \
M_FUNC_OBJ_ITF_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a function object interface of name 'name'
* as the given name name_t
* USAGE:
* FUNC_OBJ_ITF_DEF_AS(name, name_t, retcode type, type of param1, type of param 2, ...)
*/
#define M_FUNC_OBJ_ITF_DEF_AS(name, name_t, ...) \
M_BEGIN_PROTECTED_CODE \
M_IF_NARGS_EQ1(__VA_ARGS__)(M_FUNC0BJ_ITF_NO_PARAM_DEF, M_FUNC0BJ_ITF_PARAM_DEF)(name, name_t, __VA_ARGS__) \
M_END_PROTECTED_CODE
/* Define a function object instance of name 'name' based on the interface 'base_name'
* The function is defined using:
* - the prototype of the inherited interface
* - the parameters of the function are named as per the list param_list
* - the core of the function given in 'callback_core'
* - optionals member attributes of the function object can be defined after the core
* (just like for tuple & variant: (name, type [, oplist])
*
* In the core of the function, parameters are accessible just like a normal function.
* A special variable named 'self' that refers to the function object itself
* can be used to access member attributes using the syntax self->param1, ...
*
* There shall be **exactly** the same number of parameters in 'param_list' than
* the number of parameters of the interface 'base_name'
*
* USAGE/EXAMPLE:
* FUNC_OBJ_INS_DEF(name, base_name, (param1, ...), { return param1 * self->member1 }, (member1, int), ...)
*/
#define M_FUNC_OBJ_INS_DEF(name, base_name, param_list, ...) \
M_FUNC_OBJ_INS_DEF_AS(name, M_F(name,_t), base_name, param_list, __VA_ARGS__)
/* Define a function object instance of name 'name' based on the interface 'base_name'
* as the given name name_t.
* See FUNC_OBJ_INS_DEF for additional details.
*
* USAGE/EXAMPLE:
* FUNC_OBJ_INS_DEF_AS(name, name_t, base_name, (param1, ...), { return param1 * self->member1 }, (member1, int), ...)
*/
#define M_FUNC_OBJ_INS_DEF_AS(name, name_t, base_name, param_list, ...) \
M_BEGIN_PROTECTED_CODE \
M_IF_NARGS_EQ1(__VA_ARGS__)(M_FUNC0BJ_INS_NO_ATTR_DEF, M_FUNC0BJ_INS_ATTR_DEF)(name, name_t, base_name, param_list, __VA_ARGS__) \
M_END_PROTECTED_CODE
/* OPLIST of the instanced function object
* USAGE:
* FUNC_OBJ_INS_OPLIST(name, oplist of the attr1, ...)
*/
#define M_FUNC_OBJ_INS_OPLIST(...) \
M_IF_NARGS_EQ1(__VA_ARGS__)(M_FUNC0BJ_INS_NO_ATTR_OPLIST, M_FUNC0BJ_INS_ATTR_OPLIST_P1)( __VA_ARGS__)
/*****************************************************************************/
/******************************** INTERNAL ***********************************/
/*****************************************************************************/
/* To be used by M_IF_FUNCOBJ macro defined in m-core.
   NOTE: It is reversed (0 instead of 1) so that it can be used in M_IF reliably.
*/
#define M_FUNC0BJ_IS_NOT_DEFINED 0
/* Design Constraints:
 * callback SHALL be the first member of the structures in all the definitions.
 *
 * Structure definitions are specialized in function of the presence or not
 * of parameters and/or attributes
 * FIXME: How to factorize reasonnably well between the definitions?
 */
/* Specialization of the OPLIST in function if there is at least one member or not */
#define M_FUNC0BJ_INS_NO_ATTR_OPLIST(name) (                                  \
  NAME(name),                                                                 \
  TYPE(M_F(name, _ct)),                                                       \
  CLEAR(M_F(name, _clear)),                                                   \
  INIT(M_F(name,_init))                                                       \
  )
/* Validate the oplist before going further */
#define M_FUNC0BJ_INS_ATTR_OPLIST_P1(name, ...)                               \
  M_IF(M_REDUCE(M_OPLIST_P, M_AND, __VA_ARGS__))(M_FUNC0BJ_INS_ATTR_OPLIST_P3, M_FUNC0BJ_INS_ATTR_OPLIST_FAILURE)(name, __VA_ARGS__)
/* Prepare a clean compilation failure */
#define M_FUNC0BJ_INS_ATTR_OPLIST_FAILURE(name, ...)                          \
  ((M_LIB_ERROR(ONE_ARGUMENT_OF_FUNC_OBJ_INS_OPLIST_IS_NOT_AN_OPLIST, name, __VA_ARGS__)))
/* Define the oplist of the instance.
   INIT is only exported if all member attributes provide one. */
#define M_FUNC0BJ_INS_ATTR_OPLIST_P3(name, ...) (                             \
  NAME(name),                                                                 \
  TYPE(M_F(name, _ct)),                                                       \
  INIT_WITH(M_F(name, _init_with)),                                           \
  CLEAR(M_F(name, _clear)),                                                   \
  M_IF_METHOD_ALL(INIT, __VA_ARGS__)(INIT(M_F(name,_init)),),                 \
  PROPERTIES(( LET_AS_INIT_WITH(1) ))                                         \
  )
/******************************** INTERNAL ***********************************/
/* Specialization of the definition a function object interface of name 'name'
* with a function like "retcode (void)" that doesn't have any input parameters.
* Define the following types to be used by instance:
* - M_F(name, _retcode_ct): internal type of the return code
* - M_F(name, _callback_ct): internal type of the callback.
* - M_F(name, _ct): synonym of main type used by oplist.
*/
#define M_FUNC0BJ_ITF_NO_PARAM_DEF(name, interface_t, retcode) \
\
/* Forward declaration */ \
struct M_F(name, _s); \
\
/* Internal type for instance */ \
typedef retcode M_F(name, _retcode_ct); \
/* No parameters to the callback */ \
typedef retcode(*M_F(name, _callback_ct))(struct M_F(name, _s) *); \
\
typedef struct M_F(name, _s) { \
M_F(name, _callback_ct) callback; \
} *interface_t; \
\
/* Internal type for oplist & instance */ \
typedef interface_t M_F(name, _ct); \
\
M_INLINE retcode \
M_F(name, _call)(interface_t funcobj) \
{ \
M_IF(M_KEYWORD_P(void, retcode)) ( /* nothing */,return) \
funcobj->callback(funcobj); \
}
/* Specialization of the definition a function object interface of name 'name'
* with a function like retcode, type of param1, type of param 2, ...
* with mandatory input parameters.
* Define the following types to be used by instance:
* - M_F(name, _retcode_ct): internal type of the return code
* - M_F(name, _callback_ct): internal type of the callback.
* - M_C4(name, _param_, num, _ct) for each parameter defined
* - M_F(name, _ct): synonym of main type used by oplist.
*/
#define M_FUNC0BJ_ITF_PARAM_DEF(name, interface_t, retcode, ...) \
\
/* Forward declaration */ \
struct M_F(name, _s); \
\
/* Internal types for instance */ \
typedef retcode M_F(name, _retcode_ct); \
/* Define types for all parameters */ \
M_MAP3(M_FUNC0BJ_BASE_TYPE, name, __VA_ARGS__) \
/* Define callback type with all parameters */ \
typedef retcode(*M_F(name, _callback_ct))(struct M_F(name, _s) *, __VA_ARGS__); \
\
typedef struct M_F(name, _s) { \
M_F(name, _callback_ct) callback; \
} *interface_t; \
\
/* Internal type for oplist & instance */ \
typedef interface_t M_F(name, _ct); \
\
M_INLINE retcode \
M_F(name, _call)(interface_t funcobj \
M_MAP3(M_FUNC0BJ_BASE_ARGLIST, name, __VA_ARGS__) ) \
{ \
/* If the retcode is 'void', don't return the value of the callback */ \
M_IF(M_KEYWORD_P(void, retcode)) ( /* nothing */,return) \
funcobj->callback(funcobj M_MAP3(M_FUNC0BJ_BASE_ARGCALL, name, __VA_ARGS__) ); \
}
/******************************** INTERNAL ***********************************/
/* Specialization of the definition a function object instance of name 'name'
* with no member attribute.
*/
#define M_FUNC0BJ_INS_NO_ATTR_DEF(name, instance_t, base_name, param_list, callback_core) \
typedef struct M_F(name, _s) { \
M_C(base_name, _callback_ct) callback; \
} instance_t[1]; \
\
/* Internal type for oplist */ \
typedef instance_t M_F(name, _ct); \
\
M_INLINE M_C(base_name, _retcode_ct) \
M_F(name, _callback)(M_C(base_name, _ct) _self \
M_IF_EMPTY(M_OPFLAT param_list)( \
/* No param */, \
M_MAP3(M_FUNC0BJ_INS_ARGLIST, base_name, M_OPFLAT param_list) \
) \
) \
{ \
struct M_F(name, _s) *self = (struct M_F(name, _s) *)_self; \
(void) self; /* maybe unused */ \
callback_core; \
} \
\
M_INLINE void \
M_F(name, _init_with)(instance_t obj) \
{ \
obj->callback = M_F(name, _callback); \
} \
\
M_INLINE void \
M_F(name, _clear)(instance_t obj) \
{ \
(void) obj; /* nothing to do */ \
} \
\
M_INLINE struct M_C(base_name, _s) * \
M_F(name, _as_interface)(instance_t obj) \
{ \
return (struct M_C(base_name, _s) *) obj; \
} \
\
M_INLINE void \
M_F(name, _init)(instance_t obj) \
{ \
obj->callback = M_F(name, _callback); \
} \
/* Specialization of the definition a function object instance of name 'name'
 * with mandatory member attribute.
 * First inject oplist in member attributes.
 */
#define M_FUNC0BJ_INS_ATTR_DEF(name, instance_t, base_name, param_list, callback_core, ...) \
  M_FUNC0BJ_INS_ATTR_DEF_P2(name, instance_t, base_name, param_list, callback_core, M_FUNC0BJ_INJECT_GLOBAL(__VA_ARGS__) )
/* Inject the oplist within the list of arguments */
#define M_FUNC0BJ_INJECT_GLOBAL(...)                                          \
  M_MAP_C(M_FUNC0BJ_INJECT_OPLIST_A, __VA_ARGS__)
/* Transform (x, type) into (x, type, oplist) if there is global registered oplist
   or (x, type, M_BASIC_OPLIST) if there is no global one,
   or keep (x, type, oplist) if oplist was already present */
#define M_FUNC0BJ_INJECT_OPLIST_A( duo_or_trio )                              \
  M_FUNC0BJ_INJECT_OPLIST_B duo_or_trio
#define M_FUNC0BJ_INJECT_OPLIST_B( f, ... )                                   \
  M_IF_NARGS_EQ1(__VA_ARGS__)( (f, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)()), (f, __VA_ARGS__) )
// Test if all third argument of all arguments is an oplist
#define M_FUNC0BJ_IF_ALL_OPLIST(...)                                          \
  M_IF(M_REDUCE(M_FUNC0BJ_IS_OPLIST_P, M_AND, __VA_ARGS__))
// Test if the third argument is an oplist. a is a trio (name, type, oplist)
#define M_FUNC0BJ_IS_OPLIST_P(a)                                              \
  M_OPLIST_P(M_RET_ARG3 a)
/* Validate the oplist before going further */
#define M_FUNC0BJ_INS_ATTR_DEF_P2(name, instance_t, base_name, param_list, callback_core, ...) \
  M_FUNC0BJ_IF_ALL_OPLIST(__VA_ARGS__)(M_FUNC0BJ_INS_ATTR_DEF_P3, M_FUNC0BJ_INS_ATTR_DEF_FAILURE)(name, instance_t, base_name, param_list, callback_core, __VA_ARGS__)
/* Stop processing with a compilation failure */
#define M_FUNC0BJ_INS_ATTR_DEF_FAILURE(name, instance_t, base_name, param_list, callback_core, ...) \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(FUNC_OBJ_INS_DEF): at least one of the given argument is not a valid oplist: " #__VA_ARGS__)
/* Expand the Function Object with members.
 * Defines the instance structure (callback first, then the member
 * attributes), its oplist type, the callback trampoline, and the
 * _init_with / _clear / _as_interface / (conditional) _init services.
 */
#define M_FUNC0BJ_INS_ATTR_DEF_P3(name, instance_t, base_name, param_list, callback_core, ...) \
  typedef struct M_F(name, _s) {                                              \
    /* Callback is the mandatory first argument */                            \
    M_C(base_name, _callback_ct) callback;                                    \
    /* All the member attribute of the Function Object */                     \
    M_MAP(M_FUNC0BJ_INS_ATTR_STRUCT, __VA_ARGS__)                             \
  } instance_t[1];                                                            \
                                                                              \
  /* Internal type for oplist */                                              \
  typedef instance_t M_F(name, _ct);                                          \
                                                                              \
  M_FUNC0BJ_CONTROL_ALL_OPLIST(name, __VA_ARGS__)                             \
                                                                              \
  M_INLINE M_C(base_name, _retcode_ct)                                        \
  M_F(name, _callback)(M_C(base_name, _ct) _self                              \
                  M_IF_EMPTY(M_OPFLAT param_list)(                            \
                  /* No param */,                                             \
                  M_MAP3(M_FUNC0BJ_INS_ARGLIST, base_name, M_OPFLAT param_list) \
                  )                                                           \
                  )                                                           \
  {                                                                           \
    /* Let's go through an uintptr_t to avoid [broken] aliasing detection by compiler */ \
    uintptr_t __self = (uintptr_t) _self;                                     \
    struct M_F(name, _s) *self = (struct M_F(name, _s) *)(void*)__self;       \
    (void) self; /* maybe unused */                                           \
    callback_core;                                                            \
  }                                                                           \
                                                                              \
  M_INLINE void                                                               \
  M_F(name, _init_with)(instance_t obj M_MAP(M_FUNC0BJ_INS_ATTR_LIST, __VA_ARGS__)) \
  {                                                                           \
    obj->callback = M_F(name, _callback);                                     \
    M_MAP(M_FUNC0BJ_INS_ATTR_INIT_SET, __VA_ARGS__);                          \
  }                                                                           \
                                                                              \
  M_INLINE void                                                               \
  M_F(name, _clear)(instance_t obj)                                           \
  {                                                                           \
    M_MAP(M_FUNC0BJ_INS_ATTR_CLEAR, __VA_ARGS__);                             \
  }                                                                           \
                                                                              \
  /* Upcast to the interface type (valid: callback is the first member) */    \
  M_INLINE struct M_C(base_name, _s) *                                        \
  M_F(name, _as_interface)(instance_t obj)                                    \
  {                                                                           \
    return (struct M_C(base_name, _s) *) obj;                                 \
  }                                                                           \
                                                                              \
  /* Define _init only if all member attributes provide an INIT method.       \
     FIX: M_FUNC0BJ_TEST_METHOD_P already expands to M_IF(cond) (see its      \
     definition), so it shall not be wrapped in another M_IF, and the         \
     correct variadic token is __VA_ARGS__ (the previous code used the        \
     undefined identifier __VA_ARGS). Same pattern as                         \
     M_FUNC0BJ_IF_ALL_OPLIST usage in M_FUNC0BJ_INS_ATTR_DEF_P2. */           \
  M_FUNC0BJ_TEST_METHOD_P(INIT, __VA_ARGS__)                                  \
  (                                                                           \
   M_INLINE void                                                              \
   M_F(name, _init)(instance_t obj)                                           \
   {                                                                          \
     obj->callback = M_F(name, _callback);                                    \
     M_MAP(M_FUNC0BJ_INS_ATTR_INIT, __VA_ARGS__);                             \
   }                                                                          \
   , /* END OF INIT METHOD */ )                                               \
/* Define a numbered type of a parameter of the callback*/
#define M_FUNC0BJ_BASE_TYPE(name, num, type)                                  \
  typedef type M_C4(name, _param_, num, _ct);
/* Define a list of the type of arguments for a function definition */
#define M_FUNC0BJ_BASE_ARGLIST(name, num, type)                               \
  M_DEFERRED_COMMA type M_C(param_, num)
/* Define a list of arguments for a function call */
#define M_FUNC0BJ_BASE_ARGCALL(name, num, type)                               \
  M_DEFERRED_COMMA M_C(param_, num)
/* Helper macros */
/* arg = (name, type [, oplist]) */
/* Declare one member attribute in the instance structure */
#define M_FUNC0BJ_INS_ATTR_STRUCT(arg)                                        \
  M_RET_ARG2 arg M_RET_ARG1 arg;
/* Declare one member attribute as a const parameter of _init_with */
#define M_FUNC0BJ_INS_ATTR_LIST(arg)                                          \
  M_DEFERRED_COMMA M_RET_ARG2 arg const M_RET_ARG1 arg
/* Default-initialize one member attribute (INIT method of its oplist) */
#define M_FUNC0BJ_INS_ATTR_INIT(arg)                                          \
  M_CALL_INIT(M_RET_ARG3 arg, obj -> M_RET_ARG1 arg);
/* Copy-initialize one member attribute from the _init_with parameter */
#define M_FUNC0BJ_INS_ATTR_INIT_SET(arg)                                      \
  M_CALL_INIT_SET(M_RET_ARG3 arg, obj -> M_RET_ARG1 arg, M_RET_ARG1 arg);
/* Clear one member attribute */
#define M_FUNC0BJ_INS_ATTR_CLEAR(arg)                                         \
  M_CALL_CLEAR(M_RET_ARG3 arg, obj -> M_RET_ARG1 arg);
/* Define the list of arguments of the instance of the callback */
#define M_FUNC0BJ_INS_ARGLIST(name, num, param)                               \
  M_DEFERRED_COMMA M_C4(name, _param_, num, _ct) param
/* Macros for testing for a method presence in all the attributes */
#define M_FUNC0BJ_TEST_METHOD2_P(method, op)                                  \
  M_TEST_METHOD_P(method, op)
#define M_FUNC0BJ_TEST_METHOD1_P(method, arg)                                 \
  M_APPLY(M_FUNC0BJ_TEST_METHOD2_P, method, M_RET_ARG3 arg)
/* Expands to an M_IF(...) selector: true when all attributes have 'method' */
#define M_FUNC0BJ_TEST_METHOD_P(method, ...)                                  \
  M_IF(M_REDUCE2(M_FUNC0BJ_TEST_METHOD1_P, M_AND, method, __VA_ARGS__))
/* Macro for checking compatible type and oplist for all the attributes */
#define M_FUNC0BJ_CONTROL_ALL_OPLIST(name, ...)                               \
  M_MAP2(M_FUNC0BJ_CONTROL_OPLIST, name, __VA_ARGS__)
#define M_FUNC0BJ_CONTROL_OPLIST(name, a)                                     \
  M_CHECK_COMPATIBLE_OPLIST(name, M_RET_ARG1 a, M_RET_ARG2 a, M_RET_ARG3 a)
/******************************** INTERNAL ***********************************/
/* Define the short (unprefixed) names if requested by the user */
#if M_USE_SMALL_NAME
#define FUNC_OBJ_ITF_DEF M_FUNC_OBJ_ITF_DEF
#define FUNC_OBJ_ITF_DEF_AS M_FUNC_OBJ_ITF_DEF_AS
#define FUNC_OBJ_INS_DEF M_FUNC_OBJ_INS_DEF
#define FUNC_OBJ_INS_DEF_AS M_FUNC_OBJ_INS_DEF_AS
#define FUNC_OBJ_INS_OPLIST M_FUNC_OBJ_INS_OPLIST
#endif
#endif /* MSTARLIB_FUNCOBJ_H */

247
components/mlib/m-genint.h Normal file
View File

@ -0,0 +1,247 @@
/*
* M*LIB - Integer Generator (GENINT) module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_GENINT_H
#define MSTARLIB_GENINT_H
#include "m-core.h"
#include "m-atomic.h"
M_BEGIN_PROTECTED_CODE
/* GENINT is an internal container providing unique integers.
It has the following properties:
- it stores integer from [0..N) (N is fixed).
- an integer can have only one occurrence in the container.
- you can atomically push in / pop out integer from this container
provided that it is not already in the container.
   - there is no ordering guarantee (such as FIFO or stack)
This can be used to map integers to index of resources in a table.
At most we can support N = 32*64 = 2048 with the master limb usage.
For the typical usage of this container
(mapping hardware or software limited resources), this should be
enough.
*/
// Define the limb size used by genint
typedef unsigned long long m_genint_limb_ct;

/* Define a generator of unique integer (Lock Free) */
typedef struct m_genint_s {
  unsigned int n;               // size of the container
  unsigned int max;             // number of allocated limb - 1
  m_genint_limb_ct mask0;       // mask of the last limb (constant)
  m_genint_limb_ct mask_master; // mask of the master limb that controls others (constant)
  atomic_ullong master;         // master bitfield (which informs if a limb is full or not)
  atomic_ullong *data;          // the bitfield which informs if an integer is used or not
} m_genint_t[1];

// Define the max absolute supported value. It should be 2048 on most implementations.
#define M_GENINT_MAX_ALLOC (M_GEN1NT_LIMBSIZE * (M_GEN1NT_LIMBSIZE - M_GEN1NT_ABA_CPT))

// Define the size of a limb in bits.
#define M_GEN1NT_LIMBSIZE ((unsigned)(sizeof(m_genint_limb_ct) * CHAR_BIT))

// Define the contract of a genint
#define M_GEN1NT_CONTRACT(s) do {                                             \
    M_ASSERT (s != NULL);                                                     \
    M_ASSERT (s->n > 0 && s->n <= M_GENINT_MAX_ALLOC);                        \
    M_ASSERT ((s->max+1) * M_GEN1NT_LIMBSIZE >= s->n);                        \
    M_ASSERT (s->data != NULL);                                               \
  } while (0)

// Define the limb one
#define M_GEN1NT_ONE ((m_genint_limb_ct)1)
#define M_GEN1NT_FULL_MASK ULLONG_MAX

// Value returned in case of error (not integer available).
#define M_GENINT_ERROR (UINT_MAX)

/* 32 bits of the master mask are kept for handling the ABA problem.
 * NOTE: May be too much. 16 bits should be more than enough. TBC
 */
#define M_GEN1NT_ABA_CPT 32
#define M_GEN1NT_ABA_CPT_T uint32_t

// Set the bit 'i' of the master limb, and increase ABA counter.
// FIX: the 'i' argument is now parenthesized so an expression can be
// passed safely (previously 'LIMBSIZE - 1 - i' misparsed for e.g. 'a+b').
#define M_GEN1NT_MASTER_SET(master, i)                                        \
  ((((master)& (~((M_GEN1NT_ONE<< M_GEN1NT_ABA_CPT)-1))) | (M_GEN1NT_ONE << (M_GEN1NT_LIMBSIZE - 1 - (i)))) \
   |((M_GEN1NT_ABA_CPT_T)((master) + 1)))

// Reset the bit i of the master limb, and increase ABA counter.
#define M_GEN1NT_MASTER_RESET(master, i)                                      \
  (((master) & (~((M_GEN1NT_ONE<< M_GEN1NT_ABA_CPT)-1)) & ~(M_GEN1NT_ONE << (M_GEN1NT_LIMBSIZE - 1 - (i)))) \
   |((M_GEN1NT_ABA_CPT_T)((master) + 1)))
/* Initialize an integer generator (CONSTRUCTOR).
 * Initially, the container is full of all the integers up to 'n-1'.
 * The typical sequence is to initialize the container, and pop
 * integers from it. Each popped integer is **unique** across all threads,
 * meaning it can be used to index global unique resources shared
 * by all threads.
 * On allocation failure, M_MEMORY_FULL is raised and 's' is left uninitialized.
 */
M_INLINE void
m_genint_init(m_genint_t s, unsigned int n)
{
    M_ASSERT (s != NULL && n > 0 && n <= M_GENINT_MAX_ALLOC);
    // Number of limbs needed to hold 'n' bits (rounded up)
    const size_t alloc = (n + M_GEN1NT_LIMBSIZE - 1) / M_GEN1NT_LIMBSIZE;
    // Number of valid bits in the last limb (0 means the last limb is fully used)
    const unsigned int index = n % M_GEN1NT_LIMBSIZE;
    atomic_ullong *ptr = M_MEMORY_REALLOC (atomic_ullong, NULL, alloc);
    if (M_UNLIKELY_NOMEM (ptr == NULL)) {
        M_MEMORY_FULL(alloc);
        return;
    }
    s->n = n;
    s->data = ptr;
    s->max = (unsigned int) (alloc-1);
    // mask0 is the value of the last limb once all its valid integers are reserved:
    // only its 'index' most significant bits map to integers (low bits stay 0).
    s->mask0 = (index == 0) ? M_GEN1NT_FULL_MASK : ~((M_GEN1NT_ONE<<(M_GEN1NT_LIMBSIZE-index))-1);
    // mask_master is the value of (master >> M_GEN1NT_ABA_CPT) once all limbs
    // are full: the 'alloc' most significant bits of master track the limbs.
    s->mask_master = (((M_GEN1NT_ONE << alloc) - 1) << (M_GEN1NT_LIMBSIZE-alloc)) >> M_GEN1NT_ABA_CPT;
    // All bits start cleared: every integer is initially free.
    atomic_init (&s->master, (m_genint_limb_ct)0);
    for(unsigned int i = 0; i < alloc; i++)
        atomic_init(&s->data[i], (m_genint_limb_ct)0);
    M_GEN1NT_CONTRACT(s);
}
/* Clear an integer generator (DESTRUCTOR).
 * Releases the limb array; the generator shall not be used afterwards. */
M_INLINE void
m_genint_clear(m_genint_t s)
{
    M_GEN1NT_CONTRACT(s);
    // Invalidate the representation, then release the storage.
    atomic_ullong *limbs = s->data;
    s->data = NULL;
    M_MEMORY_FREE(limbs);
}
/* Return the capacity of the generator,
 * i.e. the number of distinct integers it can provide. */
M_INLINE size_t
m_genint_size(m_genint_t s)
{
    M_GEN1NT_CONTRACT(s);
    const size_t capacity = s->n;
    return capacity;
}
/* Get a unique integer from the integer generator.
 * Return M_GENINT_ERROR if none is available.
 * NOTE: For a typical case, the amortized cost is one CAS per pop. */
M_INLINE unsigned int
m_genint_pop(m_genint_t s)
{
    M_GEN1NT_CONTRACT(s);
    // First read master to see which limb is not full.
    m_genint_limb_ct master = atomic_load(&s->master);
    // While master is not full
    while ((master >> M_GEN1NT_ABA_CPT) != s->mask_master) {
        // Let's get the index i of the first not full limb according to master
        // (bit LIMBSIZE-1 of master tracks limb 0, see M_GEN1NT_MASTER_SET).
        unsigned int i = m_core_clz64(~master);
        M_ASSERT (i < M_GEN1NT_LIMBSIZE);
        // Let's compute the mask of this limb representing the limb as being full
        // (the last limb uses mask0 since its low bits map to no valid integer).
        m_genint_limb_ct mask = s->mask0;
        mask = (i == s->max) ? mask : M_GEN1NT_FULL_MASK;
        unsigned int bit;
        // Let's load this limb,
        m_genint_limb_ct next, org = atomic_load(&s->data[i]);
        do {
            // If it is now full, we have been preempted by another thread.
            if (M_UNLIKELY (org == mask))
                goto next_element;
            M_ASSERT (org != M_GEN1NT_FULL_MASK);
            // At least one bit is free in the limb. Find one.
            // Bit LIMBSIZE-1 encodes the smallest integer of the limb:
            // integers grow downward within a limb (see the return expression).
            bit = M_GEN1NT_LIMBSIZE - 1 - m_core_clz64(~org);
            M_ASSERT (bit < M_GEN1NT_LIMBSIZE);
            M_ASSERT ((org & (M_GEN1NT_ONE<<bit)) == 0);
            M_ASSERT (i * M_GEN1NT_LIMBSIZE + M_GEN1NT_LIMBSIZE - 1 - bit < s->n);
            // Set the integer as being used.
            next = org | (M_GEN1NT_ONE << bit);
            // Try to reserve the integer
        } while (!atomic_compare_exchange_weak (&s->data[i], &org, next));
        // We have reserved the integer.
        // If the limb is now full, try to update master
        if (M_UNLIKELY(next == mask)) {
            while (true) {
                m_genint_limb_ct newMaster;
                if (next == mask) {
                    // Limb still full: publish it in master (also bumps the ABA counter).
                    newMaster = M_GEN1NT_MASTER_SET(master, i);
                } else {
                    // A concurrent push freed an integer meanwhile: keep limb marked not full.
                    newMaster = M_GEN1NT_MASTER_RESET(master, i);
                }
                if (atomic_compare_exchange_weak (&s->master, &master, newMaster))
                    break;
                // Fail to update. Reload limb to check if it is still full.
                next = atomic_load(&s->data[i]);
            }
        }
        // Return the new number
        M_GEN1NT_CONTRACT(s);
        return i * M_GEN1NT_LIMBSIZE + M_GEN1NT_LIMBSIZE - 1 - bit;
    next_element:
        // Reload master
        master = atomic_load(&s->master);
    }
    M_GEN1NT_CONTRACT(s);
    return M_GENINT_ERROR; // No more resource available
}
/* Restore (push back) a used integer into the integer generator.
 * 'n' shall be an integer previously obtained through m_genint_pop.
 * NOTE: For a typical case, the amortized cost is one CAS per push. */
M_INLINE void
m_genint_push(m_genint_t s, unsigned int n)
{
    M_GEN1NT_CONTRACT(s);
    M_ASSERT (n < s->n);
    // Locate the limb and the bit encoding integer 'n'
    const unsigned int i = n / M_GEN1NT_LIMBSIZE;
    const unsigned int bit = M_GEN1NT_LIMBSIZE - 1 - (n % M_GEN1NT_LIMBSIZE);
    m_genint_limb_ct master = atomic_load(&s->master);
    // Load the limb
    m_genint_limb_ct next, org = atomic_load(&s->data[i]);
    do {
        // The integer shall currently be marked as used.
        M_ASSERT ((org & (M_GEN1NT_ONE << bit)) != 0);
        // Reset it
        next = org & (~(M_GEN1NT_ONE << bit));
        // Try to unreserve it.
    } while (!atomic_compare_exchange_weak (&s->data[i], &org, next));
    // if the limb was marked as full by master
    m_genint_limb_ct mask = s->mask0;
    mask = (i == s->max) ? mask : M_GEN1NT_FULL_MASK;
    // NOTE(review): 'next != mask' always holds here (a valid bit was just
    // cleared), so master is refreshed on every push. This looks conservative
    // but correct — confirm against upstream M*LIB whether 'org == mask'
    // (limb was full before the clear) was the intended condition.
    if (M_UNLIKELY (next != mask)) {
        // Let's compute the mask of this limb representing the limb as being full
        // Let's try to update master to say that this limb is not full
        while (true) {
            m_genint_limb_ct newMaster;
            if (next == mask) {
                // Concurrent pops refilled the limb: mark it full again.
                newMaster = M_GEN1NT_MASTER_SET(master, i);
            } else {
                newMaster = M_GEN1NT_MASTER_RESET(master, i);
            }
            if (atomic_compare_exchange_weak (&s->master, &master, newMaster))
                break;
            // Fail to update. Reload limb to check if it is still full.
            next = atomic_load(&s->data[i]);
        }
    }
    M_GEN1NT_CONTRACT(s);
}
M_END_PROTECTED_CODE
#endif

679
components/mlib/m-i-list.h Normal file
View File

@ -0,0 +1,679 @@
/*
* M*LIB - Intrusive List module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_I_LIST_H
#define MSTARLIB_I_LIST_H
#include "m-core.h"
#include "m-list.h" // For M_L1ST_ITBASE_DEF
/* Interface to add to a structure to enable intrusive doubly-linked support.
   name: name of the intrusive list.
   type: name of the type of the structure (aka. struct test_s) - not used currently
   USAGE:
     typedef struct tmp_str_s {
      ...
      ILIST_INTERFACE(tmpstr, struct tmp_str_s);
      ...
     } tmp_str_t;
*/
#define M_ILIST_INTERFACE(name, type)                                         \
  struct m_il1st_head_s name
/* Define a doubly-linked intrusive list of a given type.
   The type needs to have ILIST_INTERFACE().
   USAGE:
     ILIST_DEF(name, type [, oplist_of_the_type]) */
#define M_ILIST_DEF(name, ...)                                                \
  M_ILIST_DEF_AS(name, M_F(name,_t), M_F(name,_it_t), __VA_ARGS__)
/* Define a doubly-linked intrusive list of a given type
   as the provided type name_t with the iterator named it_t.
   The type needs to have ILIST_INTERFACE().
   USAGE:
     ILIST_DEF_AS(name, name_t, it_t, type [, oplist_of_the_type]) */
#define M_ILIST_DEF_AS(name, name_t, it_t, ...)                               \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_IL1ST_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                                  \
                 ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t, it_t ), \
                  (name, __VA_ARGS__, name_t, it_t )))                        \
  M_END_PROTECTED_CODE
/* Define the oplist of a doubly-linked intrusive list of type.
   USAGE:
     ILIST_OPLIST(name [, oplist_of_the_type]) */
#define M_ILIST_OPLIST(...)                                                   \
  M_IL1ST_OPLIST_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                               \
                    ((__VA_ARGS__, M_BASIC_OPLIST),                           \
                     (__VA_ARGS__ )))
/*****************************************************************************/
/******************************** INTERNAL ***********************************/
/*****************************************************************************/
/* Define the basic doubly-linked node structure to be embedded in all objects.
 * A list head uses the same structure: the chain is circular. */
typedef struct m_il1st_head_s {
  struct m_il1st_head_s *next;  // next node (or back to the list head)
  struct m_il1st_head_s *prev;  // previous node (or back to the list head)
} m_il1st_head_ct;
/* Indirection call to allow expanding all arguments */
#define M_IL1ST_OPLIST_P1(arg) M_IL1ST_OPLIST_P2 arg
/* Validation of the given oplist */
#define M_IL1ST_OPLIST_P2(name, oplist) \
M_IF_OPLIST(oplist)(M_IL1ST_OPLIST_P3, M_IL1ST_OPLIST_FAILURE)(name, oplist)
/* Prepare a clean compilation failure */
#define M_IL1ST_OPLIST_FAILURE(name, oplist) \
((M_LIB_ERROR(ARGUMENT_OF_ILIST_OPLIST_IS_NOT_AN_OPLIST, name, oplist)))
/* Define the oplist of an ilist of type */
#define M_IL1ST_OPLIST_P3(name, oplist) \
(INIT(M_F(name, _init)), \
CLEAR(M_F(name, _clear)), \
INIT_MOVE(M_F(name, _init_move)), \
MOVE(M_F(name, _move)), \
NAME(name), \
TYPE(M_F(name,_ct)), \
RESET(M_F(name,_reset)), \
SUBTYPE(M_F(name,_subtype_ct)), \
EMPTY_P(M_F(name,_empty_p)), \
IT_TYPE(M_F(name,_it_ct)), \
IT_FIRST(M_F(name,_it)), \
IT_SET(M_F(name,_it_set)), \
IT_LAST(M_F(name,_it_last)), \
IT_END(M_F(name,_it_end)), \
IT_END_P(M_F(name,_end_p)), \
IT_LAST_P(M_F(name,_last_p)), \
IT_EQUAL_P(M_F(name,_it_equal_p)), \
IT_NEXT(M_F(name,_next)), \
IT_PREVIOUS(M_F(name,_previous)), \
IT_REF(M_F(name,_ref)), \
IT_CREF(M_F(name,_cref)), \
IT_REMOVE(M_F(name,_remove)), \
M_IF_METHOD(NEW, oplist)(IT_INSERT(M_F(name,_insert)),), \
OPLIST(oplist), \
SPLICE_BACK(M_F(name,_splice_back)) \
)
/******************************** INTERNAL ***********************************/
/* Contract respected by all intrusive lists */
#define M_IL1ST_CONTRACT(name, list) do { \
M_ASSERT(list != NULL); \
M_ASSERT(list->name.prev != NULL); \
M_ASSERT(list->name.next != NULL); \
M_ASSERT(list->name.next->prev == &list->name); \
M_ASSERT(list->name.prev->next == &list->name); \
M_ASSERT(!(list->name.prev == &list->name) || list->name.prev == list->name.next); \
} while (0)
#define M_IL1ST_NODE_CONTRACT(node) do { \
M_ASSERT((node) != NULL); \
M_ASSERT((node)->prev != NULL); \
M_ASSERT((node)->next != NULL); \
M_ASSERT((node)->next->prev == node); \
M_ASSERT((node)->prev->next == node); \
} while (0)
/* Indirection call to allow expanding all arguments */
#define M_IL1ST_DEF_P1(arg) M_ID( M_IL1ST_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_IL1ST_DEF_P2(name, type, oplist, list_t, it_t) \
M_IF_OPLIST(oplist)(M_IL1ST_DEF_P3, M_IL1ST_DEF_FAILURE)(name, type, oplist, list_t, it_t)
/* Stop processing with a compilation failure */
#define M_IL1ST_DEF_FAILURE(name, type, oplist, list_t, it_t) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(ILIST_DEF): the given argument is not a valid oplist: " #oplist)
/* Definition of the type and function for an intrusive doubly-linked list.
USAGE:
name: name of the intrusive list
type: type of the object
oplist: oplist of the type
list_t: type of the intrusive list (name##_t)
it_t: iterator of the intrusive list (name##_it_t)
*/
#define M_IL1ST_DEF_P3(name, type, oplist, list_t, it_t) \
M_IL1ST_DEF_TYPE(name, type, oplist, list_t, it_t) \
M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist) \
M_IL1ST_DEF_CORE(name, type, oplist, list_t, it_t) \
/* Used of internal macro from m-list */ \
M_L1ST_ITBASE_DEF(name, type, oplist, list_t, it_t)
/* Define the type of an intrusive list */
#define M_IL1ST_DEF_TYPE(name, type, oplist, list_t, it_t)                    \
                                                                              \
  /* Define the list as a structure containing pointers                       \
   * to the front & back nodes */                                             \
  typedef struct M_F(name, _s) {                                              \
    struct m_il1st_head_s name;                                               \
  } list_t[1];                                                                \
                                                                              \
  /* Define internal types pointers to such a list */                         \
  typedef struct M_F(name, _s) *M_F(name, _ptr);                              \
  typedef const struct M_F(name, _s) *M_F(name, _srcptr);                     \
                                                                              \
  /* Define iterator of such a list.                                          \
   * previous/next are cached so the current node can be unlinked             \
   * while iterating (see _next/_previous). */                                \
  typedef struct M_F(name, _it_s) {                                           \
    struct m_il1st_head_s *head;                                              \
    struct m_il1st_head_s *previous;                                          \
    struct m_il1st_head_s *current;                                           \
    struct m_il1st_head_s *next;                                              \
  } it_t[1];                                                                  \
                                                                              \
  /* Define types used by oplist */                                           \
  typedef type M_F(name, _subtype_ct);                                        \
  typedef list_t M_F(name, _ct);                                              \
  typedef it_t M_F(name, _it_ct);                                             \
/* Define core functions for intrusive lists */
#define M_IL1ST_DEF_CORE(name, type, oplist, list_t, it_t) \
\
M_INLINE void \
M_F(name, _init)(list_t list) \
{ \
M_ASSERT (list != NULL); \
list->name.next = &list->name; \
list->name.prev = &list->name; \
M_IL1ST_CONTRACT(name, list); \
} \
\
M_INLINE void \
M_F(name, _reset)(list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
for(struct m_il1st_head_s *it = list->name.next, *next ; \
it != &list->name; it = next) { \
/* Cannot check node contract as previous node may be deleted */ \
type *obj = M_TYPE_FROM_FIELD(type, it, \
struct m_il1st_head_s, name); \
/* Read next now before the object is destroyed */ \
next = it->next; \
M_ASSERT (next != NULL); \
M_CALL_CLEAR(oplist, *obj); \
/* Delete also the object if a DELETE operand is registered */ \
M_IF_METHOD(DEL, oplist)(M_CALL_DEL(oplist, obj), (void) 0); \
} \
/* Nothing remains in the list anymore */ \
list->name.next = &list->name; \
list->name.prev = &list->name; \
M_IL1ST_CONTRACT(name, list); \
} \
\
M_INLINE void \
M_F(name, _clear)(list_t list) \
{ \
/* Nothing to do more than clean the list itself */ \
M_F(name, _reset)(list); \
/* For safety purpose (create invalid represenation of object) */ \
list->name.next = NULL; \
list->name.prev = NULL; \
} \
\
M_INLINE bool \
M_F(name, _empty_p)(const list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
return list->name.next == &list->name; \
} \
\
\
M_INLINE void \
M_F(name, _init_move)(list_t list, list_t ref) \
{ \
M_IL1ST_CONTRACT(name, ref); \
M_ASSERT (list != ref); \
M_F(name,_init)(list); \
if (!M_F(name,_empty_p)(ref)) { \
list->name.next = ref->name.next; \
list->name.prev = ref->name.prev; \
list->name.next->prev = &list->name; \
list->name.prev->next = &list->name; \
} \
ref->name.next = NULL; \
ref->name.prev = NULL; \
M_IL1ST_CONTRACT(name, list); \
} \
\
M_INLINE void \
M_F(name, _move)(list_t list, list_t ref) \
{ \
M_F(name, _clear)(list); \
M_F(name, _init_move)(list, ref); \
} \
\
M_INLINE size_t \
M_F(name, _size)(const list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
size_t s = 0; \
/* Scan the full list to count the number of elements */ \
for(const struct m_il1st_head_s *it = list->name.next ; \
it != &list->name; it = it->next) { \
M_IL1ST_NODE_CONTRACT(it); \
s++; \
} \
return s; \
} \
\
M_INLINE void \
M_F(name, _push_back)(list_t list, type *obj) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (obj != NULL); \
struct m_il1st_head_s *prev = list->name.prev; \
list->name.prev = &obj->name; \
obj->name.prev = prev; \
obj->name.next = &list->name; \
prev->next = &obj->name; \
M_IL1ST_CONTRACT(name, list); \
} \
\
M_INLINE void \
M_F(name, _push_front)(list_t list, type *obj) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (obj != NULL); \
struct m_il1st_head_s *next = list->name.next; \
list->name.next = &obj->name; \
obj->name.next = next; \
obj->name.prev = &list->name; \
next->prev = &obj->name; \
M_IL1ST_CONTRACT(name, list); \
} \
\
M_INLINE void \
M_F(name, _push_after)(type *obj_pos, type *obj) \
{ \
M_ASSERT (obj_pos != NULL && obj != NULL); \
/* We don't have the list, so we have no contract at list level */ \
M_IL1ST_NODE_CONTRACT(&obj_pos->name); \
struct m_il1st_head_s *next = obj_pos->name.next; \
obj_pos->name.next = &obj->name; \
obj->name.next = next; \
obj->name.prev = &obj_pos->name; \
next->prev = &obj->name; \
} \
\
M_INLINE void \
M_F(name, _init_field)(type *obj) \
{ \
M_ASSERT (obj != NULL); \
/* Init the fields of the node. To be used in object constructor */ \
obj->name.next = NULL; \
obj->name.prev = NULL; \
} \
\
M_INLINE void \
M_F(name, _unlink)(type *obj) \
{ \
M_ASSERT (obj != NULL); \
/* We don't have the list, so we have no contract at list level */ \
M_IL1ST_NODE_CONTRACT(&obj->name); \
struct m_il1st_head_s *next = obj->name.next; \
struct m_il1st_head_s *prev = obj->name.prev; \
next->prev = prev; \
prev->next = next; \
/* Note: not really needed, but safer */ \
obj->name.next = NULL; \
obj->name.prev = NULL; \
} \
\
M_INLINE type * \
M_F(name, _back)(const list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT(!M_F(name, _empty_p)(list)); \
return M_TYPE_FROM_FIELD(type, list->name.prev, \
struct m_il1st_head_s, name); \
} \
\
M_INLINE type * \
M_F(name, _front)(const list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT(!M_F(name, _empty_p)(list)); \
return M_TYPE_FROM_FIELD(type, list->name.next, \
struct m_il1st_head_s, name); \
} \
\
M_INLINE type * \
M_F(name, _next_obj)(const list_t list, type const *obj) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (obj != NULL); \
M_IL1ST_NODE_CONTRACT(&obj->name); \
return obj->name.next == &list->name ? NULL : \
M_TYPE_FROM_FIELD(type, obj->name.next, \
struct m_il1st_head_s, name); \
} \
\
M_INLINE type * \
M_F(name, _previous_obj)(const list_t list, type const *obj) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (obj != NULL); \
M_IL1ST_NODE_CONTRACT(&obj->name); \
return obj->name.prev == &list->name ? NULL : \
M_TYPE_FROM_FIELD(type, obj->name.prev, \
struct m_il1st_head_s, name); \
} \
\
M_INLINE void \
M_F(name, _it)(it_t it, const list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (it != NULL); \
it->head = list->name.next->prev; \
it->current = list->name.next; \
it->next = list->name.next->next; \
it->previous = it->head; \
M_IL1ST_NODE_CONTRACT(it->current); \
} \
\
M_INLINE void \
M_F(name, _it_set)(it_t it, const it_t cit) \
{ \
M_ASSERT (it != NULL && cit != NULL); \
it->head = cit->head; \
it->current = cit->current; \
it->next = cit->next; \
it->previous = cit->previous; \
M_IL1ST_NODE_CONTRACT(it->current); \
} \
\
M_INLINE void \
M_F(name, _it_last)(it_t it, list_t const list) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (it != NULL); \
it->head = list->name.next->prev; \
it->current = list->name.prev; \
it->next = it->head; \
it->previous = list->name.prev->prev; \
M_IL1ST_NODE_CONTRACT(it->current); \
} \
\
M_INLINE void \
M_F(name, _it_end)(it_t it, list_t const list) \
{ \
M_ASSERT (it != NULL && list != NULL); \
it->head = list->name.next->prev; \
it->current = it->head; \
it->next = list->name.next; \
it->previous = list->name.prev; \
M_IL1ST_NODE_CONTRACT(it->current); \
} \
\
M_INLINE bool \
M_F(name, _end_p)(const it_t it) \
{ \
M_ASSERT (it != NULL); \
M_IL1ST_NODE_CONTRACT(it->current); \
return it->current == it->head; \
} \
\
M_INLINE bool \
M_F(name, _last_p)(const it_t it) \
{ \
M_ASSERT (it != NULL); \
M_IL1ST_NODE_CONTRACT(it->current); \
return it->next == it->head || it->current == it->head; \
} \
\
M_INLINE void \
M_F(name, _next)(it_t it) \
{ \
M_ASSERT (it != NULL); \
/* Cannot check node for it->current: it may have been deleted! */ \
/* Note: Can't set it->previous to it->current. \
it->current may have been unlinked from the list */ \
it->current = it->next; \
M_ASSERT (it->current != NULL); \
it->next = it->current->next; \
it->previous = it->current->prev; \
M_ASSERT (it->next != NULL && it->previous != NULL); \
M_IL1ST_NODE_CONTRACT(it->current); \
} \
\
M_INLINE void \
M_F(name, _previous)(it_t it) \
{ \
M_ASSERT (it != NULL); \
/* Cannot check node for it->current: it may have been deleted! */ \
/* Note: Can't set it->next to it->current. \
it->current may have been unlinked from the list */ \
it->current = it->previous; \
M_ASSERT (it->current != NULL); \
it->next = it->current->next; \
it->previous = it->current->prev; \
M_ASSERT (it->next != NULL && it->previous != NULL); \
M_IL1ST_NODE_CONTRACT(it->current); \
} \
\
M_INLINE bool \
M_F(name, _it_equal_p)(const it_t it1, const it_t it2 ) \
{ \
M_ASSERT (it1 != NULL && it2 != NULL); \
/* No need to check for next & previous */ \
return it1->head == it2->head && it1->current == it2->current; \
} \
\
M_INLINE type * \
M_F(name, _ref)(const it_t it) \
{ \
M_ASSERT (it != NULL && it->current != NULL); \
M_IL1ST_NODE_CONTRACT(it->current); \
/* check if 'it' was not deleted */ \
M_ASSERT (it->current->next == it->next); \
M_ASSERT (it->current->prev == it->previous); \
M_ASSERT (!M_F(name, _end_p)(it)); \
return M_TYPE_FROM_FIELD(type, it->current, \
struct m_il1st_head_s, name); \
} \
\
M_INLINE type const * \
M_F(name, _cref)(const it_t it) \
{ \
type *ptr = M_F(name, _ref)(it); \
return M_CONST_CAST(type, ptr); \
} \
\
M_INLINE void \
M_F(name, _remove)(list_t list, it_t it) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_IL1ST_NODE_CONTRACT(it->current); \
(void)list; /* list param is not used */ \
type *obj = M_TYPE_FROM_FIELD(type, it->current, \
struct m_il1st_head_s, name); \
M_F(name, _unlink)(obj); \
M_CALL_CLEAR(oplist, obj); \
M_IF_METHOD(DEL, oplist)(M_CALL_DEL(oplist, obj), (void) 0); \
M_F(name, _next)(it); \
} \
\
M_IF_METHOD2(NEW, INIT_SET, oplist)( \
M_INLINE void \
M_F(name, _insert)(list_t list, it_t it, type x) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_IL1ST_NODE_CONTRACT(it->current); \
type *p = M_CALL_NEW(oplist, type); \
if (M_UNLIKELY_NOMEM (p == NULL)) { \
M_MEMORY_FULL (sizeof (type)); \
return ; \
} \
M_CALL_INIT_SET(oplist, *p, x); \
type *obj = M_F(name, _ref)(it); \
M_F(name, _push_after)(obj, p); \
it->current = p; \
(void) list; \
M_IL1ST_CONTRACT(name, list); \
} \
, /* NEW & INIT_SET not defined */) \
\
M_INLINE type * \
M_F(name, _pop_back)(list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (!M_F(name, _empty_p)(list)); \
type *obj = M_F(name, _back)(list); \
list->name.prev = list->name.prev->prev; \
list->name.prev->next = &list->name; \
return obj; \
} \
\
M_INLINE type * \
M_F(name, _pop_front)(list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
M_ASSERT (!M_F(name, _empty_p)(list)); \
type *obj = M_F(name, _front)(list); \
list->name.next = list->name.next->next; \
list->name.next->prev = &list->name; \
return obj; \
} \
\
M_INLINE void \
M_F(name, _splice)(list_t list1, list_t list2) \
{ \
M_IL1ST_CONTRACT(name, list1); \
M_IL1ST_CONTRACT(name, list2); \
struct m_il1st_head_s *midle1 = list1->name.prev; \
struct m_il1st_head_s *midle2 = list2->name.next; \
midle1->next = midle2; \
midle2->prev = midle1; \
list1->name.prev = list2->name.prev; \
list2->name.prev->next = &list1->name; \
list2->name.next = &list2->name; \
list2->name.prev = &list2->name; \
M_IL1ST_CONTRACT(name, list1); \
M_IL1ST_CONTRACT(name, list2); \
} \
\
M_INLINE void \
M_F(name, _splice_back)(list_t nv, list_t ov, it_t it) \
{ \
M_IL1ST_CONTRACT(name, nv); \
M_IL1ST_CONTRACT(name, ov); \
M_IL1ST_NODE_CONTRACT(it->current); \
M_ASSERT (it != NULL); \
(void) ov; \
type *obj = M_F(name, _ref)(it); \
M_F(name, _unlink)(obj); \
M_F(name, _push_back)(nv, obj); \
M_F(name, _next)(it); \
M_IL1ST_CONTRACT(name, nv); \
M_IL1ST_CONTRACT(name, ov); \
} \
\
M_INLINE void \
M_F(name, _splice_at)(list_t nlist, it_t npos, \
list_t olist, it_t opos) \
{ \
M_IL1ST_CONTRACT(name, nlist); \
M_IL1ST_CONTRACT(name, olist); \
M_ASSERT (npos != NULL && opos != NULL); \
M_ASSERT (!M_F(name, _end_p)(opos)); \
/* npos may be end */ \
(void) olist, (void) nlist; \
type *obj = M_F(name, _ref)(opos); \
struct m_il1st_head_s *ref = npos->current; \
/* Remove object */ \
M_F(name, _unlink)(obj); \
/* Push 'obj' after 'ref' */ \
struct m_il1st_head_s *next = ref->next; \
ref->next = &obj->name; \
obj->name.next = next; \
obj->name.prev = ref; \
next->prev = &obj->name; \
/* Move iterator in old list */ \
M_F(name, _next)(opos); \
/* Set npos iterator to new position of object */ \
npos->previous = ref; \
npos->current = &obj->name; \
npos->next = next; \
M_IL1ST_CONTRACT(name, nlist); \
M_IL1ST_CONTRACT(name, olist); \
} \
\
M_INLINE void \
M_F(name, _swap)(list_t d, list_t e) \
{ \
M_IL1ST_CONTRACT(name, d); \
M_IL1ST_CONTRACT(name, e); \
struct m_il1st_head_s *d_item = d->name.next; \
struct m_il1st_head_s *e_item = e->name.next; \
/* it is more complicated than other swap functions since \
we need to detect "cyclic" loop */ \
d->name.next = e_item == &e->name ? &d->name : e_item; \
e->name.next = d_item == &d->name ? &e->name : d_item; \
d_item = d->name.prev; \
e_item = e->name.prev; \
d->name.prev = e_item == &e->name ? &d->name : e_item; \
e->name.prev = d_item == &d->name ? &e->name : d_item; \
d->name.next->prev = &d->name; \
d->name.prev->next = &d->name; \
e->name.next->prev = &e->name; \
e->name.prev->next = &e->name; \
M_IL1ST_CONTRACT(name, d); \
M_IL1ST_CONTRACT(name, e); \
} \
\
M_INLINE void \
M_F(name, _reverse)(list_t list) \
{ \
M_IL1ST_CONTRACT(name, list); \
struct m_il1st_head_s *next, *it; \
for(it = list->name.next ; it != &list->name; it = next) { \
next = it->next; \
it->next = it->prev; \
it->prev = next; \
} \
next = it->next; \
it->next = it->prev; \
it->prev = next; \
M_IL1ST_CONTRACT(name, list); \
} \
/******************************** INTERNAL ***********************************/
#if M_USE_SMALL_NAME
#define ILIST_INTERFACE M_ILIST_INTERFACE
#define ILIST_DEF M_ILIST_DEF
#define ILIST_DEF_AS M_ILIST_DEF_AS
#define ILIST_OPLIST M_ILIST_OPLIST
#endif
#endif

View File

@ -0,0 +1,255 @@
/*
* M*LIB - INTRUSIVE SHARED PTR Module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_I_SHARED_PTR_H
#define MSTARLIB_I_SHARED_PTR_H
#include "m-core.h"
#include "m-atomic.h"
M_BEGIN_PROTECTED_CODE
/* Define the oplist of an intrusive shared pointer.
   USAGE: ISHARED_OPLIST(name [, oplist_of_the_type]) */
#define M_ISHARED_PTR_OPLIST(...)                                             \
  M_ISHAR3D_PTR_OPLIST_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                         \
                          ((__VA_ARGS__, M_BASIC_OPLIST),                     \
                           (__VA_ARGS__ )))
/* Interface to add to a structure to allow intrusive support.
   name: name of the intrusive shared pointer.
   type: name of the type of the structure (aka. struct test_s) - not used currently.
   NOTE: There can be only one interface of this kind in a type! */
#define M_ISHARED_PTR_INTERFACE(name, type)                                   \
  atomic_int M_F(name, _cpt)
/* Value of the interface field for static initialization
   (uses a C99 designated initializer). */
#define M_ISHARED_PTR_STATIC_DESIGNATED_INIT(name, type)                      \
  .M_F(name, _cpt) = M_ATOMIC_VAR_INIT(0)
/* Value of the interface field for static initialization
   (C89 compatible: positional initializer only). */
#define M_ISHARED_PTR_STATIC_INIT(name, type)                                 \
  M_ATOMIC_VAR_INIT(0)
/* Define the intrusive shared pointer type and its M_INLINE functions.
   USAGE: ISHARED_PTR_DEF(name, type, [, oplist]) */
#define M_ISHARED_PTR_DEF(name, ...)                                          \
  M_ISHARED_PTR_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define the intrusive shared pointer type and its M_INLINE functions
   as the name name_t
   USAGE: ISHARED_PTR_DEF_AS(name, name_t, type, [, oplist]) */
#define M_ISHARED_PTR_DEF_AS(name, name_t, ...)                               \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_ISHAR3D_PTR_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                            \
                       ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t ), \
                        (name, __VA_ARGS__ , name_t )))                       \
  M_END_PROTECTED_CODE
/*****************************************************************************/
/******************************** INTERNAL ***********************************/
/*****************************************************************************/
// Deferred evaluation (allows expansion of all arguments)
#define M_ISHAR3D_PTR_OPLIST_P1(arg) M_ISHAR3D_PTR_OPLIST_P2 arg
/* Validation of the given oplist */
#define M_ISHAR3D_PTR_OPLIST_P2(name, oplist)                                 \
  M_IF_OPLIST(oplist)(M_ISHAR3D_PTR_OPLIST_P3, M_ISHAR3D_PTR_OPLIST_FAILURE)(name, oplist)
/* Prepare a clean compilation failure */
#define M_ISHAR3D_PTR_OPLIST_FAILURE(name, oplist)                            \
  ((M_LIB_ERROR(ARGUMENT_OF_ISHARED_PTR_OPLIST_IS_NOT_AN_OPLIST, name, oplist)))
// Define the oplist: map each operator to the generated function
#define M_ISHAR3D_PTR_OPLIST_P3(name, oplist) (                               \
  INIT(M_INIT_DEFAULT),                                                       \
  INIT_SET(API_4(M_F(name, _init_set))),                                      \
  SET(M_F(name, _set) M_IPTR),                                                \
  CLEAR(M_F(name, _clear)),                                                   \
  RESET(M_F(name, _reset) M_IPTR),                                            \
  NAME(name),                                                                 \
  TYPE(M_F(name, _ct)),                                                       \
  OPLIST(oplist),                                                             \
  SUBTYPE(M_F(name, _subtype_ct))                                             \
  )
/******************************** INTERNAL ***********************************/
// Deferred evaluation
#define M_ISHAR3D_PTR_DEF_P1(arg) M_ID( M_ISHAR3D_PTR_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_ISHAR3D_PTR_DEF_P2(name, type, oplist, shared_t)                    \
  M_IF_OPLIST(oplist)(M_ISHAR3D_PTR_DEF_P3, M_ISHAR3D_PTR_DEF_FAILURE)(name, type, oplist, shared_t)
/* Stop processing with a compilation failure */
#define M_ISHAR3D_PTR_DEF_FAILURE(name, type, oplist, shared_t)               \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(ISHARED_PTR_DEF): the given argument is not a valid oplist: " #oplist)
/* Expand the type definitions, the oplist compatibility check,
   then the core functions */
#define M_ISHAR3D_PTR_DEF_P3(name, type, oplist, shared_t)                    \
  M_ISHAR3D_PTR_DEF_TYPE(name, type, oplist, shared_t)                        \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_ISHAR3D_PTR_DEF_CORE(name, type, oplist, shared_t)                        \
/* Define the types */
#define M_ISHAR3D_PTR_DEF_TYPE(name, type, oplist, shared_t)                  \
                                                                              \
  /* The shared pointer is only a pointer to the type */                      \
  typedef type *shared_t;                                                     \
                                                                              \
  /* Define internal types for oplist */                                      \
  typedef shared_t M_F(name, _ct);                                            \
  typedef type M_F(name, _subtype_ct);                                        \
/* Define the core functions */
#define M_ISHAR3D_PTR_DEF_CORE(name, type, oplist, shared_t) \
\
M_INLINE shared_t \
M_F(name, _init)(type *ptr) \
{ \
/* Initialize the type referenced by the pointer */ \
if (M_LIKELY (ptr != NULL)) { \
atomic_init(&ptr->M_F(name, _cpt), 2); \
} \
return ptr; \
} \
\
M_INLINE shared_t \
M_F(name, _init_set)(shared_t shared) \
{ \
if (M_LIKELY (shared != NULL)) { \
int n = atomic_fetch_add(&(shared->M_F(name, _cpt)), 2); \
(void) n; \
} \
return shared; \
} \
\
M_IF_METHOD(INIT, oplist)( \
M_IF_DISABLED_METHOD(NEW, oplist) \
( \
/* This function is only for static object */ \
M_INLINE shared_t \
M_F(name, _init_once)(type *shared) \
{ \
if (M_LIKELY (shared != NULL)) { \
/* Pretty much like atomic_add, except the first one increment by 1, others by 2 */ \
int o = atomic_load(&(shared->M_F(name, _cpt))); \
int n; \
do { \
n = o + 1 + (o != 0); \
} while (!atomic_compare_exchange_strong(&(shared->M_F(name, _cpt)), &o, n)); \
if (o == 0) { \
/* Partial initialization: _cpt is odd */ \
/* Call the INIT function once */ \
M_CALL_INIT(oplist, *shared); \
/* Finish initialization: _cpt is even */ \
atomic_fetch_add(&(shared->M_F(name, _cpt)), 1); \
} else if ( (o&1) != 0) { \
/* Not fully initialized yet: wait for initialization */ \
m_core_backoff_ct bkoff; \
m_core_backoff_init(bkoff); \
/* Wait for _cpt to be _even */ \
while ((atomic_load(&(shared->M_F(name, _cpt)))&1) != 0 ) { \
m_core_backoff_wait(bkoff); \
} \
} \
M_ASSERT( (atomic_load(&(shared->M_F(name, _cpt)))&1) == 0); \
} \
return shared; \
} \
, \
/* This function is only for dynamic object */ \
M_INLINE shared_t \
M_F(name, _init_new)(void) \
{ \
type *ptr = M_CALL_NEW(oplist, type); \
if (M_UNLIKELY_NOMEM (ptr == NULL)) { \
M_MEMORY_FULL(sizeof(type)); \
return NULL; \
} \
M_CALL_INIT(oplist, *ptr); \
atomic_init (&ptr->M_F(name, _cpt), 2); \
return ptr; \
} \
/* End of NEW */) \
, /* End of INIT */) \
\
M_INLINE void \
M_F(name, _clear)(shared_t shared) \
{ \
if (shared != NULL) { \
if (atomic_fetch_sub(&(shared->M_F(name, _cpt)), 2) == 2) { \
M_CALL_CLEAR(oplist, *shared); \
M_IF_DISABLED_METHOD(DEL, oplist)(, M_CALL_DEL(oplist, shared);) \
} \
} \
} \
\
M_INLINE void \
M_F(name, _clear_ptr)(shared_t *shared) \
{ \
M_ASSERT(shared != NULL); \
M_F(name, _clear)(*shared); \
*shared = NULL; \
} \
\
M_INLINE void \
M_F(name, _reset)(shared_t *shared) \
{ \
M_F(name, _clear)(*shared); \
*shared = NULL; \
} \
\
M_INLINE void \
M_F(name, _set)(shared_t *ptr, shared_t shared) \
{ \
M_ASSERT (ptr != NULL); \
if (M_LIKELY (*ptr != shared)) { \
M_F(name, _clear)(*ptr); \
*ptr = M_F(name, _init_set)(shared); \
} \
} \
\
M_END_PROTECTED_CODE
/******************************** INTERNAL ***********************************/
#if M_USE_SMALL_NAME
#define ISHARED_PTR_OPLIST M_ISHARED_PTR_OPLIST
#define ISHARED_PTR_INTERFACE M_ISHARED_PTR_INTERFACE
#define ISHARED_PTR_STATIC_DESIGNATED_INIT M_ISHARED_PTR_STATIC_DESIGNATED_INIT
#define ISHARED_PTR_STATIC_INIT M_ISHARED_PTR_STATIC_INIT
#define ISHARED_PTR_DEF M_ISHARED_PTR_DEF
#define ISHARED_PTR_DEF_AS M_ISHARED_PTR_DEF_AS
#endif
#endif

1527
components/mlib/m-list.h Normal file

File diff suppressed because it is too large Load Diff

206
components/mlib/m-mempool.h Normal file
View File

@ -0,0 +1,206 @@
/*
* M*LIB - MEMPOOL module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_MEMPOOL_H
#define MSTARLIB_MEMPOOL_H
#include "m-core.h"
/* Fast, fixed size, thread unsafe allocator based on memory regions.
No oplist is needed.
USAGE:
MEMPOOL_DEF(name, type)
Example:
MEMPOOL_DEF(mempool_uint, unsigned int)
...
mempool_uint_t m;
mempool_uint_init(m);
unsigned int *ptr = mempool_uint_alloc(m);
*ptr = 17;
mempool_uint_free(m, ptr);
mempool_uint_clear(m); // Give back memory to system
*/
#define M_MEMPOOL_DEF(name, type) \
M_MEMPOOL_DEF_AS(name, M_F(name,_t), type)
/* Fast, fixed Size, thread unsafe allocator based on memory region.
USAGE:
MEMPOOL_DEF_AS(name, name_t, type)
*/
#define M_MEMPOOL_DEF_AS(name, name_t, type) \
M_BEGIN_PROTECTED_CODE \
M_M3MPOOL_DEF_P2(name, type, name_t ) \
M_END_PROTECTED_CODE
/* The user shall be able to customize the size of the region segment and/or
the minimum number of elements.
The default is the number of elements that fits in 16KB, or 256
if the size of the type is too big.
*/
#ifndef M_USE_MEMPOOL_MAX_PER_SEGMENT
#define M_USE_MEMPOOL_MAX_PER_SEGMENT(type) \
M_MAX((16*1024-sizeof(unsigned int) - 2*sizeof(void*)) / sizeof (type), 256U)
#endif
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/*
Technically, it uses a list of memory regions, where multiple
allocations are performed in each region. However, it
can not use m-list since it may be expanded from LIST_DEF
(recursive dependency problem). */
/* Expand to the type definitions, then the functions, of the mempool. */
#define M_M3MPOOL_DEF_P2(name, type, name_t)                                  \
  M_M3MPOOL_DEF_TYPE(name, type, name_t)                                      \
  M_M3MPOOL_DEF_CORE(name, type, name_t)
/* Define the types of the mempool */
#define M_M3MPOOL_DEF_TYPE(name, type, name_t)                                \
                                                                              \
  /* Define the type of element in a segment of the mempool.                  \
     Either it is the basic type or a pointer to another one.                 \
     (A free slot reuses its own storage to hold the link to the next        \
     free slot, so the intrusive free list costs no extra memory.) */         \
  typedef union M_F(name,_union_s) {                                          \
    type t;                                                                   \
    union M_F(name,_union_s) *next;                                           \
  } M_F(name,_union_ct);                                                      \
                                                                              \
  /* Define a segment of a mempool.                                           \
     It is an array of basic type, each segment is in a linked list.          \
     'count' is the number of slots already handed out from 'tab'. */         \
  typedef struct M_F(name,_segment_s) {                                       \
    unsigned int count;                                                       \
    struct M_F(name,_segment_s) *next;                                        \
    M_F(name,_union_ct) tab[M_USE_MEMPOOL_MAX_PER_SEGMENT(type)];             \
  } M_F(name,_segment_ct);                                                    \
                                                                              \
  /* Define a mempool.                                                        \
     It is a pointer to the first free object within the segments             \
     and the segments themselves */                                           \
  typedef struct M_F(name, _s) {                                              \
    M_F(name,_union_ct) *free_list;                                           \
    M_F(name,_segment_ct) *current_segment;                                   \
  } name_t[1];                                                                \
/* Define the core functions of the mempool */
#define M_M3MPOOL_DEF_CORE(name, type, name_t) \
\
M_INLINE void \
M_F(name,_init)(name_t mem) \
{ \
mem->free_list = NULL; \
mem->current_segment = M_MEMORY_ALLOC(M_F(name,_segment_ct)); \
if (M_UNLIKELY_NOMEM(mem->current_segment == NULL)) { \
M_MEMORY_FULL(sizeof (M_F(name,_segment_ct))); \
return; \
} \
mem->current_segment->next = NULL; \
mem->current_segment->count = 0; \
M_M3MPOOL_CONTRACT(mem, type); \
} \
\
M_INLINE void \
M_F(name,_clear)(name_t mem) \
{ \
M_M3MPOOL_CONTRACT(mem, type); \
M_F(name,_segment_ct) *segment = mem->current_segment; \
while (segment != NULL) { \
M_F(name,_segment_ct) *next = segment->next; \
M_MEMORY_DEL (segment); \
segment = next; \
} \
/* Clean pointers to be safer */ \
mem->free_list = NULL; \
mem->current_segment = NULL; \
} \
\
M_INLINE type * \
M_F(name,_alloc)(name_t mem) \
{ \
M_M3MPOOL_CONTRACT(mem, type); \
/* Test if one object is in the free list */ \
M_F(name,_union_ct) *ret = mem->free_list; \
if (ret != NULL) { \
/* Yes, so return it, and pop it from the free list */ \
mem->free_list = ret->next; \
return &ret->t; \
} \
/* No cheap free object exist. Test within a segment */ \
M_F(name,_segment_ct) *segment = mem->current_segment; \
M_ASSERT(segment != NULL); \
unsigned int count = segment->count; \
/* If segment is full, allocate a new one from the system */ \
if (M_UNLIKELY (count >= M_USE_MEMPOOL_MAX_PER_SEGMENT(type))) { \
M_F(name,_segment_ct) *new_segment = M_MEMORY_ALLOC (M_F(name,_segment_ct)); \
if (M_UNLIKELY_NOMEM (new_segment == NULL)) { \
M_MEMORY_FULL(sizeof (M_F(name,_segment_ct))); \
return NULL; \
} \
new_segment->next = segment; \
new_segment->count = 0; \
mem->current_segment = new_segment; \
segment = new_segment; \
count = 0; \
} \
/* Return the object as the last element of the current segment */ \
ret = &segment->tab[count]; \
segment->count = count + 1; \
M_M3MPOOL_CONTRACT(mem, type); \
return &ret->t; \
} \
\
M_INLINE void \
M_F(name,_free)(name_t mem, type *ptr) \
{ \
M_M3MPOOL_CONTRACT(mem, type); \
/* NOTE: Unsafe cast: suppose that the given pointer \
was allocated by the previous alloc function. */ \
M_F(name,_union_ct) *ret = (M_F(name,_union_ct) *)(uintptr_t)ptr; \
/* Add the object back in the free list */ \
ret->next = mem->free_list; \
mem->free_list = ret; \
/* NOTE: the objects are NOT given back to the system until the mempool \
is fully cleared */ \
M_M3MPOOL_CONTRACT(mem, type); \
} \
/* MEMPOOL contract. We only control the current segment. */
#define M_M3MPOOL_CONTRACT(mempool, type) do { \
M_ASSERT((mempool) != NULL); \
M_ASSERT((mempool)->current_segment != NULL); \
M_ASSERT((mempool)->current_segment->count <= M_USE_MEMPOOL_MAX_PER_SEGMENT(type)); \
} while (0)
/********************************** INTERNAL *********************************/
#if M_USE_SMALL_NAME
#define MEMPOOL_DEF M_MEMPOOL_DEF
#define MEMPOOL_DEF_AS M_MEMPOOL_DEF_AS
#endif
#endif

28
components/mlib/m-mutex.h Normal file
View File

@ -0,0 +1,28 @@
/*
* M*LIB - Thin Mutex & Thread wrapper (compatibility layer)
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(__GNUC__) && __GNUC__ >= 4
#warning "m-mutex.h is an obsolete header. Use m-thread.h instead."
#endif
#include "m-thread.h"

View File

@ -0,0 +1,520 @@
/*
* M*LIB - dynamic priority queue module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_PRIOQUEUE_H
#define MSTARLIB_PRIOQUEUE_H
#include "m-core.h"
#include "m-array.h" /* Priority queue are built upon array */
/* Priority queue based on binary heap implementation */
/* Define a prioqueue of a given type and its associated functions.
USAGE: PRIOQUEUE_DEF(name, type [, oplist_of_the_type]) */
#define M_PRIOQUEUE_DEF(name, ...) \
M_PRIOQUEUE_DEF_AS(name, M_F(name,_t), M_F(name,_it_t), __VA_ARGS__)
/* Define a prioqueue of a given type and its associated functions.
as the name name_t with an iterator named it_t
USAGE: PRIOQUEUE_DEF_AS(name, name_t, it_t, type [, oplist_of_the_type]) */
#define M_PRIOQUEUE_DEF_AS(name, name_t, it_t, ...) \
M_BEGIN_PROTECTED_CODE \
M_PR1OQUEUE_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__) \
((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t, it_t ), \
(name, __VA_ARGS__, name_t, it_t ))) \
M_END_PROTECTED_CODE
/* Define the oplist of a prioqueue of type.
USAGE: PRIOQUEUE_OPLIST(name[, oplist of the type]) */
#define M_PRIOQUEUE_OPLIST(...) \
M_PR1OQUEUE_OPLIST_P1(M_IF_NARGS_EQ1(__VA_ARGS__) \
((__VA_ARGS__, M_BASIC_OPLIST), \
(__VA_ARGS__ )))
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/* Deferred evaluation for the definition,
so that all arguments are evaluated before further expansion */
#define M_PR1OQUEUE_OPLIST_P1(arg) M_PR1OQUEUE_OPLIST_P2 arg
/* Validation of the given oplist */
#define M_PR1OQUEUE_OPLIST_P2(name, oplist) \
M_IF_OPLIST(oplist)(M_PR1OQUEUE_OPLIST_P3, M_PR1OQUEUE_OPLIST_FAILURE)(name, oplist)
/* Prepare a clean compilation failure */
#define M_PR1OQUEUE_OPLIST_FAILURE(name, oplist) \
((M_LIB_ERROR(ARGUMENT_OF_PRIOQUEUE_OPLIST_IS_NOT_AN_OPLIST, name, oplist)))
/* Define oplist of a priority queue */
#define M_PR1OQUEUE_OPLIST_P3(name, oplist) \
(INIT(M_F(name, _init)) \
,INIT_SET(M_F(name, _init_set)) \
,INIT_WITH(API_1(M_INIT_VAI)) \
,SET(M_F(name, _set)) \
,CLEAR(M_F(name, _clear)) \
,INIT_MOVE(M_F(name, _init_move)) \
,MOVE(M_F(name, _move)) \
,SWAP(M_F(name, _swap)) \
,NAME(name) \
,TYPE(M_F(name,_ct)) \
,SUBTYPE(M_F(name, _subtype_ct)) \
,RESET(M_F(name,_reset)) \
,PUSH(M_F(name,_push)) \
,POP(M_F(name,_pop)) \
,OPLIST(oplist) \
,EMPTY_P(M_F(name, _empty_p)) \
,GET_SIZE(M_F(name, _size)) \
,IT_TYPE(M_F(name, _it_ct)) \
,IT_FIRST(M_F(name,_it)) \
,IT_END(M_F(name,_it_end)) \
,IT_SET(M_F(name,_it_set)) \
,IT_END_P(M_F(name,_end_p)) \
,IT_EQUAL_P(M_F(name,_it_equal_p)) \
,IT_LAST_P(M_F(name,_last_p)) \
,IT_NEXT(M_F(name,_next)) \
,IT_CREF(M_F(name,_cref)) \
,M_IF_METHOD(GET_STR, oplist)(GET_STR(M_F(name, _get_str)),) \
,M_IF_METHOD(PARSE_STR, oplist)(PARSE_STR(M_F(name, _parse_str)),) \
,M_IF_METHOD(OUT_STR, oplist)(OUT_STR(M_F(name, _out_str)),) \
,M_IF_METHOD(IN_STR, oplist)(IN_STR(M_F(name, _in_str)),) \
,M_IF_METHOD(OUT_SERIAL, oplist)(OUT_SERIAL(M_F(name, _out_serial)),) \
,M_IF_METHOD(IN_SERIAL, oplist)(IN_SERIAL(M_F(name, _in_serial)),) \
)
/********************************** INTERNAL *********************************/
/* Deferred evaluation for the definition,
so that all arguments are evaluated before further expansion */
#define M_PR1OQUEUE_DEF_P1(arg) M_ID( M_PR1OQUEUE_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_PR1OQUEUE_DEF_P2(name, type, oplist, prioqueue_t, it_t) \
M_IF_OPLIST(oplist)(M_PR1OQUEUE_DEF_P3, M_PR1OQUEUE_DEF_FAILURE)(name, type, oplist, prioqueue_t, it_t)
/* Stop processing with a compilation failure */
#define M_PR1OQUEUE_DEF_FAILURE(name, type, oplist, prioqueue_t, it_t) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(PRIOQUEUE_DEF): the given argument is not a valid oplist: " #oplist)
/* Define the priority queue:
- name: prefix to use,
- type: type of the contained objects,
- oplist: oplist of the contained objects,
- prioqueue_t: type of the container,
- it_t: iterator of the container
*/
#define M_PR1OQUEUE_DEF_P3(name, type, oplist, prioqueue_t, it_t)             \
  /* Definition of the internal array used to construct the priority queue.  \
     Use the prefixed M_ARRAY_DEF so the expansion does not depend on        \
     M_USE_SMALL_NAME being enabled (ARRAY_DEF is only an optional alias     \
     defined when small names are requested). */                              \
  M_ARRAY_DEF(M_F(name, _array), type, oplist)                                \
  M_PR1OQUEUE_DEF_TYPE(name, type, oplist, prioqueue_t, it_t)                 \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_PR1OQUEUE_DEF_CORE(name, type, oplist, prioqueue_t, it_t)                 \
  M_PR1OQUEUE_DEF_IT(name, type, oplist, prioqueue_t, it_t)                   \
  M_PR1OQUEUE_DEF_IO(name, type, oplist, prioqueue_t, it_t)                   \
  M_EMPLACE_QUEUE_DEF(name, prioqueue_t, M_F(name, _emplace), oplist, M_EMPLACE_QUEUE_GENE)
/* Define the types */
#define M_PR1OQUEUE_DEF_TYPE(name, type, oplist, prioqueue_t, it_t)           \
                                                                              \
  /* Define the priority queue over the defined array:                        \
     the heap is stored implicitly in the array (binary-heap layout). */      \
  typedef struct M_F(name, _s) {                                              \
    M_F(name, _array_t) array;                                                \
  } prioqueue_t[1];                                                           \
  /* Define the pointer references to the priority queue */                   \
  typedef struct M_F(name, _s) *M_F(name, _ptr);                              \
  typedef const struct M_F(name, _s) *M_F(name, _srcptr);                     \
                                                                              \
  /* The iterator is the same one as the one of the internal array */         \
  typedef M_F(name, _array_it_t) it_t;                                        \
                                                                              \
  /* Definition of the internal types used by the oplist */                   \
  typedef prioqueue_t M_F(name, _ct);                                         \
  typedef type M_F(name, _subtype_ct);                                        \
  typedef it_t M_F(name, _it_ct);                                             \
/* Define the core functions */
#define M_PR1OQUEUE_DEF_CORE(name, type, oplist, prioqueue_t, it_t) \
\
M_INLINE void \
M_F(name, _init)(prioqueue_t p) \
{ \
M_F(name, _array_init)(p->array); \
} \
\
M_INLINE void \
M_F(name, _init_set)(prioqueue_t p, prioqueue_t const o) \
{ \
M_F(name, _array_init_set)(p->array, o->array); \
} \
\
M_INLINE void \
M_F(name, _set)(prioqueue_t p, prioqueue_t const o) \
{ \
M_F(name, _array_set)(p->array, o->array); \
} \
\
M_INLINE void \
M_F(name, _clear)(prioqueue_t p) \
{ \
M_F(name, _array_clear)(p->array); \
} \
\
M_INLINE void \
M_F(name, _init_move)(prioqueue_t p, prioqueue_t o) \
{ \
M_F(name, _array_init_move)(p->array, o->array); \
} \
\
M_INLINE void \
M_F(name, _move)(prioqueue_t p, prioqueue_t o) \
{ \
M_F(name, _array_move)(p->array, o->array); \
} \
\
M_INLINE void \
M_F(name, _swap)(prioqueue_t p, prioqueue_t o) \
{ \
M_F(name, _array_swap)(p->array, o->array); \
} \
\
M_INLINE void \
M_F(name, _reset)(prioqueue_t p) \
{ \
M_F(name, _array_reset)(p->array); \
} \
\
M_INLINE size_t \
M_F(name, _i_parent)(size_t i) \
{ \
M_ASSERT (i > 0); \
return (i - 1) / 2; \
} \
\
M_INLINE size_t \
M_F(name, _i_lchild)(size_t i) \
{ \
M_ASSERT(i <= ((SIZE_MAX)-2)/2); \
return 2*i + 1; \
} \
\
M_INLINE size_t \
M_F(name, _i_rchild)(size_t i) \
{ \
M_ASSERT(i <= ((SIZE_MAX)-2)/2); \
return 2*i + 2; \
} \
\
M_INLINE int \
M_F(name, _i_cmp)(const prioqueue_t p, size_t i, size_t j) \
{ \
return M_CALL_CMP(oplist, *M_F(name, _array_cget)(p->array, i), \
*M_F(name, _array_cget)(p->array, j)); \
} \
\
M_INLINE bool \
M_F(name, _empty_p)(prioqueue_t const p) \
{ \
return M_F(name, _array_empty_p)(p->array); \
} \
\
M_INLINE size_t \
M_F(name, _size)(prioqueue_t const p) \
{ \
return M_F(name, _array_size)(p->array); \
} \
\
M_INLINE void \
M_F(name, _push)(prioqueue_t p, type const x) \
{ \
/* Push back the new element at the end of the array */ \
M_F(name, _array_push_back)(p->array, x); \
\
/* Reorder the array by swapping with its parent \
* until it reaches the right position */ \
size_t i = M_F(name, _array_size)(p->array)-1; \
while (i > 0) { \
size_t j = M_F(name, _i_parent)(i); \
if (M_F(name, _i_cmp)(p, j, i) <= 0) \
break; \
M_F(name, _array_swap_at) (p->array, i, j); \
i = j; \
} \
} \
\
M_INLINE type const * \
M_F(name, _front)(prioqueue_t const p) \
{ \
return M_F(name, _array_cget)(p->array, 0); \
} \
\
M_INLINE void \
M_F(name, _pop)(type *x, prioqueue_t p) \
{ \
/* Swap the front element with the last element */ \
size_t size = M_F(name, _array_size)(p->array)-1; \
M_F(name, _array_swap_at) (p->array, 0, size); \
/* Swap the new last element */ \
M_F(name, _array_pop_back)(x, p->array); \
\
/* Reorder the heap */ \
size_t i = 0; \
while (true) { \
size_t child = M_F(name, _i_lchild)(i); \
if (child >= size) \
break; \
size_t otherChild = M_F(name, _i_rchild)(i); \
if (otherChild < size \
&& M_F(name, _i_cmp)(p, otherChild, child) < 0 ) { \
child = otherChild; \
} \
if (M_F(name, _i_cmp)(p, i, child) <= 0) \
break; \
M_F(name, _array_swap_at) (p->array, i, child); \
i = child; \
} \
} \
\
M_IF_METHOD(EQUAL, oplist) \
( \
/* EQUAL & CMP may be uncorrelated */ \
M_INLINE bool \
M_F(name, _equal_p)(prioqueue_t const p, prioqueue_t const q) \
{ \
return M_F(name, _array_equal_p)(p->array, q->array); \
} \
\
M_INLINE size_t \
M_F(name, _i_find)(prioqueue_t p, type const x) \
{ \
size_t size = M_F(name, _array_size)(p->array); \
size_t i = 0; \
for(i = 0; i < size; i++) { \
/* We cannot use CMP and the partial order to go faster \
EQUAL & CMP may be uncorrelated */ \
if (M_CALL_EQUAL(oplist, *M_F(name, _array_cget)(p->array, i), x)) \
break; \
} \
return i; \
} \
\
M_INLINE bool \
M_F(name, _erase)(prioqueue_t p, type const x) \
{ \
/* First pass: search for an item EQUAL to x */ \
size_t size = M_F(name, _array_size)(p->array); \
size_t i = M_F(name, _i_find)(p, x); \
/* If x is not found, then stop */ \
if (i >= size) \
return false; \
/* Swap the found item and the last element */ \
size--; \
M_F(name, _array_swap_at) (p->array, i, size); \
M_F(name, _array_pop_back)(NULL, p->array); \
/* Move back the last swapped element to its right position in the heap */ \
while (true) { \
size_t child = M_F(name, _i_lchild)(i); \
if (child >= size) break; \
size_t otherChild = M_F(name, _i_rchild)(i); \
if (otherChild < size \
&& M_F(name, _i_cmp)(p, otherChild, child) < 0 ) { \
child = otherChild; \
} \
if (M_F(name, _i_cmp)(p, i, child) <= 0) break; \
M_F(name, _array_swap_at) (p->array, i, child); \
i = child; \
} \
return true; \
} \
\
M_INLINE void \
M_F(name, _update)(prioqueue_t p, type const xold, type const xnew) \
{ \
/* NOTE: xold can be the same pointer than xnew */ \
/* First pass: search for an item EQUAL to x */ \
size_t size = M_F(name, _array_size)(p->array); \
size_t i = M_F(name, _i_find)(p, xold); \
/* We shall have found the item */ \
M_ASSERT (i < size); \
/* Test if the position of the old data is further or nearer than the new */ \
int cmp = M_CALL_CMP(oplist, *M_F(name, _array_cget)(p->array, i), xnew); \
/* Set the found item to the new element */ \
M_F(name, _array_set_at) (p->array, i, xnew); \
if (cmp < 0) { \
/* Move back the updated element to its new position, further in the heap */ \
while (true) { \
size_t child = M_F(name, _i_lchild)(i); \
if (child >= size) break; \
size_t otherChild = M_F(name, _i_rchild)(i); \
if (otherChild < size \
&& M_F(name, _i_cmp)(p, otherChild, child) < 0 ) { \
child = otherChild; \
} \
if (M_F(name, _i_cmp)(p, i, child) <= 0) break; \
M_F(name, _array_swap_at) (p->array, i, child); \
i = child; \
} \
} else { \
/* Move back the updated element to its new position, nearest in the heap */ \
while (i > 0) { \
size_t parent = M_F(name, _i_parent)(i); \
if (M_F(name, _i_cmp)(p, parent, i) <= 0) break; \
M_F(name, _array_swap_at) (p->array, i, parent); \
i = parent; \
} \
} \
} \
, /* No EQUAL */ ) \
/* Define the IT based functions */
#define M_PR1OQUEUE_DEF_IT(name, type, oplist, prioqueue_t, it_t) \
\
/* Define iterators over the array iterator */ \
M_INLINE void \
M_F(name, _it)(it_t it, prioqueue_t const v) \
{ \
M_F(name, _array_it)(it, v->array); \
} \
\
M_INLINE void \
M_F(name, _it_last)(it_t it, prioqueue_t const v) \
{ \
M_F(name, _array_it_last)(it, v->array); \
} \
\
M_INLINE void \
M_F(name, _it_end)(it_t it, prioqueue_t const v) \
{ \
M_F(name, _array_it_end)(it, v->array); \
} \
\
M_INLINE void \
M_F(name, _it_set)(it_t it, const it_t org) \
{ \
M_F(name, _array_it_set)(it, org); \
} \
\
M_INLINE bool \
M_F(name, _end_p)(const it_t it) \
{ \
return M_F(name, _array_end_p)(it); \
} \
\
M_INLINE bool \
M_F(name, _last_p)(const it_t it) \
{ \
return M_F(name, _array_last_p)(it); \
} \
\
M_INLINE bool \
M_F(name, _it_equal_p)(const it_t it1, \
const it_t it2) \
{ \
return M_F(name, _array_it_equal_p)(it1, it2); \
} \
\
M_INLINE void \
M_F(name, _next)(it_t it) \
{ \
M_F(name, _array_next)(it); \
} \
\
M_INLINE void \
M_F(name, _previous)(it_t it) \
{ \
M_F(name, _array_previous)(it); \
} \
\
M_INLINE type const * \
M_F(name, _cref)(const it_t it) \
{ \
return M_F(name, _array_cref)(it); \
} \
/* Define the IO functions */
#define M_PR1OQUEUE_DEF_IO(name, type, oplist, prioqueue_t, it_t) \
M_IF_METHOD(OUT_STR, oplist)( \
M_INLINE void \
M_F(name, _out_str)(FILE *file, const prioqueue_t p) \
{ \
M_F(name, _array_out_str)(file, p->array); \
} \
,/* No OUT_STR */) \
\
M_IF_METHOD(IN_STR, oplist)( \
M_INLINE bool \
M_F(name, _in_str)(prioqueue_t p, FILE *file) \
{ \
return M_F(name, _array_in_str)(p->array, file); \
} \
,/* No IN_STR */) \
\
M_IF_METHOD(GET_STR, oplist)( \
M_INLINE void \
M_F(name, _get_str)(string_t str, const prioqueue_t p, bool append) \
{ \
M_F(name, _array_get_str)(str, p->array, append); \
} \
,/* No GET_STR */) \
\
M_IF_METHOD(PARSE_STR, oplist)( \
M_INLINE bool \
M_F(name, _parse_str)(prioqueue_t p, const char str[], const char **endp) \
{ \
return M_F(name, _array_parse_str)(p->array, str, endp); \
} \
,/* No PARSE_STR */) \
\
M_IF_METHOD(OUT_SERIAL, oplist)( \
M_INLINE m_serial_return_code_t \
M_F(name, _out_serial)(m_serial_write_t f, const prioqueue_t p) \
{ \
return M_F(name, _array_out_serial)(f, p->array); \
} \
,/* No OUT_SERIAL */) \
\
M_IF_METHOD2(IN_SERIAL, INIT, oplist)( \
M_INLINE m_serial_return_code_t \
M_F(name, _in_serial)(prioqueue_t p, m_serial_read_t f) \
{ \
return M_F(name, _array_in_serial)(p->array, f); \
} \
,/* No in_SERIAL */) \
// TODO: set all & remove all function
/********************************** INTERNAL *********************************/
#if M_USE_SMALL_NAME
#define PRIOQUEUE_DEF M_PRIOQUEUE_DEF
#define PRIOQUEUE_DEF_AS M_PRIOQUEUE_DEF_AS
#define PRIOQUEUE_OPLIST M_PRIOQUEUE_OPLIST
#endif
#endif

1186
components/mlib/m-rbtree.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,552 @@
/*
* M*LIB - Serial BIN
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_SERIAL_BIN_H
#define MSTARLIB_SERIAL_BIN_H
#include <stdint.h>
#include "m-core.h"
#include "m-string.h"
M_BEGIN_PROTECTED_CODE
/********************************************************************************/
/************************** FILE / WRITE / BIN *******************************/
/********************************************************************************/
/* Internal service:
 * Write size_t in the stream in a compact form to reduce consumption
 * (and I/O bandwidth).
 *
 * Encoding (decoded by m_ser1al_bin_read_size):
 *   0..252        : one byte, the value itself
 *   prefix 253    : followed by the value as 2 big-endian bytes
 *   prefix 254    : followed by the value as 4 big-endian bytes
 *   prefix 255    : followed by the value as 8 big-endian bytes
 * Returns true on success, false on a write error.
 */
M_INLINE bool
m_ser1al_bin_write_size(FILE *f, const size_t size)
{
  bool b;
  if (M_LIKELY(size < 253))
    {
      b = EOF != fputc((unsigned char) size, f);
    } else if (size < 1ULL << 16) {
    b = EOF != fputc(253, f); // Save 16 bits encoding
    b &= EOF != fputc((unsigned char) (size >> 8), f);
    b &= EOF != fputc((unsigned char) size, f);
  }
  // For 32 bits systems, don't encode a 64 bits size_t
#if SIZE_MAX < 1ULL<< 32
  else {
    b = EOF != fputc(254, f); // Save 32 bits encoding
    b &= EOF != fputc((unsigned char) (size >> 24), f);
    b &= EOF != fputc((unsigned char) (size >> 16), f);
    b &= EOF != fputc((unsigned char) (size >> 8), f);
    b &= EOF != fputc((unsigned char) size, f);
  }
#else
  else if (size < 1ULL<< 32) {
    b = EOF != fputc(254, f); // Save 32 bits encoding
    b &= EOF != fputc((unsigned char) (size >> 24), f);
    b &= EOF != fputc((unsigned char) (size >> 16), f);
    b &= EOF != fputc((unsigned char) (size >> 8), f);
    b &= EOF != fputc((unsigned char) size, f);
  } else {
    b = EOF != fputc(255, f); // Save 64 bits encoding
    b &= EOF != fputc((unsigned char) (size >> 56), f);
    b &= EOF != fputc((unsigned char) (size >> 48), f);
    b &= EOF != fputc((unsigned char) (size >> 40), f);
    b &= EOF != fputc((unsigned char) (size >> 32), f);
    b &= EOF != fputc((unsigned char) (size >> 24), f);
    b &= EOF != fputc((unsigned char) (size >> 16), f);
    b &= EOF != fputc((unsigned char) (size >> 8), f);
    b &= EOF != fputc((unsigned char) size, f);
  }
#endif
  return b;
}
/* Internal service:
 * Read back a size_t written by m_ser1al_bin_write_size
 * (compact form: 1-byte values below 253, otherwise a prefix byte
 * announcing a 2/4/8-byte big-endian payload).
 * Returns false on premature end-of-stream.
 */
M_INLINE bool
m_ser1al_bin_read_size(FILE *f, size_t *size)
{
  int byte = fgetc(f);
  if (M_UNLIKELY(byte == EOF))
    return false;
  /* Values below 253 are stored directly in a single byte */
  if (M_LIKELY(byte < 253)) {
    *size = (size_t) byte;
    return true;
  }
  /* Prefixes 253 / 254 / 255 announce 2 / 4 / 8 big-endian bytes */
  int remaining = (byte == 253) ? 2 : (byte == 254) ? 4 : 8;
  size_t value = 0;
  while (remaining-- > 0) {
    byte = fgetc(f);
    if (M_UNLIKELY(byte == EOF))
      return false;
    value = (value << 8) | (size_t) byte;
  }
  *size = value;
  return true;
}
/* Write the boolean 'data' into the serial stream 'serial'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_boolean(m_serial_write_t serial, const bool data)
{
  FILE *stream = (FILE *)serial->data[0].p;
  /* Dump the raw bool representation in one item */
  size_t written = fwrite (M_ASSIGN_CAST(const void*, &data), sizeof (bool), 1, stream);
  if (written != 1)
    return m_core_serial_fail();
  return M_SERIAL_OK_DONE;
}
/* Write the integer 'data' of 'size_of_type' bytes into the serial stream 'serial'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_integer(m_serial_write_t serial,const long long data, const size_t size_of_type)
{
  FILE *stream = (FILE *)serial->data[0].p;
  size_t written;
  /* Truncate the value to the caller-declared width and dump it raw */
  switch (size_of_type) {
    case 1: {
      int8_t v8 = (int8_t) data;
      written = fwrite (M_ASSIGN_CAST(const void*, &v8), sizeof v8, 1, stream);
      break;
    }
    case 2: {
      int16_t v16 = (int16_t) data;
      written = fwrite (M_ASSIGN_CAST(const void*, &v16), sizeof v16, 1, stream);
      break;
    }
    case 4: {
      int32_t v32 = (int32_t) data;
      written = fwrite (M_ASSIGN_CAST(const void*, &v32), sizeof v32, 1, stream);
      break;
    }
    default: {
      M_ASSERT(size_of_type == 8);
      int64_t v64 = (int64_t) data;
      written = fwrite (M_ASSIGN_CAST(const void*, &v64), sizeof v64, 1, stream);
      break;
    }
  }
  return written == 1 ? M_SERIAL_OK_DONE : m_core_serial_fail();
}
/* Write the float 'data' of 'size_of_type' bytes into the serial stream 'serial'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_float(m_serial_write_t serial, const long double data, const size_t size_of_type)
{
  FILE *stream = (FILE *)serial->data[0].p;
  size_t written;
  /* Narrow the value to the caller-declared floating type and dump it raw */
  if (size_of_type == sizeof (float)) {
    float narrow = (float) data;
    written = fwrite (M_ASSIGN_CAST(const void*, &narrow), sizeof narrow, 1, stream);
  } else if (size_of_type == sizeof (double)) {
    double mid = (double) data;
    written = fwrite (M_ASSIGN_CAST(const void*, &mid), sizeof mid, 1, stream);
  } else {
    M_ASSERT(size_of_type == sizeof (long double));
    long double wide = data;
    written = fwrite (M_ASSIGN_CAST(const void*, &wide), sizeof wide, 1, stream);
  }
  if (written != 1)
    return m_core_serial_fail();
  return M_SERIAL_OK_DONE;
}
/* Write the null-terminated string 'data' into the serial stream 'serial'.
   'length' is the number of characters, excluding the final null char
   (it shall equal strlen(data)).
   Format: compact size header (see m_ser1al_bin_write_size) followed by
   the raw characters, without the terminating null char.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_string(m_serial_write_t serial, const char data[], size_t length)
{
  M_ASSERT_SLOW(length == strlen(data) );
  FILE *f = (FILE *)serial->data[0].p;
  M_ASSERT(f != NULL && data != NULL);
  // Write first the number of (non null) characters
  if (m_ser1al_bin_write_size(f, length) != true) return m_core_serial_fail();
  // Write the characters (excluding the final null char)
  // NOTE: fwrite supports length == 0.
  size_t n = fwrite (M_ASSIGN_CAST(const void*, data), 1, length, f);
  return (n == length) ? M_SERIAL_OK_DONE : m_core_serial_fail();
}
/* Begin the serialization of an array of 'number_of_elements' objects into
   the stream 'serial' by emitting the element count.
   'local' is the per-array serialization context (unused by this format).
   The binary format cannot encode an unknown count: (size_t)-1 is rejected
   with M_SERIAL_FAIL_RETRY so the caller can retry with a known count.
   Return M_SERIAL_OK_CONTINUE if it succeeds, M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_array_start(m_serial_local_t local, m_serial_write_t serial, const size_t number_of_elements)
{
  (void) local; /* not needed by the binary format */
  if (number_of_elements == (size_t)-1) {
    return M_SERIAL_FAIL_RETRY;
  }
  FILE *out = (FILE *)serial->data[0].p;
  size_t written = fwrite (M_ASSIGN_CAST(const void*, &number_of_elements), sizeof number_of_elements, 1, out);
  return written == 1 ? M_SERIAL_OK_CONTINUE : m_core_serial_fail();
}
/* The binary format needs no separator between array elements:
   accept and move on.
   Return M_SERIAL_OK_CONTINUE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_array_next(m_serial_local_t local, m_serial_write_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_CONTINUE;
}
/* End the writing of an array into the serial stream 'serial'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_array_end(m_serial_local_t local, m_serial_write_t serial)
{
(void) local; // Unused
(void) serial; // Unused
// NOTE(review): this returns M_SERIAL_OK_CONTINUE although the contract
// above (and the sibling _tuple_end / _variant_end functions) documents
// M_SERIAL_OK_DONE — verify against upstream mlib before changing.
return M_SERIAL_OK_CONTINUE;
}
/* The binary format needs no separator between the key and the value of a
   map entry: accept and move on.
   Return M_SERIAL_OK_CONTINUE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_map_value(m_serial_local_t local, m_serial_write_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_CONTINUE;
}
/* Begin the serialization of a tuple into the stream 'serial'.
   Nothing is emitted: the binary format relies on the tuple fields being
   written in their declaration order.
   Return M_SERIAL_OK_CONTINUE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_tuple_start(m_serial_local_t local, m_serial_write_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_CONTINUE;
}
/* Begin the serialization of the field field_name[index] of a tuple.
   Nothing is emitted: field names are never stored in binary form, the
   format assumes fields are written in order from 0 to max-1.
   Return M_SERIAL_OK_CONTINUE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_tuple_id(m_serial_local_t local, m_serial_write_t serial, const char *const field_name[], const int max, const int index)
{
  (void) index;
  (void) max;
  (void) field_name;
  (void) serial;
  (void) local;
  return M_SERIAL_OK_CONTINUE;
}
/* End the serialization of a tuple: nothing to emit.
   Return M_SERIAL_OK_DONE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_tuple_end(m_serial_local_t local, m_serial_write_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_DONE;
}
/* Begin the serialization of a variant by emitting the index of the
   selected field as a raw int.
   If index < 0 the variant is empty: return M_SERIAL_OK_DONE on success.
   Otherwise the field field_name[index] follows: return
   M_SERIAL_OK_CONTINUE on success. Return M_SERIAL_FAIL on write error. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_variant_start(m_serial_local_t local, m_serial_write_t serial, const char *const field_name[], const int max, const int index)
{
  (void) local;
  (void) max;
  (void) field_name;
  FILE *out = (FILE *)serial->data[0].p;
  size_t written = fwrite (M_ASSIGN_CAST(const void*, &index), sizeof index, 1, out);
  if (written != 1) {
    return m_core_serial_fail();
  }
  return (index < 0) ? M_SERIAL_OK_DONE : M_SERIAL_OK_CONTINUE;
}
/* End the serialization of a variant: nothing to emit.
   Return M_SERIAL_OK_DONE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_write_variant_end(m_serial_local_t local, m_serial_write_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_DONE;
}
/* The exported write interface of the binary serializer.
   The binary format encodes a map exactly like an array of key/value pairs,
   so the array callbacks are deliberately reused for the map slots
   (slot layout presumed from m_serial_write_interface_t in m-core.h). */
static const m_serial_write_interface_t m_ser1al_bin_write_interface = {
m_ser1al_bin_write_boolean,
m_ser1al_bin_write_integer,
m_ser1al_bin_write_float,
m_ser1al_bin_write_string,
m_ser1al_bin_write_array_start,
m_ser1al_bin_write_array_next,
m_ser1al_bin_write_array_end,
m_ser1al_bin_write_array_start, /* map_start: same encoding as an array */
m_ser1al_bin_write_map_value,
m_ser1al_bin_write_array_next, /* map_next: no separator needed */
m_ser1al_bin_write_array_end, /* map_end */
m_ser1al_bin_write_tuple_start,
m_ser1al_bin_write_tuple_id,
m_ser1al_bin_write_tuple_end,
m_ser1al_bin_write_variant_start,
m_ser1al_bin_write_variant_end
};
/* Initialize 'serial' as a binary serializer writing into the FILE 'f'. */
M_INLINE void m_serial_bin_write_init(m_serial_write_t serial, FILE *f)
{
  serial->data[0].p = M_ASSIGN_CAST(void*, f);
  serial->m_interface = &m_ser1al_bin_write_interface;
}
/* Clear the binary write serializer. The FILE* is owned by the caller,
   so there is nothing to release here. */
M_INLINE void m_serial_bin_write_clear(m_serial_write_t serial)
{
  (void) serial;
}
/* Define a synonym of m_serial_write_t dedicated to the BIN serializer,
   with its proper OPLIST (INIT_WITH takes the destination FILE*). */
typedef m_serial_write_t m_serial_bin_write_t;
#define M_OPL_m_serial_bin_write_t() \
(INIT_WITH(m_serial_bin_write_init), CLEAR(m_serial_bin_write_clear), \
TYPE(m_serial_bin_write_t), PROPERTIES(( LET_AS_INIT_WITH(1) )) )
/********************************************************************************/
/************************** FILE / READ / BIN *******************************/
/********************************************************************************/
/* Read a boolean from the stream 'serial' and store it in '*b'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_boolean(m_serial_read_t serial, bool *b){
  FILE *in = (FILE*) serial->data[0].p;
  size_t cnt = fread (M_ASSIGN_CAST(void*, b), sizeof (bool), 1, in);
  return cnt == 1 ? M_SERIAL_OK_DONE : m_core_serial_fail();
}
/* Read an integer of 'size_of_type' bytes (1, 2, 4 or 8) from the stream
   'serial' and store its value, sign-extended, in '*i'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_integer(m_serial_read_t serial, long long *i, const size_t size_of_type){
  FILE *in = (FILE *)serial->data[0].p;
  size_t cnt;
  switch (size_of_type) {
  case 1: {
    int8_t v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *i = v;
    break;
  }
  case 2: {
    int16_t v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *i = v;
    break;
  }
  case 4: {
    int32_t v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *i = v;
    break;
  }
  default: {
    M_ASSERT(size_of_type == 8);
    int64_t v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *i = v;
    break;
  }
  }
  return cnt == 1 ? M_SERIAL_OK_DONE : m_core_serial_fail();
}
/* Read a floating point value of 'size_of_type' bytes from the stream
   'serial' and store it in '*r'.
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_float(m_serial_read_t serial, long double *r, const size_t size_of_type){
  FILE *in = (FILE *)serial->data[0].p;
  size_t cnt;
  if (size_of_type == sizeof (float)) {
    float v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *r = v;
  } else if (size_of_type == sizeof (double)) {
    double v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *r = v;
  } else {
    M_ASSERT(size_of_type == sizeof (long double));
    long double v;
    cnt = fread (M_ASSIGN_CAST(void*, &v), sizeof v, 1, in);
    *r = v;
  }
  return cnt == 1 ? M_SERIAL_OK_DONE : m_core_serial_fail();
}
/* Read a string from the stream 'serial' into 's'.
   The encoding is the character count followed by the raw characters
   (no terminating null byte in the stream).
   Return M_SERIAL_OK_DONE if it succeeds, M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_string(m_serial_read_t serial, struct string_s *s){
  FILE *in = (FILE*) serial->data[0].p;
  M_ASSERT(in != NULL && s != NULL);
  /* The stream starts with the number of non null characters */
  size_t len;
  if (m_ser1al_bin_read_size(in, &len) != true) {
    return m_core_serial_fail();
  }
  /* Dimension the string through the internal string interface */
  char *buf = m_str1ng_fit2size(s, len + 1);
  m_str1ng_set_size(s, len);
  /* Read the payload (fread accepts len == 0) and force termination */
  size_t cnt = fread(M_ASSIGN_CAST(void*, buf), 1, len, in);
  buf[len] = 0;
  return (cnt == len) ? M_SERIAL_OK_DONE : m_core_serial_fail();
}
/* Begin reading an array from the stream 'serial'.
   Set '*num' to the element count stored in the stream and keep a
   countdown of remaining elements in 'local'.
   Return M_SERIAL_OK_DONE if the array is empty,
   M_SERIAL_OK_CONTINUE if elements follow,
   M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_array_start(m_serial_local_t local, m_serial_read_t serial, size_t *num)
{
  FILE *in = (FILE*) serial->data[0].p;
  size_t cnt = fread (M_ASSIGN_CAST(void*, num), sizeof *num, 1, in);
  /* Remember how many elements remain to be consumed */
  local->data[1].s = *num;
  if (cnt != 1) {
    return m_core_serial_fail();
  }
  return (local->data[1].s == 0) ? M_SERIAL_OK_DONE : M_SERIAL_OK_CONTINUE;
}
/* Continue reading an array from the stream 'serial': one element has
   been consumed, decrement the countdown kept in 'local'.
   Return M_SERIAL_OK_DONE when the countdown reaches zero (array ends),
   M_SERIAL_OK_CONTINUE otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_array_next(m_serial_local_t local, m_serial_read_t serial)
{
  (void) serial;
  M_ASSERT(local->data[1].s > 0);
  local->data[1].s -= 1;
  if (local->data[1].s == 0) {
    return M_SERIAL_OK_DONE;
  }
  return M_SERIAL_OK_CONTINUE;
}
/* The binary format stores no separator between the key and the value of
   a map entry: accept and move on.
   Return M_SERIAL_OK_CONTINUE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_map_value(m_serial_local_t local, m_serial_read_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_CONTINUE;
}
/* Begin reading a tuple from the stream 'serial'.
   Fields are stored in order, so start the field counter at 0 in 'local'.
   Return M_SERIAL_OK_CONTINUE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_tuple_start(m_serial_local_t local, m_serial_read_t serial)
{
  (void) serial;
  local->data[1].i = 0;
  return M_SERIAL_OK_CONTINUE;
}
/* Continue reading a tuple from the stream 'serial'.
   Field names are not stored in binary form: fields come back in order,
   so '*id' is simply the next field counter kept in 'local'.
   Return M_SERIAL_OK_DONE once all 'max' fields have been produced,
   M_SERIAL_OK_CONTINUE otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_tuple_id(m_serial_local_t local, m_serial_read_t serial, const char *const field_name [], const int max, int *id)
{
  (void) field_name;
  (void) max;
  (void) serial;
  int current = local->data[1].i;
  local->data[1].i = current + 1;
  *id = current;
  return (current == max) ? M_SERIAL_OK_DONE : M_SERIAL_OK_CONTINUE;
}
/* Begin reading a variant from the stream 'serial' by reading the raw int
   index of the selected field into '*id'.
   Return M_SERIAL_OK_DONE if '*id' is negative (empty variant),
   M_SERIAL_OK_CONTINUE if a field follows, M_SERIAL_FAIL otherwise. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_variant_start(m_serial_local_t local, m_serial_read_t serial, const char *const field_name[], const int max, int*id)
{
  (void) local;
  (void) max;
  (void) field_name;
  FILE *in = (FILE*) serial->data[0].p;
  size_t cnt = fread (M_ASSIGN_CAST(void*, id), sizeof *id, 1, in);
  if (cnt != 1) {
    return m_core_serial_fail();
  }
  return (*id < 0) ? M_SERIAL_OK_DONE : M_SERIAL_OK_CONTINUE;
}
/* End reading a variant: nothing further is stored in the stream.
   Return M_SERIAL_OK_DONE always. */
M_INLINE m_serial_return_code_t
m_ser1al_bin_read_variant_end(m_serial_local_t local, m_serial_read_t serial)
{
  (void) serial;
  (void) local;
  return M_SERIAL_OK_DONE;
}
/* The exported read interface of the binary serializer.
   A map is decoded exactly like an array of key/value pairs, hence the
   reuse of the array callbacks for the map slots (slot layout presumed
   from m_serial_read_interface_t in m-core.h). */
static const m_serial_read_interface_t m_ser1al_bin_read_interface = {
m_ser1al_bin_read_boolean,
m_ser1al_bin_read_integer,
m_ser1al_bin_read_float,
m_ser1al_bin_read_string,
m_ser1al_bin_read_array_start,
m_ser1al_bin_read_array_next,
m_ser1al_bin_read_array_start, /* map_start: same encoding as an array */
m_ser1al_bin_read_map_value,
m_ser1al_bin_read_array_next, /* map_next */
m_ser1al_bin_read_tuple_start,
m_ser1al_bin_read_tuple_id,
m_ser1al_bin_read_variant_start,
m_ser1al_bin_read_variant_end
};
/* Initialize 'serial' as a binary deserializer reading from the FILE 'f'. */
M_INLINE void m_serial_bin_read_init(m_serial_read_t serial, FILE *f)
{
  serial->data[0].p = M_ASSIGN_CAST(void*, f);
  serial->m_interface = &m_ser1al_bin_read_interface;
}
/* Clear the binary read serializer. The FILE* is owned by the caller,
   so there is nothing to release here. */
M_INLINE void m_serial_bin_read_clear(m_serial_read_t serial)
{
  (void) serial;
}
/* Define a synonym of m_serial_read_t dedicated to the BIN serializer,
   with its proper OPLIST (INIT_WITH takes the source FILE*). */
typedef m_serial_read_t m_serial_bin_read_t;
#define M_OPL_m_serial_bin_read_t() \
(INIT_WITH(m_serial_bin_read_init), CLEAR(m_serial_bin_read_clear), \
TYPE(m_serial_bin_read_t), PROPERTIES(( LET_AS_INIT_WITH(1) )) )
M_END_PROTECTED_CODE
#endif

File diff suppressed because it is too large Load Diff

553
components/mlib/m-shared.h Normal file
View File

@ -0,0 +1,553 @@
/*
* M*LIB - SHARED Pointer Module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_SHARED_PTR_H
#define MSTARLIB_SHARED_PTR_H
#include "m-core.h"
#include "m-atomic.h"
#include "m-genint.h"
M_BEGIN_PROTECTED_CODE
/* Define a reference-counted shared pointer and its functions.
   The counter uses atomic operations (see M_SHAR3D_ATOMIC_OPLIST).
   USAGE: SHARED_PTR_DEF(name, type, [, oplist]) */
#define M_SHARED_PTR_DEF(name, ...) \
M_SHARED_PTR_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a reference-counted shared pointer and its functions
   with the user-chosen type name name_t.
   USAGE: SHARED_PTR_DEF_AS(name, name_t, type, [, oplist]) */
#define M_SHARED_PTR_DEF_AS(name, name_t, ...) \
M_BEGIN_PROTECTED_CODE \
M_SHAR3D_PTR_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__) \
((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), M_SHAR3D_ATOMIC_OPLIST, name_t ), \
(name, __VA_ARGS__ , M_SHAR3D_ATOMIC_OPLIST, name_t ))) \
M_END_PROTECTED_CODE
/* Define the oplist of a shared pointer.
   USAGE: SHARED_OPLIST(name [, oplist_of_the_type]) */
#define M_SHARED_PTR_OPLIST(...) \
M_SHAR3D_PTR_OPLIST_P1(M_IF_NARGS_EQ1(__VA_ARGS__) \
((__VA_ARGS__, M_BASIC_OPLIST ), \
(__VA_ARGS__ )))
/* Define a relaxed shared pointer and its functions.
   The counter is a plain int: NOT thread safe, but cheaper.
   USAGE: SHARED_PTR_RELAXED_DEF(name, type, [, oplist]) */
#define M_SHARED_PTR_RELAXED_DEF(name, ...) \
M_SHARED_PTR_RELAXED_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a relaxed shared pointer and its functions (thread unsafe)
   with the user-chosen type name name_t.
   USAGE: SHARED_PTR_RELAXED_DEF_AS(name, name_t, type, [, oplist]) */
#define M_SHARED_PTR_RELAXED_DEF_AS(name, name_t, ...) \
M_BEGIN_PROTECTED_CODE \
M_SHAR3D_PTR_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__) \
((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), M_SHAR3D_INTEGER_OPLIST, name_t ), \
(name, __VA_ARGS__, M_SHAR3D_INTEGER_OPLIST, name_t ))) \
M_END_PROTECTED_CODE
/* Define a shared resource and its functions:
   a bounded pool of resources shared by multiple owners.
   USAGE: SHARED_RESOURCE_DEF(name, type, [, oplist]) */
#define M_SHARED_RESOURCE_DEF(name, ...) \
M_SHARED_RESOURCE_DEF_AS(name, M_F(name,_t), M_F(name,_it_t), __VA_ARGS__)
/* Define a shared resource and its functions
   with the user-chosen names name_t (pool) and it_t (handle/iterator).
   This is a bounded pool of resources shared by multiple owners.
   USAGE: SHARED_RESOURCE_DEF_AS(name, name_t, it_t, type, [, oplist]) */
#define M_SHARED_RESOURCE_DEF_AS(name, name_t, it_t, ...) \
M_BEGIN_PROTECTED_CODE \
M_SHAR3D_RESOURCE_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__) \
((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t, it_t ), \
(name, __VA_ARGS__, name_t, it_t ))) \
M_END_PROTECTED_CODE
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/* Deferred evaluation so the argument pack is expanded before splitting */
#define M_SHAR3D_PTR_OPLIST_P1(arg) M_SHAR3D_PTR_OPLIST_P2 arg
/* Validate the given oplist before generating the shared_t oplist */
#define M_SHAR3D_PTR_OPLIST_P2(name, oplist) \
M_IF_OPLIST(oplist)(M_SHAR3D_PTR_OPLIST_P3, M_SHAR3D_PTR_OPLIST_FAILURE)(name, oplist)
/* Prepare a clean compilation failure */
#define M_SHAR3D_PTR_OPLIST_FAILURE(name, oplist) \
((M_LIB_ERROR(ARGUMENT_OF_SHARED_PTR_OPLIST_IS_NOT_AN_OPLIST, name, oplist)))
/* Generate the oplist of the shared pointer 'name'.
   FIX: a comma was missing after the SET entry, which fused
   SET(...) and INIT_MOVE(...) into a single malformed oplist item
   and broke method lookup for both operators. */
#define M_SHAR3D_PTR_OPLIST_P3(name, oplist) ( \
INIT(M_F(name, _init)), \
CLEAR(M_F(name, _clear)), \
INIT_SET(M_F(name, _init_set)), \
SET(M_F(name, _set)), \
INIT_MOVE(M_F(name, _init_move)), \
RESET(M_F(name, _reset)), \
MOVE(M_F(name, _move)), \
SWAP(M_F(name, _swap)) \
,NAME(name) \
,TYPE(M_F(name, _ct)) \
)
/* OPLIST describing a reference counter of atomic type (thread safe).
   Used by SHARED_PTR_DEF. */
#define M_SHAR3D_ATOMIC_OPLIST (TYPE(atomic_int), \
INIT_SET(atomic_init), \
ADD(atomic_fetch_add), \
SUB(atomic_fetch_sub), \
IT_CREF(atomic_load))
/* OPLIST describing a reference counter of plain int type (NOT thread
   safe). Used by SHARED_PTR_RELAXED_DEF. */
#define M_SHAR3D_INTEGER_OPLIST (TYPE(int), \
INIT_SET(m_shar3d_integer_init_set), \
ADD(m_shar3d_integer_add), \
SUB(m_shar3d_integer_sub), \
IT_CREF(m_shar3d_integer_cref))
/* Plain-integer counterpart of the atomic counter interface.
   Each function mirrors the matching atomic_* operation (add/sub return
   the PREVIOUS value) but is NOT thread safe. */
M_INLINE void m_shar3d_integer_init_set(int *p, int v) { *p = v; }
M_INLINE int m_shar3d_integer_add(int *p, int v) { int old = *p; *p = old + v; return old; }
M_INLINE int m_shar3d_integer_sub(int *p, int v) { int old = *p; *p = old - v; return old; }
M_INLINE int m_shar3d_integer_cref(int *p) { return *p; }
/********************************** INTERNAL *********************************/
/* Invariant of a shared pointer: the handle exists, and if it points to a
   slot, that slot is referenced at least once. */
#define M_SHAR3D_CONTRACT(shared, cpt_oplist) do { \
M_ASSERT(shared != NULL); \
M_ASSERT(*shared == NULL || M_CALL_IT_CREF(cpt_oplist, &(*shared)->cpt) >= 1); \
} while (0)
/* Deferred evaluation so the argument pack is expanded before splitting */
#define M_SHAR3D_PTR_DEF_P1(arg) M_ID( M_SHAR3D_PTR_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_SHAR3D_PTR_DEF_P2(name, type, oplist, cpt_oplist, shared_t) \
M_IF_OPLIST(oplist)(M_SHAR3D_PTR_DEF_P3, M_SHAR3D_PTR_DEF_FAILURE)(name, type, oplist, cpt_oplist, shared_t)
/* Stop processing with a compilation failure */
#define M_SHAR3D_PTR_DEF_FAILURE(name, type, oplist, cpt_oplist, shared_t) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(SHARED_PTR_DEF): the given argument is not a valid oplist: " #oplist)
/* Code generation: types, core functions, then the emplace family */
#define M_SHAR3D_PTR_DEF_P3(name, type, oplist, cpt_oplist, shared_t) \
M_SHAR3D_PTR_DEF_TYPE(name, type, oplist, cpt_oplist, shared_t) \
M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist) \
M_SHAR3D_PTR_DEF_CORE(name, type, oplist, cpt_oplist, shared_t) \
M_EMPLACE_QUEUE_DEF(name, cpt_oplist, M_F(name, _init_with), oplist, M_SHAR3D_PTR_DEF_EMPLACE)
/* Define the types of a shared pointer:
   - shared_t: the user-facing handle (array-of-one pointer to the slot)
   - _ptr / _srcptr: mutable / const raw pointers to the slot
   - _combine_s: slot and data fused in one allocation (see _init_new).
   FIX: the combined typedef was spelled M_F(name, combine_ct) — missing
   the leading underscore of every other internal typedef (_ct,
   _subtype_ct), so it expanded to 'namecombine_ct'. */
#define M_SHAR3D_PTR_DEF_TYPE(name, type, oplist, cpt_oplist, shared_t) \
\
typedef struct M_F(name, _s){ \
type *data; /* Pointer to the data */ \
M_GET_TYPE cpt_oplist cpt; /* Counter of how many refs the data */ \
bool combineAlloc; /* Does the data and the ptr share the slot? */ \
} *shared_t[1]; \
typedef struct M_F(name, _s) *M_F(name, _ptr); \
typedef const struct M_F(name, _s) *M_F(name, _srcptr); \
\
/* Internal type for oplist */ \
typedef shared_t M_F(name, _ct); \
typedef type M_F(name, _subtype_ct); \
\
typedef struct M_F(name, _combine_s) { \
struct M_F(name, _s) ptr; \
type data; \
} M_F(name, _combine_ct)[1]; \
/* Define the core functions */
#define M_SHAR3D_PTR_DEF_CORE(name, type, oplist, cpt_oplist, shared_t) \
\
M_INLINE void \
M_F(name, _init)(shared_t shared) \
{ \
*shared = NULL; \
} \
\
M_INLINE void \
M_F(name, _init2)(shared_t shared, type *data) \
{ \
M_ASSERT (shared != NULL); \
/* The shared ptr get exclusive access to data */ \
struct M_F(name, _s) *ptr; \
if (M_UNLIKELY (data == NULL)) { \
*shared = NULL; \
return; \
} \
ptr = M_CALL_NEW(oplist, struct M_F(name, _s)); \
if (M_UNLIKELY_NOMEM (ptr == NULL)) { \
M_MEMORY_FULL(sizeof(struct M_F(name, _s))); \
return; \
} \
ptr->data = data; \
M_CALL_INIT_SET(cpt_oplist, &ptr->cpt, 1); \
ptr->combineAlloc = false; \
*shared = ptr; \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
} \
\
M_IF_METHOD(INIT, oplist)( \
M_INLINE void \
M_F(name, _init_new)(shared_t shared) \
{ \
/* NOTE: Alloc 1 struct with both structures. */ \
struct M_F(name, _combine_s) *p = \
M_CALL_NEW(oplist, struct M_F(name, _combine_s)); \
if (M_UNLIKELY_NOMEM (p == NULL)) { \
M_MEMORY_FULL(sizeof(struct M_F(name, _combine_s))); \
return; \
} \
struct M_F(name, _s) *ptr = &p->ptr; \
ptr->combineAlloc = true; \
type *data = &p->data; \
M_CALL_INIT( oplist, *data); \
ptr->data = data; \
M_CALL_INIT_SET(cpt_oplist, &ptr->cpt, 1); \
*shared = ptr; \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
} \
, /* No INIT */ ) \
\
M_INLINE bool \
M_F(name, _NULL_p)(const shared_t shared) \
{ \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
return *shared == NULL; \
} \
\
M_INLINE void \
M_F(name, _init_set)(shared_t dest, \
const shared_t shared) \
{ \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
M_ASSERT (dest != shared); \
*dest = *shared; \
if (*dest != NULL) { \
int n = M_CALL_ADD(cpt_oplist, &((*dest)->cpt), 1); \
(void) n; /* unused return value */ \
} \
M_SHAR3D_CONTRACT(dest, cpt_oplist); \
} \
\
M_INLINE void \
M_F(name, _clear)(shared_t dest) \
{ \
M_SHAR3D_CONTRACT(dest, cpt_oplist); \
if (*dest != NULL) { \
if (M_CALL_SUB(cpt_oplist, &((*dest)->cpt), 1) == 1) { \
bool combineAlloc = (*dest)->combineAlloc; \
/* Note: if combineAlloc is true, the address of the slot \
combining both data & ptr is the same as the address of the \
first element, aka data itself. Static analyzer tools don't \
seem to detect this and report error. */ \
M_CALL_CLEAR(oplist, *(*dest)->data); \
if (combineAlloc == false) { \
M_CALL_DEL(oplist, (*dest)->data); \
} \
M_CALL_DEL(oplist, *dest); \
} \
*dest = NULL; \
} \
M_SHAR3D_CONTRACT(dest, cpt_oplist); \
} \
\
M_INLINE void \
M_F(name, _reset)(shared_t dest) \
{ \
/* NOTE: Clear will also set dest to NULL */ \
M_F(name, _clear)(dest); \
} \
\
M_INLINE void \
M_F(name, _set)(shared_t dest, \
const shared_t shared) \
{ \
M_SHAR3D_CONTRACT(dest, cpt_oplist); \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
M_F(name, _clear)(dest); \
M_F(name, _init_set)(dest, shared); \
} \
\
M_INLINE void \
M_F(name, _init_move)(shared_t dest, \
shared_t shared) \
{ \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
M_ASSERT (dest != NULL && dest != shared); \
*dest = *shared; \
*shared = NULL; \
M_SHAR3D_CONTRACT(dest, cpt_oplist); \
} \
\
M_INLINE void \
M_F(name, _move)(shared_t dest, \
shared_t shared) \
{ \
M_SHAR3D_CONTRACT(dest, cpt_oplist); \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
M_ASSERT (dest != shared); \
M_F(name, _clear)(dest); \
M_F(name, _init_move)(dest, shared); \
} \
\
M_INLINE void \
M_F(name, _swap)(shared_t p1, \
shared_t p2) \
{ \
M_SHAR3D_CONTRACT(p1, cpt_oplist); \
M_SHAR3D_CONTRACT(p2, cpt_oplist); \
/* NOTE: SWAP is not atomic */ \
M_SWAP (struct M_F(name, _s)*, *p1, *p2); \
M_SHAR3D_CONTRACT(p1, cpt_oplist); \
M_SHAR3D_CONTRACT(p2, cpt_oplist); \
} \
\
M_INLINE bool \
M_F(name, _equal_p)(const shared_t p1, \
const shared_t p2) \
{ \
M_SHAR3D_CONTRACT(p1, cpt_oplist); \
M_SHAR3D_CONTRACT(p2, cpt_oplist); \
return *p1 == *p2; \
} \
\
M_INLINE type const * \
M_F(name, _cref)(const shared_t shared) \
{ \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
M_ASSERT(*shared != NULL); \
type *data = (*shared)->data; \
M_ASSERT (data != NULL); \
return M_CONST_CAST (type, data); \
} \
\
M_INLINE type * \
M_F(name, _ref)(shared_t shared) \
{ \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
M_ASSERT(*shared != NULL); \
type *data = (*shared)->data; \
M_ASSERT (data != NULL); \
return data; \
} \
/* Definition of the emplace function family for shared pointers */
/* Like _init_new, but initializes the data in place with 'init_func'
   and the forwarded emplace arguments (single combined allocation). */
#define M_SHAR3D_PTR_DEF_EMPLACE(name, cpt_oplist, function_name, oplist, init_func, exp_emplace_type) \
M_INLINE void \
function_name(M_F(name, _ct) shared \
M_EMPLACE_LIST_TYPE_VAR(a, exp_emplace_type) ) \
{ \
/* NOTE: Alloc 1 struct with both structures. */ \
struct M_F(name, _combine_s) *p = \
M_CALL_NEW(oplist, struct M_F(name, _combine_s)); \
if (M_UNLIKELY_NOMEM (p == NULL)) { \
M_MEMORY_FULL(sizeof(struct M_F(name, _combine_s))); \
return; \
} \
struct M_F(name, _s) *ptr = &p->ptr; \
ptr->combineAlloc = true; \
M_F(name, _subtype_ct) *data = &p->data; \
M_EMPLACE_CALL_FUNC(a, init_func, oplist, *data, exp_emplace_type); \
ptr->data = data; \
M_CALL_INIT_SET(cpt_oplist, &ptr->cpt, 1); \
*shared = ptr; \
M_SHAR3D_CONTRACT(shared, cpt_oplist); \
} \
/********************************** INTERNAL *********************************/
/* Invariant of a shared resource pool: handle and backing buffer exist */
#define M_SHAR3D_RESOURCE_CONTRACT(s) do { \
M_ASSERT (s != NULL); \
M_ASSERT (s->buffer != NULL); \
} while (0)
/* Deferred evaluation so the argument pack is expanded before splitting */
#define M_SHAR3D_RESOURCE_DEF_P1(arg) M_ID( M_SHAR3D_RESOURCE_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_SHAR3D_RESOURCE_DEF_P2(name, type, oplist, shared_t, it_t) \
M_IF_OPLIST(oplist)(M_SHAR3D_RESOURCE_DEF_P3, M_SHAR3D_RESOURCE_DEF_FAILURE)(name, type, oplist, shared_t, it_t)
/* Stop processing with a compilation failure */
#define M_SHAR3D_RESOURCE_DEF_FAILURE(name, type, oplist, shared_t, it_t) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(SHARED_RESOURCE_DEF): the given argument is not a valid oplist: " #oplist)
/* Code generation: types then core functions */
#define M_SHAR3D_RESOURCE_DEF_P3(name, type, oplist, shared_t, it_t) \
M_SHAR3D_RESOURCE_DEF_TYPE(name, type, oplist, shared_t, it_t) \
M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist) \
M_SHAR3D_RESOURCE_DEF_CORE(name, type, oplist, shared_t, it_t) \
/* Define the types */
/* The pool is a m_genint free-slot allocator plus an array of per-slot
   reference-counted, cache-line aligned entries; a handle (it_t) names
   one acquired slot. */
#define M_SHAR3D_RESOURCE_DEF_TYPE(name, type, oplist, shared_t, it_t) \
\
/* Create an aligned type to avoid false sharing between threads */ \
typedef struct M_F(name, _atype_s) { \
atomic_uint cpt; \
type x; \
M_CACHELINE_ALIGN(align, type, atomic_uint); \
} M_F(name, _atype_ct); \
\
typedef struct M_F(name, _s) { \
m_genint_t core; \
M_F(name, _atype_ct) *buffer; \
} shared_t[1]; \
\
typedef struct M_F(name, _it_s) { \
unsigned int idx; \
struct M_F(name, _s) *ref; \
} it_t[1]; \
\
/* Internal Types for oplist */ \
typedef shared_t M_F(name, _ct); \
typedef type M_F(name, _subtype_ct); \
/* Define the core functions */
#define M_SHAR3D_RESOURCE_DEF_CORE(name, type, oplist, shared_t, it_t) \
M_INLINE void \
M_F(name, _init)(shared_t s, size_t n) \
{ \
M_ASSERT(s != NULL); \
M_ASSERT (n > 0 && n < UINT_MAX); \
s->buffer = M_CALL_REALLOC(oplist, M_F(name, _atype_ct), NULL, n); \
if (M_UNLIKELY_NOMEM (s->buffer == NULL)) { \
M_MEMORY_FULL(sizeof(M_F(name, _atype_ct)) * n); \
return; \
} \
for(size_t i = 0; i < n; i++) { \
M_CALL_INIT(oplist, s->buffer[i].x); \
atomic_init (&s->buffer[i].cpt, 0U); \
} \
m_genint_init(s->core, (unsigned int) n); \
M_SHAR3D_RESOURCE_CONTRACT(s); \
} \
\
M_INLINE void \
M_F(name, _clear)(shared_t s) \
{ \
M_SHAR3D_RESOURCE_CONTRACT(s); \
size_t n = m_genint_size(s->core); \
for(size_t i = 0; i < n; i++) { \
M_CALL_CLEAR(oplist, s->buffer[i].x); \
} \
M_CALL_FREE(oplist, s->buffer); \
s->buffer = NULL; \
m_genint_clear(s->core); \
} \
\
M_INLINE void \
M_F(name, _it)(it_t it, shared_t s) \
{ \
M_SHAR3D_RESOURCE_CONTRACT(s); \
M_ASSERT (it != NULL); \
unsigned int idx = m_genint_pop(s->core); \
it->idx = idx; \
it->ref = s; \
if (M_LIKELY (idx != M_GENINT_ERROR)) { \
M_ASSERT(atomic_load(&s->buffer[idx].cpt) == 0); \
atomic_store(&s->buffer[idx].cpt, 1U); \
} \
} \
\
M_INLINE bool \
M_F(name, _end_p)(it_t it) \
{ \
M_ASSERT (it != NULL); \
return it->idx == M_GENINT_ERROR; \
} \
\
M_INLINE type * \
M_F(name, _ref)(it_t it) \
{ \
M_ASSERT (it != NULL && it->ref != NULL && it->idx != M_GENINT_ERROR); \
M_SHAR3D_RESOURCE_CONTRACT(it->ref); \
return &it->ref->buffer[it->idx].x; \
} \
\
M_INLINE type const * \
M_F(name, _cref)(it_t it) \
{ \
M_ASSERT (it != NULL && it->ref != NULL && it->idx != M_GENINT_ERROR); \
M_SHAR3D_RESOURCE_CONTRACT(it->ref); \
return M_CONST_CAST (type, &it->ref->buffer[it->idx].x); \
} \
\
M_INLINE void \
M_F(name, _end)(it_t it, shared_t s) \
{ \
M_SHAR3D_RESOURCE_CONTRACT(s); \
M_ASSERT (it != NULL); \
M_ASSERT (it->ref == s); \
unsigned int idx = it->idx; \
if (M_LIKELY (idx != M_GENINT_ERROR)) { \
unsigned int c = atomic_fetch_sub (&it->ref->buffer[idx].cpt, 1U); \
if (c == 1) { \
m_genint_push(it->ref->core, idx); \
} \
it->idx = M_GENINT_ERROR; \
} \
} \
\
M_INLINE void \
M_F(name, _it_set)(it_t itd, it_t its) \
{ \
M_ASSERT (itd != NULL && its != NULL); \
M_SHAR3D_RESOURCE_CONTRACT(its->ref); \
itd->ref = its->ref; \
unsigned int idx = its->idx; \
itd->idx = idx; \
if (M_LIKELY (idx != M_GENINT_ERROR)) { \
unsigned int c = atomic_fetch_add(&itd->ref->buffer[idx].cpt, 1U); \
M_ASSERT (c >= 1); \
} \
} \
M_END_PROTECTED_CODE
/********************************** INTERNAL *********************************/
/* Short aliases without the M_ prefix, kept for source compatibility
   when M_USE_SMALL_NAME is enabled. */
#if M_USE_SMALL_NAME
#define SHARED_PTR_OPLIST M_SHARED_PTR_OPLIST
#define SHARED_PTR_DEF M_SHARED_PTR_DEF
#define SHARED_PTR_DEF_AS M_SHARED_PTR_DEF_AS
#define SHARED_PTR_RELAXED_DEF M_SHARED_PTR_RELAXED_DEF
#define SHARED_PTR_RELAXED_DEF_AS M_SHARED_PTR_RELAXED_DEF_AS
#define SHARED_RESOURCE_DEF M_SHARED_RESOURCE_DEF
#define SHARED_RESOURCE_DEF_AS M_SHARED_RESOURCE_DEF_AS
#endif
#endif

View File

@ -0,0 +1,814 @@
/*
* M*LIB - SNAPSHOT Module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_SNAPSHOT_H
#define MSTARLIB_SNAPSHOT_H
#include "m-atomic.h"
#include "m-core.h"
#include "m-genint.h"
M_BEGIN_PROTECTED_CODE
/* Define a Single Producer Single Consumer snapshot and its functions
   USAGE: SNAPSHOT_SPSC_DEF(name, type[, oplist]) */
#define M_SNAPSHOT_SPSC_DEF(name, ...)                                        \
  M_SNAPSHOT_SPSC_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a Single Producer Single Consumer snapshot and its functions
   as the given name name_t
   USAGE: SNAPSHOT_SPSC_DEF_AS(name, name_t, type[, oplist]) */
#define M_SNAPSHOT_SPSC_DEF_AS(name, name_t, ...)                             \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_SNAPSH0T_SPSC_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                          \
             ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t ), \
              (name, __VA_ARGS__                                    , name_t ))) \
  M_END_PROTECTED_CODE
/* Define a Single Producer Multiple Consumer snapshot and its functions
   USAGE: SNAPSHOT_SPMC_DEF(name, type[, oplist]) */
#define M_SNAPSHOT_SPMC_DEF(name, ...)                                        \
  M_SNAPSHOT_SPMC_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a Single Producer Multiple Consumer snapshot and its functions
   as the given name name_t
   USAGE: SNAPSHOT_SPMC_DEF_AS(name, name_t, type[, oplist]) */
#define M_SNAPSHOT_SPMC_DEF_AS(name, name_t, ...)                             \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_SNAPSH0T_SPMC_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                          \
             ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t ), \
              (name, __VA_ARGS__                                    , name_t ))) \
  M_END_PROTECTED_CODE
/* Define a Multiple Producer Multiple Consumer snapshot and its functions
   USAGE: SNAPSHOT_MPMC_DEF(name, type[, oplist]) */
#define M_SNAPSHOT_MPMC_DEF(name, ...)                                        \
  M_SNAPSHOT_MPMC_DEF_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define a Multiple Producer Multiple Consumer snapshot and its functions
   as the given name name_t
   USAGE: SNAPSHOT_MPMC_DEF_AS(name, name_t, type[, oplist]) */
#define M_SNAPSHOT_MPMC_DEF_AS(name, name_t, ...)                             \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_SNAPSH0T_MPMC_DEF_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                          \
             ((name, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)(), name_t ), \
              (name, __VA_ARGS__                                    , name_t ))) \
  M_END_PROTECTED_CODE
/* Define the oplist of a snapshot (SPSC, SPMC or MPMC).
   USAGE: SNAPSHOT_OPLIST(name[, oplist]) */
#define M_SNAPSHOT_OPLIST(...)                                                \
  M_SNAPSH0T_OPLIST_P1(M_IF_NARGS_EQ1(__VA_ARGS__)                            \
                      ((__VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)() ), \
                       (__VA_ARGS__ )))
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/* Deferred evaluation of the input so that the arguments are fully expanded */
#define M_SNAPSH0T_OPLIST_P1(arg) M_SNAPSH0T_OPLIST_P2 arg
/* Validation of the given oplist */
#define M_SNAPSH0T_OPLIST_P2(name, oplist)                                    \
  M_IF_OPLIST(oplist)(M_SNAPSH0T_OPLIST_P3, M_SNAPSH0T_OPLIST_FAILURE)(name, oplist)
/* Prepare a clean compilation failure */
#define M_SNAPSH0T_OPLIST_FAILURE(name, oplist)                               \
  ((M_LIB_ERROR(ARGUMENT_OF_SNAPSHOT_OPLIST_IS_NOT_AN_OPLIST, name, oplist)))
/* Define the oplist of a snapshot */
#define M_SNAPSH0T_OPLIST_P3(name, oplist)                                    \
  (INIT(M_F(name, _init))                                                     \
   ,INIT_SET(M_F(name, _init_set))                                            \
   ,SET(M_F(name, _set))                                                      \
   ,CLEAR(M_F(name, _clear))                                                  \
   ,NAME(name)                                                                \
   ,TYPE(M_F(name, _ct))                                                      \
   ,SUBTYPE(M_F(name, _subtype_ct))                                           \
   ,OPLIST(oplist)                                                            \
   ,M_IF_METHOD(INIT_MOVE, oplist)(INIT_MOVE(M_F(name, _init_move)),)         \
   ,M_IF_METHOD(MOVE, oplist)(MOVE(M_F(name, _move)),)                        \
   )
/********************************** INTERNAL *********************************/
/* Flag defining the atomic state of a snapshot:
 * - r: Index of the read buffer                                 Range [0..2]
 * - w: Index of the write buffer                                Range [0..2]
 * - f: Next index of the write buffer when a shot is taken      Range [0..2]
 * - b: Boolean indicating that the read buffer shall be updated
 * all fields packed in an unsigned char type.
 * Bit layout (LSB to MSB): f[0:1] w[2:3] r[4:5] b[6]
 */
#define M_SNAPSH0T_SPSC_FLAG(r, w, f, b)                                      \
  ((unsigned char)( ( (r) << 4) | ((w) << 2) | ((f)) | ((b) << 6)))
#define M_SNAPSH0T_SPSC_R(flags)                                              \
  (((unsigned int) (flags) >> 4) & 0x03u)
#define M_SNAPSH0T_SPSC_W(flags)                                              \
  (((unsigned int) (flags) >> 2) & 0x03u)
#define M_SNAPSH0T_SPSC_F(flags)                                              \
  (((unsigned int) (flags) >> 0) & 0x03u)
#define M_SNAPSH0T_SPSC_B(flags)                                              \
  (((unsigned int) (flags) >> 6) & 0x01u)
/* NOTE: Due to atomic_load only accepting non-const pointer,
   we can't have any const in the interface. */
/* Invariant: the r, w and f indices always reference 3 distinct buffers */
#define M_SNAPSH0T_SPSC_FLAGS_CONTRACT(flags)                                 \
  M_ASSERT(M_SNAPSH0T_SPSC_R(flags) != M_SNAPSH0T_SPSC_W(flags)               \
           && M_SNAPSH0T_SPSC_R(flags) != M_SNAPSH0T_SPSC_F(flags)            \
           && M_SNAPSH0T_SPSC_W(flags) != M_SNAPSH0T_SPSC_F(flags))
#define M_SNAPSH0T_SPSC_CONTRACT(snap)        do {                            \
    M_ASSERT((snap) != NULL);                                                 \
    unsigned char f = atomic_load (&(snap)->flags);                           \
    M_SNAPSH0T_SPSC_FLAGS_CONTRACT(f);                                        \
  } while (0)
/* A snapshot is basically an atomic triple buffer (Lock Free)
   between a single producer thread and a single consumer thread. */
#define M_SNAPSH0T_SPSC_MAX_BUFFER 3
// Deferred evaluation of the arguments.
#define M_SNAPSH0T_SPSC_DEF_P1(arg) M_ID( M_SNAPSH0T_SPSC_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_SNAPSH0T_SPSC_DEF_P2(name, type, oplist, snapshot_t)                \
  M_IF_OPLIST(oplist)(M_SNAPSH0T_SPSC_DEF_P3, M_SNAPSH0T_SPSC_DEF_FAILURE)(name, type, oplist, snapshot_t)
/* Stop processing with a compilation failure */
#define M_SNAPSH0T_SPSC_DEF_FAILURE(name, type, oplist, snapshot_t)           \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(SNAPSHOT_SPSC_DEF): the given argument is not a valid oplist: " #oplist)
/* Expand the type and the functions of a SPSC snapshot */
#define M_SNAPSH0T_SPSC_DEF_P3(name, type, oplist, snapshot_t)                \
  M_SNAPSH0T_SPSC_DEF_TYPE(name, type, oplist, snapshot_t)                    \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_SNAPSH0T_SPSC_DEF_CORE(name, type, oplist, snapshot_t)                    \
/* Define the type of a SPSC snapshot (3 padded buffers + 1 atomic state byte) */
#define M_SNAPSH0T_SPSC_DEF_TYPE(name, type, oplist, snapshot_t)              \
                                                                              \
  /* Create an aligned type to avoid false sharing between threads */         \
  typedef struct M_F(name, _aligned_type_s) {                                 \
    type         x;                                                           \
    M_CACHELINE_ALIGN(align, type);                                           \
  } M_F(name, _aligned_type_ct);                                              \
                                                                              \
  typedef struct M_F(name, _s) {                                              \
    M_F(name, _aligned_type_ct)  data[M_SNAPSH0T_SPSC_MAX_BUFFER];            \
    atomic_uchar flags;                                                       \
  } snapshot_t[1];                                                            \
  typedef struct M_F(name, _s) *M_F(name, _ptr);                              \
  typedef const struct M_F(name, _s) *M_F(name, _srcptr);                     \
                                                                              \
  /* Define internal types for oplist */                                      \
  typedef snapshot_t M_F(name, _ct);                                          \
  typedef type       M_F(name, _subtype_ct);                                  \
/* Define the core functions */
#define M_SNAPSH0T_SPSC_DEF_CORE(name, type, oplist, snapshot_t) \
\
M_INLINE void \
M_F(name, _init)(snapshot_t snap) \
{ \
M_ASSERT(snap != NULL); \
for(int i = 0; i < M_SNAPSH0T_SPSC_MAX_BUFFER; i++) { \
M_CALL_INIT(oplist, snap->data[i].x); \
} \
atomic_init (&snap->flags, M_SNAPSH0T_SPSC_FLAG(0, 1, 2, 0)); \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
} \
\
M_INLINE void \
M_F(name, _clear)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
for(int i = 0; i < M_SNAPSH0T_SPSC_MAX_BUFFER; i++) { \
M_CALL_CLEAR(oplist, snap->data[i].x); \
} \
} \
\
/* const is missing for org due to use of atomic_load of org */ \
M_INLINE void \
M_F(name, _init_set)(snapshot_t snap, snapshot_t org) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(org); \
M_ASSERT(snap != NULL && snap != org); \
for(int i = 0; i < M_SNAPSH0T_SPSC_MAX_BUFFER; i++) { \
M_CALL_INIT_SET(oplist, snap->data[i].x, org->data[i].x); \
} \
atomic_init (&snap->flags, atomic_load(&org->flags)); \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
} \
\
/* const is missing for org due to use of atomic_load of org */ \
M_INLINE void \
M_F(name, _set)(snapshot_t snap, snapshot_t org) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
M_SNAPSH0T_SPSC_CONTRACT(org); \
for(int i = 0; i < M_SNAPSH0T_SPSC_MAX_BUFFER; i++) { \
M_CALL_SET(oplist, snap->data[i].x, org->data[i].x); \
} \
atomic_init (&snap->flags, atomic_load(&org->flags)); \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
} \
\
M_IF_METHOD(INIT_MOVE, oplist)( \
M_INLINE void \
M_F(name, _init_move)(snapshot_t snap, snapshot_t org) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(org); \
M_ASSERT(snap != NULL && snap != org); \
for(int i = 0; i < M_SNAPSH0T_SPSC_MAX_BUFFER; i++) { \
M_CALL_INIT_MOVE(oplist, snap->data[i].x, org->data[i].x); \
} \
atomic_store (&snap->flags, atomic_load(&org->flags)); \
atomic_store (&org->flags, M_SNAPSH0T_SPSC_FLAG(0,0,0,0) ); \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
} \
,) /* IF_METHOD (INIT_MOVE) */ \
\
M_IF_METHOD(MOVE, oplist)( \
M_INLINE void \
M_F(name, _move)(snapshot_t snap, \
snapshot_t org) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
M_SNAPSH0T_SPSC_CONTRACT(org); \
M_ASSERT(snap != org); \
for(int i = 0; i < M_SNAPSH0T_SPSC_MAX_BUFFER; i++) { \
M_CALL_MOVE(oplist, snap->data[i].x, org->data[i].x); \
} \
atomic_store (&snap->flags, atomic_load(&org->flags)); \
atomic_store (&org->flags, M_SNAPSH0T_SPSC_FLAG(0,0,0,0) ); \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
} \
,) /* IF_METHOD (MOVE) */ \
\
M_INLINE type * \
M_F(name, _write)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
unsigned char nextFlags, origFlags = atomic_load (&snap->flags); \
/* Atomic CAS operation */ \
do { \
/* Swap F and W buffer, setting exchange flag */ \
nextFlags = M_SNAPSH0T_SPSC_FLAG(M_SNAPSH0T_SPSC_R(origFlags), \
M_SNAPSH0T_SPSC_F(origFlags), \
M_SNAPSH0T_SPSC_W(origFlags), 1); \
/* exponential backoff is not needed as there can't be more \
than 2 threads which try to update the data. */ \
} while (!atomic_compare_exchange_weak (&snap->flags, &origFlags, \
nextFlags)); \
/* Return new write buffer for new updating */ \
return &snap->data[M_SNAPSH0T_SPSC_W(nextFlags)].x; \
} \
\
M_INLINE type const * \
M_F(name, _read)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
unsigned char nextFlags, origFlags = atomic_load (&snap->flags); \
/* Atomic CAS operation */ \
do { \
/* If no exchange registered, do nothing and keep the same */ \
if (!M_SNAPSH0T_SPSC_B(origFlags)) { \
nextFlags = origFlags; \
break; \
} \
/* Swap R and F buffer, clearing exchange flag */ \
nextFlags = M_SNAPSH0T_SPSC_FLAG(M_SNAPSH0T_SPSC_F(origFlags), \
M_SNAPSH0T_SPSC_W(origFlags), \
M_SNAPSH0T_SPSC_R(origFlags), 0); \
/* exponential backoff is not needed as there can't be more \
than 2 threads which try to update the data. */ \
} while (!atomic_compare_exchange_weak (&snap->flags, &origFlags, \
nextFlags)); \
/* Return current read buffer */ \
return M_CONST_CAST(type, &snap->data[M_SNAPSH0T_SPSC_R(nextFlags)].x); \
} \
\
/* Non const due to use of atomic_load */ \
M_INLINE bool \
M_F(name, _updated_p)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
unsigned char flags = atomic_load (&snap->flags); \
return M_SNAPSH0T_SPSC_B(flags); \
} \
\
/* Non const due to use of atomic_load */ \
M_INLINE type * \
M_F(name, _get_write_buffer)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
unsigned char flags = atomic_load(&snap->flags); \
return &snap->data[M_SNAPSH0T_SPSC_W(flags)].x; \
} \
\
/* Non const due to use of atomic_load */ \
M_INLINE type const * \
M_F(name, _get_read_buffer)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPSC_CONTRACT(snap); \
unsigned char flags = atomic_load(&snap->flags); \
return M_CONST_CAST(type, &snap->data[M_SNAPSH0T_SPSC_R(flags)].x); \
} \
/********************************** INTERNAL *********************************/
/* Pack the last-published write index (w) with a "new data" flag (n) */
#define M_SNAPSH0T_SPMC_INT_FLAG(w, n) ( ((w) << 1) | (n) )
#define M_SNAPSH0T_SPMC_INT_FLAG_W(f)  ((f) >> 1)
#define M_SNAPSH0T_SPMC_INT_FLAG_N(f)  ((f) & 1)
// 2 more buffer than the number of readers are needed
#define M_SNAPSH0T_SPMC_EXTRA_BUFFER 2
#define M_SNAPSH0T_SPMC_MAX_READER (M_GENINT_MAX_ALLOC-M_SNAPSH0T_SPMC_EXTRA_BUFFER)
/* Internal structure to handle SPMC snapshot but return an unique index in the buffer array.
   - lastNext: last published written index + next flag (format M_SNAPSH0T_SPMC_INT_FLAG)
   - currentWrite: the index being currently written.
   - n_reader : number of readers
   - cptTab: ref counter array to keep track of how many readers use the corresponding buffer.
   - freeList: a pool of free integers.
*/
typedef struct m_snapsh0t_mrsw_s {
  atomic_uint  lastNext;
  unsigned int currentWrite;
  size_t       n_reader;
  atomic_uint *cptTab;
  m_genint_t   freeList;
} m_snapsh0t_mrsw_ct[1];
// can't check currentWrite due to potential data race on it
#define M_SNAPSH0T_SPMC_INT_CONTRACT(s) do {                                  \
    M_ASSERT (s != NULL);                                                     \
    M_ASSERT (s->n_reader > 0 && s->n_reader <= M_SNAPSH0T_SPMC_MAX_READER);  \
    M_ASSERT ((size_t)M_SNAPSH0T_SPMC_INT_FLAG_W(atomic_load(&s->lastNext))   \
              <= s->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER);                 \
    M_ASSERT (s->cptTab != NULL);                                             \
  } while (0)
/* Constructor: set up the MRSW index dispatcher for n reader threads. */
M_INLINE void
m_snapsh0t_mrsw_init(m_snapsh0t_mrsw_ct s, size_t n)
{
  M_ASSERT (s != NULL);
  M_ASSERT (n >= 1 && n <= M_SNAPSH0T_SPMC_MAX_READER);
  s->n_reader = n;
  /* Total buffer count = one per reader plus the two extra ones */
  const size_t total = n + M_SNAPSH0T_SPMC_EXTRA_BUFFER;
  atomic_uint *tab = M_MEMORY_REALLOC (atomic_uint, NULL, total);
  if (M_UNLIKELY_NOMEM (tab == NULL)) {
    M_MEMORY_FULL(sizeof (atomic_uint) * total);
    return;
  }
  /* No buffer is referenced by any thread yet */
  for(size_t i = 0; i < total; i++)
    atomic_init(&tab[i], 0U);
  s->cptTab = tab;
  m_genint_init (s->freeList, (unsigned int) total);
  /* Reserve one buffer and publish it as the one readers shall use */
  const unsigned int pub = m_genint_pop(s->freeList);
  M_ASSERT (pub != M_GENINT_ERROR);
  atomic_store(&tab[pub], 1U);
  atomic_init(&s->lastNext, M_SNAPSH0T_SPMC_INT_FLAG(pub, true));
  /* Reserve a second buffer as the writer's working buffer */
  const unsigned int wr = m_genint_pop(s->freeList);
  M_ASSERT (wr != M_GENINT_ERROR);
  atomic_store(&tab[wr], 1U);
  s->currentWrite = wr;
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
}
/* Destructor: release the free-index pool and the reference-counter table. */
M_INLINE void
m_snapsh0t_mrsw_clear(m_snapsh0t_mrsw_ct s)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  m_genint_clear(s->freeList);
  M_MEMORY_FREE (s->cptTab);
  /* Leave the object in an obviously invalid state to catch reuse */
  s->n_reader = 0;
  s->cptTab = NULL;
}
/* Return the index of the buffer currently owned by the writer.
   Only safe to call from the (single) writer thread: currentWrite is
   not atomic. */
M_INLINE unsigned int
m_snapsh0t_mrsw_get_write_idx(m_snapsh0t_mrsw_ct s)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  return s->currentWrite;
}
/* Return the number of readers this structure was initialized for */
M_INLINE unsigned int
m_snapsh0t_mrsw_size(m_snapsh0t_mrsw_ct s)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  return (unsigned int) s->n_reader;
}
/* Give the current index that is written to the readers,
   and return new available index for the writer thread */
M_INLINE unsigned int
m_snapsh0t_mrsw_write_idx(m_snapsh0t_mrsw_ct s, unsigned int idx)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  // Provide the finalized written buffer to the readers.
  unsigned int newNext, previous = atomic_load(&s->lastNext);
  do {
    newNext = M_SNAPSH0T_SPMC_INT_FLAG(idx, true);
  } while (!atomic_compare_exchange_weak(&s->lastNext, &previous, newNext));
  if (M_SNAPSH0T_SPMC_INT_FLAG_N(previous)) {
    // Reuse previous buffer as it was not used by any reader
    idx = M_SNAPSH0T_SPMC_INT_FLAG_W(previous);
    // Some other read threads may already have tried to reserve this index
    // So atomic_load(&s->cptTab[idx]) can be greater than 1.
    // However they will fail to ack it in lastNext,
    // so they will remove their reservation later
  } else {
    // Remove the writer thread counter from the count of the previous buffer
    idx = M_SNAPSH0T_SPMC_INT_FLAG_W(previous);
    unsigned int c = atomic_fetch_sub(&s->cptTab[idx], 1U);
    M_ASSERT (c != 0 && c <= s->n_reader + 1);
    // Get a new buffer.
    if (c != 1) {
      // If someone else keeps a ref on the buffer, we can't reuse it
      // get another free one.
      idx = m_genint_pop(s->freeList);
      M_ASSERT(idx != M_GENINT_ERROR);
    } else {
      // No other thread keeps track of this buffer.
      // Reuse it.
    }
    M_ASSERT (idx < s->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER);
    M_ASSERT (atomic_load(&s->cptTab[idx]) == 0);
    /* Take the writer's reference on the new working buffer */
    atomic_store(&s->cptTab[idx], 1U);
  }
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  return idx;
}
/* Publish the writer's current buffer to the readers and return the index
   of a fresh buffer to write into next. */
M_INLINE unsigned int
m_snapsh0t_mrsw_write(m_snapsh0t_mrsw_ct s)
{
  const unsigned int next = m_snapsh0t_mrsw_write_idx(s, s->currentWrite);
  s->currentWrite = next;
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  return next;
}
/* Reserve a free buffer for a writer and return its index. */
M_INLINE unsigned int
m_snapsh0t_mrsw_write_start(m_snapsh0t_mrsw_ct s)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  /* Take an unused buffer out of the free pool */
  const unsigned int buf = m_genint_pop(s->freeList);
  M_ASSERT (buf != M_GENINT_ERROR);
  M_ASSERT (buf < s->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER);
  M_ASSERT (atomic_load(&s->cptTab[buf]) == 0);
  /* Mark the buffer as owned by this writer */
  atomic_store(&s->cptTab[buf], 1U);
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  return buf;
}
/* End writing to the given write buffer */
M_INLINE void
m_snapsh0t_mrsw_write_end(m_snapsh0t_mrsw_ct s, unsigned int idx)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  // Provide this write buffer to the readers
  unsigned int newNext, previous = atomic_load(&s->lastNext);
  do {
    newNext = M_SNAPSH0T_SPMC_INT_FLAG(idx, true);
  } while (!atomic_compare_exchange_weak(&s->lastNext, &previous, newNext));
  // Free the previous write buffer
  idx = M_SNAPSH0T_SPMC_INT_FLAG_W(previous);
  /* Drop one reference; fetch_sub returns the value BEFORE decrement */
  unsigned int c = atomic_fetch_sub(&s->cptTab[idx], 1U);
  M_ASSERT (c != 0 && c <= s->n_reader + 1);
  if (c == 1) {
    /* Last reference: recycle the index into the free pool */
    m_genint_push(s->freeList, idx);
  }
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
}
/* Start reading the latest written buffer and return the index to it */
M_INLINE unsigned int
m_snapsh0t_mrsw_read_start(m_snapsh0t_mrsw_ct s)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  unsigned int idx, previous;
reload:
  // Load the last published index + Next flag
  previous = atomic_load(&s->lastNext);
  while (true) {
    // Get the last published index
    idx = M_SNAPSH0T_SPMC_INT_FLAG_W(previous);
    // Load the number of threads using this index
    unsigned int c = atomic_load(&s->cptTab[idx]);
    M_ASSERT (c <= s->n_reader + 1);
    /* Take a reference on the index; restart from scratch if it has
       already been fully released (c == 0) or if the counter changed
       concurrently (CAS failed) */
    if (M_UNLIKELY (c == 0
                    || !atomic_compare_exchange_strong(&s->cptTab[idx], &c, c+1)))
      goto reload;
    // Try to ack it
    unsigned int newNext = M_SNAPSH0T_SPMC_INT_FLAG(idx, false);
  reforce:
    if (M_LIKELY (atomic_compare_exchange_strong(&s->lastNext, &previous, newNext)))
      break;
    // We have been preempted by another thread
    if (idx == M_SNAPSH0T_SPMC_INT_FLAG_W(previous)) {
      // This is still ok if the index has not changed
      // We can get previous to true again if the writer has recycled the index,
      // while we reserved it, and the reader got preempted until its CAS.
      if (M_UNLIKELY (M_SNAPSH0T_SPMC_INT_FLAG_N(previous) == true)) goto reforce;
      break;
    }
    // Free the reserved index as we failed to ack it
    c = atomic_fetch_sub(&s->cptTab[idx], 1U);
    M_ASSERT (c != 0 && c <= s->n_reader + 1);
    if (c == 1) {
      m_genint_push(s->freeList, idx);
    }
  }
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  return idx;
}
/* Release the buffer 'idx' previously obtained via m_snapsh0t_mrsw_read_start. */
M_INLINE void
m_snapsh0t_mrsw_read_end(m_snapsh0t_mrsw_ct s, unsigned int idx)
{
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
  M_ASSERT (idx < s->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER);
  /* Drop our reference (fetch_sub returns the value BEFORE decrement) */
  const unsigned int old_cnt = atomic_fetch_sub(&s->cptTab[idx], 1U);
  M_ASSERT (old_cnt != 0 && old_cnt <= s->n_reader + 1);
  /* We were the last user of this buffer: recycle its index */
  if (old_cnt == 1)
    m_genint_push(s->freeList, idx);
  M_SNAPSH0T_SPMC_INT_CONTRACT(s);
}
/********************************** INTERNAL *********************************/
/* Contract of a SPMC snapshot.
   Nothing notable as it can be accessed concurrently */
#define M_SNAPSH0T_SPMC_CONTRACT(snap) do {                                   \
    M_ASSERT (snap != NULL);                                                  \
    M_ASSERT (snap->data != NULL);                                            \
  } while (0)
// Deferred evaluation
#define M_SNAPSH0T_SPMC_DEF_P1(arg) M_ID( M_SNAPSH0T_SPMC_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_SNAPSH0T_SPMC_DEF_P2(name, type, oplist, snapshot_t)                \
  M_IF_OPLIST(oplist)(M_SNAPSH0T_SPMC_DEF_P3, M_SNAPSH0T_SPMC_DEF_FAILURE)(name, type, oplist, snapshot_t)
/* Stop processing with a compilation failure */
#define M_SNAPSH0T_SPMC_DEF_FAILURE(name, type, oplist, snapshot_t)           \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(SNAPSHOT_SPMC_DEF): the given argument is not a valid oplist: " #oplist)
/* Expand the type and the functions of a SPMC snapshot */
#define M_SNAPSH0T_SPMC_DEF_P3(name, type, oplist, snapshot_t)                \
  M_SNAPSH0T_SPMC_DEF_TYPE(name, type, oplist, snapshot_t)                    \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_SNAPSH0T_SPMC_DEF_CORE(name, type, oplist, snapshot_t)                    \
/* Define the type of a SPMC snapshot: a heap-allocated buffer array
   dispatched by the shared m_snapsh0t_mrsw_ct index manager */
#define M_SNAPSH0T_SPMC_DEF_TYPE(name, type, oplist, snapshot_t)              \
                                                                              \
  /* Create an aligned type to avoid false sharing between threads */         \
  typedef struct M_F(name, _aligned_type_s) {                                 \
    type         x;                                                           \
    M_CACHELINE_ALIGN(align, type);                                           \
  } M_F(name, _aligned_type_ct);                                              \
                                                                              \
  typedef struct M_F(name, _s) {                                              \
    M_F(name, _aligned_type_ct)  *data;                                       \
    m_snapsh0t_mrsw_ct            core;                                       \
  } snapshot_t[1];                                                            \
                                                                              \
  /* Define internal types for oplist */                                      \
  typedef snapshot_t M_F(name, _ct);                                          \
  typedef type       M_F(name, _subtype_ct);                                  \
/* Define the core functions */
#define M_SNAPSH0T_SPMC_DEF_CORE(name, type, oplist, snapshot_t) \
\
M_INLINE void \
M_F(name, _init)(snapshot_t snap, size_t nReader) \
{ \
M_ASSERT (snap != NULL); \
M_ASSERT (nReader > 0 && nReader <= M_SNAPSH0T_SPMC_MAX_READER); \
snap->data = M_CALL_REALLOC(oplist, M_F(name, _aligned_type_ct), \
NULL, nReader+M_SNAPSH0T_SPMC_EXTRA_BUFFER); \
if (M_UNLIKELY_NOMEM (snap->data == NULL)) { \
M_MEMORY_FULL(sizeof(M_F(name, _aligned_type_ct)) * \
(nReader+M_SNAPSH0T_SPMC_EXTRA_BUFFER)); \
return; \
} \
for(size_t i = 0; i < nReader + M_SNAPSH0T_SPMC_EXTRA_BUFFER; i++) { \
M_CALL_INIT(oplist, snap->data[i].x); \
} \
m_snapsh0t_mrsw_init(snap->core, nReader); \
M_SNAPSH0T_SPMC_CONTRACT(snap); \
} \
\
M_INLINE void \
M_F(name, _clear)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPMC_CONTRACT(snap); \
size_t nReader = m_snapsh0t_mrsw_size(snap->core); \
for(size_t i = 0; i < nReader + M_SNAPSH0T_SPMC_EXTRA_BUFFER; i++) { \
M_CALL_CLEAR(oplist, snap->data[i].x); \
} \
M_CALL_FREE(oplist, snap->data); \
m_snapsh0t_mrsw_clear(snap->core); \
} \
\
M_INLINE type * \
M_F(name, _write)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPMC_CONTRACT(snap); \
const unsigned int idx = m_snapsh0t_mrsw_write(snap->core); \
return &snap->data[idx].x; \
} \
\
M_INLINE type const * \
M_F(name, _read_start)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPMC_CONTRACT(snap); \
const unsigned int idx = m_snapsh0t_mrsw_read_start(snap->core); \
return M_CONST_CAST(type, &snap->data[idx].x); \
} \
\
M_INLINE void \
M_F(name, _read_end)(snapshot_t snap, type const *old) \
{ \
M_SNAPSH0T_SPMC_CONTRACT(snap); \
M_ASSERT (old != NULL); \
const M_F(name, _aligned_type_ct) *oldx; \
oldx = M_CTYPE_FROM_FIELD(M_F(name, _aligned_type_ct), old, type, x); \
M_ASSERT (oldx >= snap->data); \
M_ASSERT (oldx < snap->data + snap->core->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER); \
M_ASSERT(snap->core->n_reader +M_SNAPSH0T_SPMC_EXTRA_BUFFER < UINT_MAX); \
const unsigned int idx = (unsigned int) (oldx - snap->data); \
m_snapsh0t_mrsw_read_end(snap->core, idx); \
} \
\
M_INLINE type * \
M_F(name, _get_write_buffer)(snapshot_t snap) \
{ \
M_SNAPSH0T_SPMC_CONTRACT(snap); \
const unsigned int idx = m_snapsh0t_mrsw_get_write_idx(snap->core); \
return &snap->data[idx].x; \
} \
\
/********************************** INTERNAL *********************************/
// MPMC is built upon SPMC
// Deferred evaluation
#define M_SNAPSH0T_MPMC_DEF_P1(arg) M_ID( M_SNAPSH0T_MPMC_DEF_P2 arg )
/* Validate the oplist before going further */
#define M_SNAPSH0T_MPMC_DEF_P2(name, type, oplist, snapshot_t)                \
  M_IF_OPLIST(oplist)(M_SNAPSH0T_MPMC_DEF_P3, M_SNAPSH0T_MPMC_DEF_FAILURE)(name, type, oplist, snapshot_t)
/* Stop processing with a compilation failure */
#define M_SNAPSH0T_MPMC_DEF_FAILURE(name, type, oplist, snapshot_t)           \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(SNAPSHOT_MPMC_DEF): the given argument is not a valid oplist: " #oplist)
/* Expand the type and the functions of a MPMC snapshot
   (first instantiate the underlying SPMC snapshot it delegates to) */
#define M_SNAPSH0T_MPMC_DEF_P3(name, type, oplist, snapshot_t)                \
  M_SNAPSH0T_SPMC_DEF_P1((M_F(name, _mrsw), type, oplist, M_F(name, _mrsw_pct))) \
  M_SNAPSH0T_MPMC_DEF_TYPE(name, type, oplist, snapshot_t)                    \
  M_CHECK_COMPATIBLE_OPLIST(name, 1, type, oplist)                            \
  M_SNAPSH0T_MPMC_DEF_CORE(name, type, oplist, snapshot_t)                    \
/* Define the types */
#define M_SNAPSH0T_MPMC_DEF_TYPE(name, type, oplist, snapshot_t)              \
                                                                              \
  typedef struct M_F(name, _s) {                                              \
    M_F(name, _mrsw_pct)  core;                                               \
  } snapshot_t[1];                                                            \
                                                                              \
  /* Define internal types for oplist */                                      \
  typedef snapshot_t M_F(name, _ct);                                          \
  typedef type       M_F(name, _subtype_ct);                                  \
/* Define the core functions of a MPMC snapshot, delegating to the
   generated SPMC (_mrsw) functions underneath. */
#define M_SNAPSH0T_MPMC_DEF_CORE(name, type, oplist, snapshot_t)              \
                                                                              \
  M_INLINE void                                                               \
  M_F(name, _init)(snapshot_t snap, size_t nReader, size_t nWriter)           \
  {                                                                           \
    /* NOTE(review): writers are accounted as extra readers of the           \
       underlying SPMC structure, minus the one implicit writer slot —       \
       confirm against upstream M*LIB documentation */                        \
    M_F(name, _mrsw_init)(snap->core, nReader + nWriter -1 );                 \
    /* Publish the initial write buffer so that every writer starts          \
       with _write_start instead of owning a preallocated buffer */           \
    unsigned int idx = snap->core->core->currentWrite;                        \
    snap->core->core->currentWrite = M_GENINT_ERROR;                          \
    m_snapsh0t_mrsw_write_end(snap->core->core, idx);                         \
  }                                                                           \
                                                                              \
  M_INLINE void                                                               \
  M_F(name, _clear)(snapshot_t snap)                                          \
  {                                                                           \
    M_F(name, _mrsw_clear)(snap->core);                                       \
  }                                                                           \
                                                                              \
  M_INLINE type *                                                             \
  M_F(name, _write_start)(snapshot_t snap)                                    \
  {                                                                           \
    M_SNAPSH0T_SPMC_CONTRACT(snap->core);                                     \
    const unsigned int idx = m_snapsh0t_mrsw_write_start(snap->core->core);   \
    return &snap->core->data[idx].x;                                          \
  }                                                                           \
                                                                              \
  M_INLINE void                                                               \
  M_F(name, _write_end)(snapshot_t snap, type *old)                           \
  {                                                                           \
    M_SNAPSH0T_SPMC_CONTRACT(snap->core);                                     \
    /* Recover the buffer index from the user's data pointer */               \
    const M_F(name, _mrsw_aligned_type_ct) *oldx;                             \
    oldx = M_CTYPE_FROM_FIELD(M_F(name, _mrsw_aligned_type_ct), old, type, x); \
    M_ASSERT (oldx >= snap->core->data);                                      \
    M_ASSERT (oldx < snap->core->data + snap->core->core->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER); \
    M_ASSERT(snap->core->core->n_reader + M_SNAPSH0T_SPMC_EXTRA_BUFFER < UINT_MAX); \
    const unsigned int idx = (unsigned int) (oldx - snap->core->data);        \
    m_snapsh0t_mrsw_write_end(snap->core->core, idx);                         \
  }                                                                           \
                                                                              \
  M_INLINE type const *                                                       \
  M_F(name, _read_start)(snapshot_t snap)                                     \
  {                                                                           \
    return M_F(name, _mrsw_read_start)(snap->core);                           \
  }                                                                           \
                                                                              \
  M_INLINE void                                                               \
  M_F(name, _read_end)(snapshot_t snap, type const *old)                      \
  {                                                                           \
    M_F(name, _mrsw_read_end)(snap->core, old);                               \
  }                                                                           \
                                                                              \
//FIXME: Evaluate the needs for the methods _set_, _init_set.
M_END_PROTECTED_CODE
/********************************** INTERNAL *********************************/
/* Export the short names (without the M_ prefix) when small names are enabled */
#if M_USE_SMALL_NAME
#define SNAPSHOT_SPSC_DEF M_SNAPSHOT_SPSC_DEF
#define SNAPSHOT_SPSC_DEF_AS M_SNAPSHOT_SPSC_DEF_AS
#define SNAPSHOT_SPMC_DEF M_SNAPSHOT_SPMC_DEF
#define SNAPSHOT_SPMC_DEF_AS M_SNAPSHOT_SPMC_DEF_AS
#define SNAPSHOT_MPMC_DEF M_SNAPSHOT_MPMC_DEF
#define SNAPSHOT_MPMC_DEF_AS M_SNAPSHOT_MPMC_DEF_AS
#define SNAPSHOT_OPLIST M_SNAPSHOT_OPLIST
#endif
#endif /* MSTARLIB_SNAPSHOT_H */

2787
components/mlib/m-string.h Normal file

File diff suppressed because it is too large Load Diff

748
components/mlib/m-thread.h Normal file
View File

@ -0,0 +1,748 @@
/*
* M*LIB - Thin Mutex & Thread wrapper
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_MUTEX_H
#define MSTARLIB_MUTEX_H
/* Auto-detect the thread backend to use if the user has not overridden it:
   1 = C11 <threads.h>, 2 = Win32, 3 = fallback (presumably pthreads — see
   the corresponding #elif below), 4 = FreeRTOS */
#ifndef M_USE_THREAD_BACKEND
# if defined(INC_FREERTOS_H)
#  define M_USE_THREAD_BACKEND 4
# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L              \
  && !defined(__STDC_NO_THREADS__)
#  define M_USE_THREAD_BACKEND 1
# elif defined(WIN32) || defined(_WIN32) || defined(__CYGWIN__)
#  define M_USE_THREAD_BACKEND 2
# else
#  define M_USE_THREAD_BACKEND 3
# endif
#endif
/****************************** C11 version ********************************/
#if M_USE_THREAD_BACKEND == 1
#include <threads.h>
#include <assert.h>
#include <stdbool.h>
#include "m-core.h"
M_BEGIN_PROTECTED_CODE
/* Define a mutex type based on C11 definition
   (array-of-1 so the object can be passed "by reference" without '&') */
typedef mtx_t m_mutex_t[1];
/* Define a condition variable type based on C11 definition */
typedef cnd_t m_cond_t[1];
/* Define a thread type based on C11 definition */
typedef thrd_t m_thread_t[1];
/* Initialize the mutex (constructor) */
M_INLINE void m_mutex_init(m_mutex_t m)
{
  int rc = mtx_init(m, mtx_plain);
  // Abort program in case of initialization failure
  // There is really nothing else to do if a mutex cannot be constructed
  M_ASSERT_INIT (rc == thrd_success, "mutex");
}
/* Clear the mutex (destructor) */
M_INLINE void m_mutex_clear(m_mutex_t m)
{
  mtx_destroy(m);
}
/* Lock the mutex (return values of mtx_lock/mtx_unlock are ignored:
   with a plain, correctly-initialized mutex they cannot meaningfully fail) */
M_INLINE void m_mutex_lock(m_mutex_t m)
{
  mtx_lock(m);
}
/* Unlock the mutex */
M_INLINE void m_mutex_unlock(m_mutex_t m)
{
  mtx_unlock(m);
}
/* Initialize the condition variable (constructor) */
M_INLINE void m_cond_init(m_cond_t c)
{
  int rc = cnd_init(c);
  // Abort program in case of initialization failure
  // There is really nothing else to do if the object cannot be constructed
  M_ASSERT_INIT (rc == thrd_success, "conditional variable");
}
/* Clear the condition variable (destructor) */
M_INLINE void m_cond_clear(m_cond_t c)
{
  cnd_destroy(c);
}
/* Signal the condition variable to at least one waiting thread */
M_INLINE void m_cond_signal(m_cond_t c)
{
  cnd_signal(c);
}
/* Signal the condition variable to all waiting threads */
M_INLINE void m_cond_broadcast(m_cond_t c)
{
  cnd_broadcast(c);
}
/* Wait for signaling the condition variable by another thread.
   The mutex m must be locked by the caller; it is atomically released
   during the wait and re-acquired before returning (cnd_wait contract). */
M_INLINE void m_cond_wait(m_cond_t c, m_mutex_t m)
{
  cnd_wait(c, m);
}
/* Create the thread (constructor) and start it */
M_INLINE void m_thread_create(m_thread_t t, void (*func)(void*), void* arg)
{
  /* NOTE(review): calling a void-returning function through an
     int(*)(void*) pointer is technically undefined behavior per C11
     (6.3.2.3p8); the thread's exit status is garbage but unused.
     Tolerated on the ABIs targeted here — confirm upstream rationale. */
  int rc = thrd_create(t, (int(*)(void*))(void(*)(void))func, arg);
  // Abort program in case of initialization failure
  M_ASSERT_INIT (rc == thrd_success, "thread");
}
/* Wait for the thread to terminate and destroy it (destructor) */
M_INLINE void m_thread_join(m_thread_t t)
{
  int rc = thrd_join(*t, NULL);
  M_ASSERT (rc == thrd_success);
  // Avoid warning about variable unused.
  (void) rc;
}
/* The thread has nothing meaningful to do.
   Inform the OS to let other threads be scheduled */
M_INLINE void m_thread_yield(void)
{
  thrd_yield();
}
/* Sleep the thread for at least usec microseconds.
   Return true if the sleep was successful (or we cannot know) */
M_INLINE bool m_thread_sleep(unsigned long long usec)
{
  struct timespec tv;
  /* Split microseconds into whole seconds + remaining nanoseconds */
  tv.tv_sec = (long) (usec / 1000000ULL);
  tv.tv_nsec = (long) ((usec % 1000000ULL) * 1000UL);
  int retval = thrd_sleep(&tv, NULL);
  return retval == 0;
}
// a helper structure for m_once_call
typedef once_flag m_once_t[1];
// Initial value for m_once_t
#define M_ONCE_INIT_VALUE { ONCE_FLAG_INIT }
// Call the function exactly once
M_INLINE void m_once_call(m_once_t o, void (*func)(void))
{
  call_once(o,func);
}
// Attribute to use to allocate a global variable to a thread.
#define M_THREAD_ATTR _Thread_local
M_END_PROTECTED_CODE
/****************************** WIN32 version ******************************/
#elif M_USE_THREAD_BACKEND == 2
/* CLANG provides some useless and wrong warnings:
 * - _WIN32_WINNT starts with '_' which is reserved by the standard;
 *   as per the MSVC compiler, it is needed to be defined by the user
 *   to define which version of windows they want to be compatible with.
 * - windows.h may be different than the case used by the file system;
 *   there is however no normalized case.
 *
 * So, these warnings have to be ignored and are disabled.
 *
 * We cannot add these warnings in M_BEGIN_PROTECTED_CODE
 * as they need to be disabled **BEFORE** including any system header
 * and m-core includes some system headers.
 * So we need to disable them explicitly here.
 */
#if defined(__clang__) && __clang_major__ >= 4
_Pragma("clang diagnostic push")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
_Pragma("clang diagnostic ignored \"-Wnonportable-system-include-path\"")
#endif
/* CriticalSection & ConditionVariable are available from Windows Vista */
#ifndef WINVER
#define WINVER        _WIN32_WINNT_VISTA
#endif
#ifndef _WIN32_WINNT
#define _WIN32_WINNT  _WIN32_WINNT_VISTA
#endif
/* Include system headers */
#include <windows.h>
#include <assert.h>
#include <stdbool.h>
#include "m-core.h"
#if defined(__clang__) && __clang_major__ >= 4
_Pragma("clang diagnostic pop")
#endif
/* Define a thread type based on WINDOWS definition
   (array-of-1 typedef so the object is passed by reference with value syntax) */
typedef HANDLE m_thread_t[1];
/* Define a mutex type based on WINDOWS definition */
typedef CRITICAL_SECTION m_mutex_t[1];
/* Define a condition variable type based on WINDOWS definition */
typedef CONDITION_VARIABLE m_cond_t[1];
/* Initialize a mutex (Constructor)*/
M_INLINE void m_mutex_init(m_mutex_t m)
{
InitializeCriticalSection(m);
}
/* Clear a mutex (destructor). Mutex shall be unlocked. */
M_INLINE void m_mutex_clear(m_mutex_t m)
{
DeleteCriticalSection(m);
}
/* Lock a mutex (blocking) */
M_INLINE void m_mutex_lock(m_mutex_t m)
{
EnterCriticalSection(m);
}
/* Unlock a mutex */
M_INLINE void m_mutex_unlock(m_mutex_t m)
{
LeaveCriticalSection(m);
}
/* Initialize a condition variable (constructor) */
M_INLINE void m_cond_init(m_cond_t c)
{
InitializeConditionVariable(c);
}
/* Clear a condition variable (destructor) */
M_INLINE void m_cond_clear(m_cond_t c)
{
(void) c; // There is no destructor for this object on Windows.
}
/* Signal a condition variable to at least one waiting thread */
M_INLINE void m_cond_signal(m_cond_t c)
{
WakeConditionVariable(c);
}
/* Signal a condition variable to all waiting threads */
M_INLINE void m_cond_broadcast(m_cond_t c)
{
WakeAllConditionVariable(c);
}
/* Wait for a condition variable.
   The critical section m shall be held; it is released during the wait
   and re-acquired before returning. */
M_INLINE void m_cond_wait(m_cond_t c, m_mutex_t m)
{
SleepConditionVariableCS(c, m, INFINITE);
}
/* Create a thread (constructor) and start it.
   NOTE(review): func is void-returning but is called through
   LPTHREAD_START_ROUTINE (DWORD WINAPI (*)(LPVOID)); this relies on the
   platform calling convention tolerating the mismatch — confirm intended. */
M_INLINE void m_thread_create(m_thread_t t, void (*func)(void*), void *arg)
{
*t = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) (uintptr_t) func, arg, 0, NULL);
M_ASSERT_INIT (*t != NULL, "thread");
}
/* Wait for the thread to terminate and destroy it (destructor) */
M_INLINE void m_thread_join(m_thread_t t)
{
DWORD dwWaitResult = WaitForSingleObject(*t, INFINITE);
(void) dwWaitResult; // Only read by the assert below
M_ASSERT (dwWaitResult == WAIT_OBJECT_0);
CloseHandle(*t);
}
/* The thread has nothing meaningful to do.
Inform the OS to let other threads be scheduled */
M_INLINE void m_thread_yield(void)
{
Sleep(0);
}
/* Sleep the thread for at least usec microseconds
Return true if the sleep was successful */
M_INLINE bool m_thread_sleep(unsigned long long usec)
{
LARGE_INTEGER ft;
// The due time is expressed in 100 ns units, computed as 10*usec below:
// bound usec so the signed multiplication cannot overflow (UB).
M_ASSERT (usec <= LLONG_MAX / 10);
// Negative value = relative due time.
ft.QuadPart = -(10LL*(long long) usec);
HANDLE hd = CreateWaitableTimer(NULL, TRUE, NULL);
M_ASSERT_INIT (hd != NULL, "timer");
// If arming the timer fails, the timer never becomes signaled and an
// INFINITE wait on it would hang forever: report failure instead.
if (SetWaitableTimer(hd, &ft, 0, NULL, NULL, 0) == 0) {
CloseHandle(hd);
return false;
}
DWORD dwWaitResult = WaitForSingleObject(hd, INFINITE);
CloseHandle(hd);
return dwWaitResult == WAIT_OBJECT_0;
}
// A helper structure for m_once_call (Windows one-time initialization)
typedef INIT_ONCE m_once_t[1];
// Initial value for m_once_t
#define M_ONCE_INIT_VALUE { INIT_ONCE_STATIC_INIT }
// Trampoline for InitOnceExecuteOnce: the user function is smuggled in
// through the Parameter pointer; decode it and call it.
M_INLINE BOOL CALLBACK m_once_callback( PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContext)
{
void (*func)(void);
(void) InitOnce;
(void) lpContext;
func = (void (*)(void))(uintptr_t) Parameter;
(*func)();
return TRUE;
}
// Call the function exactly once
M_INLINE void m_once_call(m_once_t o, void (*func)(void))
{
// Encode through uintptr_t so it matches the decoding in m_once_callback
// (the original mixed intptr_t here with uintptr_t there).
InitOnceExecuteOnce(o, m_once_callback, (void*)(uintptr_t)func, NULL);
}
#if defined(_MSC_VER)
// Attribute to use to allocate a global variable to a thread (MSVC def).
# define M_THREAD_ATTR __declspec( thread )
#else
// Attribute to use to allocate a global variable to a thread (GCC def).
# define M_THREAD_ATTR __thread
#endif
M_END_PROTECTED_CODE
/**************************** PTHREAD version ******************************/
#elif M_USE_THREAD_BACKEND == 3
#include <pthread.h>
#ifdef _POSIX_PRIORITY_SCHEDULING
#include <sched.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <stdbool.h>
#include "m-core.h"
M_BEGIN_PROTECTED_CODE
/* Define a mutex type based on PTHREAD definition
   (array-of-1 typedef so the object is passed by reference with value syntax) */
typedef pthread_mutex_t m_mutex_t[1];
/* Define a condition variable type based on PTHREAD definition */
typedef pthread_cond_t m_cond_t[1];
/* Define a thread type based on PTHREAD definition */
typedef pthread_t m_thread_t[1];
/* Initialize the mutex (constructor) */
M_INLINE void m_mutex_init(m_mutex_t m)
{
int _rc = pthread_mutex_init(m, NULL);
// Abort program in case of initialization failure
// There is really nothing else to do if a mutex cannot be constructed
M_ASSERT_INIT (_rc == 0, "mutex");
}
/* Clear the mutex (destructor). Mutex shall be unlocked. */
M_INLINE void m_mutex_clear(m_mutex_t m)
{
pthread_mutex_destroy(m);
}
/* Lock the mutex (blocking) */
M_INLINE void m_mutex_lock(m_mutex_t m)
{
pthread_mutex_lock(m);
}
/* Unlock the mutex */
M_INLINE void m_mutex_unlock(m_mutex_t m)
{
pthread_mutex_unlock(m);
}
/* Lazy lock initialization: a statically initialized mutex,
   usable without calling m_mutex_init first */
#define M_MUTEXI_INIT_VALUE { PTHREAD_MUTEX_INITIALIZER }
/* Internal function compatible with lazy lock */
M_INLINE void m_mutexi_lazy_lock(m_mutex_t m)
{
pthread_mutex_lock(m);
}
/* Initialize the condition variable (constructor) */
M_INLINE void m_cond_init(m_cond_t c)
{
int _rc = pthread_cond_init(c, NULL);
// Abort program in case of initialization failure
// There is really nothing else to do if a condition variable cannot be constructed
M_ASSERT_INIT (_rc == 0, "conditional variable");
}
/* Clear the condition variable (destructor) */
M_INLINE void m_cond_clear(m_cond_t c)
{
pthread_cond_destroy(c);
}
/* Signal a condition variable to at least a waiting thread */
M_INLINE void m_cond_signal(m_cond_t c)
{
pthread_cond_signal(c);
}
/* Signal a condition variable to all waiting threads */
M_INLINE void m_cond_broadcast(m_cond_t c)
{
pthread_cond_broadcast(c);
}
/* Waiting for a condition variable.
   The mutex m shall be held; it is released during the wait
   and re-acquired before returning. */
M_INLINE void m_cond_wait(m_cond_t c, m_mutex_t m)
{
pthread_cond_wait(c, m);
}
/* Create a thread (constructor) and start it.
   NOTE(review): func returns void but pthread expects void*(*)(void*);
   the double cast silences the warning and relies on the calling
   convention tolerating the ignored return — confirm intended. */
M_INLINE void m_thread_create(m_thread_t t, void (*func)(void*), void *arg)
{
int _rc = pthread_create(t, NULL, (void*(*)(void*))(void(*)(void))func, arg);
M_ASSERT_INIT (_rc == 0, "thread");
}
/* Wait for the thread to terminate and destroy it (destructor) */
M_INLINE void m_thread_join(m_thread_t t)
{
int _rc = pthread_join(*t, NULL);
(void)_rc; // Avoid warning about variable unused.
M_ASSERT (_rc == 0);
}
/* The thread has nothing meaningful to do.
Inform the OS to let other threads be scheduled */
M_INLINE void m_thread_yield(void)
{
#ifdef _POSIX_PRIORITY_SCHEDULING
sched_yield();
#endif
}
/* Sleep for at least usec microseconds
Return true if the sleep was successful */
M_INLINE bool m_thread_sleep(unsigned long long usec)
{
/* We don't want to use usleep or nanosleep so that
we remain compatible with strict C99 build:
select() with no file descriptors acts as a portable sleep. */
struct timeval delay;
delay.tv_sec  = (time_t) (usec / 1000000ULL);
delay.tv_usec = (suseconds_t) (usec % 1000000ULL);
return select(1, NULL, NULL, NULL, &delay) == 0;
}
// A helper structure for m_once_call (array-of-1 so it is passed by reference)
typedef pthread_once_t m_once_t[1];
// Initial value for m_once_t
#define M_ONCE_INIT_VALUE { PTHREAD_ONCE_INIT }
// Call the function exactly once, even if called concurrently from several threads.
M_INLINE void m_once_call(m_once_t o, void (*func)(void))
{
pthread_once(o,func);
}
#if defined(__GNUC__)
// Attribute to use to allocate a global variable to a thread.
# define M_THREAD_ATTR __thread
#else
# define M_THREAD_ATTR /* Not supported */
#endif
M_END_PROTECTED_CODE
/****************************** FreeRTOS version ********************************/
#elif M_USE_THREAD_BACKEND == 4
#include <stdatomic.h>
#include <semphr.h>
#include <task.h>
#include "m-core.h"
M_BEGIN_PROTECTED_CODE
/* Default value for the stack of a created task (in StackType_t words) */
#ifndef M_USE_TASK_STACK_SIZE
#define M_USE_TASK_STACK_SIZE configMINIMAL_STACK_SIZE
#endif
/* Default value for the priority tasks */
#ifndef M_USE_TASK_PRIORITY
#define M_USE_TASK_PRIORITY ( tskIDLE_PRIORITY )
#endif
/* Define a mutex type based on FreeRTOS definition */
typedef struct m_mutex_s {
SemaphoreHandle_t handle;
StaticSemaphore_t MutexBuffer;
} m_mutex_t[1];
/* Define a condition variable type based on FreeRTOS definition:
   a counting semaphore plus the number of threads currently waiting */
typedef struct m_cond_s {
SemaphoreHandle_t handle;
StaticSemaphore_t SemBuffer;
unsigned int NumThreadWaiting;
} m_cond_t[1];
/* Define a thread type based on FreeRTOS definition:
   the task itself plus a semaphore signaled on task termination */
typedef struct m_thread_s {
SemaphoreHandle_t SemHandle;
StaticSemaphore_t SemBuffer;
TaskHandle_t TaskHandle;
StaticTask_t TaskBuffer;
void (*EntryPoint)(void *);
void* ArgsEntryPoint;
StackType_t* StackBuffer;
} m_thread_t[1];
/* Initialize the mutex (constructor) */
M_INLINE void m_mutex_init(m_mutex_t m)
{
/* Create a mutex semaphore without using any dynamic allocation */
m->handle = xSemaphoreCreateMutexStatic(&m->MutexBuffer);
// It cannot fail, so we won't use M_ASSERT_INIT
M_ASSERT(m->handle);
}
/* Clear the mutex (destructor). Mutex shall be unlocked. */
M_INLINE void m_mutex_clear(m_mutex_t m)
{
vSemaphoreDelete(m->handle);
}
/* Lock the mutex (blocking) */
M_INLINE void m_mutex_lock(m_mutex_t m)
{
xSemaphoreTake(m->handle, portMAX_DELAY);
}
/* Unlock the mutex */
M_INLINE void m_mutex_unlock(m_mutex_t m)
{
xSemaphoreGive(m->handle);
}
/* Initialize the condition variable (constructor) */
M_INLINE void m_cond_init(m_cond_t c)
{
c->NumThreadWaiting = 0;
// Create a semaphore to implement the conditional variable
// Initial count is 0, maximum count is INT_MAX
c->handle = xSemaphoreCreateCountingStatic( INT_MAX, 0, &c->SemBuffer );
// It cannot fail, so we won't use M_ASSERT_INIT
M_ASSERT(c->handle);
}
/* Clear the condition variable (destructor) */
M_INLINE void m_cond_clear(m_cond_t c)
{
vSemaphoreDelete(c->handle);
}
/* Signal the condition variable to at least one waiting thread */
M_INLINE void m_cond_signal(m_cond_t c)
{
// This function is called within the mutex lock
// NumThreadWaiting doesn't need to be atomic
if (c->NumThreadWaiting > 0) {
// Wakeup one thread by posting on the semaphore
xSemaphoreGive(c->handle);
} // Otherwise there is no waiting thread, so nothing to signal
}
/* Signal the condition variable to all waiting threads */
M_INLINE void m_cond_broadcast(m_cond_t c)
{
// This function is called within the mutex lock
// NumThreadWaiting doesn't need to be atomic
if (c->NumThreadWaiting > 0) {
// Wakeup all thread by posting on the semaphore
// as many times as there are waiting threads
for(unsigned i = 0; i < c->NumThreadWaiting; i++) {
xSemaphoreGive(c->handle);
}
} // Otherwise there is no waiting thread, so nothing to signal
}
/* Wait for signaling the condition variable by another thread.
   The mutex m shall be held; it is released during the wait
   and re-acquired before returning. */
M_INLINE void m_cond_wait(m_cond_t c, m_mutex_t m)
{
// This function is called within the mutex lock
// Increment the number of waiting thread
c->NumThreadWaiting ++;
m_mutex_unlock(m);
// Wait for post in the semaphore
xSemaphoreTake(c->handle, portMAX_DELAY);
m_mutex_lock(m);
c->NumThreadWaiting --;
}
/* Task entry trampoline: run the user entry point, then signal the
   termination semaphore and suspend forever until m_thread_join deletes
   the task (FreeRTOS tasks must never return from their entry function). */
M_INLINE void m_thr3ad_wrapper( void *args)
{
struct m_thread_s *thread_ptr = args;
thread_ptr->EntryPoint(thread_ptr->ArgsEntryPoint);
// Give back the semaphore to unblock m_thread_join.
xSemaphoreGive(thread_ptr->SemHandle);
// Wait for destruction
while (true) { vTaskSuspend(NULL); }
}
/* Create the thread (constructor) and start it */
M_INLINE void m_thread_create(m_thread_t t, void (*func)(void*), void* arg)
{
// Create a semaphore to implement the final wait
t->SemHandle = xSemaphoreCreateCountingStatic( 1, 0, &t->SemBuffer );
M_ASSERT(t->SemHandle);
// Save the argument to the thread
t->EntryPoint = func;
t->ArgsEntryPoint = arg;
// Allocate the stack from the FreeRTOS heap (freed in m_thread_join)
t->StackBuffer = pvPortMalloc( sizeof (StackType_t) * M_USE_TASK_STACK_SIZE);
M_ASSERT_INIT(t->StackBuffer, "STACK");
// Create the task with a static TCB, using the stack allocated just above
t->TaskHandle = xTaskCreateStatic(m_thr3ad_wrapper, "M*LIB", M_USE_TASK_STACK_SIZE, (void*) t, M_USE_TASK_PRIORITY, t->StackBuffer, &t->TaskBuffer);
// It cannot fail, so we won't use M_ASSERT_INIT
M_ASSERT(t->TaskHandle);
}
/* Wait for the thread to terminate and destroy it (destructor):
   block until the trampoline posts the termination semaphore,
   then release the task, its stack and the semaphore. */
M_INLINE void m_thread_join(m_thread_t t)
{
xSemaphoreTake(t->SemHandle, portMAX_DELAY);
vTaskDelete(t->TaskHandle);
vPortFree(t->StackBuffer);
vSemaphoreDelete(t->SemHandle);
t->TaskHandle = 0;
t->StackBuffer = 0;
t->SemHandle = 0;
}
/* The thread has nothing meaningful to do.
Inform the OS to let other threads be scheduled */
M_INLINE void m_thread_yield(void)
{
taskYIELD();
}
/* Sleep the thread for at least usec microseconds.
Return true if the sleep was successful */
M_INLINE bool m_thread_sleep(unsigned long long usec)
{
// Convert microseconds to ticks, rounding UP: the previous truncating
// conversion slept less than requested (0 ticks for sub-tick delays),
// breaking the "at least usec" contract above.
unsigned long long usec_per_tick = (unsigned long long) portTICK_PERIOD_MS * 1000ULL;
TickType_t delay = (TickType_t) ((usec + usec_per_tick - 1ULL) / usec_per_tick);
vTaskDelay(delay);
return true;
}
// A helper structure for m_once_call:
// 0 = not called yet, 1 = call in progress, 2 = call done.
typedef struct {
atomic_int count;
} m_once_t[1];
// Initial value for m_once_t
#define M_ONCE_INIT_VALUE { { M_ATOMIC_VAR_INIT(0) } }
// Call the function exactly once
M_INLINE void m_once_call(m_once_t o, void (*func)(void))
{
if (atomic_load(&o->count) != 2) {
int n = 0;
// Try to claim the call by moving 0 -> 1; only one thread wins.
if (atomic_compare_exchange_strong( &o->count, &n, 1)) {
// First thread success
func();
atomic_store(&o->count, 2);
}
// Losers spin until the winner publishes completion.
// Wait for function call (FIXME: priority inversion possible?)
while (atomic_load(&o->count) != 2) { m_thread_yield(); }
} // Already called. Nothing to do
}
// Attribute to use to allocate a global variable to a thread.
#define M_THREAD_ATTR __thread
M_END_PROTECTED_CODE
/******************************** INVALID VALUE **********************************/
#else
# error Value of M_USE_THREAD_BACKEND is incorrect. Please see the documentation for valid usage.
#endif
// TODO: Obsolete M_LOCK macro.
/* M_LOCK macro. Allow simple locking encapsulation.
USAGE:
static M_LOCK_DECL(name);
int f(int n) {
M_LOCK(name) {
// Exclusive access
}
}
*/
/* NOTE: Either using direct support by the OS (WIN32/PTHREAD)
or using C11's ONCE mechanism to initialize the mutex lazily */
#ifdef M_MUTEXI_INIT_VALUE
/* The backend supports static mutex initialization: nothing extra needed */
# define M_LOCK_DECL(name) m_mutex_t name = M_MUTEXI_INIT_VALUE
# define M_LOCK(name) \
M_LOCKI_DO(name, M_C(local_cont_, __LINE__), m_mutexi_lazy_lock, m_mutex_unlock)
#else
/* No static initializer: generate a per-lock init function and run it
   exactly once through the m_once mechanism on first lock */
# define M_LOCK_DECL(name) \
m_mutex_t name; \
static void M_C(m_mutex_init_, name)(void) { \
m_mutex_init(name); \
} \
m_once_t M_C(m_once_, name) = M_ONCE_INIT_VALUE
# define M_LOCKI_BY_ONCE(name) \
(m_once_call(M_C(m_once_, name), M_C(m_mutex_init_, name)), \
m_mutex_lock(name), (void) 0 )
# define M_LOCK(name) \
M_LOCKI_DO(name, M_C(local_cont_, __LINE__), M_LOCKI_BY_ONCE, m_mutex_unlock)
#endif
/* for-trick: lock before the user block, unlock when leaving it */
#define M_LOCKI_DO(name, cont, lock_func, unlock_func) \
for(bool cont = true \
; cont && (lock_func (name), true); \
(unlock_func (name), cont = false))
#endif

1603
components/mlib/m-tree.h Normal file

File diff suppressed because it is too large Load Diff

578
components/mlib/m-try.h Normal file
View File

@ -0,0 +1,578 @@
/*
* M*LIB - try / catch mechanism for M*LIB
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_TRY_H
#define MSTARLIB_TRY_H
#include "m-core.h"
#include "m-thread.h"
/*
* Select mechanism to use for support of RAII and exception,
* so that for each variable defined using M_LET,
* its destructor is still called when exceptions are thrown.
* It is either the C++ try,
* or it uses a GCC or CLANG extension,
* or the standard C compliant way (much slower).
* The user can override the desired mechanism.
* 1 = C++ try/catch, 2 = CLANG blocks, 3 = GCC nested functions, 4 = strict C setjmp.
*/
#ifndef M_USE_TRY_MECHANISM
# if defined(__has_extension)
# if __has_extension(blocks)
# define M_TRY_CLANG_BLOCKS
# endif
# endif
# if defined(__cplusplus)
# define M_USE_TRY_MECHANISM 1
# elif defined(M_TRY_CLANG_BLOCKS)
# define M_USE_TRY_MECHANISM 2
# elif defined(__GNUC__) && !defined(__clang__)
# define M_USE_TRY_MECHANISM 3
# else
# define M_USE_TRY_MECHANISM 4
# endif
#endif
/*
* Start a protected section of code 'name' where all exceptions are caught
* by the associated CATCH section.
*/
#define M_TRY(name) \
M_TRY_B( M_C(m_try_bool_, name), M_C(m_try_buf_, name), name)
/*
* Catch an exception associated to the TRY block 'name' that matches the given error_code
* If error_code is 0, it catches all error codes.
* error code shall be a constant positive integer.
*/
#define M_CATCH(name, error_code) M_CATCH_B(name, error_code)
/*
* Throw an exception to the upper try block
* error_code shall be the first argument.
* Other arguments are integers or pointers stored in the exception.
* error code shall be a constant positive integer.
* There is no genericity of the exception data structure itself.
*/
#define M_THROW(...) do { \
M_STATIC_ASSERT(M_RET_ARG1 (__VA_ARGS__) != 0, \
M_LIB_NOT_A_CONSTANT_NON_NULL_INTEGER, \
"The error code shall be a non null positive constant"); \
M_STATIC_ASSERT(M_NARGS (__VA_ARGS__) <= 1+M_USE_MAX_CONTEXT, \
M_LIB_TOO_MANY_ARGUMENTS, \
"There are too many arguments for an exception."); \
M_IF_NARGS_EQ1(__VA_ARGS__)(M_THROW_1, M_THROW_N)(__VA_ARGS__); \
} while (0)
/*
* Size of the context data that are stored in an exception data structure.
*/
#ifndef M_USE_MAX_CONTEXT
#define M_USE_MAX_CONTEXT 10
#endif
/*
* The exception itself.
*
* It is POD data where every fields can be used by the user.
* It has been decided to have only one exception data structure
* to simplify error code and because :
* - using generic types is much harder in C to do (still possible)
* - it will make exceptions more usable for errors which should not
* be handled by exceptions.
*
* For C++, we need to encapsulate it in a template,
* so that it can be a unique type for each error code,
* which is needed for the catch mechanism.
* We all need to override the operator -> since the C++
* throw the type and catch the type, whereas the C back-end
* throw the type and catch a pointer to the type:
* within the catch block you are supposed to use the arrow
* operator to test the content of the exception.
*/
#if M_USE_TRY_MECHANISM == 1
namespace m_lib {
template <unsigned int N>
#endif
struct m_exception_s {
unsigned error_code; // Error code
unsigned short line; // Line number where the error was detected
unsigned short num; // Number of entries in 'context' table
const char *filename; // filename where the error was detected
intptr_t context[M_USE_MAX_CONTEXT]; // Specific context of the exception
#ifdef __cplusplus
m_exception_s<N> *operator->() { return this; }
#endif
};
#if M_USE_TRY_MECHANISM == 1
}
#endif
// Typical Error codes (TODO: add more classic?)
#define M_ERROR_MEMORY 1
#define M_ERROR_ACCESS 2
#define M_ERROR_BUSY 3
/*
* Define all globals needed by the try mechanism with a
* thread attribute. It needs to be defined once in all the program
*/
#define M_TRY_DEF_ONCE() M_TRY_DEF_ONCE_B()
/*
* Re-throw the last exception
* It shall be done in a CATCH block.
*/
#define M_RETHROW() m_rethrow()
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/*
* Define the C++ back-end.
* It is fully different from C back-end as it reuses the classic try of the C++.
* Surprisingly it has more constraints than the C one.
* error_code shall be a positive, constant integer.
* the catch all block shall always be the last block.
* at least one catch block is mandatory for each try block.
* Note that these constraints are meaningless in real code,
* and simply good behavior.
* Notice also that you won't have any access to the exception for a catch all error.
*/
#if M_USE_TRY_MECHANISM == 1
// Define the CATCH block. If error_code is 0, it shall catch all errors.
// NOTE: It will even catch non M*LIB errors.
#define M_CATCH_B(name, error_code) \
M_IF(M_BOOL(error_code)) \
(catch (m_lib::m_exception_s<error_code> &name), catch (...))
// No global to define in C++
#define M_TRY_DEF_ONCE_B() /* Nothing to do */
// Reuse the try keyword of the C++
#define M_TRY_B(cont, buf, exception) \
try
// Reuse the throw keyword of the C++
// by throwing the type m_lib::m_exception_s<error_code>
#define M_THROW_1(error_code) \
throw m_lib::m_exception_s<error_code>{ error_code, __LINE__, 0, __FILE__, { 0 } }
// Reuse the throw keyword of the C++
// by throwing the type m_lib::m_exception_s<error_code>
#define M_THROW_N(error_code, ...) \
throw m_lib::m_exception_s<error_code>{ error_code, __LINE__, \
M_NARGS(__VA_ARGS__), __FILE__, { __VA_ARGS__ } }
// Nothing to inject for a pre initialization of a M*LIB object
#define M_LET_TRY_INJECT_PRE_B(cont, oplist, name) /* Nothing to do */
// Code to inject for a post initialization of a M*LIB object
// We create a C++ object with a destructor that will call the CLEAR operator of the M*LIB object
// by using a lambda function.
// If the CLEAR operator is called naturally, we disable the destructor of the C++ object.
#define M_LET_TRY_INJECT_POST_B(cont, oplist, name) \
for(m_lib::m_regclear M_C(m_try_regclear_, name){[&](void) { M_CALL_CLEAR(oplist, name); } } \
; cont ; M_C(m_try_regclear_, name).disable() )
// M_DEFER Injection / pre initialization
#define M_DEFER_TRY_INJECT_PRE_B(cont, ...) /* Nothing to do */
// M_DEFER Injection / post initialization
// Register the stack frame and tests for the longjmp.
// In which case call the 'clear' operations (...), unstack the error list and rethrow the error.
#define M_DEFER_TRY_INJECT_POST_B(cont, ...) \
for(m_lib::m_regclear M_C(m_try_regclear_, cont){[&](void) { __VA_ARGS__; } } \
; cont ; M_C(m_try_regclear_, cont).disable() )
// Definition of the C++ object wrapper
// The registered function is called by the destructor,
// except if the disable function has been called.
#include <functional>
namespace m_lib {
class m_regclear {
std::function<void(void)> function;
bool done;
public:
inline m_regclear(const std::function<void(void)> &f) : function{f}, done{false} { }
inline void disable(void) { done = true; }
inline ~m_regclear() { if (done == false) { function(); done = true; } }
};
}
// Rethrow is simply throw without any argument
#define m_rethrow() throw
/*****************************************************************************/
/* The C back-end.
* It is fully different from the C++ back-end and is based on setjmp/longjmp
* (classic implementation).
* The main difficulty is the mechanism to register the CLEAR operators
* to call when throwing an exception.
* Contrary to the C++ back-end, it is not cost-free as it adds some
* instructions to the normal behavior of the program.
*/
#else
#if (M_USE_TRY_MECHANISM == 3)
// Use of builtin setjmp / longjmp for GCC
// There are at least twice faster at worst, and reduce stack consumption
// See https://gcc.gnu.org/onlinedocs/gcc/Nonlocal-Gotos.html
// CLANG doesn't support these builtins officially (https://groups.google.com/g/llvm-dev/c/9QgfdW23K8M)
#define m_try_setjmp(x) __builtin_setjmp(x)
#define m_try_longjmp(x,v) __builtin_longjmp(x, v)
typedef intptr_t m_try_jmp_buf[5];
#define m_try_jmp_buf m_try_jmp_buf
#else
// C compliant setjmp
#include <setjmp.h>
#define m_try_setjmp(x) setjmp(x)
#define m_try_longjmp(x,v) longjmp(x, v)
#define m_try_jmp_buf jmp_buf
#endif
// Define the CATCH block associated to the 'name' TRY to catch the exception
// associated to 'error_code' and provide 'name' as a pointer to the exception
// if the exception matches the error code.
// If error code is 0, it matches all errors.
#define M_CATCH_B(name, error_code) \
else if (m_catch( M_C(m_try_buf_, name), (error_code), &name))
// Define the operator to define nested functions (GCC) or blocks (CLANG)
#if M_USE_TRY_MECHANISM == 2
# define M_TRY_FUNC_OPERATOR ^
#else
# define M_TRY_FUNC_OPERATOR *
#endif
// Define the linked structure used to identify what is present in the C stack.
// We create for each M_TRY and each M_LET a new node in the stack that represents
// this point in the stack frame. Each nodes are linked together, so that we can
// analyze the stack frame on exception.
typedef struct m_try_s {
enum { M_STATE_TRY, M_STATE_EXCEPTION_IN_PROGRESS, M_STATE_EXCEPTION_CATCHED,
M_STATE_CLEAR_JMPBUF, M_STATE_CLEAR_CB } kind;
struct m_try_s *next;
union {
m_try_jmp_buf buf;
struct { void (M_TRY_FUNC_OPERATOR func)(void*); void *data; } clear;
} data;
} m_try_t[1];
// Define the TRY block.
// Classic usage of the for trick to push destructor on the exit path.
#define M_TRY_B(cont, buf, exception) \
for(bool cont = true ; cont ; cont = false) \
for(m_try_t buf ; cont ; m_try_clear(buf), cont = false ) \
for(const struct m_exception_s *exception = NULL; cont; cont = false, exception = exception) \
if (m_try_init(buf))
// Throw the error code (no extra context data)
#define M_THROW_1(error_code) \
m_throw( &(const struct m_exception_s) { error_code, __LINE__, 0, __FILE__, { 0 } } )
// Throw the error code with additional context data stored in the exception
#define M_THROW_N(error_code, ...) \
m_throw( &(const struct m_exception_s) { error_code, __LINE__, M_NARGS(__VA_ARGS__), __FILE__, \
{ __VA_ARGS__ } } )
M_INLINE void
m_exception_set(struct m_exception_s *out, const struct m_exception_s *in)
{
if (in != out) {
memcpy(out, in, sizeof *out);
}
}
// The global thread attribute variables and functions.
// m_global_error_list is the head of the per-thread stack of registered
// cleanup/jump frames; m_global_exception holds the in-flight exception.
extern M_THREAD_ATTR struct m_try_s *m_global_error_list;
extern M_THREAD_ATTR struct m_exception_s m_global_exception;
extern M_ATTR_NO_RETURN M_ATTR_COLD_FUNCTION void m_throw(const struct m_exception_s *exception);
// Macro to add once in one source file to define these globals:
#define M_TRY_DEF_ONCE_B() \
M_THREAD_ATTR struct m_try_s *m_global_error_list; \
M_THREAD_ATTR struct m_exception_s m_global_exception; \
\
/* Throw the given exception \
This function should be rarely called. */ \
M_ATTR_NO_RETURN M_ATTR_COLD_FUNCTION void \
m_throw(const struct m_exception_s *exception) \
{ \
/* Analyze the error list to see what has been registered */ \
struct m_try_s *e = m_global_error_list; \
while (e != NULL) { \
/* A CLEAR operator has been registered: call it */ \
if (e->kind == M_STATE_CLEAR_CB) { \
e->data.clear.func(e->data.clear.data); \
} \
else { \
/* A JUMP command has been registered. \
* Either due to the M_TRY block or \
* because of the jump to the CLEAR operator of the object to clear. */ \
M_ASSERT(e->kind == M_STATE_TRY || e->kind == M_STATE_CLEAR_JMPBUF); \
/* If the exception is already m_global_exception, it won't be copied */ \
m_exception_set(&m_global_exception, exception); \
e->kind = M_STATE_EXCEPTION_IN_PROGRESS; \
m_global_error_list = e; \
m_try_longjmp(e->data.buf, 1); \
} \
/* Next stack frame */ \
e = e->next; \
} \
/* No exception found. \
Display the information and halt program . */ \
M_RAISE_FATAL("Exception '%u' raised by (%s:%d) is not catched. Program aborted.\n", \
exception->error_code, exception->filename, exception->line); \
}
// Rethrow the error: forward the current global exception to the upper frame.
// Shall only be called while an exception is being handled.
M_INLINE void
m_rethrow(void)
{
M_ASSERT(m_global_error_list != NULL);
m_throw(&m_global_exception);
}
// Catch the error code associated to the TRY block state
// and provide a pointer to the exception (which is a global).
M_INLINE bool
m_catch(m_try_t state, unsigned error_code, const struct m_exception_s **exception)
{
M_ASSERT(m_global_error_list == state);
M_ASSERT(state->kind == M_STATE_EXCEPTION_IN_PROGRESS);
*exception = &m_global_exception;
// error_code 0 is the catch-all
if (error_code != 0 && m_global_exception.error_code != error_code)
return false;
// The exception has been caught.
state->kind = M_STATE_EXCEPTION_CATCHED;
// Unstack the try block, so that next throw command in the CATCH block
// will reach the upper TRY block.
m_global_error_list = state->next;
return true;
}
// Initialize the state to a TRY state and push it on the error list.
M_INLINE void
m_try_init(m_try_t state)
{
state->kind = M_STATE_TRY;
state->next = m_global_error_list;
m_global_error_list = state;
// setjmp needs to be done in the MACRO.
}
// Shadow the function with a macro of the same name:
// setjmp must be called directly at the point of use (its caller's frame),
// so it cannot live inside the function above.
#define m_try_init(s) \
M_LIKELY ((m_try_init(s), m_try_setjmp(((s)->data.buf)) != 1))
// Disable the current TRY block (pop it from the error list).
M_INLINE void
m_try_clear(m_try_t state)
{
// Even if there is a CATCH block and an unstack of the exception
// m_global_error_list won't be changed.
m_global_error_list = state->next;
if (M_UNLIKELY (state->kind == M_STATE_EXCEPTION_IN_PROGRESS)) {
// There was no catch for this error.
// Forward it to the upper level.
m_rethrow();
}
}
// Implement the M_LET injection macros, so that the CLEAR operator is called on exception
// Helper functions
// Each mechanism provides 3 helper functions:
// * pre: which is called before the constructor
// * post: which is called after the constructor
// * final: which is called before the destructor.
// We register a call to the CLEAR callback.
// We don't modify m_global_error_list until we have successfully called the INIT operator
// to avoid registering the CLEAR operator on exception whereas the object is not initialized yet.
// However we register the position in the stack frame now so that in case of partial initialization
// of the object (if the INIT operator of the object calls other INIT operators of composed fields),
// since partial initialization will be unstacked naturally by the composing object.
M_INLINE bool
m_try_cb_pre(m_try_t state)
{
state->kind = M_STATE_CLEAR_CB;
state->next = m_global_error_list;
return true;
}
// We register the function to call of the initialized object.
M_INLINE bool
m_try_cb_post(m_try_t state, void (M_TRY_FUNC_OPERATOR func)(void*), void *data)
{
state->data.clear.func = func;
state->data.clear.data = data;
// Only now is the frame published: the object is fully initialized.
m_global_error_list = state;
return true;
}
// The object will be cleared.
// We can pop the stack frame of the errors.
M_INLINE void
m_try_cb_final(m_try_t state)
{
m_global_error_list = state->next;
}
// Pre initialization function. Save the stack frame for a longjmp
M_INLINE bool
m_try_jump_pre(m_try_t state)
{
state->kind = M_STATE_CLEAR_JMPBUF;
state->next = m_global_error_list;
return true;
}
// Post initialization function. Register the stack frame for a longjmp
M_INLINE void
m_try_jump_post(m_try_t state)
{
m_global_error_list = state;
}
// Shadow the function with a macro of the same name:
// setjmp must be expanded at the point of use to register the position in the code.
#define m_try_jump_post(s) \
M_LIKELY ((m_try_jump_post(s), m_try_setjmp(((s)->data.buf)) != 1))
// The object will be cleared.
// We can pop the stack frame of the errors.
M_INLINE void
m_try_jump_final(m_try_t state)
{
m_global_error_list = state->next;
}
// Implement the M_LET injection macros, so that the CLEAR operator is called on exception
//
// Three C back-ends exist, selected by M_USE_TRY_MECHANISM:
//   2 = clang blocks, 3 = GCC nested functions, 4 = portable setjmp/longjmp.
// Back-ends 2 and 3 register a per-object CLEAR callback; back-end 4
// registers a jmp_buf and re-runs the cleanup inline after a longjmp.
#if M_USE_TRY_MECHANISM == 1
# error M*LIB: Internal error. C++ back-end requested within C implementation.
#elif M_USE_TRY_MECHANISM == 2
// Use of CLANG blocks
#define M_LET_TRY_INJECT_PRE_B(cont, oplist, name)                            \
  for(m_try_t M_C(m_try_state_, name); cont &&                                \
        m_try_cb_pre(M_C(m_try_state_, name) ); )
#define M_LET_TRY_INJECT_POST_B(cont, oplist, name)                           \
  for(m_try_cb_post(M_C(m_try_state_, name),                                  \
                    ^ void (void *_data) { M_GET_TYPE oplist *_t = _data; M_CALL_CLEAR(oplist, *_t); }, \
                    (void*) &name); cont; m_try_cb_final(M_C(m_try_state_, name)) )
#elif M_USE_TRY_MECHANISM == 3
// Use of GCC nested functions.
#define M_LET_TRY_INJECT_PRE_B(cont, oplist, name)                            \
  for(m_try_t M_C(m_try_state_, name); cont &&                                \
        m_try_cb_pre(M_C(m_try_state_, name) ); )
#define M_LET_TRY_INJECT_POST_B(cont, oplist, name)                           \
  for(m_try_cb_post(M_C(m_try_state_, name),                                  \
                    __extension__ ({ __extension__ void _callback (void *_data) { M_GET_TYPE oplist *_t = _data; M_CALL_CLEAR(oplist, *_t); } _callback; }), \
                    (void*) &name); cont; m_try_cb_final(M_C(m_try_state_, name)) )
#elif M_USE_TRY_MECHANISM == 4
// STD C compliant (without compiler extension): use of setjmp
// This is the basic implementation in case of compiler unknown.
// It uses setjmp/longjmp, and as such, is much slower than
// other implementations.
// M_LET Injection / pre initialization
// Initialize the stack frame.
#define M_LET_TRY_INJECT_PRE_B(cont, oplist, name)                            \
  for(m_try_t M_C(m_try_state_, name); cont &&                                \
        m_try_jump_pre(M_C(m_try_state_, name)); )
// M_LET Injection / post initialization
// Register the stack frame and tests for the longjmp.
// In which case call the CLEAR operator, unstack the error list and rethrow the error.
#define M_LET_TRY_INJECT_POST_B(cont, oplist, name)                           \
  for( ; cont ; m_try_jump_final(M_C(m_try_state_, name)))                    \
    if (m_try_jump_post(M_C(m_try_state_, name))                              \
        || (M_CALL_CLEAR(oplist, name), m_try_jump_final(M_C(m_try_state_, name)), m_rethrow(), false))
#else
# error M*LIB: Invalid value for M_USE_TRY_MECHANISM [1..4]
#endif
// M_DEFER Injection / pre initialization
// Initialize the stack frame.
// NOTE: M_DEFER always uses the setjmp back-end since there is no object
// to attach a CLEAR callback to; 'cont' (a unique counter) names the state.
#define M_DEFER_TRY_INJECT_PRE_B(cont, ...)                                   \
  for(m_try_t M_C(m_try_state_, cont); cont &&                                \
        m_try_jump_pre(M_C(m_try_state_, cont)); )
// M_DEFER Injection / post initialization
// Register the stack frame and tests for the longjmp.
// In which case call the CLEAR operator, unstack the error list and rethrow the error.
#define M_DEFER_TRY_INJECT_POST_B(cont, ...)                                  \
  for( ; cont ; m_try_jump_final(M_C(m_try_state_, cont)))                    \
    if (m_try_jump_post(M_C(m_try_state_, cont))                              \
        || (__VA_ARGS__ , m_try_jump_final(M_C(m_try_state_, cont)), m_rethrow(), false))
#endif /* cplusplus */
/*****************************************************************************/
// Macro injection for M_LET.
// If the oplist defined NOCLEAR property, we won't register this variable for clear on exception
#undef M_LET_TRY_INJECT_PRE
#define M_LET_TRY_INJECT_PRE(cont, oplist, name)                              \
  M_IF(M_GET_PROPERTY(oplist, NOCLEAR))(M_EAT, M_LET_TRY_INJECT_PRE_B)        \
  (cont, oplist, name)
#undef M_LET_TRY_INJECT_POST
#define M_LET_TRY_INJECT_POST(cont, oplist, name)                             \
  M_IF(M_GET_PROPERTY(oplist, NOCLEAR))(M_EAT, M_LET_TRY_INJECT_POST_B)       \
  (cont, oplist, name)
// Macro injection for M_DEFER.
#undef M_DEFER_TRY_INJECT_PRE
#define M_DEFER_TRY_INJECT_PRE(cont, ...) M_DEFER_TRY_INJECT_PRE_B(cont, __VA_ARGS__)
#undef M_DEFER_TRY_INJECT_POST
#define M_DEFER_TRY_INJECT_POST(cont, ...) M_DEFER_TRY_INJECT_POST_B(cont, __VA_ARGS__)
// In case of MEMORY FULL errors, throw an error instead of aborting.
#undef M_MEMORY_FULL
#define M_MEMORY_FULL(size) M_THROW(M_ERROR_MEMORY, (intptr_t)(size))
#endif

784
components/mlib/m-tuple.h Normal file
View File

@ -0,0 +1,784 @@
/*
* M*LIB - TUPLE module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_TUPLE_H
#define MSTARLIB_TUPLE_H
#include "m-core.h"
/* Define the tuple type and functions.
   USAGE:
   TUPLE_DEF2(name, [(field1, type1[, oplist1]), (field2, type2[, oplist2]), ...] ) */
#define M_TUPLE_DEF2(name, ...)                                               \
  M_TUPLE_DEF2_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define the tuple type and functions
   as the given name.
   USAGE:
   TUPLE_DEF2_AS(name, name_t, [(field1, type1[, oplist1]), (field2, type2[, oplist2]), ...] ) */
#define M_TUPLE_DEF2_AS(name, name_t, ...)                                    \
  M_BEGIN_PROTECTED_CODE                                                      \
  M_TUPL3_DEF2_P1( (name, name_t M_TUPL3_INJECT_GLOBAL(__VA_ARGS__)) )        \
  M_END_PROTECTED_CODE
/* Define the oplist of a tuple.
   USAGE: TUPLE_OPLIST(name[, oplist of the first type, ...]) */
#define M_TUPLE_OPLIST(...)                                                   \
  M_IF_NARGS_EQ1(__VA_ARGS__)                                                 \
  (M_TUPL3_OPLIST_P1((__VA_ARGS__, M_BASIC_OPLIST )),                         \
   M_TUPL3_OPLIST_P1((__VA_ARGS__ )))
/* Return an array suitable for the WIP _cmp_order function.
   As compound literals are not supported in C++,
   provide a separate definition for C++ using initializer_list
   (shall be constexpr, but only supported in C++14).
*/
#ifndef __cplusplus
#define M_TUPLE_ORDER(name, ...)                                              \
  ( (const int[]) {M_MAP2_C(M_TUPL3_ORDER_CONVERT, name, __VA_ARGS__), 0})
#else
#include <initializer_list>
namespace m_lib {
  /* C++ replacement for the C compound literal above: a fixed-size
     int array filled from an initializer_list at construction. */
  template <unsigned int N>
  struct m_tupl3_integer_va {
    int data[N];
    /*constexpr*/ inline m_tupl3_integer_va(std::initializer_list<int> init){
      int j = 0;
      for(auto i:init) {
        data[j++] = i;
      }
    }
  };
}
#define M_TUPLE_ORDER(name, ...)                                              \
  (m_lib::m_tupl3_integer_va<M_NARGS(__VA_ARGS__,0)>({M_MAP2_C(M_TUPL3_ORDER_CONVERT, name, __VA_ARGS__), 0}).data)
#endif
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/* Contract of a tuple. Nothing notable */
#define M_TUPL3_CONTRACT(tup) do {                                            \
    M_ASSERT(tup != NULL);                                                    \
  } while (0)
/* Inject the oplist within the list of arguments */
#define M_TUPL3_INJECT_GLOBAL(...)                                            \
  M_MAP(M_TUPL3_INJECT_OPLIST_A, __VA_ARGS__)
/* Transform (x, type) into (x, type, oplist) if there is global registered oplist
   or (x, type, M_BASIC_OPLIST) if there is no global one,
   or keep (x, type, oplist) if oplist was already present */
#define M_TUPL3_INJECT_OPLIST_A( duo_or_trio )                                \
  M_TUPL3_INJECT_OPLIST_B duo_or_trio
#define M_TUPL3_INJECT_OPLIST_B( f, ... )                                     \
  M_DEFERRED_COMMA                                                            \
  M_IF_NARGS_EQ1(__VA_ARGS__)( (f, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)()), (f, __VA_ARGS__) )
// Deferred evaluation
#define M_TUPL3_DEF2_P1(...) M_ID( M_TUPL3_DEF2_P2 __VA_ARGS__ )
// Test if all third argument of all arguments is an oplist
#define M_TUPL3_IF_ALL_OPLIST(...)                                            \
  M_IF(M_REDUCE(M_TUPL3_IS_OPLIST_P, M_AND, __VA_ARGS__))
// Test if the third argument of (name, type, oplist) is an oplist
#define M_TUPL3_IS_OPLIST_P(a)                                                \
  M_OPLIST_P(M_RET_ARG3 a)
/* Validate the oplist before going further */
#define M_TUPL3_DEF2_P2(name, name_t, ...)                                    \
  M_TUPL3_IF_ALL_OPLIST(__VA_ARGS__)(M_TUPL3_DEF2_P3, M_TUPL3_DEF2_FAILURE)(name, name_t, __VA_ARGS__)
/* Stop processing with a compilation failure */
#define M_TUPL3_DEF2_FAILURE(name, name_t, ...)                               \
  M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(TUPLE_DEF2): at least one of the given argument is not a valid oplist: " #__VA_ARGS__)
/* Define the tuple: expand to the type definition followed by every
   method whose required operators are available on ALL (or, where noted,
   at least ONE of) the fields' oplists. */
#define M_TUPL3_DEF2_P3(name, name_t, ...)                                    \
  M_TUPL3_DEFINE_TYPE(name, name_t, __VA_ARGS__)                              \
  M_TUPL3_DEFINE_ENUM(name, __VA_ARGS__)                                      \
  M_TUPL3_CONTROL_ALL_OPLIST(name, __VA_ARGS__)                               \
  M_TUPL3_IF_ALL(INIT, __VA_ARGS__)(M_TUPL3_DEFINE_INIT(name, __VA_ARGS__),)  \
  M_TUPL3_DEFINE_INIT_SET(name, __VA_ARGS__)                                  \
  M_TUPL3_DEFINE_INIT_SET2(name, __VA_ARGS__)                                 \
  M_TUPL3_DEFINE_SET(name, __VA_ARGS__)                                       \
  M_TUPL3_DEFINE_SET2(name, __VA_ARGS__)                                      \
  M_TUPL3_DEFINE_CLEAR(name, __VA_ARGS__)                                     \
  M_TUPL3_DEFINE_GETTER_FIELD(name, __VA_ARGS__)                              \
  M_TUPL3_DEFINE_SETTER_FIELD(name, __VA_ARGS__)                              \
  M_TUPL3_DEFINE_EMPLACE_FIELD(name, __VA_ARGS__)                             \
  M_TUPL3_IF_ONE(CMP, __VA_ARGS__)(M_TUPL3_DEFINE_CMP(name, __VA_ARGS__),)    \
  M_TUPL3_IF_ALL(CMP, __VA_ARGS__)(M_TUPL3_DEFINE_CMP_ORDER(name, __VA_ARGS__),) \
  M_TUPL3_DEFINE_CMP_FIELD(name, __VA_ARGS__)                                 \
  M_TUPL3_IF_ONE(HASH, __VA_ARGS__)(M_TUPL3_DEFINE_HASH(name, __VA_ARGS__),)  \
  M_TUPL3_IF_ONE(EQUAL, __VA_ARGS__)(M_TUPL3_DEFINE_EQUAL(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(GET_STR, __VA_ARGS__)(M_TUPL3_DEFINE_GET_STR(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(OUT_STR, __VA_ARGS__)(M_TUPL3_DEFINE_OUT_STR(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(IN_STR, __VA_ARGS__)(M_TUPL3_DEFINE_IN_STR(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(PARSE_STR, __VA_ARGS__)(M_TUPL3_DEFINE_PARSE_STR(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(OUT_SERIAL, __VA_ARGS__)(M_TUPL3_DEFINE_OUT_SERIAL(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(IN_SERIAL, __VA_ARGS__)(M_TUPL3_DEFINE_IN_SERIAL(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(INIT_MOVE, __VA_ARGS__)(M_TUPL3_DEFINE_INIT_MOVE(name, __VA_ARGS__),) \
  M_TUPL3_IF_ALL(MOVE, __VA_ARGS__)(M_TUPL3_DEFINE_MOVE(name, __VA_ARGS__),)  \
  M_TUPL3_IF_ALL(SWAP, __VA_ARGS__)(M_TUPL3_DEFINE_SWAP(name, __VA_ARGS__),)  \
  M_TUPL3_IF_ALL(RESET, __VA_ARGS__)(M_TUPL3_DEFINE_RESET(name, __VA_ARGS__),)
/* Provide order for _cmp_order */
#define M_TUPL3_ORDER_CONVERT(name, x) M_F(name, M_C(M_TUPL3_ORDER_CONVERT_, x))
#define M_TUPL3_ORDER_CONVERT_ASC(x) M_C3(_,x,_value)
#define M_TUPL3_ORDER_CONVERT_DSC(x) M_C3(_,x,_value)*-1
/* Get the field name, the type, the oplist or the methods
   based on the tuple (field, type, oplist) */
#define M_TUPL3_GET_FIELD(f,t,o) f
#define M_TUPL3_GET_TYPE(f,t,o) t
#define M_TUPL3_GET_OPLIST(f,t,o) o
#define M_TUPL3_GET_INIT(f,t,o) M_GET_INIT o
#define M_TUPL3_GET_INIT_SET(f,t,o) M_GET_INIT_SET o
#define M_TUPL3_GET_INIT_MOVE(f,t,o) M_GET_INIT_MOVE o
#define M_TUPL3_GET_MOVE(f,t,o) M_GET_MOVE o
#define M_TUPL3_GET_SET(f,t,o) M_GET_SET o
#define M_TUPL3_GET_CLEAR(f,t,o) M_GET_CLEAR o
#define M_TUPL3_GET_CMP(f,t,o) M_GET_CMP o
#define M_TUPL3_GET_HASH(f,t,o) M_GET_HASH o
#define M_TUPL3_GET_EQUAL(f,t,o) M_GET_EQUAL o
#define M_TUPL3_GET_STR(f,t,o) M_GET_GET_STR o
#define M_TUPL3_GET_OUT_STR(f,t,o) M_GET_OUT_STR o
#define M_TUPL3_GET_IN_STR(f,t,o) M_GET_IN_STR o
#define M_TUPL3_GET_OUT_SERIAL(f,t,o) M_GET_OUT_SERIAL o
#define M_TUPL3_GET_IN_SERIAL(f,t,o) M_GET_IN_SERIAL o
#define M_TUPL3_GET_PARSE_STR(f,t,o) M_GET_PARSE_STR o
#define M_TUPL3_GET_SWAP(f,t,o) M_GET_SWAP o
#define M_TUPL3_GET_RESET(f,t,o) M_GET_RESET o
/* Call the method associated to the given operator for the given parameter
   of the tuple t=(name, type, oplist).
   M_APPLY_API honors any API adaptor declared in the oplist. */
#define M_TUPL3_CALL_INIT(t, ...) M_APPLY_API(M_TUPL3_GET_INIT t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_INIT_SET(t, ...) M_APPLY_API(M_TUPL3_GET_INIT_SET t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_INIT_MOVE(t, ...) M_APPLY_API(M_TUPL3_GET_INIT_MOVE t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_MOVE(t, ...) M_APPLY_API(M_TUPL3_GET_MOVE t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_SET(t, ...) M_APPLY_API(M_TUPL3_GET_SET t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_CLEAR(t, ...) M_APPLY_API(M_TUPL3_GET_CLEAR t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_CMP(t, ...) M_APPLY_API(M_TUPL3_GET_CMP t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_HASH(t, ...) M_APPLY_API(M_TUPL3_GET_HASH t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_EQUAL(t, ...) M_APPLY_API(M_TUPL3_GET_EQUAL t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_GET_STR(t, ...) M_APPLY_API(M_TUPL3_GET_STR t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_OUT_STR(t, ...) M_APPLY_API(M_TUPL3_GET_OUT_STR t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_IN_STR(t, ...) M_APPLY_API(M_TUPL3_GET_IN_STR t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_PARSE_STR(t, ...) M_APPLY_API(M_TUPL3_GET_PARSE_STR t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_OUT_SERIAL(t, ...) M_APPLY_API(M_TUPL3_GET_OUT_SERIAL t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_IN_SERIAL(t, ...) M_APPLY_API(M_TUPL3_GET_IN_SERIAL t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_SWAP(t, ...) M_APPLY_API(M_TUPL3_GET_SWAP t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
#define M_TUPL3_CALL_RESET(t, ...) M_APPLY_API(M_TUPL3_GET_RESET t, M_TUPL3_GET_OPLIST t, __VA_ARGS__)
/* Define the type of a tuple: a one-element array of struct (M*LIB's
   usual pass-by-reference idiom), plus pointer/const-pointer aliases. */
#define M_TUPL3_DEFINE_TYPE(name, name_t, ...)                                \
  typedef struct M_F(name, _s) {                                              \
    M_MAP(M_TUPL3_DEFINE_RECUR_TYPE_ELE , __VA_ARGS__)                        \
  } name_t[1];                                                                \
                                                                              \
  typedef struct M_F(name, _s) *M_F(name, _ptr);                              \
  typedef const struct M_F(name, _s) *M_F(name, _srcptr);                     \
  /* Define internal type for oplist */                                       \
  typedef name_t M_F(name, _ct);                                              \
  /* Save constant as the number of arguments (internal) */                   \
  typedef enum {                                                              \
    M_C3(m_tupl3_, name, _num_args) = M_NARGS(__VA_ARGS__)                    \
  } M_C3(m_tupl3_, name, _num_args_ct);                                       \
  /* Save alias for the types of arguments */                                 \
  M_MAP3(M_TUPL3_DEFINE_TYPE_ELE, name, __VA_ARGS__)
#define M_TUPL3_DEFINE_TYPE_ELE(name, num, a)                                 \
  typedef M_TUPL3_GET_TYPE a M_C4(name, _type_, num, _ct);
#define M_TUPL3_DEFINE_RECUR_TYPE_ELE(a)                                      \
  M_TUPL3_GET_TYPE a M_TUPL3_GET_FIELD a ;
/* Define the basic enumerate, identifying a parameter */
#define M_TUPL3_DEFINE_ENUM(name, ...)                                        \
  typedef enum {                                                              \
    M_F(name, _first_one_val),                                                \
    M_MAP2_C(M_TUPL3_DEFINE_ENUM_ELE , name, __VA_ARGS__)                     \
  } M_F(name,_field_e);
#define M_TUPL3_DEFINE_ENUM_ELE(name, a)                                      \
  M_C4(name, _, M_TUPL3_GET_FIELD a, _value)
/* Control that all given oplists of all parameters are really oplists */
#define M_TUPL3_CONTROL_ALL_OPLIST(name, ...)                                 \
  M_MAP2(M_TUPL3_CONTROL_OPLIST, name, __VA_ARGS__)
#define M_TUPL3_CONTROL_OPLIST(name, a)                                       \
  M_CHECK_COMPATIBLE_OPLIST(name, M_TUPL3_GET_FIELD a,                        \
                            M_TUPL3_GET_TYPE a, M_TUPL3_GET_OPLIST a)
/* Define the INIT method calling the INIT method for all params.
   M_CHAIN_OBJ chains the per-field constructors so that, on exception,
   the already-initialized fields are cleared in reverse. */
#define M_TUPL3_DEFINE_INIT(name, ...)                                        \
  M_INLINE void M_F(name, _init)(M_F(name,_ct) my) {                          \
    M_MAP(M_TUPL3_DEFINE_INIT_FUNC , __VA_ARGS__) {}                          \
  }
#define M_TUPL3_DEFINE_INIT_FUNC(a)                                           \
  M_CHAIN_OBJ(M_TUPL3_GET_FIELD a, M_TUPL3_GET_OPLIST a, my -> M_TUPL3_GET_FIELD a)
/* Define the INIT_SET method calling the INIT_SET method for all params */
#define M_TUPL3_DEFINE_INIT_SET(name, ...)                                    \
  M_INLINE void M_F(name, _init_set)(M_F(name,_ct) my , M_F(name,_ct) const org) { \
    M_TUPL3_CONTRACT(org);                                                    \
    M_MAP(M_TUPL3_DEFINE_INIT_SET_FUNC , __VA_ARGS__) {}                      \
  }
#define M_TUPL3_DEFINE_INIT_SET_FUNC(a)                                       \
  M_CHAIN_OBJ(M_TUPL3_GET_FIELD a, M_TUPL3_GET_OPLIST a,                      \
              my -> M_TUPL3_GET_FIELD a , org -> M_TUPL3_GET_FIELD a )
/* Define the INIT_WITH method calling the INIT_SET method for all params. */
#define M_TUPL3_DEFINE_INIT_SET2(name, ...)                                   \
  M_INLINE void M_F(name, _init_emplace)(M_F(name,_ct) my                     \
                                         M_MAP(M_TUPL3_DEFINE_INIT_SET2_PROTO, __VA_ARGS__) \
                                         ) {                                  \
    M_MAP(M_TUPL3_DEFINE_INIT_SET2_FUNC , __VA_ARGS__) {}                     \
  }
#define M_TUPL3_DEFINE_INIT_SET2_PROTO(a)                                     \
  , M_TUPL3_GET_TYPE a const M_TUPL3_GET_FIELD a
#define M_TUPL3_DEFINE_INIT_SET2_FUNC(a)                                      \
  M_CHAIN_OBJ(M_TUPL3_GET_FIELD a, M_TUPL3_GET_OPLIST a,                      \
              my -> M_TUPL3_GET_FIELD a , M_TUPL3_GET_FIELD a )
/* Define the SET_WITH method calling the SET method for all params. */
#define M_TUPL3_DEFINE_SET(name, ...)                                         \
  M_INLINE void M_F(name, _set)(M_F(name,_ct) my ,                            \
                                M_F(name,_ct) const org) {                    \
    M_TUPL3_CONTRACT(my);                                                     \
    M_TUPL3_CONTRACT(org);                                                    \
    M_MAP(M_TUPL3_DEFINE_SET_FUNC , __VA_ARGS__)                              \
  }
#define M_TUPL3_DEFINE_SET_FUNC(a)                                            \
  M_TUPL3_CALL_SET(a, my -> M_TUPL3_GET_FIELD a , org -> M_TUPL3_GET_FIELD a );
/* Define the SET_WITH method calling the SET method for all params. */
#define M_TUPL3_DEFINE_SET2(name, ...)                                        \
  M_INLINE void M_F(name, _emplace)(M_F(name,_ct) my                          \
                                    M_MAP(M_TUPL3_DEFINE_SET2_PROTO, __VA_ARGS__) \
                                    ) {                                       \
    M_TUPL3_CONTRACT(my);                                                     \
    M_MAP(M_TUPL3_DEFINE_SET2_FUNC , __VA_ARGS__)                             \
  }
#define M_TUPL3_DEFINE_SET2_PROTO(a)                                          \
  , M_TUPL3_GET_TYPE a const M_TUPL3_GET_FIELD a
#define M_TUPL3_DEFINE_SET2_FUNC(a)                                           \
  M_TUPL3_CALL_SET(a, my -> M_TUPL3_GET_FIELD a , M_TUPL3_GET_FIELD a );
/* Define the CLEAR method calling the CLEAR method for all params. */
#define M_TUPL3_DEFINE_CLEAR(name, ...)                                       \
  M_INLINE void M_F(name, _clear)(M_F(name,_ct) my) {                         \
    M_TUPL3_CONTRACT(my);                                                     \
    M_MAP(M_TUPL3_DEFINE_CLEAR_FUNC , __VA_ARGS__)                            \
  }
#define M_TUPL3_DEFINE_CLEAR_FUNC(a)                                          \
  M_TUPL3_CALL_CLEAR(a, my -> M_TUPL3_GET_FIELD a );
/* Define the GET_AT_field & CGET_AT methods for all params. */
#define M_TUPL3_DEFINE_GETTER_FIELD(name, ...)                                \
  M_MAP3(M_TUPL3_DEFINE_GETTER_FIELD_PROTO, name, __VA_ARGS__)
#define M_TUPL3_DEFINE_GETTER_FIELD_PROTO(name, num, a)                       \
  M_INLINE M_TUPL3_GET_TYPE a * M_C3(name, _get_at_, M_TUPL3_GET_FIELD a)     \
  (M_F(name,_ct) my) {                                                        \
    M_TUPL3_CONTRACT(my);                                                     \
    return &(my->M_TUPL3_GET_FIELD a);                                        \
  }                                                                           \
  M_INLINE M_TUPL3_GET_TYPE a const * M_C3(name, _cget_at_, M_TUPL3_GET_FIELD a) \
  (M_F(name,_ct) const my) {                                                  \
    M_TUPL3_CONTRACT(my);                                                     \
    return &(my->M_TUPL3_GET_FIELD a);                                        \
  }                                                                           \
  /* Same but uses numerical index for accessing the field (internal) */      \
  M_INLINE M_TUPL3_GET_TYPE a * M_C4(m_tupl3_, name, _get_at_, num)           \
  (M_F(name,_ct) my) {                                                        \
    return &(my->M_TUPL3_GET_FIELD a);                                        \
  }                                                                           \
/* Define the SET_field methods for all params. */
#define M_TUPL3_DEFINE_SETTER_FIELD(name, ...)                                \
  M_MAP2(M_TUPL3_DEFINE_SETTER_FIELD_PROTO, name, __VA_ARGS__)
#define M_TUPL3_DEFINE_SETTER_FIELD_PROTO(name, a)                            \
  M_INLINE void M_C3(name, _set_, M_TUPL3_GET_FIELD a)                        \
  (M_F(name,_ct) my, M_TUPL3_GET_TYPE a const M_TUPL3_GET_FIELD a) {          \
    M_TUPL3_CONTRACT(my);                                                     \
    M_TUPL3_CALL_SET(a, my ->M_TUPL3_GET_FIELD a, M_TUPL3_GET_FIELD a);       \
  }
/* Define the EMPLACE_field methods for all params. */
#define M_TUPL3_DEFINE_EMPLACE_FIELD(name, ...)                               \
  M_REDUCE3(M_TUPL3_DEFINE_EMPLACE_FIELD_PROTO, M_TUPL3_DEFINE_EMPLACE_G, name, __VA_ARGS__)
#define M_TUPL3_DEFINE_EMPLACE_G(a, b) a b
#define M_TUPL3_DEFINE_EMPLACE_FIELD_PROTO(name, id, a)                       \
  M_EMPLACE_QUEUE_DEF(M_TUPL3_GET_FIELD a, M_F(name, _ct), M_C3(name, _emplace_, M_TUPL3_GET_FIELD a), M_TUPL3_GET_OPLIST a, M_TUPL3_EMPLACE_DEF)
/* Emplace body: clear the current field value, then re-init it in place
   from the emplace arguments. */
#define M_TUPL3_EMPLACE_DEF(name, name_t, function_name, oplist, init_func, exp_emplace_type) \
  M_INLINE void                                                               \
  function_name(name_t v                                                      \
                M_EMPLACE_LIST_TYPE_VAR(a, exp_emplace_type) )                \
  {                                                                           \
    M_CALL_CLEAR(oplist, v->name);                                            \
    M_EMPLACE_CALL_FUNC(a, init_func, oplist, v->name, exp_emplace_type);     \
  }
/* Define the CMP method by calling CMP methods for all params.
   Fields whose oplist lacks CMP are silently skipped (lexicographic
   comparison over the comparable fields, in declaration order). */
#define M_TUPL3_DEFINE_CMP(name, ...)                                         \
  M_INLINE int M_F(name, _cmp)(M_F(name,_ct) const e1 ,                       \
                               M_F(name,_ct) const e2) {                      \
    int i;                                                                    \
    M_TUPL3_CONTRACT(e1);                                                     \
    M_TUPL3_CONTRACT(e2);                                                     \
    M_MAP(M_TUPL3_DEFINE_CMP_FUNC_P0, __VA_ARGS__)                            \
    return 0;                                                                 \
  }
#define M_TUPL3_DEFINE_CMP_FUNC_P0(a)                                         \
  M_IF(M_TUPL3_TEST_METHOD_P(CMP, a))(M_TUPL3_DEFINE_CMP_FUNC_P1, M_EAT)(a)
#define M_TUPL3_DEFINE_CMP_FUNC_P1(a)                                         \
  i = M_TUPL3_CALL_CMP(a, e1 -> M_TUPL3_GET_FIELD a , e2 -> M_TUPL3_GET_FIELD a ); \
  if (i != 0) return i;
/* Define the CMP_ORDER method by calling CMP methods for all params
   In the right order
   The 'order' array holds field enum values (negated for descending),
   terminated by 0 — see M_TUPLE_ORDER.
   FIXME: _cmp_order is not supported by algorithm yet.
   FIXME: All oplists shall define the CMP operator or at least one?
*/
#define M_TUPL3_DEFINE_CMP_ORDER(name, ...)                                   \
  M_INLINE int M_F(name, _cmp_order)(M_F(name,_ct) const e1 ,                 \
                                     M_F(name,_ct) const e2,                  \
                                     const int order[]) {                     \
    int i, r;                                                                 \
    M_TUPL3_CONTRACT(e1);                                                     \
    M_TUPL3_CONTRACT(e2);                                                     \
    while (true) {                                                            \
      i=*order++;                                                             \
      switch (i) {                                                            \
      case 0: return 0;                                                       \
        M_MAP2(M_TUPL3_DEFINE_CMP_ORDER_FUNC , name, __VA_ARGS__)             \
      default: M_ASSUME(0);                                                   \
      }                                                                       \
    }                                                                         \
  }
#define M_TUPL3_DEFINE_CMP_ORDER_FUNC(name, a)                                \
  case M_C4(name, _, M_TUPL3_GET_FIELD a, _value):                            \
  case -M_C4(name, _, M_TUPL3_GET_FIELD a, _value):                           \
  r = M_TUPL3_CALL_CMP(a, e1 -> M_TUPL3_GET_FIELD a , e2 -> M_TUPL3_GET_FIELD a ); \
  if (r != 0) return i < 0 ? -r : r;                                          \
  break;
/* Define a CMP_field method for all given params that export a CMP method */
#define M_TUPL3_DEFINE_CMP_FIELD(name, ...)                                   \
  M_MAP2(M_TUPL3_MAP_CMP_FIELD, name, __VA_ARGS__)
#define M_TUPL3_MAP_CMP_FIELD(name, a)                                        \
  M_IF_METHOD(CMP, M_TUPL3_GET_OPLIST a)(                                     \
    M_TUPL3_DEFINE_CMP_FIELD_FUNC(name, M_TUPL3_GET_FIELD a, M_TUPL3_GET_CMP a, M_TUPL3_GET_OPLIST a), \
    )
#define M_TUPL3_DEFINE_CMP_FIELD_FUNC(name, field, func_cmp, oplist)          \
  M_INLINE int M_C3(name, _cmp_, field)(M_F(name,_ct) const e1 ,              \
                                        M_F(name,_ct) const e2) {             \
    M_TUPL3_CONTRACT(e1);                                                     \
    M_TUPL3_CONTRACT(e2);                                                     \
    return M_APPLY_API(func_cmp, oplist, e1 -> field , e2 -> field );         \
  }
/* Define a EQUAL method by calling the EQUAL methods for all params
   (fields without EQUAL are skipped). */
#define M_TUPL3_DEFINE_EQUAL(name, ...)                                       \
  M_INLINE bool M_F(name, _equal_p)(M_F(name,_ct) const e1 ,                  \
                                    M_F(name,_ct) const e2) {                 \
    bool b;                                                                   \
    M_TUPL3_CONTRACT(e1);                                                     \
    M_TUPL3_CONTRACT(e2);                                                     \
    M_MAP(M_TUPL3_DEFINE_EQUAL_FUNC_P0, __VA_ARGS__)                          \
    return true;                                                              \
  }
#define M_TUPL3_DEFINE_EQUAL_FUNC_P0(a)                                       \
  M_IF(M_TUPL3_TEST_METHOD_P(EQUAL, a))(M_TUPL3_DEFINE_EQUAL_FUNC_P1, M_EAT)(a)
#define M_TUPL3_DEFINE_EQUAL_FUNC_P1(a)                                       \
  b = M_TUPL3_CALL_EQUAL(a, e1 -> M_TUPL3_GET_FIELD a , e2 -> M_TUPL3_GET_FIELD a ); \
  if (!b) return false;
/* Define a HASH method by calling the HASH methods for all params
   (fields without HASH are skipped). */
#define M_TUPL3_DEFINE_HASH(name, ...)                                        \
  M_INLINE size_t M_F(name, _hash)(M_F(name,_ct) const e1) {                  \
    M_TUPL3_CONTRACT(e1);                                                     \
    M_HASH_DECL(hash);                                                        \
    M_MAP(M_TUPL3_DEFINE_HASH_FUNC_P0, __VA_ARGS__)                           \
    return M_HASH_FINAL (hash);                                               \
  }
#define M_TUPL3_DEFINE_HASH_FUNC_P0(a)                                        \
  M_IF(M_TUPL3_TEST_METHOD_P(HASH, a))(M_TUPL3_DEFINE_HASH_FUNC_P1, M_EAT)(a)
#define M_TUPL3_DEFINE_HASH_FUNC_P1(a)                                        \
  M_HASH_UP(hash, M_TUPL3_CALL_HASH(a, e1 -> M_TUPL3_GET_FIELD a) );
/* Define a GET_STR method by calling the GET_STR methods for all params.
   Textual form is "(f1,f2,...)"; fields are separated by commas. */
#define M_TUPL3_DEFINE_GET_STR(name, ...)                                     \
  M_INLINE void M_F(name, _get_str)(m_string_t str,                           \
                                    M_F(name,_ct) const el,                   \
                                    bool append) {                            \
    bool comma = false;                                                       \
    M_TUPL3_CONTRACT(el);                                                     \
    M_ASSERT (str != NULL);                                                   \
    (append ? m_string_cat_cstr : m_string_set_cstr) (str, "(");              \
    M_MAP(M_TUPL3_DEFINE_GET_STR_FUNC , __VA_ARGS__)                          \
    m_string_push_back (str, ')');                                            \
  }
#define M_TUPL3_DEFINE_GET_STR_FUNC(a)                                        \
  if (comma) m_string_push_back (str, ',');                                   \
  comma = true;                                                               \
  M_TUPL3_CALL_GET_STR(a, str, el -> M_TUPL3_GET_FIELD a, true);              \
/* Define a OUT_STR method by calling the OUT_STR methods for all params */
#define M_TUPL3_DEFINE_OUT_STR(name, ...)                                     \
  M_INLINE void M_F(name, _out_str)(FILE *f,                                  \
                                    M_F(name,_ct) const el) {                 \
    bool comma = false;                                                       \
    M_TUPL3_CONTRACT(el);                                                     \
    M_ASSERT (f != NULL);                                                     \
    fputc('(', f);                                                            \
    M_MAP(M_TUPL3_DEFINE_OUT_STR_FUNC , __VA_ARGS__)                          \
    fputc (')', f);                                                           \
  }
#define M_TUPL3_DEFINE_OUT_STR_FUNC(a)                                        \
  if (comma) fputc (',', f);                                                  \
  comma = true;                                                               \
  M_TUPL3_CALL_OUT_STR(a, f, el -> M_TUPL3_GET_FIELD a);                      \
/* Define a IN_STR method by calling the IN_STR methods for all params.
   Returns false on any syntax error (missing parenthesis/comma or a
   field that fails to parse). */
#define M_TUPL3_DEFINE_IN_STR(name, ...)                                      \
  M_INLINE bool M_F(name, _in_str)(M_F(name,_ct) el, FILE *f) {               \
    bool comma = false;                                                       \
    M_TUPL3_CONTRACT(el);                                                     \
    M_ASSERT (f != NULL);                                                     \
    int c = fgetc(f);                                                         \
    if (c != '(') return false;                                               \
    M_MAP(M_TUPL3_DEFINE_IN_STR_FUNC , __VA_ARGS__)                           \
    c = fgetc(f);                                                             \
    return (c == ')');                                                        \
  }
#define M_TUPL3_DEFINE_IN_STR_FUNC(a)                                         \
  if (comma) {                                                                \
    c = fgetc (f);                                                            \
    if (c != ',' || c == EOF) return false;                                   \
  }                                                                           \
  comma = true;                                                               \
  if (M_TUPL3_CALL_IN_STR(a, el -> M_TUPL3_GET_FIELD a, f) == false)          \
    return false ;                                                            \
/* Define a PARSE_STR method by calling the PARSE_STR methods for all params.
   On exit *endptr (if non-NULL) points past the last consumed character,
   whether parsing succeeded or not. */
#define M_TUPL3_DEFINE_PARSE_STR(name, ...)                                   \
  M_INLINE bool M_F(name, _parse_str)(M_F(name,_ct) el,                       \
                                      const char str[],                       \
                                      const char **endptr) {                  \
    M_TUPL3_CONTRACT(el);                                                     \
    M_ASSERT (str != NULL);                                                   \
    bool success = false;                                                     \
    bool comma = false;                                                       \
    int c = *str++;                                                           \
    if (c != '(') goto exit;                                                  \
    M_MAP(M_TUPL3_DEFINE_PARSE_STR_FUNC , __VA_ARGS__)                        \
    c = *str++;                                                               \
    success = (c == ')');                                                     \
  exit:                                                                       \
    if (endptr) *endptr = str;                                                \
    return success;                                                           \
  }
#define M_TUPL3_DEFINE_PARSE_STR_FUNC(a)                                      \
  if (comma) {                                                                \
    c = *str++;                                                               \
    if (c != ',' || c == 0) goto exit;                                        \
  }                                                                           \
  comma = true;                                                               \
  if (M_TUPL3_CALL_PARSE_STR(a, el -> M_TUPL3_GET_FIELD a, str, &str) == false) \
    goto exit ;                                                               \
/* Return the parameter name as a C string */
#define M_TUPL3_STRINGIFY_NAME(a)                                             \
  M_AS_STR(M_TUPL3_GET_FIELD a)
/* Define a OUT_SERIAL method by calling the OUT_SERIAL methods for all params */
#define M_TUPL3_DEFINE_OUT_SERIAL(name, ...)                                  \
  M_INLINE m_serial_return_code_t                                             \
  M_F(name, _out_serial)(m_serial_write_t f,                                  \
                         M_F(name,_ct) const el) {                            \
    M_TUPL3_CONTRACT(el);                                                     \
    M_ASSERT (f != NULL && f->m_interface != NULL);                           \
    const int field_max = M_NARGS(__VA_ARGS__);                               \
    /* Define a constant static table of all fields names */                  \
    static const char *const field_name[] =                                   \
      { M_REDUCE(M_TUPL3_STRINGIFY_NAME, M_ID, __VA_ARGS__) };                \
    int index = 0;                                                            \
    m_serial_local_t local;                                                   \
    m_serial_return_code_t ret;                                               \
    ret = f->m_interface->write_tuple_start(local, f);                        \
    M_MAP(M_TUPL3_DEFINE_OUT_SERIAL_FUNC , __VA_ARGS__)                       \
    M_ASSERT( index == field_max);                                            \
    ret |= f->m_interface->write_tuple_end(local, f);                         \
    return ret & M_SERIAL_FAIL;                                               \
  }
#define M_TUPL3_DEFINE_OUT_SERIAL_FUNC(a)                                     \
  f->m_interface->write_tuple_id(local, f, field_name, field_max, index);     \
  M_TUPL3_CALL_OUT_SERIAL(a, f, el -> M_TUPL3_GET_FIELD a);                   \
  index++;                                                                    \
/* Define a IN_SERIAL method by calling the IN_SERIAL methods for all params.
   Fields may arrive in any order: read_tuple_id resolves the field index,
   then the switch dispatches to the matching field deserializer. */
#define M_TUPL3_DEFINE_IN_SERIAL(name, ...)                                   \
  M_INLINE m_serial_return_code_t                                             \
  M_F(name, _in_serial)(M_F(name,_ct) el, m_serial_read_t f) {                \
    M_TUPL3_CONTRACT(el);                                                     \
    M_ASSERT (f != NULL && f->m_interface != NULL);                           \
    int index = -1;                                                           \
    const int field_max = M_NARGS(__VA_ARGS__);                               \
    static const char *const field_name[] =                                   \
      { M_REDUCE(M_TUPL3_STRINGIFY_NAME, M_ID, __VA_ARGS__) };                \
    m_serial_local_t local;                                                   \
    m_serial_return_code_t ret;                                               \
    ret = f->m_interface->read_tuple_start(local, f);                         \
    while (ret == M_SERIAL_OK_CONTINUE) {                                     \
      ret = f->m_interface->read_tuple_id(local, f, field_name, field_max, &index); \
      if (ret == M_SERIAL_OK_CONTINUE) {                                      \
        M_ASSERT (index >= 0 && index < field_max);                           \
        switch (1+index) {                                                    \
          M_MAP2(M_TUPL3_DEFINE_IN_SERIAL_FUNC , name, __VA_ARGS__)           \
        default: M_ASSUME(0);                                                 \
        }                                                                     \
        ret = (ret == M_SERIAL_OK_DONE) ? M_SERIAL_OK_CONTINUE : M_SERIAL_FAIL; \
      }                                                                       \
    }                                                                         \
    return ret;                                                               \
  }
#define M_TUPL3_DEFINE_IN_SERIAL_FUNC(name, a)                                \
  case M_C4(name, _, M_TUPL3_GET_FIELD a, _value):                            \
  ret = M_TUPL3_CALL_IN_SERIAL(a, el -> M_TUPL3_GET_FIELD a, f);              \
  break;                                                                      \
/* Define a INIT_MOVE method by calling the INIT_MOVE methods for all params
   INIT_MOVE cannot fail and cannot throw any exception */
#define M_TUPL3_DEFINE_INIT_MOVE(name, ...)                                   \
  M_INLINE void M_F(name, _init_move)(M_F(name,_ct) el, M_F(name,_ct) org) {  \
    M_TUPL3_CONTRACT(el);                                                     \
    M_MAP(M_TUPL3_DEFINE_INIT_MOVE_FUNC , __VA_ARGS__)                        \
  }
#define M_TUPL3_DEFINE_INIT_MOVE_FUNC(a)                                      \
  M_TUPL3_CALL_INIT_MOVE(a, el -> M_TUPL3_GET_FIELD a, org -> M_TUPL3_GET_FIELD a);
/* Define a MOVE method by calling the MOVE methods for all params */
#define M_TUPL3_DEFINE_MOVE(name, ...)                                        \
  M_INLINE void M_F(name, _move)(M_F(name,_ct) el, M_F(name,_ct) org) {       \
    M_TUPL3_CONTRACT(el);                                                     \
    M_MAP(M_TUPL3_DEFINE_MOVE_FUNC , __VA_ARGS__)                             \
  }
#define M_TUPL3_DEFINE_MOVE_FUNC(a)                                           \
  M_TUPL3_CALL_MOVE(a, el -> M_TUPL3_GET_FIELD a, org -> M_TUPL3_GET_FIELD a);
/* Define a SWAP method by calling the SWAP methods for all params */
#define M_TUPL3_DEFINE_SWAP(name, ...)                                        \
  M_INLINE void M_F(name, _swap)(M_F(name,_ct) el1, M_F(name,_ct) el2) {      \
    M_TUPL3_CONTRACT(el1);                                                    \
    M_TUPL3_CONTRACT(el2);                                                    \
    M_MAP(M_TUPL3_DEFINE_SWAP_FUNC , __VA_ARGS__)                             \
  }
#define M_TUPL3_DEFINE_SWAP_FUNC(a)                                           \
  M_TUPL3_CALL_SWAP(a, el1 -> M_TUPL3_GET_FIELD a, el2 -> M_TUPL3_GET_FIELD a);
/* Define a RESET method by calling the RESET methods for all params */
#define M_TUPL3_DEFINE_RESET(name, ...)                                       \
  M_INLINE void M_F(name, _reset)(M_F(name,_ct) el1) {                        \
    M_TUPL3_CONTRACT(el1);                                                    \
    M_MAP(M_TUPL3_DEFINE_RESET_FUNC , __VA_ARGS__)                            \
  }                                                                           \
/* Apply RESET on one field (comment also terminates the macro above:
   its trailing backslash must not splice into the next #define). */
#define M_TUPL3_DEFINE_RESET_FUNC(a)                                          \
  M_TUPL3_CALL_RESET(a, el1 -> M_TUPL3_GET_FIELD a);
/********************************** INTERNAL *********************************/
/* INIT_WITH macro enabling recursive INIT_WITH initialization
   tuple = { int, m_string_t, array<m_string_t> }
   USAGE:
   M_LET( (x, 2, ("John"), ( ("Bear"), ("Rabbit") )), tuple_t)
   "If you think it's simple, you're deluding yourself."
   Several pass are done:
   1) If the number of arguments doesn't match the number of oplists of the
   tuple oplist, it is assumed something is wrong. It uses the _init_emplace
   function to provide proper warning in such case.
   2) Otherwise, it checks that the number of arguments matches the number
   of arguments of the tuple definition.
   3) Mix all arguments with their associated oplists to have pair (arg, oplist),
   4) Map the following macro for each computed pair :
   4.a) If INIT_WITH macro is not defined for this pair, it uses INIT_SET
   4.b) If the argument is encapsulated with parenthesis, it uses INIT_WITH
   4.c) If the oplist property LET_AS_INIT_WITH is defined, it uses INIT_WITH
   4.d) Otherwise it uses INIT_SET.
*/
#define M_TUPL3_INIT_WITH(oplist, dest, ...)                                  \
  M_TUPL3_INIT_WITH_P1(M_GET_NAME oplist, M_GET_OPLIST oplist, dest, __VA_ARGS__)
#define M_TUPL3_INIT_WITH_P1(name, oplist_arglist, dest, ...)                 \
  M_IF(M_NOTEQUAL( M_NARGS oplist_arglist, M_NARGS (__VA_ARGS__)))            \
  (M_TUPL3_INIT_WITH_P1_FUNC, M_TUPL3_INIT_WITH_P1_MACRO)(name, oplist_arglist, dest, __VA_ARGS__)
/* Argument count mismatch: fall back to the _init_emplace function so the
   compiler reports a readable prototype error. */
#define M_TUPL3_INIT_WITH_P1_FUNC(name, oplist_arglist, dest, ...)            \
  M_F(name, _init_emplace)(dest, __VA_ARGS__)
#define M_TUPL3_INIT_WITH_P1_MACRO(name, oplist_arglist, dest, ...)           \
  ( M_STATIC_ASSERT( M_NARGS oplist_arglist == M_C3(m_tupl3_, name, _num_args), M_LIB_DIMENSION_ERROR, "The number of oplists given to TUPLE_OPLIST don't match the number of oplists used to create the tuple." ), \
    M_STATIC_ASSERT( M_NARGS(__VA_ARGS__) == M_C3(m_tupl3_, name, _num_args), M_LIB_DIMENSION_ERROR, "Missing / Too many arguments for tuple"), \
    M_MAP3(M_TUPL3_INIT_WITH_P2, (name, dest), M_OPFLAT M_MERGE_ARGLIST( oplist_arglist, (__VA_ARGS__) ) ) \
    (void) 0)
#define M_TUPL3_INIT_WITH_P2(name_dest, num, pair)                            \
  M_TUPL3_INIT_WITH_P3( M_PAIR_1 name_dest, M_PAIR_2 name_dest, num, M_PAIR_1 pair, M_PAIR_2 pair )
#define M_TUPL3_INIT_WITH_P3(name, dest, num, oplist, param)                  \
  M_IF(M_TEST_METHOD_P(INIT_WITH, oplist))(M_TUPL3_INIT_WITH_P4, M_TUPL3_INIT_WITH_SET)(name, dest, num, oplist, param)
#define M_TUPL3_INIT_WITH_SET(name, dest, num, oplist, param)                 \
  M_CALL_INIT_SET (oplist, *M_C4(m_tupl3_, name, _get_at_, num)(dest), param) ,
#define M_TUPL3_INIT_WITH_P4(name, dest, num, oplist, param)                  \
  M_IF(M_PARENTHESIS_P( param))(M_TUPL3_INIT_WITH_P5, M_TUPL3_INIT_WITH_P6)(name, dest, num, oplist, param)
#define M_TUPL3_INIT_WITH_P5(name, dest, num, oplist, param)                  \
  M_CALL_INIT_WITH(oplist, *M_C4(m_tupl3_, name, _get_at_, num)(dest), M_REMOVE_PARENTHESIS (param) ) ,
#define M_TUPL3_INIT_WITH_P6(name, dest, num, oplist, param)                  \
  M_IF(M_GET_PROPERTY(oplist, LET_AS_INIT_WITH))(M_TUPL3_INIT_WITH_P5, M_TUPL3_INIT_WITH_SET)(name, dest, num, oplist, param)
/* Macros for testing for the presence of a method in the parameter (name, type, oplist) */
#define M_TUPL3_TEST_METHOD_P(method, trio)                                   \
  M_APPLY(M_TUPL3_TEST_METHOD2_P, method, M_OPFLAT trio)
#define M_TUPL3_TEST_METHOD2_P(method, f, t, op)                              \
  M_TEST_METHOD_P(method, op)
/********************************** INTERNAL *********************************/
/* Macros for testing for the presence of a method in all the params */
#define M_TUPL3_IF_ALL(method, ...)                                           \
  M_IF(M_REDUCE2(M_TUPL3_TEST_METHOD_P, M_AND, method, __VA_ARGS__))
/* Macros for testing for the presence of a method in at least one params */
#define M_TUPL3_IF_ONE(method, ...)                                           \
  M_IF(M_REDUCE2(M_TUPL3_TEST_METHOD_P, M_OR, method, __VA_ARGS__))
// deferred evaluation
#define M_TUPL3_OPLIST_P1(arg) M_TUPL3_OPLIST_P2 arg
/* Validate the oplist before going further */
#define M_TUPL3_OPLIST_P2(name, ...)                                          \
  M_IF(M_REDUCE(M_OPLIST_P, M_AND, __VA_ARGS__))(M_TUPL3_OPLIST_P3, M_TUPL3_OPLIST_FAILURE)(name, __VA_ARGS__)
/* Prepare a clean compilation failure */
#define M_TUPL3_OPLIST_FAILURE(name, ...)                                     \
  ((M_LIB_ERROR(ONE_ARGUMENT_OF_M_TUPL3_OPLIST_IS_NOT_AN_OPLIST, name, __VA_ARGS__)))
/* Define the TUPLE oplist: each operator is exported only when every
   (or, per the method's rule above, at least one) field oplist supports it. */
#define M_TUPL3_OPLIST_P3(name, ...)                                          \
  (M_IF_METHOD_ALL(INIT, __VA_ARGS__)(INIT(M_F(name,_init)),),                \
   INIT_SET(M_F(name, _init_set)),                                            \
   INIT_WITH(API_1(M_TUPL3_INIT_WITH)),                                       \
   SET(M_F(name,_set)),                                                       \
   CLEAR(M_F(name, _clear)),                                                  \
   NAME(name),                                                                \
   TYPE(M_F(name,_ct)),                                                       \
   OPLIST( (__VA_ARGS__) ),                                                   \
   M_IF_METHOD_ALL(CMP, __VA_ARGS__)(CMP(M_F(name, _cmp)),),                  \
   M_IF_METHOD_ALL(HASH, __VA_ARGS__)(HASH(M_F(name, _hash)),),               \
   M_IF_METHOD_ALL(EQUAL, __VA_ARGS__)(EQUAL(M_F(name, _equal_p)),),          \
   M_IF_METHOD_ALL(GET_STR, __VA_ARGS__)(GET_STR(M_F(name, _get_str)),),      \
   M_IF_METHOD_ALL(PARSE_STR, __VA_ARGS__)(PARSE_STR(M_F(name, _parse_str)),), \
   M_IF_METHOD_ALL(IN_STR, __VA_ARGS__)(IN_STR(M_F(name, _in_str)),),         \
   M_IF_METHOD_ALL(OUT_STR, __VA_ARGS__)(OUT_STR(M_F(name, _out_str)),),      \
   M_IF_METHOD_ALL(IN_SERIAL, __VA_ARGS__)(IN_SERIAL(M_F(name, _in_serial)),), \
   M_IF_METHOD_ALL(OUT_SERIAL, __VA_ARGS__)(OUT_SERIAL(M_F(name, _out_serial)),), \
   M_IF_METHOD_ALL(INIT_MOVE, __VA_ARGS__)(INIT_MOVE(M_F(name, _init_move)),), \
   M_IF_METHOD_ALL(MOVE, __VA_ARGS__)(MOVE(M_F(name, _move)),),               \
   M_IF_METHOD_ALL(SWAP, __VA_ARGS__)(SWAP(M_F(name, _swap)),),               \
   M_IF_METHOD_ALL(RESET, __VA_ARGS__)(RESET(M_F(name, _reset)),),            \
   EMPLACE_TYPE( ( M_REDUCE2(M_TUPL3_OPLIST_SUBTYPE, M_ID, name, M_SEQ(1, M_NARGS(__VA_ARGS__))) ) ) \
   )
/* Support for EMPLACE_TYPE in OPLIST. It refers the created internal type alias */
#define M_TUPL3_OPLIST_SUBTYPE(name, num)                                     \
  M_C4(name, _type_, num, _ct)
/********************************** INTERNAL *********************************/
/* Short (historical) aliases, kept for backward compatibility when
   M_USE_SMALL_NAME is enabled. */
#if M_USE_SMALL_NAME
#define TUPLE_DEF2 M_TUPLE_DEF2
#define TUPLE_DEF2_AS M_TUPLE_DEF2_AS
#define TUPLE_OPLIST M_TUPLE_OPLIST
#define TUPLE_ORDER M_TUPLE_ORDER
#endif /* M_USE_SMALL_NAME */
#endif /* NOTE(review): presumably closes the file's include guard, opened above this chunk — confirm */

819
components/mlib/m-variant.h Normal file
View File

@ -0,0 +1,819 @@
/*
* M*LIB - VARIANT module
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_VARIANT_H
#define MSTARLIB_VARIANT_H
#include "m-core.h"
/* Define the variant type and functions.
USAGE:
VARIANT_DEF2(name, [(field1, type1, oplist1), (field2, type2, oplist2), ...] ) */
/* Entry point: forwards to M_VARIANT_DEF2_AS with the default type name name_t. */
#define M_VARIANT_DEF2(name, ...) \
M_VARIANT_DEF2_AS(name, M_F(name,_t), __VA_ARGS__)
/* Define the variant type and functions
as the given name_t
USAGE:
VARIANT_DEF2_AS(name, name_t, [(field1, type1, oplist1), (field2, type2, oplist2), ...] ) */
/* Each (field, type) duo gets its oplist injected (global registration or
   M_BASIC_OPLIST) before deferred expansion in M_VAR1ANT_DEF2_P1. */
#define M_VARIANT_DEF2_AS(name, name_t, ...) \
M_BEGIN_PROTECTED_CODE \
M_VAR1ANT_DEF2_P1( (name, name_t M_VAR1ANT_INJECT_GLOBAL(__VA_ARGS__)) ) \
M_END_PROTECTED_CODE
/* Define the oplist of a variant.
USAGE: VARIANT_OPLIST(name[, oplist of the first type, ...]) */
/* With a single argument (only the name), M_BASIC_OPLIST is assumed. */
#define M_VARIANT_OPLIST(...) \
M_IF_NARGS_EQ1(__VA_ARGS__) \
(M_VAR1ANT_OPLIST_P1((__VA_ARGS__, M_BASIC_OPLIST)), \
M_VAR1ANT_OPLIST_P1((__VA_ARGS__ )))
/*****************************************************************************/
/********************************** INTERNAL *********************************/
/*****************************************************************************/
/* Contract of a variant. */
/* Checks pointer validity and that the type tag stays within
   [ <name>_EMPTY .. <name>_MAX_TYPE ]. */
#define M_VAR1ANT_CONTRACT(name, my) do { \
M_ASSERT(my != NULL); \
M_ASSERT(my->type >= M_F(name, _EMPTY)); \
M_ASSERT(my->type <= (enum M_F(name, _enum)) M_F(name, _MAX_TYPE)); \
} while (0)
/* Inject the oplist within the list of arguments */
#define M_VAR1ANT_INJECT_GLOBAL(...) \
M_MAP(M_VAR1ANT_INJECT_OPLIST_A, __VA_ARGS__)
/* Transform (x, type) into (x, type, oplist) if there is global registered oplist
or (x, type, M_BASIC_OPLIST) if there is no global one,
or keep (x, type, oplist) if oplist was already present */
#define M_VAR1ANT_INJECT_OPLIST_A( duo_or_trio ) \
M_VAR1ANT_INJECT_OPLIST_B duo_or_trio
/* Emits a deferred comma then the completed trio. */
#define M_VAR1ANT_INJECT_OPLIST_B( f, ... ) \
M_DEFERRED_COMMA \
M_IF_NARGS_EQ1(__VA_ARGS__)( (f, __VA_ARGS__, M_GLOBAL_OPLIST_OR_DEF(__VA_ARGS__)()), (f, __VA_ARGS__) )
// Deferred evaluation
#define M_VAR1ANT_DEF2_P1(...) M_ID( M_VAR1ANT_DEF2_P2 __VA_ARGS__ )
// Test if all third argument of all arguments is an oplist
#define M_VAR1ANT_IF_ALL_OPLIST(...) \
M_IF(M_REDUCE(M_VAR1ANT_IS_OPLIST_P, M_AND, __VA_ARGS__))
// Test if the third argument is an oplist
#define M_VAR1ANT_IS_OPLIST_P(a) \
M_OPLIST_P(M_RET_ARG3 a)
/* Validate the oplist before going further */
#define M_VAR1ANT_DEF2_P2(name, name_t, ...) \
M_VAR1ANT_IF_ALL_OPLIST(__VA_ARGS__)(M_VAR1ANT_DEF2_P3, M_VAR1ANT_DEF2_FAILURE)(name, name_t, __VA_ARGS__)
/* Stop processing with a compilation failure */
#define M_VAR1ANT_DEF2_FAILURE(name, name_t, ...) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(VARIANT_DEF2): at least one of the given argument is not a valid oplist: " #__VA_ARGS__)
/* Define the variant */
/* Expands to the complete set of types and functions of the variant.
   Optional functions (hash, equal, string/serial I/O, move, swap, ...)
   are generated only when every field's oplist provides the required
   method(s) — see M_VAR1ANT_IF_ALL / M_VAR1ANT_IF_ALL2. */
#define M_VAR1ANT_DEF2_P3(name, name_t, ...) \
M_VAR1ANT_DEFINE_TYPE(name, name_t, __VA_ARGS__) \
M_VAR1ANT_CONTROL_ALL_OPLIST(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_INIT(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_CLEAR(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_INIT_SET(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_SET(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_EMPLACE(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_TEST_P(name, __VA_ARGS__) \
M_VAR1ANT_IF_ALL(INIT, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_INIT_FIELD(name, __VA_ARGS__),) \
M_VAR1ANT_DEFINE_INIT_SETTER_FIELD(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_SETTER_FIELD(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_GETTER_FIELD(name, __VA_ARGS__) \
M_VAR1ANT_DEFINE_RESET_FUNC(name, __VA_ARGS__) \
M_VAR1ANT_IF_ALL(HASH, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_HASH(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(EQUAL, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_EQUAL(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(GET_STR, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_GET_STR(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL2(PARSE_STR, INIT, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_PARSE_STR(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(OUT_STR, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_OUT_STR(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL2(IN_STR, INIT, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_IN_STR(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(OUT_SERIAL, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_OUT_SERIAL(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL2(IN_SERIAL, INIT, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_IN_SERIAL(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(INIT_MOVE, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_INIT_MOVE(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(INIT_MOVE, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_MOVE(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(INIT_MOVE, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_MOVER(name, __VA_ARGS__),) \
M_VAR1ANT_IF_ALL(SWAP, __VA_ARGS__) \
(M_VAR1ANT_DEFINE_SWAP(name, __VA_ARGS__),)
/* Get the field name, the type, the oplist or the methods
based on the variant (field, type, oplist) */
#define M_VAR1ANT_GET_FIELD(f,t,o) f
#define M_VAR1ANT_GET_TYPE(f,t,o) t
#define M_VAR1ANT_GET_OPLIST(f,t,o) o
#define M_VAR1ANT_GET_INIT(f,t,o) M_GET_INIT o
#define M_VAR1ANT_GET_INIT_SET(f,t,o) M_GET_INIT_SET o
#define M_VAR1ANT_GET_INIT_MOVE(f,t,o) M_GET_INIT_MOVE o
#define M_VAR1ANT_GET_MOVE(f,t,o) M_GET_MOVE o
#define M_VAR1ANT_GET_SET(f,t,o) M_GET_SET o
#define M_VAR1ANT_GET_CLEAR(f,t,o) M_GET_CLEAR o
#define M_VAR1ANT_GET_CMP(f,t,o) M_GET_CMP o
#define M_VAR1ANT_GET_HASH(f,t,o) M_GET_HASH o
#define M_VAR1ANT_GET_EQUAL(f,t,o) M_GET_EQUAL o
#define M_VAR1ANT_GET_STR(f,t,o) M_GET_GET_STR o
#define M_VAR1ANT_GET_PARSE_STR(f,t,o) M_GET_PARSE_STR o
#define M_VAR1ANT_GET_OUT_STR(f,t,o) M_GET_OUT_STR o
#define M_VAR1ANT_GET_IN_STR(f,t,o) M_GET_IN_STR o
#define M_VAR1ANT_GET_OUT_SERIAL(f,t,o) M_GET_OUT_SERIAL o
#define M_VAR1ANT_GET_IN_SERIAL(f,t,o) M_GET_IN_SERIAL o
#define M_VAR1ANT_GET_SWAP(f,t,o) M_GET_SWAP o
/* Call the methods through API */
/* 't' is the (field, type, oplist) trio; M_APPLY_API resolves the method
   and its calling convention from the trio's oplist. */
#define M_VAR1ANT_CALL_INIT(t, ...) M_APPLY_API(M_VAR1ANT_GET_INIT t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_INIT_SET(t, ...) M_APPLY_API(M_VAR1ANT_GET_INIT_SET t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_INIT_MOVE(t, ...) M_APPLY_API(M_VAR1ANT_GET_INIT_MOVE t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_MOVE(t, ...) M_APPLY_API(M_VAR1ANT_GET_MOVE t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_SET(t, ...) M_APPLY_API(M_VAR1ANT_GET_SET t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_CLEAR(t, ...) M_APPLY_API(M_VAR1ANT_GET_CLEAR t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_CMP(t, ...) M_APPLY_API(M_VAR1ANT_GET_CMP t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_HASH(t, ...) M_APPLY_API(M_VAR1ANT_GET_HASH t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_EQUAL(t, ...) M_APPLY_API(M_VAR1ANT_GET_EQUAL t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_GET_STR(t, ...) M_APPLY_API(M_VAR1ANT_GET_STR t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_PARSE_STR(t, ...) M_APPLY_API(M_VAR1ANT_GET_PARSE_STR t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_OUT_STR(t, ...) M_APPLY_API(M_VAR1ANT_GET_OUT_STR t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_IN_STR(t, ...) M_APPLY_API(M_VAR1ANT_GET_IN_STR t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_OUT_SERIAL(t, ...) M_APPLY_API(M_VAR1ANT_GET_OUT_SERIAL t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_IN_SERIAL(t, ...) M_APPLY_API(M_VAR1ANT_GET_IN_SERIAL t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
#define M_VAR1ANT_CALL_SWAP(t, ...) M_APPLY_API(M_VAR1ANT_GET_SWAP t, M_VAR1ANT_GET_OPLIST t, __VA_ARGS__)
/* Define the type */
#define M_VAR1ANT_DEFINE_TYPE(name, name_t, ...) \
/* Define enum of all types of the variant */ \
enum M_F(name, _enum) { M_F(name, _EMPTY) \
M_MAP2(M_VAR1ANT_DEFINE_UNION_ELE, name, __VA_ARGS__) \
}; \
/* Define enum equal to the number of types of the variant */ \
enum M_F(name, _enum_max) { \
M_F(name, _MAX_TYPE) = M_NARGS(__VA_ARGS__) \
}; \
/* Define the variant */ \
typedef struct M_F(name, _s) { \
enum M_F(name, _enum) type; \
union { \
M_MAP(M_VAR1ANT_DEFINE_TYPE_ELE , __VA_ARGS__) \
} value; \
} name_t[1]; \
\
typedef struct M_F(name, _s) *M_F(name, _ptr); \
typedef const struct M_F(name, _s) *M_F(name, _srcptr); \
/* Define internal type for oplist */ \
typedef name_t M_F(name, _ct);
/* One enum constant per field: <name>_<field>_value. */
#define M_VAR1ANT_DEFINE_UNION_ELE(name, a) \
, M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value)
/* One union member per field: <type> <field>; */
#define M_VAR1ANT_DEFINE_TYPE_ELE(a) \
M_VAR1ANT_GET_TYPE a M_VAR1ANT_GET_FIELD a ;
/* Control that all given oplists of all parameters are really oplists */
#define M_VAR1ANT_CONTROL_ALL_OPLIST(name, ...) \
M_MAP2(M_VAR1ANT_CONTROL_OPLIST, name, __VA_ARGS__)
#define M_VAR1ANT_CONTROL_OPLIST(name, a) \
M_CHECK_COMPATIBLE_OPLIST(name, M_VAR1ANT_GET_FIELD a, \
M_VAR1ANT_GET_TYPE a, M_VAR1ANT_GET_OPLIST a)
/* Define the INIT function. Init the variant to empty */
#define M_VAR1ANT_DEFINE_INIT(name, ...) \
M_INLINE void M_F(name, _init)(M_F(name,_ct) my) { \
my->type = M_F(name, _EMPTY); \
}
/* Define the INIT_SET function. */
/* Copy-construct: switch on the source tag and INIT_SET the active member. */
#define M_VAR1ANT_DEFINE_INIT_SET(name, ...) \
M_INLINE void M_F(name, _init_set)(M_F(name,_ct) my , \
M_F(name,_ct) const org) { \
M_VAR1ANT_CONTRACT(name, org); \
my->type = org->type; \
switch (org->type) { \
M_MAP2(M_VAR1ANT_DEFINE_INIT_SET_FUNC, name, __VA_ARGS__) \
case M_F(name, _EMPTY): /* fallthrough */ \
default: M_ASSUME(org->type == M_F(name, _EMPTY)); break; \
} \
}
#define M_VAR1ANT_DEFINE_INIT_SET_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_VAR1ANT_CALL_INIT_SET(a, my -> value. M_VAR1ANT_GET_FIELD a , \
org -> value.M_VAR1ANT_GET_FIELD a ); \
break;
/* Define the SET function. */
/* Assign: clear+reconstruct when the tags differ, direct SET otherwise. */
#define M_VAR1ANT_DEFINE_SET(name, ...) \
M_INLINE void M_F(name, _set)(M_F(name,_ct) my , \
M_F(name,_ct) const org) { \
M_VAR1ANT_CONTRACT(name, my); \
M_VAR1ANT_CONTRACT(name, org); \
if (my->type != org->type) { \
/* Different types: clear previous one and create new */ \
M_F(name, _clear)(my); \
M_F(name, _init_set)(my, org); \
} else { \
/* Same type: optimize the set */ \
switch (org->type) { \
M_MAP2(M_VAR1ANT_DEFINE_SET_FUNC, name, __VA_ARGS__) \
case M_F(name, _EMPTY): /* fallthrough */ \
default: M_ASSUME(org->type == M_F(name, _EMPTY)); break; \
} \
} \
}
#define M_VAR1ANT_DEFINE_SET_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_VAR1ANT_CALL_SET(a, my -> value. M_VAR1ANT_GET_FIELD a , \
org -> value.M_VAR1ANT_GET_FIELD a ); \
break;
/* Define the CLEAR function. */
/* Destroys the active member (if any) and resets the tag to EMPTY. */
#define M_VAR1ANT_DEFINE_CLEAR(name, ...) \
M_INLINE void M_F(name, _clear)(M_F(name,_ct) my) { \
M_VAR1ANT_CONTRACT(name, my); \
switch (my->type) { \
M_MAP2(M_VAR1ANT_DEFINE_CLEAR_FUNC, name, __VA_ARGS__) \
case M_F(name, _EMPTY): /* fallthrough */ \
default: M_ASSUME(my->type == M_F(name, _EMPTY)); break; \
} \
my->type = M_F(name, _EMPTY); \
}
#define M_VAR1ANT_DEFINE_CLEAR_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_VAR1ANT_CALL_CLEAR(a, my -> value. M_VAR1ANT_GET_FIELD a); \
break;
/* Define the TEST_P function. */
/* Generates <name>_empty_p, <name>_type and one <name>_<field>_p per field. */
#define M_VAR1ANT_DEFINE_TEST_P(name, ...) \
M_INLINE bool M_F(name, _empty_p)(M_F(name,_ct) const my) { \
M_VAR1ANT_CONTRACT(name, my); \
return my->type == M_F(name, _EMPTY); \
} \
M_INLINE enum M_F(name, _enum) \
M_F(name, _type)(M_F(name,_ct) my) { \
M_VAR1ANT_CONTRACT(name, my); \
return my->type; \
} \
M_MAP2(M_VAR1ANT_DEFINE_TEST_FUNC, name, __VA_ARGS__)
#define M_VAR1ANT_DEFINE_TEST_FUNC(name, a) \
M_INLINE bool \
M_C4(name, _, M_VAR1ANT_GET_FIELD a, _p)(M_F(name,_ct) const my) { \
M_VAR1ANT_CONTRACT(name, my); \
return my->type == M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
}
/* Define the INIT function. */
/* <name>_init_<field>: default-construct the variant as the given field. */
#define M_VAR1ANT_DEFINE_INIT_FIELD(name, ...) \
M_MAP2(M_VAR1ANT_DEFINE_INIT_FIELD_FUNC, name, __VA_ARGS__)
#define M_VAR1ANT_DEFINE_INIT_FIELD_FUNC(name, a) \
M_INLINE void \
M_C3(name, _init_, M_VAR1ANT_GET_FIELD a)(M_F(name,_ct) my) { \
/* Reinit variable with the given value */ \
my->type = M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
M_VAR1ANT_CALL_INIT(a, my -> value. M_VAR1ANT_GET_FIELD a); \
}
/* Define the INIT_SET of a given type function. */
#define M_VAR1ANT_DEFINE_INIT_SETTER_FIELD(name, ...) \
M_MAP2(M_VAR1ANT_DEFINE_INIT_SETTER_FIELD_FUNC, name, __VA_ARGS__)
#define M_VAR1ANT_DEFINE_INIT_SETTER_FIELD_FUNC(name, a) \
M_INLINE void \
M_C3(name, _init_set_, M_VAR1ANT_GET_FIELD a)(M_F(name,_ct) my, \
M_VAR1ANT_GET_TYPE a const M_VAR1ANT_GET_FIELD a ) { \
my->type = M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
M_VAR1ANT_CALL_INIT_SET(a, my -> value. M_VAR1ANT_GET_FIELD a, \
M_VAR1ANT_GET_FIELD a); \
}
/* Define the SET of a given type function. */
/* <name>_set_<field>: optimized SET when already the same field, else
   clear + INIT_SET. */
#define M_VAR1ANT_DEFINE_SETTER_FIELD(name, ...) \
M_MAP2(M_VAR1ANT_DEFINE_SETTER_FIELD_FUNC, name, __VA_ARGS__)
#define M_VAR1ANT_DEFINE_SETTER_FIELD_FUNC(name, a) \
M_INLINE void \
M_C3(name, _set_, M_VAR1ANT_GET_FIELD a)(M_F(name,_ct) my, \
M_VAR1ANT_GET_TYPE a const M_VAR1ANT_GET_FIELD a ) { \
M_VAR1ANT_CONTRACT(name, my); \
if (my->type == M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value) ) { \
M_VAR1ANT_CALL_SET(a, my -> value. M_VAR1ANT_GET_FIELD a, \
M_VAR1ANT_GET_FIELD a); \
} else { \
M_F(name, _clear)(my); \
/* Reinit variable with the given value */ \
my->type = M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
M_VAR1ANT_CALL_INIT_SET(a, my -> value. M_VAR1ANT_GET_FIELD a, \
M_VAR1ANT_GET_FIELD a); \
} \
}
/* Define the GET_field of a given type function. */
/* <name>_get_<field> / <name>_cget_<field>: pointer to the member,
   or NULL if the variant currently holds another field. */
#define M_VAR1ANT_DEFINE_GETTER_FIELD(name, ...) \
M_MAP2(M_VAR1ANT_DEFINE_GETTER_FIELD_FUNC, name, __VA_ARGS__)
#define M_VAR1ANT_DEFINE_GETTER_FIELD_FUNC(name, a) \
M_INLINE M_VAR1ANT_GET_TYPE a * \
M_C3(name, _get_, M_VAR1ANT_GET_FIELD a)(M_F(name,_ct) my) { \
M_VAR1ANT_CONTRACT(name, my); \
if (my->type != M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value) ) { \
return NULL; \
} \
return &my -> value . M_VAR1ANT_GET_FIELD a; \
} \
\
M_INLINE M_VAR1ANT_GET_TYPE a const * \
M_C3(name, _cget_, M_VAR1ANT_GET_FIELD a)(M_F(name,_ct) const my) { \
M_VAR1ANT_CONTRACT(name, my); \
if (my->type != M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value) ) { \
return NULL; \
} \
return &my -> value . M_VAR1ANT_GET_FIELD a; \
}
/* Define the EMPLACE of a given type function.
NOTE: Use of a variant of MAP3 because of recursive use of MAP2/MAP3/REDUCE2 !
*/
#define M_VAR1ANT_DEFINE_EMPLACE(name, ...) \
M_VAR1ANT_MAP3_ALT(M_VAR1ANT_DEFINE_EMPLACE_FUNC, name, __VA_ARGS__)
// Variant of M_MAP3 using M_REDUCE3
#define M_VAR1ANT_MAP3_ALT(f, d, ...) M_REDUCE3(f, M_VAR1ANT_MAP3_ALT_ID, d, __VA_ARGS__)
#define M_VAR1ANT_MAP3_ALT_ID(a, b) a b
/* Generates both <name>_init_emplace_<field> and <name>_emplace_<field>. */
#define M_VAR1ANT_DEFINE_EMPLACE_FUNC(name, num, a) \
M_EMPLACE_QUEUE_DEF( (name, M_VAR1ANT_GET_FIELD a), M_F(name,_ct), M_C3(name, _init_emplace_, M_VAR1ANT_GET_FIELD a), M_VAR1ANT_GET_OPLIST a, M_VAR1ANT_DEFINE_INIT_EMPLACE_DEF) \
M_EMPLACE_QUEUE_DEF( (name, M_VAR1ANT_GET_FIELD a), M_F(name,_ct), M_C3(name, _emplace_, M_VAR1ANT_GET_FIELD a), M_VAR1ANT_GET_OPLIST a, M_VAR1ANT_DEFINE_EMPLACE_DEF)
/* Emplace on an uninitialized variant (no prior clear). */
#define M_VAR1ANT_DEFINE_INIT_EMPLACE_DEF(name, name_t, function_name, oplist, init_func, exp_emplace_type) \
M_INLINE void \
function_name(name_t my \
M_EMPLACE_LIST_TYPE_VAR(ab, exp_emplace_type) ) \
{ \
my->type = M_C4(M_PAIR_1 name, _, M_PAIR_2 name, _value); \
M_EMPLACE_CALL_FUNC(ab, init_func, oplist, my -> value. M_PAIR_2 name, exp_emplace_type); \
} \
/* end of M_VAR1ANT_DEFINE_INIT_EMPLACE_DEF (terminates the continuation) */
/* Emplace on an initialized variant: clears the previous value first. */
#define M_VAR1ANT_DEFINE_EMPLACE_DEF(name, name_t, function_name, oplist, init_func, exp_emplace_type) \
M_INLINE void \
function_name(name_t my \
M_EMPLACE_LIST_TYPE_VAR(ab, exp_emplace_type) ) \
{ \
/* No optimization done */ \
M_C(M_PAIR_1 name, _clear)(my); \
my->type = M_C4(M_PAIR_1 name, _, M_PAIR_2 name, _value); \
M_EMPLACE_CALL_FUNC(ab, init_func, oplist, my -> value. M_PAIR_2 name, exp_emplace_type); \
} \
/* end of M_VAR1ANT_DEFINE_EMPLACE_DEF (terminates the continuation) */
/* Define the EQUAL_P function. */
/* Two variants are equal iff their tags match and, when non-empty,
   the active members compare equal. */
#define M_VAR1ANT_DEFINE_EQUAL(name, ...) \
M_INLINE bool M_F(name, _equal_p)(M_F(name,_ct) const e1 , \
M_F(name,_ct) const e2) { \
bool b; \
M_VAR1ANT_CONTRACT(name, e1); \
M_VAR1ANT_CONTRACT(name, e2); \
if (e1->type != e2->type) return false; \
switch (e1->type) { \
case M_F(name, _EMPTY): break; \
M_MAP2(M_VAR1ANT_DEFINE_EQUAL_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
return true; \
}
#define M_VAR1ANT_DEFINE_EQUAL_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
b = M_VAR1ANT_CALL_EQUAL(a, e1 -> value . M_VAR1ANT_GET_FIELD a , \
e2 -> value . M_VAR1ANT_GET_FIELD a ); \
return b; \
break;
/* Define the HASH function. */
/* Folds the tag then the active member's hash into the accumulator. */
#define M_VAR1ANT_DEFINE_HASH(name, ...) \
M_INLINE size_t M_F(name, _hash)(M_F(name,_ct) const e1) { \
M_VAR1ANT_CONTRACT(name, e1); \
M_HASH_DECL(hash); \
M_HASH_UP (hash, (unsigned int) (e1 -> type)); \
switch (e1->type) { \
case M_F(name, _EMPTY): break; \
M_MAP2(M_VAR1ANT_DEFINE_HASH_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
return M_HASH_FINAL (hash); \
}
#define M_VAR1ANT_DEFINE_HASH_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_HASH_UP(hash, M_VAR1ANT_CALL_HASH(a, e1 -> value . M_VAR1ANT_GET_FIELD a) ); \
break;
/* Define the INIT_MOVE function. */
/* Steals the active member from org; org is left EMPTY afterwards. */
#define M_VAR1ANT_DEFINE_INIT_MOVE(name, ...) \
M_INLINE void \
M_F(name, _init_move)(M_F(name,_ct) el, M_F(name,_ct) org) { \
M_VAR1ANT_CONTRACT(name, org); \
el -> type = org -> type; \
switch (el->type) { \
case M_F(name, _EMPTY): break; \
M_MAP2(M_VAR1ANT_DEFINE_INIT_MOVE_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
org -> type = M_F(name, _EMPTY); \
}
#define M_VAR1ANT_DEFINE_INIT_MOVE_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_VAR1ANT_CALL_INIT_MOVE(a, el -> value . M_VAR1ANT_GET_FIELD a, \
org -> value . M_VAR1ANT_GET_FIELD a); \
break;
/* Define the MOVE function.
This is not optimized version.
It can be optimized if both types are the same.
*/
#define M_VAR1ANT_DEFINE_MOVE(name, ...) \
M_INLINE void \
M_F(name, _move)(M_F(name,_ct) el, M_F(name,_ct) org) { \
M_VAR1ANT_CONTRACT(name, el); \
M_VAR1ANT_CONTRACT(name, org); \
M_F(name, _clear)(el); \
M_F(name, _init_move)(el , org); \
}
/* Define the MOVE function of a given type */
/* <name>_move_<field>: clear the variant, then move-construct it from
   the given value of that field's type. */
#define M_VAR1ANT_DEFINE_MOVER(name, ...) \
M_MAP2(M_VAR1ANT_DEFINE_MOVER_FUNC, name, __VA_ARGS__)
#define M_VAR1ANT_DEFINE_MOVER_FUNC(name, a) \
M_INLINE void \
M_C3(name, _move_, M_VAR1ANT_GET_FIELD a)(M_F(name,_ct) my, \
M_VAR1ANT_GET_TYPE a M_VAR1ANT_GET_FIELD a ) { \
M_VAR1ANT_CONTRACT(name, my); \
M_F(name, _clear)(my); \
/* Reinit variable with the given value */ \
my->type = M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
M_VAR1ANT_CALL_INIT_MOVE(a, my -> value. M_VAR1ANT_GET_FIELD a, \
M_VAR1ANT_GET_FIELD a); \
}
/* Define the SWAP function */
/* Same tags: member-wise SWAP. Different tags: rotate through a
   temporary, via INIT_MOVE if all fields support it, else INIT_SET. */
#define M_VAR1ANT_DEFINE_SWAP(name, ...) \
M_INLINE void \
M_F(name, _swap)(M_F(name,_ct) el1, M_F(name,_ct) el2) { \
M_VAR1ANT_CONTRACT(name, el1); \
M_VAR1ANT_CONTRACT(name, el2); \
if (el1->type == el2->type) { \
switch (el1->type) { \
case M_F(name, _EMPTY): break; \
M_MAP2(M_VAR1ANT_DEFINE_INIT_SWAP_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
} else { \
M_F(name,_ct) tmp; \
M_VAR1ANT_IF_ALL(INIT_MOVE, __VA_ARGS__) \
( /* NOTE: Slow implementation */ \
M_F(name, _init_move)(tmp, el1); \
M_F(name, _init_move)(el1, el2); \
M_F(name, _init_move)(el2, tmp); \
, \
/* NOTE: Very slow implementation */ \
M_F(name, _init_set)(tmp, el1); \
M_F(name, _set)(el1, el2); \
M_F(name, _set)(el2, tmp); \
M_F(name, _clear)(tmp); \
) \
} \
}
#define M_VAR1ANT_DEFINE_INIT_SWAP_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_VAR1ANT_CALL_SWAP(a, el1 -> value . M_VAR1ANT_GET_FIELD a, \
el2 -> value . M_VAR1ANT_GET_FIELD a); \
break;
/* Define the GET_STR function */
#define M_VAR1ANT_DEFINE_GET_STR(name, ...) \
M_INLINE void M_F(name, _get_str)(m_string_t str, \
M_F(name,_ct) const el, \
bool append) { \
M_VAR1ANT_CONTRACT(name, el); \
M_ASSERT (str != NULL); \
void (*func)(m_string_t, const char *); \
func = append ? m_string_cat_cstr : m_string_set_cstr; \
switch (el->type) { \
case M_F(name, _EMPTY): func(str, "@EMPTY@"); break; \
M_MAP2(M_VAR1ANT_DEFINE_GET_STR_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
m_string_push_back (str, '@'); \
}
#define M_VAR1ANT_DEFINE_GET_STR_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
func(str, "@" M_AS_STR(M_VAR1ANT_GET_FIELD a) "@"); \
M_VAR1ANT_CALL_GET_STR(a, str, el -> value . M_VAR1ANT_GET_FIELD a, true); \
break;
/* Define the PARSE_STR function */
#define M_VAR1ANT_DEFINE_PARSE_STR(name, ...) \
M_INLINE bool M_F(name, _parse_str)(M_F(name,_ct) el, \
const char str[], \
const char **endp) { \
M_VAR1ANT_CONTRACT(name, el); \
M_ASSERT (str != NULL); \
bool success = false; \
char variantTypeBuf[M_USE_IDENTIFIER_ALLOC+1]; \
int c = *str++; \
unsigned int i = 0; \
M_F(name, _reset)(el); \
if (c != '@') goto exit; \
/* First read the name of the type */ \
c = *str++; \
while (c != '@' && c != 0 && i < sizeof(variantTypeBuf) - 1) { \
variantTypeBuf[i++] = (char) c; \
c = *str++; \
} \
if (c != '@') goto exit; \
variantTypeBuf[i++] = 0; \
M_ASSERT(i < sizeof(variantTypeBuf)); \
/* In function of the type */ \
if (strcmp(variantTypeBuf, "EMPTY") == 0) { \
el->type = M_F(name, _EMPTY); \
} \
M_MAP2(M_VAR1ANT_DEFINE_PARSE_STR_FUNC , name, __VA_ARGS__) \
else goto exit; \
success = (*str++ == '@'); \
exit: \
if (endp) *endp = str; \
return success; \
}
#define M_VAR1ANT_DEFINE_PARSE_STR_FUNC(name, a) \
else if (strcmp (variantTypeBuf, M_AS_STR(M_VAR1ANT_GET_FIELD a)) == 0) { \
el->type = M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
M_VAR1ANT_CALL_INIT(a, el ->value . M_VAR1ANT_GET_FIELD a ); \
bool b = M_VAR1ANT_CALL_PARSE_STR(a, el -> value . M_VAR1ANT_GET_FIELD a, str, &str); \
if (!b) goto exit; \
}
/* Define the OUT_STR function */
#define M_VAR1ANT_DEFINE_OUT_STR(name, ...) \
M_INLINE void M_F(name, _out_str)(FILE *f, \
M_F(name,_ct) const el) { \
M_VAR1ANT_CONTRACT(name, el); \
M_ASSERT (f != NULL); \
switch (el->type) { \
case M_F(name, _EMPTY): fprintf(f, "@EMPTY@"); break; \
M_MAP2(M_VAR1ANT_DEFINE_OUT_STR_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
fputc ('@', f); \
}
#define M_VAR1ANT_DEFINE_OUT_STR_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
fprintf(f, "@" M_AS_STR(M_VAR1ANT_GET_FIELD a) "@"); \
M_VAR1ANT_CALL_OUT_STR(a, f, el -> value . M_VAR1ANT_GET_FIELD a); \
break;
/* Define the IN_STR function */
#define M_VAR1ANT_DEFINE_IN_STR(name, ...) \
M_INLINE bool M_F(name, _in_str)(M_F(name,_ct) el, \
FILE *f) { \
M_VAR1ANT_CONTRACT(name, el); \
M_ASSERT (f != NULL); \
char variantTypeBuf[M_USE_IDENTIFIER_ALLOC+1]; \
M_F(name, _reset)(el); \
if (fgetc(f) != '@') return false; \
/* First read the name of the type */ \
bool b = true; \
int c = fgetc(f); \
unsigned int i = 0; \
while (c != '@' && c != EOF && i < sizeof(variantTypeBuf) - 1) { \
variantTypeBuf[i++] = (char) c; \
c = fgetc(f); \
} \
if (c != '@') return false; \
variantTypeBuf[i++] = 0; \
M_ASSERT(i < sizeof(variantTypeBuf)); \
/* In function of the type */ \
if (strcmp(variantTypeBuf, "EMPTY") == 0) { \
el->type = M_F(name, _EMPTY); \
} \
M_MAP2(M_VAR1ANT_DEFINE_IN_STR_FUNC , name, __VA_ARGS__) \
else { b = false; } \
return b && (fgetc(f) == '@'); \
}
#define M_VAR1ANT_DEFINE_IN_STR_FUNC(name, a) \
else if (strcmp (variantTypeBuf, M_AS_STR(M_VAR1ANT_GET_FIELD a)) == 0) { \
el->type = M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value); \
M_VAR1ANT_CALL_INIT(a, el ->value . M_VAR1ANT_GET_FIELD a ); \
b = M_VAR1ANT_CALL_IN_STR(a, el -> value . M_VAR1ANT_GET_FIELD a, f); \
}
/* Return the STRING version of a parameter name */
#define M_VAR1ANT_STRINGIFY_NAME(a) \
M_AS_STR(M_VAR1ANT_GET_FIELD a)
/* Define the OUT_SERIAL function */
/* Serializes via the m_serial interface: variant id is the enum value - 1
   (-1 for EMPTY), so it indexes field_name[]. */
#define M_VAR1ANT_DEFINE_OUT_SERIAL(name, ...) \
M_INLINE m_serial_return_code_t \
M_F(name, _out_serial)(m_serial_write_t f, \
M_F(name,_ct) const el) { \
M_VAR1ANT_CONTRACT(name, el); \
const int field_max = M_NARGS(__VA_ARGS__); \
static const char *const field_name[] = \
{ M_REDUCE(M_VAR1ANT_STRINGIFY_NAME, M_ID, __VA_ARGS__) }; \
M_ASSERT (f != NULL && f->m_interface != NULL); \
m_serial_local_t local; \
m_serial_return_code_t ret; \
switch (el->type) { \
case M_F(name, _EMPTY): \
return f->m_interface->write_variant_start(local, f, field_name, field_max, -1); \
break; \
M_MAP2(M_VAR1ANT_DEFINE_OUT_SERIAL_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
ret |= f->m_interface->write_variant_end(local, f); \
return ret & M_SERIAL_FAIL; \
}
#define M_VAR1ANT_DEFINE_OUT_SERIAL_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
ret = f->m_interface->write_variant_start(local, f, field_name, field_max, \
M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value) -1); \
M_VAR1ANT_CALL_OUT_SERIAL(a, f, el -> value . M_VAR1ANT_GET_FIELD a); \
break;
/* Define the IN_SERIAL function */
/* Reads the variant id from the stream, maps it back to the enum
   (id+1), default-inits the member then reads its value. */
#define M_VAR1ANT_DEFINE_IN_SERIAL(name, ...) \
M_INLINE m_serial_return_code_t \
M_F(name, _in_serial)(M_F(name,_ct) el, \
m_serial_read_t f) { \
M_VAR1ANT_CONTRACT(name, el); \
const int field_max = M_NARGS(__VA_ARGS__); \
static const char *const field_name[] = \
{ M_REDUCE(M_VAR1ANT_STRINGIFY_NAME, M_ID, __VA_ARGS__) }; \
M_ASSERT (f != NULL && f->m_interface != NULL); \
m_serial_local_t local; \
m_serial_return_code_t ret; \
int id = -1; \
M_F(name, _reset)(el); \
ret = f->m_interface->read_variant_start(local, f, field_name, field_max, &id); \
if (ret != M_SERIAL_OK_CONTINUE) return ret; \
M_ASSERT (id >= 0 && id < field_max); \
el->type = (enum M_F(name, _enum))(id+1); \
switch (id+1) { \
M_MAP2(M_VAR1ANT_DEFINE_IN_SERIAL_FUNC , name, __VA_ARGS__) \
default: M_ASSUME(false); break; \
} \
if (ret == M_SERIAL_OK_DONE) \
ret = f->m_interface->read_variant_end(local, f); \
return ret; \
}
#define M_VAR1ANT_DEFINE_IN_SERIAL_FUNC(name, a) \
case M_C4(name, _, M_VAR1ANT_GET_FIELD a, _value): \
M_VAR1ANT_CALL_INIT(a, el ->value . M_VAR1ANT_GET_FIELD a ); \
ret = M_VAR1ANT_CALL_IN_SERIAL(a, el -> value . M_VAR1ANT_GET_FIELD a, f); \
break; \
/* end of M_VAR1ANT_DEFINE_IN_SERIAL_FUNC (terminates the continuation) */
/* Define the RESET function */
/* Clears the active member and re-initializes the variant to EMPTY. */
#define M_VAR1ANT_DEFINE_RESET_FUNC(name, ...) \
M_INLINE void M_F(name, _reset)(M_F(name,_ct) my) \
{ \
M_VAR1ANT_CONTRACT(name, my); \
M_F(name, _clear)(my); \
M_F(name, _init)(my); \
} \
/* end of M_VAR1ANT_DEFINE_RESET_FUNC (terminates the continuation) */
/********************************** INTERNAL *********************************/
/* deferred evaluation of the oplist */
#define M_VAR1ANT_OPLIST_P1(arg) M_VAR1ANT_OPLIST_P2 arg
/* Validate the oplist before going further */
#define M_VAR1ANT_OPLIST_P2(name, ...) \
M_IF(M_REDUCE(M_OPLIST_P, M_AND, __VA_ARGS__))(M_VAR1ANT_OPLIST_P3, M_VAR1ANT_OPLIST_FAILURE)(name, __VA_ARGS__)
/* Prepare a clean compilation failure */
#define M_VAR1ANT_OPLIST_FAILURE(name, ...) \
((M_LIB_ERROR(ONE_ARGUMENT_OF_VARIANT_OPLIST_IS_NOT_AN_OPLIST, name, __VA_ARGS__)))
/* Define the oplist */
/* Optional methods are exported only when all the fields' oplists
   provide them (I/O input also requires INIT). */
#define M_VAR1ANT_OPLIST_P3(name, ...) \
(INIT(M_F(name,_init)), \
INIT_SET(M_F(name, _init_set)), \
SET(M_F(name,_set)), \
CLEAR(M_F(name, _clear)), \
RESET(M_F(name, _reset)), \
NAME(name), \
TYPE(M_F(name,_ct)), \
EMPTY_P(M_F(name,_empty_p)), \
M_IF_METHOD_ALL(HASH, __VA_ARGS__)(HASH(M_F(name, _hash)),), \
M_IF_METHOD_ALL(EQUAL, __VA_ARGS__)(EQUAL(M_F(name, _equal_p)),), \
M_IF_METHOD_ALL(GET_STR, __VA_ARGS__)(GET_STR(M_F(name, _get_str)),), \
M_IF_METHOD2_ALL(PARSE_STR, INIT, __VA_ARGS__)(PARSE_STR(M_F(name, _parse_str)),), \
M_IF_METHOD2_ALL(IN_STR, INIT, __VA_ARGS__)(IN_STR(M_F(name, _in_str)),), \
M_IF_METHOD_ALL(OUT_STR, __VA_ARGS__)(OUT_STR(M_F(name, _out_str)),), \
M_IF_METHOD2_ALL(IN_SERIAL, INIT, __VA_ARGS__)(IN_SERIAL(M_F(name, _in_serial)),), \
M_IF_METHOD_ALL(OUT_SERIAL, __VA_ARGS__)(OUT_SERIAL(M_F(name, _out_serial)),), \
M_IF_METHOD_ALL(INIT_MOVE, __VA_ARGS__)(INIT_MOVE(M_F(name, _init_move)),), \
M_IF_METHOD_ALL(INIT_MOVE, __VA_ARGS__)(MOVE(M_F(name, _move)),), \
M_IF_METHOD_ALL(SWAP, __VA_ARGS__)(SWAP(M_F(name, _swap)),), \
)
/********************************** INTERNAL *********************************/
/* Macros for testing for method presence */
/* Only the oplist 'op' of the (field, type, oplist) trio is consulted. */
#define M_VAR1ANT_TEST_METHOD_P2(method, f, t, op) \
M_TEST_METHOD_P(method, op)
#define M_VAR1ANT_TEST_METHOD_P(method, trio) \
M_APPLY(M_VAR1ANT_TEST_METHOD_P2, method, M_OPFLAT trio)
#define M_VAR1ANT_IF_ALL(method, ...) \
M_IF(M_REDUCE2(M_VAR1ANT_TEST_METHOD_P, M_AND, method, __VA_ARGS__))
/* Two-method conjunction: both method1 and method2 must be present. */
#define M_VAR1ANT_TEST_METHOD2_P2(method1, method2, f, t, op) \
M_AND(M_TEST_METHOD_P(method1, op), M_TEST_METHOD_P(method2, op))
#define M_VAR1ANT_TEST_METHOD2_P(method, trio) \
M_APPLY(M_VAR1ANT_TEST_METHOD2_P2, M_PAIR_1 method, M_PAIR_2 method, M_OPFLAT trio)
#define M_VAR1ANT_IF_ALL2(method1, method2, ...) \
M_IF(M_REDUCE2(M_VAR1ANT_TEST_METHOD2_P, M_AND, (method1, method2), __VA_ARGS__))
/********************************** INTERNAL *********************************/
/* Short (historical) aliases when M_USE_SMALL_NAME is enabled. */
#if M_USE_SMALL_NAME
#define VARIANT_DEF2 M_VARIANT_DEF2
#define VARIANT_DEF2_AS M_VARIANT_DEF2_AS
#define VARIANT_OPLIST M_VARIANT_OPLIST
#endif /* M_USE_SMALL_NAME */
#endif /* MSTARLIB_VARIANT_H */

698
components/mlib/m-worker.h Normal file
View File

@ -0,0 +1,698 @@
/*
* M*LIB / WORKER - Extra worker interface
*
* Copyright (c) 2017-2023, Patrick Pelissier
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* + Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* + Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSTARLIB_WORKER_H
#define MSTARLIB_WORKER_H
/* The User Code can define M_USE_WORKER to 0 to disable the use of workers.
The macros / functions are then defined to only use one core.
By default, the behavior is to use workers.
*/
#ifndef M_USE_WORKER
# define M_USE_WORKER 1
#endif
#if M_USE_WORKER
#include "m-atomic.h"
#include "m-buffer.h"
#include "m-thread.h"
/* Include needed system header for detection of how many cores are available in the system */
#if defined(_WIN32)
# include <sysinfoapi.h>
#elif (defined(__APPLE__) && defined(__MACH__)) \
|| defined(__DragonFly__) || defined(__FreeBSD__) \
|| defined(__NetBSD__) || defined(__OpenBSD__)
# include <sys/param.h>
# include <sys/sysctl.h>
# define M_USE_WORKER_SYSCTL 1
#else
# include <unistd.h>
#endif
/* Support for CLANG block since CLANG doesn't support nested function.
M-WORKER uses its 'blocks' extension instead, but it is not compatible
with function.
So you need to compile with "-fblocks" and link with "-lBlocksRuntime"
if you use clang & want to use the MACRO version.
if C++, it will use Lambda function (and std::function) instead
(It doesn't support pre-C++11 compiler).
Otherwise go with nested function (GCC) for the MACRO version.
This behavior can be overridden by User Code by defining to 1 or 0 the
following macros:
* M_USE_WORKER_CPP_FUNCTION
* M_USE_WORKER_CLANG_BLOCK
*/
#if defined(__cplusplus) && !defined(M_USE_WORKER_CPP_FUNCTION)
# define M_USE_WORKER_CPP_FUNCTION 1
# include <functional>
#elif defined(__has_extension) && !defined(M_USE_WORKER_CLANG_BLOCK)
# if __has_extension(blocks)
# define M_USE_WORKER_CLANG_BLOCK 1
# endif
#endif
#ifndef M_USE_WORKER_CLANG_BLOCK
# define M_USE_WORKER_CLANG_BLOCK 0
#endif
#ifndef M_USE_WORKER_CPP_FUNCTION
# define M_USE_WORKER_CPP_FUNCTION 0
#endif
/* Control that not both options are selected at the same time.
Note: they are not really incompatible, but if we use C++ we shall go to
lambda directly (there is no need to support blocks). */
#if M_USE_WORKER_CLANG_BLOCK && M_USE_WORKER_CPP_FUNCTION
# error M_USE_WORKER_CPP_FUNCTION and M_USE_WORKER_CLANG_BLOCK are both defined. This is not supported.
#endif
M_BEGIN_PROTECTED_CODE
/* Definition of a work order */
typedef struct m_work3r_order_s {
struct m_worker_sync_s *block; // Reference to the shared Synchronization block
void * data; // The work order data
void (*func) (void *data); // The work order function (for GCC)
#if M_USE_WORKER_CLANG_BLOCK
void (^blockFunc)(void *data); // The work order function (block for clang)
#endif
#if M_USE_WORKER_CPP_FUNCTION
std::function<void(void*)> function; // The work order function (for C++)
#endif
} m_work3r_order_ct;
/* Define the macros needed to initialize an order.
* * MACRO to be used to send an empty order to stop the thread
* * MACRO to complete the not-used fields
*/
#if M_USE_WORKER_CLANG_BLOCK || M_USE_WORKER_CPP_FUNCTION
# define M_WORK3R_EMPTY_ORDER { NULL, NULL, NULL, NULL }
# define M_WORK3R_EXTRA_ORDER , NULL
#else
# define M_WORK3R_EMPTY_ORDER { NULL, NULL, NULL }
# define M_WORK3R_EXTRA_ORDER
#endif
/* As it is C++, it uses std::function, M_POD_OPLIST
is not sufficient for initialization of the structure.
So let's use C++ constructor, destructor and copy constructor */
#if M_USE_WORKER_CPP_FUNCTION
# define M_WORK3R_CPP_INIT(x) (new (&(x)) m_work3r_order_ct())
# define M_WORK3R_CPP_INIT_SET(x, y) (new (&(x)) m_work3r_order_ct(y))
# define M_WORK3R_CPP_SET(x, y) ((x) = (y))
# define M_WORK3R_CPP_CLEAR(x) ((&(x))->~m_work3r_order_ct())
# define M_WORK3R_CPP_INIT_MOVE(x,y) (new (&(x)) m_work3r_order_ct(y), ((&(y))->~m_work3r_order_ct()))
# define M_WORK3R_OPLIST \
(INIT(M_WORK3R_CPP_INIT), INIT_SET(M_WORK3R_CPP_INIT_SET), \
SET(M_WORK3R_CPP_SET), CLEAR(M_WORK3R_CPP_CLEAR), INIT_MOVE(M_WORK3R_CPP_INIT_MOVE) )
#else
# define M_WORK3R_OPLIST M_POD_OPLIST
#endif
/* Definition of the identity of a worker thread */
typedef struct m_work3r_thread_s {
m_thread_t id;
} m_work3r_thread_ct;
/* Definition of the queue that will record the work orders */
BUFFER_DEF(m_work3r_queue, m_work3r_order_ct, 0,
BUFFER_QUEUE|BUFFER_UNBLOCKING_PUSH|BUFFER_BLOCKING_POP|BUFFER_THREAD_SAFE|BUFFER_DEFERRED_POP, M_WORK3R_OPLIST)
/* Definition the global pool of workers */
typedef struct m_worker_s {
/* The work order queue */
m_work3r_queue_t queue_g;
/* The table of available workers */
m_work3r_thread_ct *worker;
/* Number of workers in the table */
unsigned int numWorker_g;
/* The global reset function */
void (*resetFunc_g)(void);
/* The global clear function */
void (*clearFunc_g)(void);
m_mutex_t lock;
m_cond_t a_thread_ends; // EVENT: A worker has ended
} m_worker_t[1];
/* Definition of the synchronization point for workers */
typedef struct m_worker_sync_s {
atomic_int num_spawn; // Number of spawned workers accord this synchronization point
atomic_int num_terminated_spawn; // Number of terminated spawned workers
struct m_worker_s *worker; // Reference to the pool of workers
} m_worker_sync_t[1];
/* Extend m_worker_spawn by defining a specialization function
with the given arguments.
Generate the needed encapsulation for the user.
USAGE: name, oplists of arguments */
#define M_WORKER_SPAWN_DEF2(name, ...) \
M_BEGIN_PROTECTED_CODE \
M_WORK3R_SPAWN_EXTEND_P1( (name, M_MAP_C(M_WORK3R_SPAWN_EXTEND_P0, __VA_ARGS__) ) ) \
M_END_PROTECTED_CODE
/* Output a valid oplist with the given type.
input is (fieldname, type) or (fieldname, type, oplist)
Output shall be : M_OPEXTEND(M_GLOBAL_OPLIST_OR_DEF(type_or_oplist)(), TYPE(type)) / M_OPEXTEND(oplist, TYPE(type))
*/
#define M_WORK3R_SPAWN_EXTEND_P0(...) M_BY_NARGS(M_WORK3R_SPAWN_EXTEND_P0, M_ID __VA_ARGS__) __VA_ARGS__
#define M_WORK3R_SPAWN_EXTEND_P0__2(field, type) M_OPEXTEND(M_GLOBAL_OPLIST_OR_DEF(type)(), TYPE(type))
#define M_WORK3R_SPAWN_EXTEND_P0__3(field, type, oplist) M_IF_OPLIST(oplist)(M_WORK3R_SPAWN_EXTEND_P0__3_OK, M_WORK3R_SPAWN_EXTEND_P0__3_KO)(field, type, oplist)
#define M_WORK3R_SPAWN_EXTEND_P0__3_OK(field, type, oplist) M_OPEXTEND(oplist, TYPE(type))
#define M_WORK3R_SPAWN_EXTEND_P0__3_KO(field, type, oplist) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, "(M_WORKER_SPAWN_EXTEND): the argument is not a valid oplist: " M_MAP(M_AS_STR, oplist))
/* Deferred evaluation for the definition,
so that all arguments are evaluated before further expansion */
#define M_WORK3R_SPAWN_EXTEND_P1(arg) M_ID( M_WORK3R_SPAWN_EXTEND_P2 arg )
/* Validate the oplist before going further */
#define M_WORK3R_SPAWN_EXTEND_P2(name, ...) \
M_IF(M_REDUCE(M_OPLIST_P, M_AND, __VA_ARGS__)) \
(M_WORK3R_SPAWN_EXTEND_P3, M_WORK3R_SPAWN_EXTEND_FAILURE)(name, __VA_ARGS__)
/* Stop processing with a compilation failure */
#define M_WORK3R_SPAWN_EXTEND_FAILURE(name, ...) \
M_STATIC_FAILURE(M_LIB_NOT_AN_OPLIST, \
"(M_WORKER_SPAWN_EXTEND): at least one of the given argument is not a valid oplist: " \
M_MAP(M_AS_STR, __VA_ARGS__))
/* Define the extension of spawn */
#define M_WORK3R_SPAWN_EXTEND_P3(name, ...) \
M_WORK3R_SPAWN_EXTEND_DEF_TYPE(name, __VA_ARGS__) \
M_WORK3R_SPAWN_EXTEND_DEF_CALLBACK(name, __VA_ARGS__) \
M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE(name, __VA_ARGS__) \
/* Define the type */
#define M_WORK3R_SPAWN_EXTEND_DEF_TYPE(name, ...) \
typedef void (*M_C3(m_worker_,name, _callback_ct))(M_MAP_C(M_WORK3R_SPAWN_EXTEND_DEF_TYPE_TYPE, __VA_ARGS__)); \
struct M_C3(m_worker_, name, _s){ \
M_C3(m_worker_, name, _callback_ct) callback; \
M_MAP3(M_WORK3R_SPAWN_EXTEND_DEF_TYPE_FIELD, data, __VA_ARGS__) \
};
#define M_WORK3R_SPAWN_EXTEND_DEF_TYPE_FIELD(data, num, oplist) \
M_GET_TYPE oplist M_C(field, num);
#define M_WORK3R_SPAWN_EXTEND_DEF_TYPE_TYPE(oplist) \
M_GET_TYPE oplist
/* Define the callback */
#define M_WORK3R_SPAWN_EXTEND_DEF_CALLBACK(name, ...) \
M_INLINE void \
M_C3(m_work3r_, name, _clear)(struct M_C3(m_worker_, name, _s) *p) \
{ \
M_MAP3(M_WORK3R_SPAWN_EXTEND_DEF_CALLBACK_CLEAR, data, __VA_ARGS__) \
/* TODO: Overload */ \
M_MEMORY_DEL(p); \
} \
\
M_INLINE void \
M_C3(m_work3r_, name, _callback)(void *data) \
{ \
struct M_C3(m_worker_, name, _s) *p = (struct M_C3(m_worker_, name, _s) *) data; \
(*p->callback)( \
M_MAP3_C(M_WORK3R_SPAWN_EXTEND_DEF_CALLBACK_FIELD, data, __VA_ARGS__) \
); \
M_C3(m_work3r_, name, _clear)(p); \
}
#define M_WORK3R_SPAWN_EXTEND_DEF_CALLBACK_FIELD(data, num, oplist) \
p->M_C(field, num)
#define M_WORK3R_SPAWN_EXTEND_DEF_CALLBACK_CLEAR(data, num, oplist) \
M_CALL_CLEAR(oplist, p->M_C(field, num)) ;
/* Define the emplace like spawn method */
#define M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE(name, ...) \
M_INLINE void \
M_C(m_worker_spawn_, name)(m_worker_sync_t block, M_C3(m_worker_, name, _callback_ct) callback, \
M_MAP3_C(M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE_FIELD, data, __VA_ARGS__) \
) \
{ \
if (!m_work3r_queue_full_p(block->worker->queue_g)) { \
struct M_C3(m_worker_, name, _s) *p = M_MEMORY_ALLOC ( struct M_C3(m_worker_, name, _s)); \
if (M_UNLIKELY_NOMEM(p == NULL)) { \
M_MEMORY_FULL(sizeof (struct M_C3(m_worker_, name, _s))); \
} \
p->callback = callback; \
M_MAP3(M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE_FIELD_COPY, data, __VA_ARGS__) \
const m_work3r_order_ct w = { block, p, M_C3(m_work3r_, name, _callback) M_WORK3R_EXTRA_ORDER }; \
if (m_work3r_queue_push (block->worker->queue_g, w) == true) { \
atomic_fetch_add (&block->num_spawn, 1); \
return; \
} \
/* No worker available now. Call the function ourself */ \
/* But before clear the allocated data */ \
M_C3(m_work3r_, name, _clear)(p); \
} \
/* No worker available. Call the function ourself */ \
(*callback) ( \
M_MAP3_C(M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE_FIELD_ALONE, data, __VA_ARGS__) \
); \
}
#define M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE_FIELD(data, num, oplist) \
M_GET_TYPE oplist M_C(param, num)
#define M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE_FIELD_COPY(data, num, oplist) \
M_CALL_INIT_SET(oplist, p-> M_C(field, num), M_C(param, num) );
#define M_WORK3R_SPAWN_EXTEND_DEF_EMPLACE_FIELD_ALONE(data, num, oplist) \
M_C(param, num)
/* Return the number of CPU cores available in the system.
   Works for WINDOWS, MACOS, *BSD, LINUX.
   Falls back to 1 when no detection method is available. */
M_INLINE int
m_work3r_get_cpu_count(void)
{
#if defined(_WIN32)
  /* Windows: query the processor count through the Win32 system info API. */
  SYSTEM_INFO sysinfo;
  GetSystemInfo(&sysinfo);
  M_ASSERT(sysinfo.dwNumberOfProcessors <= INT_MAX);
  return (int) sysinfo.dwNumberOfProcessors;
#elif defined(M_USE_WORKER_SYSCTL)
  /* macOS / *BSD: read hw.ncpu via sysctl.
     'count' stays 0 if the call fails, hence the M_MAX guard below. */
  int nm[2];
  int count = 0;
  size_t len = sizeof (count);
  nm[0] = CTL_HW;
  nm[1] = HW_NCPU;
  sysctl(nm, 2, &count, &len, NULL, 0);
  return M_MAX(1, count);
#elif defined (_SC_NPROCESSORS_ONLN)
  /* POSIX: number of processors currently online. */
  return (int) sysconf(_SC_NPROCESSORS_ONLN);
#elif defined (_SC_NPROCESSORS_CONF)
  /* POSIX fallback: number of processors configured. */
  return (int) sysconf(_SC_NPROCESSORS_CONF);
#else
  /* Unknown platform: assume a single core. */
  return 1;
#endif
}
// (INTERNAL) Debug support for workers
#if 1
#define M_WORK3R_DEBUG(...) (void) 0
#else
#define M_WORK3R_DEBUG(...) printf(__VA_ARGS__)
#endif
/* Execute the registered work order **synchronously** in the calling
   thread, then record its completion in the synchronization block so
   that m_worker_sync_p / m_worker_sync can observe it. */
M_INLINE void
m_work3r_exec(m_work3r_order_ct *w)
{
  M_ASSERT (w!= NULL && w->block != NULL);
  M_WORK3R_DEBUG ("Starting thread with data %p\n", w->data);
#if M_USE_WORKER_CLANG_BLOCK
  /* A NULL 'func' marks an order that carries a clang block instead of
     a plain function pointer. */
  M_WORK3R_DEBUG ("Running %s f=%p b=%p\n", (w->func == NULL) ? "Blocks" : "Function", w->func, w->blockFunc);
  if (w->func == NULL)
    w->blockFunc(w->data);
  else
#endif
#if M_USE_WORKER_CPP_FUNCTION
  /* A non-empty std::function takes precedence over the raw pointer. */
  M_WORK3R_DEBUG ("Running %s f=%p b=%p\n", (w->function == NULL) ? "Lambda" : "Function", w->func, w->blockFunc);
  if (w->function)
    w->function(w->data);
  else
#endif
  w->func(w->data);
  /* Increment the number of terminated work orders for the synchronization point */
  atomic_fetch_add (&w->block->num_terminated_spawn, 1);
}
/* The worker thread main loop: pop work orders from the shared queue
   and execute them, until a stop order (order with a NULL block) is
   received. */
M_INLINE void
m_work3r_thread(void *arg)
{
  // Get back the given argument (the pool of workers this thread belongs to)
  struct m_worker_s *g = M_ASSIGN_CAST(struct m_worker_s *, arg);
  while (true) {
    m_work3r_order_ct w;
    // If needed, reset the global state of the worker between work orders
    if (g->resetFunc_g != NULL) {
      g->resetFunc_g();
    }
    // Waiting for data (blocking pop)
    M_WORK3R_DEBUG ("Waiting for data (queue: %lu / %lu)\n", m_work3r_queue_size(g->queue_g), m_work3r_queue_capacity(g->queue_g));
    m_work3r_queue_pop(&w, g->queue_g);
    // We received a work order
    // Note: the work order is still present in the queue (deferred pop),
    // preventing further work orders from being pushed in the queue until it finishes doing the work
    // If a stop request is received, terminate the thread
    if (w.block == NULL) break;
    // Execute the work order synchronously in this thread
    m_work3r_exec(&w);
    // Consume fully the work order in the queue (release the deferred pop)
    m_work3r_queue_pop_release(g->queue_g);
    // Signal that a worker has finished (wakes up any m_worker_sync waiter).
    m_mutex_lock(g->lock);
    m_cond_broadcast(g->a_thread_ends);
    m_mutex_unlock(g->lock);
  }
  // If needed, clear the global state of the thread before terminating
  if (g->clearFunc_g != NULL) {
    g->clearFunc_g();
  }
}
/* Initialization of the worker module (constructor).
   Allocates the worker table, initializes the shared work queue and
   synchronization primitives, and starts the worker threads.
   Input:
   @numWorker: number of workers to create (0=autodetect, -1=2*autodetect)
   @extraQueue: number of extra work orders that can be queued if all workers are busy
   @resetFunc: function to reset the state of a worker between work orders (or NULL if none)
   @clearFunc: function to clear the state of a worker before terminating (or NULL if none)
*/
M_INLINE void
m_worker_init(m_worker_t g, int numWorker, unsigned int extraQueue, void (*resetFunc)(void), void (*clearFunc)(void))
{
  M_ASSERT (numWorker >= -1);
  // Auto compute the number of workers if the argument is 0 (cores-1) or -1 (2*cores-1)
  if (numWorker <= 0)
    numWorker = (1 + (numWorker == -1))*m_work3r_get_cpu_count()-1;
  M_WORK3R_DEBUG ("Starting queue with: %d\n", numWorker + extraQueue);
  // Initialization
  // numWorker can still be 0 if it is a single core cpu (no worker available)
  M_ASSERT(numWorker >= 0);
  size_t numWorker_st = (size_t) numWorker;
  g->worker = M_MEMORY_REALLOC(m_work3r_thread_ct, NULL, numWorker_st);
  if (M_UNLIKELY_NOMEM (g->worker == NULL)) {
    M_MEMORY_FULL(sizeof (m_work3r_thread_ct) * numWorker_st);
    return;
  }
  m_work3r_queue_init(g->queue_g, numWorker_st + extraQueue);
  g->numWorker_g = (unsigned int) numWorker_st;
  g->resetFunc_g = resetFunc;
  g->clearFunc_g = clearFunc;
  m_mutex_init(g->lock);
  m_cond_init(g->a_thread_ends);
  // Create & start the workers; each runs m_work3r_thread until a stop order is pushed
  for(size_t i = 0; i < numWorker_st; i++) {
    m_thread_create(g->worker[i].id, m_work3r_thread, M_ASSIGN_CAST(void*, g));
  }
}
/* Overload m_worker_init so that trailing arguments are optional.
   Defaults: numWorker=0 (autodetect), extraQueue=0, resetFunc=NULL, clearFunc=NULL.
   Input:
   @numWorker: number of workers to create (0=autodetect, -1=2*autodetect)
   @extraQueue: number of extra work orders that can be queued if all workers are busy
   @resetFunc: function to reset the state of a worker between work orders (optional)
   @clearFunc: function to clear the state of a worker before terminating (optional)
*/
#define m_worker_init(...) m_worker_init(M_DEFAULT_ARGS(5, (0, 0, NULL, NULL), __VA_ARGS__))
/* Clear of the worker module (destructor):
   ask every worker thread to terminate, wait for all of them,
   then release all resources.
   Precondition: the work queue shall be empty (all sync points finished). */
M_INLINE void
m_worker_clear(m_worker_t g)
{
  M_ASSERT (m_work3r_queue_empty_p (g->queue_g));
  // Push one terminate order per worker on the queue
  for(unsigned int i = 0; i < g->numWorker_g; i++) {
    m_work3r_order_ct w = M_WORK3R_EMPTY_ORDER;
    // Normally all worker threads shall be waiting at this
    // stage, so the push won't block as the queue is empty.
    // But for robustness, let's wait if needed.
    m_work3r_queue_push_blocking (g->queue_g, w, true);
  }
  // Wait for thread termination
  for(unsigned int i = 0; i < g->numWorker_g; i++) {
    m_thread_join(g->worker[i].id);
  }
  // Clear memory and synchronization primitives
  M_MEMORY_FREE(g->worker);
  m_mutex_clear(g->lock);
  m_cond_clear(g->a_thread_ends);
  m_work3r_queue_clear(g->queue_g);
}
/* Open a new collaboration between the workers of pool 'g' by resetting
   the synchronization point 'block': no spawn has been issued nor has
   terminated yet, and the block is bound to the given pool. */
M_INLINE void
m_worker_start(m_worker_sync_t block, m_worker_t g)
{
  block->worker = g;
  atomic_init (&block->num_spawn, 0);
  atomic_init (&block->num_terminated_spawn, 0);
}
/* Spawn the given work order to a worker if possible,
   or execute it immediately ourself if no worker is available.
   The synchronization point is defined as 'block'.
   The work order is composed of the function 'func' and its 'data'.
*/
M_INLINE void
m_worker_spawn(m_worker_sync_t block, void (*func)(void *data), void *data)
{
  const m_work3r_order_ct w = { block, data, func M_WORK3R_EXTRA_ORDER };
  if (M_UNLIKELY (!m_work3r_queue_full_p(block->worker->queue_g))
      && m_work3r_queue_push (block->worker->queue_g, w) == true) {
    M_WORK3R_DEBUG ("Sending data to thread: %p (block: %d / %d)\n", data, block->num_spawn, block->num_terminated_spawn);
    // Count the spawn so m_worker_sync knows it has to wait for it.
    atomic_fetch_add (&block->num_spawn, 1);
    return;
  }
  /* No worker available. Call the function ourself */
  M_WORK3R_DEBUG ("Running data ourself: %p\n", data);
  (*func) (data);
}
#if M_USE_WORKER_CLANG_BLOCK
/* Same as m_worker_spawn, but the work order is a clang block.
   The raw 'func' field is set to NULL so that m_work3r_exec dispatches
   to the block. Executes the block ourself if no worker is available. */
M_INLINE void
m_work3r_spawn_block(m_worker_sync_t block, void (^func)(void *data), void *data)
{
  const m_work3r_order_ct w = { block, data, NULL, func };
  if (M_UNLIKELY (!m_work3r_queue_full_p(block->worker->queue_g))
      && m_work3r_queue_push (block->worker->queue_g, w) == true) {
    M_WORK3R_DEBUG ("Sending data to thread as block: %p (block: %d / %d)\n", data, block->num_spawn, block->num_terminated_spawn);
    atomic_fetch_add (&block->num_spawn, 1);
    return;
  }
  M_WORK3R_DEBUG ("Running data ourself as block: %p\n", data);
  /* No worker available. Call the block ourself */
  func (data);
}
#endif
#if M_USE_WORKER_CPP_FUNCTION
/* Same as m_worker_spawn, but the work order is a C++ std::function.
   The raw 'func' field is set to NULL; m_work3r_exec dispatches on the
   non-empty std::function. Executes the function ourself if no worker
   is available. */
M_INLINE void
m_work3r_spawn_function(m_worker_sync_t block, std::function<void(void *data)> func, void *data)
{
  const m_work3r_order_ct w = { block, data, NULL, func };
  if (M_UNLIKELY (!m_work3r_queue_full_p(block->worker->queue_g))
      && m_work3r_queue_push (block->worker->queue_g, w) == true) {
    M_WORK3R_DEBUG ("Sending data to thread as block: %p (block: %d / %d)\n", data, block->num_spawn, block->num_terminated_spawn);
    atomic_fetch_add (&block->num_spawn, 1);
    return;
  }
  M_WORK3R_DEBUG ("Running data ourself as block: %p\n", data);
  /* No worker available. Call the function ourself */
  func (data);
}
#endif
/* Check whether every work order spawned through this synchronization
   point has completed. Returns true when nothing is still pending. */
M_INLINE bool
m_worker_sync_p(m_worker_sync_t block)
{
  /* Compare the number of dispatched work orders with the number that
     have signalled completion: equality means the point is idle. */
  int dispatched = atomic_load(&block->num_spawn);
  int completed  = atomic_load(&block->num_terminated_spawn);
  return dispatched == completed;
}
/* Wait for all work orders of the given synchronization point to be finished */
M_INLINE void
m_worker_sync(m_worker_sync_t block)
{
  M_WORK3R_DEBUG ("Waiting for thread terminasion.\n");
  // Fast path: all spawned work orders have already finished
  if (m_worker_sync_p(block)) return;
  // Slow path: perform a locked wait to put this thread into a waiting state
  m_mutex_lock(block->worker->lock);
  while (!m_worker_sync_p(block)) {
    // Woken up by m_work3r_thread each time a work order completes
    m_cond_wait(block->worker->a_thread_ends, block->worker->lock);
  }
  m_mutex_unlock(block->worker->lock);
}
/* Drain any work orders remaining in the queue by executing them
   in the calling thread (non-blocking pop; stops when empty). */
M_INLINE void
m_worker_flush(m_worker_t g)
{
  m_work3r_order_ct order;
  for (;;) {
    /* Non-blocking pop: returns false once the queue is empty. */
    if (m_work3r_queue_pop_blocking (&order, g->queue_g, false) != true)
      break;
    m_work3r_exec(&order);
    /* Fully consume the deferred-pop entry in the queue. */
    m_work3r_queue_pop_release(g->queue_g);
  }
}
/* Return the number of workers.
   NOTE(review): the +1 on top of the worker-thread count presumably
   accounts for the calling thread itself — confirm against callers. */
M_INLINE size_t
m_worker_count(m_worker_t g)
{
  size_t total = (size_t) g->numWorker_g + 1;
  return total;
}
/* Spawn the 'core' block computation into another thread if
a worker thread is available. Compute it in the current thread otherwise.
'block' shall be the initialised synchronised block for all threads.
'input' is the list of input variables of the 'core' block within "( )"
'output' is the list of output variables of the 'core' block within "( )"
Output variables are only available after a synchronisation block.
TODO: Support oplist for input & outputs parameters
*/
#if M_USE_WORKER_CLANG_BLOCK
#define M_WORKER_SPAWN(_block, _input, _core, _output) \
M_WORK3R_DEF_DATA(_input, _output) \
M_WORK3R_DEF_SUBBLOCK(_input, _output, _core) \
m_work3r_spawn_block ((_block), M_WORK3R_SPAWN_SUBFUNC_NAME, &M_WORK3R_SPAWN_DATA_NAME)
#elif M_USE_WORKER_CPP_FUNCTION
// TODO: Explicit pass all arguments by reference.
#define M_WORKER_SPAWN(_block, _input, _core, _output) \
m_work3r_spawn_function ((_block), [&](void *param) {(void)param ; _core } , NULL)
#else
#define M_WORKER_SPAWN(_block, _input, _core, _output) \
M_WORK3R_DEF_DATA(_input, _output) \
M_WORK3R_DEF_SUBFUNC(_input, _output, _core) \
m_worker_spawn ((_block), M_WORK3R_SPAWN_SUBFUNC_NAME, &M_WORK3R_SPAWN_DATA_NAME)
#endif
#define M_WORK3R_SPAWN_STRUCT_NAME M_C(m_work3r_data_s_, __LINE__)
#define M_WORK3R_SPAWN_DATA_NAME M_C(m_work3r_data_, __LINE__)
#define M_WORK3R_SPAWN_SUBFUNC_NAME M_C(m_work3r_subfunc_, __LINE__)
#define M_WORK3R_DEF_DATA(_input, _output) \
struct M_WORK3R_SPAWN_STRUCT_NAME { \
M_WORK3R_DEF_DATA_INPUT _input \
M_IF_EMPTY _output ( , M_WORK3R_DEF_DATA_OUTPUT _output) \
} M_WORK3R_SPAWN_DATA_NAME = { \
M_WORK3R_INIT_DATA_INPUT _input \
M_IF_EMPTY _output (, M_WORK3R_INIT_DATA_OUTPUT _output) \
};
#define M_WORK3R_DEF_SINGLE_INPUT(var) __typeof__(var) var;
#define M_WORK3R_DEF_DATA_INPUT(...) \
M_MAP(M_WORK3R_DEF_SINGLE_INPUT, __VA_ARGS__)
#define M_WORK3R_DEF_SINGLE_OUTPUT(var) \
__typeof__(var) *M_C(var, _ptr);
#define M_WORK3R_DEF_DATA_OUTPUT(...) \
M_MAP(M_WORK3R_DEF_SINGLE_OUTPUT, __VA_ARGS__)
#define M_WORK3R_INIT_SINGLE_INPUT(var) \
.var = var,
#define M_WORK3R_INIT_DATA_INPUT(...) \
M_MAP(M_WORK3R_INIT_SINGLE_INPUT, __VA_ARGS__)
#define M_WORK3R_INIT_SINGLE_OUTPUT(var) \
.M_C(var, _ptr) = &var,
#define M_WORK3R_INIT_DATA_OUTPUT(...) \
M_MAP(M_WORK3R_INIT_SINGLE_OUTPUT, __VA_ARGS__)
#define M_WORK3R_DEF_SUBFUNC(_input, _output, _core) \
__extension__ auto void M_WORK3R_SPAWN_SUBFUNC_NAME(void *) ; \
__extension__ void M_WORK3R_SPAWN_SUBFUNC_NAME(void *_data) \
{ \
struct M_WORK3R_SPAWN_STRUCT_NAME *_s_data = _data ; \
M_WORK3R_INIT_LOCAL_INPUT _input \
M_IF_EMPTY _output ( , M_WORK3R_INIT_LOCAL_OUTPUT _output) \
do { _core } while (0); \
M_IF_EMPTY _output ( , M_WORK3R_PROPAGATE_LOCAL_OUTPUT _output) \
};
#define M_WORK3R_DEF_SUBBLOCK(_input, _output, _core) \
void (^M_WORK3R_SPAWN_SUBFUNC_NAME) (void *) = ^ void (void * _data) \
{ \
struct M_WORK3R_SPAWN_STRUCT_NAME *_s_data = _data ; \
M_WORK3R_INIT_LOCAL_INPUT _input \
M_IF_EMPTY _output ( , M_WORK3R_INIT_LOCAL_OUTPUT _output) \
do { _core } while (0); \
M_IF_EMPTY _output ( , M_WORK3R_PROPAGATE_LOCAL_OUTPUT _output) \
};
#define M_WORK3R_INIT_SINGLE_LOCAL_INPUT(var) \
__typeof__(var) var = _s_data->var;
#define M_WORK3R_INIT_LOCAL_INPUT(...) \
M_MAP(M_WORK3R_INIT_SINGLE_LOCAL_INPUT, __VA_ARGS__)
#define M_WORK3R_INIT_SINGLE_LOCAL_OUTPUT(var) \
__typeof__(var) var;
#define M_WORK3R_INIT_LOCAL_OUTPUT(...) \
M_MAP(M_WORK3R_INIT_SINGLE_LOCAL_OUTPUT, __VA_ARGS__)
#define M_WORK3R_PROPAGATE_SINGLE_OUTPUT(var) \
*(_s_data->M_C(var, _ptr)) = var;
#define M_WORK3R_PROPAGATE_LOCAL_OUTPUT(...) \
M_MAP(M_WORK3R_PROPAGATE_SINGLE_OUTPUT, __VA_ARGS__)
M_END_PROTECTED_CODE
#else /* M_USE_WORKER */
/* Define empty types and empty functions to not use any worker:
   everything runs synchronously in the calling thread. */
typedef struct m_worker_block_s {
  int x;  /* dummy member: C forbids empty structs */
} m_worker_sync_t[1];
typedef struct m_worker_s {
  int x;  /* dummy member: C forbids empty structs */
} m_worker_t[1];
/* The worker-enabled API accepts from 1 up to 5 arguments
   (numWorker, extraQueue, resetFunc and clearFunc have defaults via
   M_DEFAULT_ARGS), so this stub must be variadic as well.
   Fix: the previous stub took exactly 4 arguments and dropped
   clearFunc, breaking any call that compiled with M_USE_WORKER=1. */
#define m_worker_init(g, ...) do { (void) (g); } while (0)
#define m_worker_clear(g) do { (void) g; } while (0)
#define m_worker_start(b, w) do { (void) b; } while (0)
/* Execute the work order immediately in the calling thread */
#define m_worker_spawn(b, f, d) do { f(d); } while (0)
/* Nothing runs asynchronously, so every sync point is always finished */
#define m_worker_sync_p(b) true
#define m_worker_sync(b) do { (void) b; } while (0)
/* Only the calling thread executes work */
#define m_worker_count(w) 1
#define m_worker_flush(w) do { (void) w; } while (0)
/* Run the core block inline; input/output lists are unused */
#define M_WORKER_SPAWN(b, i, c, o) do { c } while (0)
#endif /* M_USE_WORKER */
#if M_USE_SMALL_NAME
#define worker_t m_worker_t
#define worker_sync_t m_worker_sync_t
#define worker_init m_worker_init
#define worker_clear m_worker_clear
#define worker_start m_worker_start
#define worker_spawn m_worker_spawn
#define worker_sync_p m_worker_sync_p
#define worker_sync m_worker_sync
#define worker_count m_worker_count
#define worker_flush m_worker_flush
#define WORKER_SPAWN M_WORKER_SPAWN
#endif
#endif

View File

@ -1,7 +1,11 @@
idf_component_register(
SRC_DIRS "src"
"src/applications"
"src/applications/main/system_info"
"src/applications/services/desktop"
"src/applications/services/loader"
"src/applications/services/gui"
INCLUDE_DIRS "inc"
PRIV_INCLUDE_DIRS "src"
REQUIRES esp_lvgl_port esp_lcd esp_lcd_touch driver
REQUIRES esp_lvgl_port esp_lcd esp_lcd_touch driver mlib cmsis_core furi
)

View File

@ -1,11 +1,19 @@
#ifndef NANOBAKE_H
#define NANOBAKE_H
#pragma once
#include "nb_platform.h"
#include "nb_hardware.h"
#include "nb_app.h"
extern void nanobake_run(nb_platform_config_t _Nonnull * config);
#ifdef __cplusplus
extern "C" {
#endif
//extern nb_app_config_t[32] nanobake_apps(nb_app_config_t app_configs, ...);
extern void nanobake_start(nb_config_t _Nonnull * config);
#endif //NANOBAKE_H
typedef void* FuriThreadId;
extern FuriThreadId nanobake_get_app_thread_id(size_t index);
extern size_t nanobake_get_app_thread_count();
#ifdef __cplusplus
}
#endif

View File

@ -1,49 +1,38 @@
#ifndef NANOBAKE_NB_APP_H
#define NANOBAKE_NB_APP_H
#define NB_APP_ID_LENGTH 32
#define NB_APP_NAME_LENGTH 32
#pragma once
#include <stdio.h>
#include <esp_err.h>
#include <lvgl.h>
// region Forward declarations
struct nb_platform;
typedef struct nb_platform nb_platform_t;
//endregion
#ifdef __cplusplus
extern "C" {
#endif
#define NB_APP_ID_LENGTH 32
#define NB_APP_NAME_LENGTH 32
typedef enum nb_app_type nb_app_type_t;
enum nb_app_type {
SERVICE,
SYSTEM,
USER
USER,
STARTUP
};
typedef struct nb_app nb_app_t;
typedef void (*nb_app_callback_on_create) (nb_platform_t* platform, lv_obj_t* lv_parent);
typedef void (*nb_app_callback_update) (nb_platform_t* platform, lv_obj_t* lv_parent);
typedef void (*nb_app_callback_on_destroy) (nb_platform_t* platform);
typedef int32_t (*nb_app_entry_point) (void _Nonnull* parameter);
struct nb_app {
char id[NB_APP_ID_LENGTH];
char name[NB_APP_NAME_LENGTH];
nb_app_type_t type;
nb_app_callback_on_create _Nullable on_create;
nb_app_callback_on_destroy _Nullable on_destroy;
nb_app_callback_update _Nullable on_update;
size_t update_task_stack_size;
uint32_t update_task_priority;
const char id[NB_APP_ID_LENGTH];
const char name[NB_APP_NAME_LENGTH];
const nb_app_type_t type;
const nb_app_entry_point _Nullable entry_point;
const size_t stack_size;
const uint32_t priority;
};
typedef struct nb_app_instance nb_app_instance_t;
struct nb_app_instance {
nb_app_t config;
};
esp_err_t nb_app_validate(nb_app_t* _Nonnull app);
#endif //NANOBAKE_NB_APP_H
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,26 @@
#pragma once
#include "nb_display.h"
#include "nb_touch.h"
#include "nb_app.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef nb_touch_driver_t (*create_touch_driver)();
typedef nb_display_driver_t (*create_display_driver)();
typedef struct nb_config nb_config_t;
struct nb_config {
// Required driver for display
const create_display_driver _Nonnull display_driver;
// Optional driver for touch input
const create_touch_driver _Nullable touch_driver;
// List of user applications
const nb_app_t* apps[];
};
#ifdef __cplusplus
}
#endif

View File

@ -1,8 +1,11 @@
#ifndef NANOBAKE_NB_DISPLAY_H
#define NANOBAKE_NB_DISPLAY_H
#pragma once
#include <esp_lcd_panel_io.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct nb_display nb_display_t;
struct nb_display {
@ -25,6 +28,8 @@ struct nb_display_driver {
* @param[in] driver
* @return allocated display object
*/
nb_display_t _Nonnull* nb_display_create(nb_display_driver_t _Nonnull* driver);
nb_display_t _Nonnull* nb_display_alloc(nb_display_driver_t _Nonnull* driver);
#endif // NANOBAKE_NB_DISPLAY_H
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,24 @@
#pragma once
#include "nb_config.h"
#include <lvgl.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct nb_hardware nb_hardware_t;
struct nb_hardware {
nb_display_t* _Nonnull display;
nb_touch_t* _Nullable touch;
};
/**
* @param[in] config
* @return a newly allocated platform instance (caller takes ownership)
*/
nb_hardware_t _Nonnull* nb_hardware_alloc(nb_config_t _Nonnull* config);
#ifdef __cplusplus
}
#endif

View File

@ -1,42 +0,0 @@
#ifndef NANOBAKE_NB_PLATFORM_H
#define NANOBAKE_NB_PLATFORM_H
#include "nb_display.h"
#include "nb_touch.h"
#include "nb_app.h"
#include <esp_err.h>
#include <lvgl.h>
typedef nb_touch_driver_t (*create_touch_driver)();
typedef nb_display_driver_t (*create_display_driver)();
typedef struct nb_platform_config nb_platform_config_t;
struct nb_platform_config {
// Required driver for display
create_display_driver _Nonnull display_driver;
// Optional driver for touch input
create_touch_driver _Nullable touch_driver;
// List of user applications
nb_app_t* apps[];
};
typedef struct nb_lvgl nb_lvgl_t;
struct nb_lvgl {
lv_disp_t* _Nonnull disp;
lv_indev_t* _Nullable touch_indev;
};
typedef struct nb_platform nb_platform_t;
struct nb_platform {
nb_display_t* _Nonnull display;
nb_touch_t* _Nullable touch;
nb_lvgl_t* _Nonnull lvgl;
};
/**
* @param[in] config
* @return a newly allocated platform instance (caller takes ownership)
*/
nb_platform_t _Nonnull* nb_platform_create(nb_platform_config_t _Nonnull* config);
#endif // NANOBAKE_NB_PLATFORM_H

View File

@ -1,9 +1,12 @@
#ifndef NANOBAKE_NB_TOUCH_H
#define NANOBAKE_NB_TOUCH_H
#pragma once
#include "esp_lcd_touch.h"
#include <esp_lcd_panel_io.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct nb_touch_driver nb_touch_driver_t;
struct nb_touch_driver {
@ -22,6 +25,8 @@ struct nb_touch {
* @param[in] driver
* @return a newly allocated instance
*/
nb_touch_t _Nonnull* nb_touch_create(nb_touch_driver_t _Nonnull* driver);
nb_touch_t _Nonnull* nb_touch_alloc(nb_touch_driver_t _Nonnull* driver);
#endif // NANOBAKE_NB_TOUCH_H
#ifdef __cplusplus
}
#endif

View File

@ -1,33 +1,40 @@
#include "system_info.h"
#include "nanobake.h"
#include <core_defines.h>
#include <thread.h>
#include <esp_lvgl_port.h>
#include <nb_platform.h>
static int32_t system_info_entry_point(void* param) {
UNUSED(param);
static void prv_on_create(nb_platform_t _Nonnull* platform, lv_obj_t _Nonnull* lv_parent) {
lvgl_port_lock(0);
// Wait for all apps to start
vTaskDelay(1000 / portTICK_PERIOD_MS);
lv_obj_t* cpu_label = lv_label_create(lv_parent);
lv_label_set_recolor(cpu_label, true);
lv_obj_set_width(cpu_label, (lv_coord_t)platform->display->horizontal_resolution);
lv_obj_set_style_text_align(cpu_label, LV_TEXT_ALIGN_LEFT, 0);
lv_label_set_text(cpu_label, "CPU usage: ?");
lv_obj_align(cpu_label, LV_ALIGN_TOP_LEFT, 0, 0);
size_t system_service_count = nanobake_get_app_thread_count();
printf("Running apps:\n");
for (int i = 0; i < system_service_count; ++i) {
FuriThreadId thread_id = nanobake_get_app_thread_id(i);
const char* appid = furi_thread_get_appid(thread_id);
const char* name = furi_thread_get_name(thread_id);
bool is_suspended = furi_thread_is_suspended(thread_id);
const char* status = is_suspended ? "suspended" : "active";
bool is_service = furi_thread_mark_is_service(thread_id);
const char* type = is_service ? "service" : "app";
printf(" - [%s, %s] %s (%s)\n", type, status, name, appid);
}
lv_obj_t* mem_free_label = lv_label_create(lv_parent);
lv_label_set_recolor(mem_free_label, true);
lv_obj_set_width(mem_free_label, (lv_coord_t)platform->display->horizontal_resolution);
lv_obj_set_style_text_align(mem_free_label, LV_TEXT_ALIGN_LEFT, 0);
lv_label_set_text(mem_free_label, "Memory: ?");
lv_obj_align(mem_free_label, LV_ALIGN_TOP_LEFT, 0, 15);
printf("Heap memory available: %d / %d\n",
heap_caps_get_free_size(MALLOC_CAP_DEFAULT),
heap_caps_get_total_size(MALLOC_CAP_DEFAULT)
);
lvgl_port_unlock();
return 0;
}
nb_app_t system_info_app = {
.id = "systeminfo",
.name = "System Info",
.type = SYSTEM,
.on_create = &prv_on_create,
.on_update = NULL,
.on_destroy = NULL
.entry_point = &system_info_entry_point,
.stack_size = 2048,
.priority = 10
};

View File

@ -1,8 +1,13 @@
#ifndef NANOBAKE_SYSTEM_INFO_H
#define NANOBAKE_SYSTEM_INFO_H
#pragma once
#include "nb_app.h"
#ifdef __cplusplus
extern "C" {
#endif
extern nb_app_t system_info_app;
#endif // NANOBAKE_SYSTEM_INFO_H
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,28 @@
#include "nb_applications.h"
// System services
extern const nb_app_t desktop_app;
extern const nb_app_t gui_app;
extern const nb_app_t loader_app;
// System apps
extern const nb_app_t system_info_app;
const nb_app_t* const FLIPPER_SERVICES[] = {
&desktop_app,
&gui_app,
&loader_app
};
const size_t FLIPPER_SERVICES_COUNT = sizeof(FLIPPER_SERVICES) / sizeof(nb_app_t*);
const nb_app_t* const FLIPPER_SYSTEM_APPS[] = {
&system_info_app
};
const size_t FLIPPER_SYSTEM_APPS_COUNT = sizeof(FLIPPER_SYSTEM_APPS) / sizeof(nb_app_t*);
const FlipperInternalOnStartHook FLIPPER_ON_SYSTEM_START[] = {
};
const size_t FLIPPER_ON_SYSTEM_START_COUNT = sizeof(FLIPPER_ON_SYSTEM_START) / sizeof(FlipperInternalOnStartHook);

View File

@ -0,0 +1,22 @@
#pragma once
#include "nb_app.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void (*FlipperInternalOnStartHook)(void);
extern const nb_app_t* const FLIPPER_SERVICES[];
extern const size_t FLIPPER_SERVICES_COUNT;
extern const nb_app_t* const FLIPPER_SYSTEM_APPS[];
extern const size_t FLIPPER_SYSTEM_APPS_COUNT;
extern const FlipperInternalOnStartHook FLIPPER_ON_SYSTEM_START[];
extern const size_t FLIPPER_ON_SYSTEM_START_COUNT;
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,35 @@
#include "desktop.h"
#include "nb_hardware.h"
#include <esp_lvgl_port.h>
#include "core_defines.h"
#include <esp_log.h>
//nb_desktop_t* shared_desktop = NULL;
static int32_t prv_desktop_main(void* param) {
UNUSED(param);
printf("desktop app init\n");
// nb_desktop_t* desktop = desktop_alloc();
// shared_desktop = desktop;
// lvgl_port_lock(0);
//
// lv_obj_t* label = lv_label_create(lv_parent);
// lv_label_set_recolor(label, true);
// lv_obj_set_width(label, (lv_coord_t)platform->display->horizontal_resolution);
// lv_obj_set_style_text_align(label, LV_TEXT_ALIGN_LEFT, 0);
// lv_label_set_text(label, "Desktop app");
// lv_obj_align(label, LV_ALIGN_TOP_LEFT, 0, 0);
//
// lvgl_port_unlock();
return 0;
}
const nb_app_t desktop_app = {
.id = "desktop",
.name = "Desktop",
.type = SERVICE,
.entry_point = &prv_desktop_main,
.stack_size = 2048,
.priority = 10
};

View File

@ -0,0 +1,13 @@
#pragma once
#include "nb_app.h"
#ifdef __cplusplus
extern "C" {
#endif
extern const nb_app_t desktop_app;
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,18 @@
#include "gui.h"
#include "core_defines.h"
#include "check.h"
static int32_t prv_gui_main(void* param) {
UNUSED(param);
printf("gui app init\n");
return 0;
}
const nb_app_t gui_app = {
.id = "gui",
.name = "GUI",
.type = STARTUP,
.entry_point = &prv_gui_main,
.stack_size = 2048,
.priority = 10
};

View File

@ -0,0 +1,13 @@
#pragma once
#include "nb_app.h"
#ifdef __cplusplus
extern "C" {
#endif
extern const nb_app_t gui_app;
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,17 @@
#include "loader.h"
#include "core_defines.h"
static int32_t prv_loader_main(void* param) {
UNUSED(param);
printf("loader app init\n");
return 0;
}
const nb_app_t loader_app = {
.id = "loader",
.name = "Loader",
.type = STARTUP,
.entry_point = &prv_loader_main,
.stack_size = 2048,
.priority = 10
};

View File

@ -0,0 +1,13 @@
#pragma once
#include "nb_app.h"
#ifdef __cplusplus
extern "C" {
#endif
extern const nb_app_t loader_app;
#ifdef __cplusplus
}
#endif

View File

@ -1,15 +1,104 @@
#include "nanobake.h"
#include "applications/main/system_info/system_info.h"
#include "nb_hardware.h"
#include "nb_lvgl.h"
#include "applications/nb_applications.h"
#include <esp_log.h>
#include <m-list.h>
// Furi
#include <thread.h>
#include <kernel.h>
#include <record.h>
#include <check.h>
void nb_app_start(nb_platform_t _Nonnull* platform, nb_app_t _Nonnull* config) {
lv_obj_t* scr = lv_scr_act();
ESP_ERROR_CHECK(nb_app_validate(config));
config->on_create(platform, scr);
static const char* TAG = "nanobake";
M_LIST_DEF(thread_ids, FuriThreadId);
static void prv_furi_init() {
// TODO: can we remove the suspend-resume logic?
if (xTaskGetSchedulerState() == taskSCHEDULER_RUNNING) {
vTaskSuspendAll();
}
furi_record_init();
xTaskResumeAll();
}
extern void nanobake_run(nb_platform_config_t _Nonnull* config) {
nb_platform_t _Nonnull* platform = nb_platform_create(config);
thread_ids_t prv_thread_ids;
nb_app_start(platform, config->apps[0]);
// nb_app_start(platform, &system_info_app);
FuriThreadId nanobake_get_app_thread_id(size_t index) {
return *thread_ids_get(prv_thread_ids, index);
}
size_t nanobake_get_app_thread_count() {
return thread_ids_size(prv_thread_ids);
}
extern void nanobake_start(nb_config_t _Nonnull* config) {
prv_furi_init();
nb_hardware_t _Nonnull* hardware = nb_hardware_alloc(config);
nb_lvgl_init(hardware);
thread_ids_init(prv_thread_ids);
ESP_LOGI(TAG, "Starting services");
for(size_t i = 0; i < FLIPPER_SERVICES_COUNT; i++) {
ESP_LOGI(TAG, "Starting system service \"%s\"", FLIPPER_SERVICES[i]->name);
FuriThread* thread = furi_thread_alloc_ex(
FLIPPER_SERVICES[i]->name,
FLIPPER_SERVICES[i]->stack_size,
FLIPPER_SERVICES[i]->entry_point,
NULL
);
furi_thread_mark_as_service(thread);
furi_thread_set_appid(thread, FLIPPER_SERVICES[i]->id);
furi_thread_start(thread);
FuriThreadId thread_id = furi_thread_get_id(thread);
thread_ids_push_back(prv_thread_ids, thread_id);
}
ESP_LOGI(TAG, "Starting system apps");
for(size_t i = 0; i < FLIPPER_SYSTEM_APPS_COUNT; i++) {
ESP_LOGI(TAG, "Starting system app \"%s\"", FLIPPER_SYSTEM_APPS[i]->name);
FuriThread* thread = furi_thread_alloc_ex(
FLIPPER_SYSTEM_APPS[i]->name,
FLIPPER_SYSTEM_APPS[i]->stack_size,
FLIPPER_SYSTEM_APPS[i]->entry_point,
NULL
);
furi_thread_mark_as_service(thread);
furi_thread_set_appid(thread, FLIPPER_SYSTEM_APPS[i]->id);
furi_thread_start(thread);
FuriThreadId thread_id = furi_thread_get_id(thread);
thread_ids_push_back(prv_thread_ids, thread_id);
}
// ESP_LOGI(TAG, "Starting external apps");
//
// size_t external_apps_count = sizeof(*config->apps);
// for(size_t i = 0; i < FLIPPER_SERVICES_COUNT; i++) {
// ESP_LOGI(TAG, "Starting external app \"%s\"", FLIPPER_[i]->name);
//
// FuriThread* thread = furi_thread_alloc_ex(
// FLIPPER_SERVICES[i]->name,
// FLIPPER_SERVICES[i]->stack_size,
// FLIPPER_SERVICES[i]->entry_point,
// NULL
// );
// furi_thread_set_appid(thread, FLIPPER_SERVICES[i]->id);
// furi_thread_start(thread);
//
// FuriThreadId thread_id = furi_thread_get_id(thread);
// thread_ids_push_back(prv_thread_ids, thread_id);
// }
ESP_LOGI(TAG, "Startup complete");
}

Some files were not shown because too many files have changed in this diff Show More