初始版本

This commit is contained in:
xiaozhengsheng
2025-08-19 09:49:41 +08:00
parent 10f1ddf1c1
commit 6df0f7d96e
2974 changed files with 1712873 additions and 54 deletions

View File

@@ -0,0 +1,52 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
 * @brief Application-side serialization configuration for the SPI 5-wire PHY.
 *
 * NOTE(review): this header currently declares nothing — it provides only the
 * include guard and the C++ linkage wrapper. Presumably a placeholder for
 * future 5-wire configuration symbols; confirm before removing.
 */
#ifndef SER_CONFIG_5W_APP_H__
#define SER_CONFIG_5W_APP_H__
#ifdef __cplusplus
extern "C" {    /* Ensure C linkage when included from C++. */
#endif
/* No 5-wire specific configuration symbols are defined here. */
#ifdef __cplusplus
}
#endif
#endif // SER_CONFIG_5W_APP_H__

View File

@@ -0,0 +1,106 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
 * @brief Application-side PHY configuration for the serialization SPI master
 *        and UART transports.
 *
 * The SER_APP_* pin symbols and APP_IRQ_PRIORITY_MID are expected to be
 * provided by "boards.h" / "ser_config.h".
 */
#ifndef SER_PHY_CONFIG_APP_H__
#define SER_PHY_CONFIG_APP_H__
#include "boards.h"
#include "ser_config.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Pick the legacy SPI master identifier from whichever SPI_MASTER_x_ENABLE is
 * defined. NOTE(review): these are three independent #if blocks, not an
 * #if/#elif chain — if more than one enable flag is defined, the compiler
 * reports a macro redefinition and the last definition wins. Confirm that
 * exactly one master is ever enabled. */
#if defined(SPI_MASTER_0_ENABLE)
#define SER_PHY_SPI_MASTER SPI_MASTER_0
#endif
#if defined(SPI_MASTER_1_ENABLE)
#define SER_PHY_SPI_MASTER SPI_MASTER_1
#endif
#if defined(SPI_MASTER_2_ENABLE)
#define SER_PHY_SPI_MASTER SPI_MASTER_2
#endif
/* Map the selected SPI master (new SPIx_ENABLED driver flags or the legacy
 * SPI_MASTER_x_ENABLE flags) to its nrf_drv_spi instance and board pins. */
#if (defined(SPI0_ENABLED) && (SPI0_ENABLED == 1)) || defined(SPI_MASTER_0_ENABLE)
#define SER_PHY_SPI_MASTER_INSTANCE NRF_DRV_SPI_INSTANCE(0)
#define SER_PHY_SPI_MASTER_PIN_SCK SER_APP_SPIM0_SCK_PIN
#define SER_PHY_SPI_MASTER_PIN_MISO SER_APP_SPIM0_MISO_PIN
#define SER_PHY_SPI_MASTER_PIN_MOSI SER_APP_SPIM0_MOSI_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT SER_APP_SPIM0_SS_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST SER_APP_SPIM0_REQ_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_READY SER_APP_SPIM0_RDY_PIN
#elif (defined(SPI1_ENABLED) && (SPI1_ENABLED == 1)) || defined(SPI_MASTER_1_ENABLE)
#define SER_PHY_SPI_MASTER_INSTANCE NRF_DRV_SPI_INSTANCE(1)
#define SER_PHY_SPI_MASTER_PIN_SCK SER_APP_SPIM1_SCK_PIN
#define SER_PHY_SPI_MASTER_PIN_MISO SER_APP_SPIM1_MISO_PIN
#define SER_PHY_SPI_MASTER_PIN_MOSI SER_APP_SPIM1_MOSI_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT SER_APP_SPIM1_SS_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST SER_APP_SPIM1_REQ_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_READY SER_APP_SPIM1_RDY_PIN
#elif (defined(SPI2_ENABLED) && (SPI2_ENABLED == 1)) || defined(SPI_MASTER_2_ENABLE)
#define SER_PHY_SPI_MASTER_INSTANCE NRF_DRV_SPI_INSTANCE(2)
#define SER_PHY_SPI_MASTER_PIN_SCK SER_APP_SPIM2_SCK_PIN
#define SER_PHY_SPI_MASTER_PIN_MISO SER_APP_SPIM2_MISO_PIN
#define SER_PHY_SPI_MASTER_PIN_MOSI SER_APP_SPIM2_MOSI_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT SER_APP_SPIM2_SS_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST SER_APP_SPIM2_REQ_PIN
#define SER_PHY_SPI_MASTER_PIN_SLAVE_READY SER_APP_SPIM2_RDY_PIN
#endif
#define CONN_CHIP_RESET_PIN_NO SER_CONN_CHIP_RESET_PIN /**< Pin used for resetting the connectivity chip. */
/* UART configuration */
#define UART_IRQ_PRIORITY APP_IRQ_PRIORITY_MID
#define SER_PHY_UART_RX SER_APP_RX_PIN
#define SER_PHY_UART_TX SER_APP_TX_PIN
#define SER_PHY_UART_CTS SER_APP_CTS_PIN
#define SER_PHY_UART_RTS SER_APP_RTS_PIN
#ifdef __cplusplus
}
#endif
#endif // SER_PHY_CONFIG_APP_H__

View File

@@ -0,0 +1,82 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
 * @brief Connectivity-side PHY configuration for the serialization SPI slave
 *        and UART transports.
 *
 * The SER_CON_* pin symbols and APP_IRQ_PRIORITY_LOWEST are expected to be
 * provided by "boards.h" / "ser_config.h".
 */
#ifndef SER_PHY_CONFIG_CONN_H__
#define SER_PHY_CONFIG_CONN_H__
#include "boards.h"
#include "ser_config.h"
#ifdef __cplusplus
extern "C" {
#endif
/***********************************************************************************************//**
 * SER_PHY layer configuration.
 **************************************************************************************************/
/* PPI channel and GPIOTE channel used to drive the READY signal. */
#define SER_PHY_SPI_PPI_RDY_CH 0
#define SER_PHY_SPI_GPIOTE_RDY_CH 0
/* Use SPIS instance 0 when the device has one (NRF_SPIS0 defined by the MDK),
 * otherwise fall back to instance 1. */
#ifdef NRF_SPIS0
#define SER_PHY_SPI_SLAVE_INSTANCE 0
#else
#define SER_PHY_SPI_SLAVE_INSTANCE 1
#endif
/* Board pin assignments for the SPI slave transport. */
#define SER_PHY_SPI_SLAVE_REQ_PIN SER_CON_SPIS_REQ_PIN
#define SER_PHY_SPI_SLAVE_RDY_PIN SER_CON_SPIS_RDY_PIN
#define SER_PHY_SPI_SLAVE_SCK_PIN SER_CON_SPIS_SCK_PIN
#define SER_PHY_SPI_SLAVE_MISO_PIN SER_CON_SPIS_MISO_PIN
#define SER_PHY_SPI_SLAVE_MOSI_PIN SER_CON_SPIS_MOSI_PIN
#define SER_PHY_SPI_SLAVE_SS_PIN SER_CON_SPIS_CSN_PIN
/* UART configuration */
#define UART_IRQ_PRIORITY APP_IRQ_PRIORITY_LOWEST
#define SER_PHY_UART_RX SER_CON_RX_PIN
#define SER_PHY_UART_TX SER_CON_TX_PIN
#define SER_PHY_UART_CTS SER_CON_CTS_PIN
#define SER_PHY_UART_RTS SER_CON_RTS_PIN
#ifdef __cplusplus
}
#endif
#endif // SER_PHY_CONFIG_CONN_H__

View File

@@ -0,0 +1,198 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
 * @brief Debug event hooks for the application-side (SPI master) serialization PHY.
 *
 * When SER_PHY_DEBUG_APP_ENABLE is not defined (the default), every
 * DEBUG_EVT_* macro expands to nothing, so instrumentation in the PHY code
 * compiles away. When it is defined, each macro forwards an event identifier
 * and a 32-bit payload to the callback registered through debug_init().
 *
 * Fixes relative to the previous revision:
 *  - The closing brace of the `extern "C"` block was emitted in both
 *    configurations even though the opening brace existed only in the enabled
 *    branch, leaving C++ builds with debugging disabled unbalanced. Both
 *    braces now live inside the enabled branch.
 *  - The macros ended in `} while (0);` — the trailing semicolon made each
 *    invocation expand to two statements, breaking unbraced if/else call
 *    sites. The canonical `do { ... } while (0)` form is used instead.
 */
#ifndef SER_PHY_DEBUG_APP_H__
#define SER_PHY_DEBUG_APP_H__

#ifndef SER_PHY_DEBUG_APP_ENABLE

/* Debugging disabled: the macros expand to nothing. They deliberately carry
 * no semicolon, so `DEBUG_EVT_...(d);` at a call site stays a single empty
 * statement and remains valid inside unbraced if/else bodies. */
#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST(data)
#define DEBUG_EVT_SPI_MASTER_RAW_READY(data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(data)
#define DEBUG_EVT_SPI_MASTER_RAW_API_CALL(data)
#define DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE(data)
#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE(data)
#define DEBUG_EVT_SPI_MASTER_PHY_TX_PKT_SENT(data)
#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_DROPPED(data)
#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_RECEIVED(data)
#define DEBUG_EVT_SPI_MASTER_PHY_BUF_REQUEST(data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_GUARDED(data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_PASSED(data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_ABORTED(data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_RESTARTED(data)

#else
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/**@brief Low-level hardware event types reported by the SPI master PHY. */
typedef enum
{
    SPI_MASTER_RAW_READY,
    SPI_MASTER_RAW_REQUEST,
    SPI_MASTER_RAW_XFER_DONE,
    SPI_MASTER_RAW_API_CALL,
    SPI_MASTER_RAW_READY_EDGE,
    SPI_MASTER_RAW_REQUEST_EDGE,
    SPI_MASTER_RAW_XFER_STARTED,
    SPI_MASTER_RAW_XFER_GUARDED,
    SPI_MASTER_RAW_XFER_PASSED,
    SPI_MASTER_RAW_XFER_ABORTED,
    SPI_MASTER_RAW_XFER_RESTARTED,
    SPI_MASTER_PHY_TX_PKT_SENT,
    SPI_MASTER_PHY_BUF_REQUEST,
    SPI_MASTER_PHY_RX_PKT_RECEIVED,
    SPI_MASTER_PHY_RX_PKT_DROPPED,
    SPI_MASTER_EVT_MAX              /**< Enumeration upper bound. */
} spi_master_raw_evt_type_t;

/**@brief Low-level hardware event: type plus raw 32-bit payload. */
typedef struct
{
    spi_master_raw_evt_type_t evt;
    uint32_t                  data;
} spi_master_raw_evt_t;

/**@brief Handler type invoked for every debug event. */
typedef void (*spi_master_raw_callback_t)(spi_master_raw_evt_t event);

/**@brief Registers the callback that receives debug events.
 *
 * @param[in] spi_master_raw_evt_callback Handler called from debug_evt().
 */
void debug_init(spi_master_raw_callback_t spi_master_raw_evt_callback);

/**@brief Dispatches a single debug event to the registered callback.
 *
 * @param[in] evt  Event type.
 * @param[in] data Event payload.
 */
void debug_evt(spi_master_raw_evt_type_t evt, uint32_t data);

#define DEBUG_EVT(evt, data)    \
    do {                        \
        debug_evt(evt, data);   \
    } while (0)

/* Each named macro simply tags the payload with its event type. */
#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST(data)       DEBUG_EVT(SPI_MASTER_RAW_REQUEST, data)
#define DEBUG_EVT_SPI_MASTER_RAW_READY(data)         DEBUG_EVT(SPI_MASTER_RAW_READY, data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(data)     DEBUG_EVT(SPI_MASTER_RAW_XFER_DONE, data)
#define DEBUG_EVT_SPI_MASTER_RAW_API_CALL(data)      DEBUG_EVT(SPI_MASTER_RAW_API_CALL, data)
#define DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE(data)    DEBUG_EVT(SPI_MASTER_RAW_READY_EDGE, data)
#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE(data)  DEBUG_EVT(SPI_MASTER_RAW_REQUEST_EDGE, data)
#define DEBUG_EVT_SPI_MASTER_PHY_TX_PKT_SENT(data)   DEBUG_EVT(SPI_MASTER_PHY_TX_PKT_SENT, data)
#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_DROPPED(data)  DEBUG_EVT(SPI_MASTER_PHY_RX_PKT_DROPPED, data)
#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_RECEIVED(data) DEBUG_EVT(SPI_MASTER_PHY_RX_PKT_RECEIVED, data)
#define DEBUG_EVT_SPI_MASTER_PHY_BUF_REQUEST(data)   DEBUG_EVT(SPI_MASTER_PHY_BUF_REQUEST, data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_GUARDED(data)  DEBUG_EVT(SPI_MASTER_RAW_XFER_GUARDED, data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_PASSED(data)   DEBUG_EVT(SPI_MASTER_RAW_XFER_PASSED, data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_ABORTED(data)  DEBUG_EVT(SPI_MASTER_RAW_XFER_ABORTED, data)
#define DEBUG_EVT_SPI_MASTER_RAW_XFER_RESTARTED(data) DEBUG_EVT(SPI_MASTER_RAW_XFER_RESTARTED, data)

#ifdef __cplusplus
}
#endif

#endif /* SER_PHY_DEBUG_APP_ENABLE */
#endif //SER_PHY_DEBUG_APP_H__

View File

@@ -0,0 +1,166 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
 * @brief Debug event hooks for the connectivity-side (SPI slave) serialization PHY.
 *
 * When SER_PHY_DEBUG_CONN_ENABLE is not defined (the default), every
 * DEBUG_EVT_* macro expands to nothing, so instrumentation in the PHY code
 * compiles away. When it is defined, each macro forwards an event identifier
 * and a 32-bit payload to the callback registered through debug_init().
 *
 * Fixes relative to the previous revision:
 *  - The disabled-branch definitions ended in a semicolon inside the #define
 *    (`#define NAME(data);`), which made every invocation expand to `;;` and
 *    broke unbraced if/else call sites. The semicolons are removed.
 *  - The enabled-branch macros ended in `} while (0);` — same double-statement
 *    problem; the canonical `do { ... } while (0)` form is used instead.
 *  - The closing brace of the `extern "C"` block was emitted in both
 *    configurations although the opening brace existed only in the enabled
 *    branch; both braces now live inside the enabled branch.
 */
#ifndef SER_PHY_DEBUG_CONN_H__
#define SER_PHY_DEBUG_CONN_H__

#ifndef SER_PHY_DEBUG_CONN_ENABLE

/* Debugging disabled: the macros expand to nothing (no semicolon), so
 * `DEBUG_EVT_...(d);` at a call site stays a single empty statement. */
#define DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(data)
#define DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(data)
#define DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(data)
#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(data)
#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(data)
#define DEBUG_EVT_SPI_SLAVE_PHY_BUF_REQUEST(data)
#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_RECEIVED(data)
#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_DROPPED(data)
#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_SENT(data)

#else
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/**@brief Low-level hardware event types reported by the SPI slave PHY. */
typedef enum
{
    SPI_SLAVE_RAW_BUFFERS_SET,
    SPI_SLAVE_RAW_RX_XFER_DONE,
    SPI_SLAVE_RAW_TX_XFER_DONE,
    SPI_SLAVE_RAW_REQ_SET,
    SPI_SLAVE_RAW_REQ_CLEARED,
    SPI_SLAVE_PHY_BUF_REQUEST,
    SPI_SLAVE_PHY_PKT_SENT,
    SPI_SLAVE_PHY_PKT_RECEIVED,
    SPI_SLAVE_PHY_PKT_DROPPED,
    SPI_SLAVE_RAW_EVT_TYPE_MAX      /**< Enumeration upper bound. */
} spi_slave_raw_evt_type_t;

/**@brief Low-level hardware event: type plus raw 32-bit payload. */
typedef struct
{
    spi_slave_raw_evt_type_t evt_type;
    uint32_t                 data;
} spi_slave_raw_evt_t;

/**@brief Handler type invoked for every debug event. */
typedef void (*spi_slave_raw_callback_t)(spi_slave_raw_evt_t event);

/**@brief Registers the callback that receives debug events.
 *
 * @param[in] spi_slave_raw_evt_callback Handler called from debug_evt().
 */
void debug_init(spi_slave_raw_callback_t spi_slave_raw_evt_callback);

/**@brief Dispatches a single debug event to the registered callback.
 *
 * @param[in] evt_type Event type.
 * @param[in] data     Event payload.
 */
void debug_evt(spi_slave_raw_evt_type_t evt_type, uint32_t data);

#define DEBUG_EVT(evt, data)    \
    do {                        \
        debug_evt(evt, data);   \
    } while (0)

/* Each named macro simply tags the payload with its event type. */
#define DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(data)  DEBUG_EVT(SPI_SLAVE_RAW_RX_XFER_DONE, data)
#define DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(data)  DEBUG_EVT(SPI_SLAVE_RAW_TX_XFER_DONE, data)
#define DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(data)   DEBUG_EVT(SPI_SLAVE_RAW_BUFFERS_SET, data)
#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(data)       DEBUG_EVT(SPI_SLAVE_RAW_REQ_SET, data)
#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(data)   DEBUG_EVT(SPI_SLAVE_RAW_REQ_CLEARED, data)
#define DEBUG_EVT_SPI_SLAVE_PHY_BUF_REQUEST(data)   DEBUG_EVT(SPI_SLAVE_PHY_BUF_REQUEST, data)
#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_RECEIVED(data)  DEBUG_EVT(SPI_SLAVE_PHY_PKT_RECEIVED, data)
#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_DROPPED(data)   DEBUG_EVT(SPI_SLAVE_PHY_PKT_DROPPED, data)
#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_SENT(data)      DEBUG_EVT(SPI_SLAVE_PHY_PKT_SENT, data)

#ifdef __cplusplus
}
#endif

#endif /* SER_PHY_DEBUG_CONN_ENABLE */
#endif //SER_PHY_DEBUG_CONN_H__

View File

@@ -0,0 +1,88 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
 * @brief Weak stub implementations of the serialization PHY API (ser_phy.h).
 *
 * Each function is marked __weak (toolchain-specific keyword) so that a real
 * PHY implementation linked into the image takes precedence over these stubs.
 * If a stub is reached at run time, no concrete PHY layer was provided;
 * APP_ERROR_CHECK_BOOL(false) flags that as an application error.
 */
#include "ser_phy.h"
#include "app_error.h"

/**@brief Stub for opening the PHY layer; a real implementation must override it. */
__weak uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    /* A function stub. Function should be implemented according to ser_phy.h API. */
    APP_ERROR_CHECK_BOOL(false);
    return NRF_SUCCESS;
}

/**@brief Stub for sending a TX packet; a real implementation must override it. */
__weak uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
    /* A function stub. Function should be implemented according to ser_phy.h API. */
    APP_ERROR_CHECK_BOOL(false);
    return NRF_SUCCESS;
}

/**@brief Stub for supplying an RX buffer; a real implementation must override it. */
__weak uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    /* A function stub. Function should be implemented according to ser_phy.h API. */
    APP_ERROR_CHECK_BOOL(false);
    return NRF_SUCCESS;
}

/**@brief Stub for closing the PHY layer; a real implementation must override it. */
__weak void ser_phy_close(void)
{
    /* A function stub. Function should be implemented according to ser_phy.h API. */
    APP_ERROR_CHECK_BOOL(false);
}

/**@brief Stub for enabling PHY interrupts; a real implementation must override it. */
__weak void ser_phy_interrupts_enable(void)
{
    /* A function stub. Function should be implemented according to ser_phy.h API. */
    APP_ERROR_CHECK_BOOL(false);
}

/**@brief Stub for disabling PHY interrupts; a real implementation must override it. */
__weak void ser_phy_interrupts_disable(void)
{
    /* A function stub. Function should be implemented according to ser_phy.h API. */
    APP_ERROR_CHECK_BOOL(false);
}

View File

@@ -0,0 +1,308 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/** @file
*
* @defgroup ser_phy Serialization PHY
* @{
* @ingroup ble_sdk_lib_serialization
*
* @brief PHY layer for serialization.
*
* @details The @ref ser_phy library declares functions and definitions of data structures and
* identifiers (typedef enum) that are used as API of the serialization PHY layer.
*
* \par Rationale
* Each specific PHY layer (SPI, I2C, UART, low power UART etc.) should provide the same API. This
* allows the layer above (the HAL Transport layer), which is responsible for controlling the PHY
* layer, memory management, CRC, retransmission etc., to be hardware independent.
*
*
* \par Interlayer communication and control
* The PHY layer is controlled by the HAL transport layer by calling functions declared in
* the @ref ser_phy library.
*
* @par
* The PHY layer communicates events to the HAL transport layer by calling a callback function.
* A handler to this function is passed in the @ref ser_phy_open function. This callback function
* should be called with a parameter of type @ref ser_phy_evt_t, filled accordingly to an event to be
* passed. Types of supported events are defined in @ref ser_phy_evt_type_t.
*
* @par
* For example, to pass an event indicating that an RX packet has been successfully received, first a
* struct of type @ref ser_phy_evt_t must be filled:
* @code
* ser_phy_evt_t phy_evt;
* phy_evt.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
* phy_evt.evt_params.rx_pkt_received.p_buffer = (pointer to the RX buffer);
* phy_evt.evt_params.rx_pkt_received.num_of_bytes = (number of received bytes);
* @endcode
* Then, the callback function must be called:
* @code
* events_handler(phy_evt);
* @endcode
* All functions declared in the @ref ser_phy file (ser_phy.h) must be implemented. Some events specified in
* @ref ser_phy_evt_type_t are optional to implement.
*
* \par Transmitting a packet
* Each PHY layer is responsible for adding the PHY header to a packet to be sent. This header
* consists of a 16-bit field that carries the packet length (the uint16_encode function defined in
* app_util.h should be used to ensure endianness independence). A pointer to a packet to be sent
* and length of the packet are parameters of the @ref ser_phy_tx_pkt_send function. When a packet
* has been transmitted, an event of type @ref SER_PHY_EVT_TX_PKT_SENT should be emitted.
*
* \image html ser_phy_transport_tx.svg "TX - interlayer communication"
*
* \par Receiving a packet
* The PHY layer should be able to store only the PHY header (16-bit field carrying the packet
* length). After the PHY header has been received, the transmission is stopped and the PHY
* layer must send a request to the HAL transport layer for memory to store the packet - an event
* of type @ref SER_PHY_EVT_RX_BUF_REQUEST with event parameters defined in
* @ref ser_phy_evt_rx_buf_request_params_t (the uint16_decode function defined in app_util.h should
* be used for header decoding to ensure endianness independence). The transmission should be
* resumed when the @ref ser_phy_rx_buf_set function has been called.
*
* @par
* When the @ref ser_phy_rx_buf_set function parameter equals NULL, there is not
* enough memory to store the packet. However, the packet will be received to a dummy location to
* ensure continuous communication. After receiving has finished, an event of type
* @ref SER_PHY_EVT_RX_PKT_DROPPED is generated.
*
* \image html ser_phy_transport_rx_dropped.svg "RX dropping - interlayer communication"
*
* @par
* When the @ref ser_phy_rx_buf_set function parameter is different than NULL, the packet is
* received to a buffer pointed to by it. After receiving has finished, an event of type
* @ref SER_PHY_EVT_RX_PKT_RECEIVED is generated with event parameters defined in
* @ref ser_phy_evt_rx_pkt_received_params_t.
*
* \image html ser_phy_transport_rx_received.svg "RX - interlayer communication"
*
* \par PHY layer errors
* PHY layer errors can be signaled by an event of type @ref SER_PHY_EVT_RX_OVERFLOW_ERROR or
* @ref SER_PHY_EVT_TX_OVERREAD_ERROR or @ref SER_PHY_EVT_HW_ERROR with event parameters defined in
* @ref ser_phy_evt_hw_error_params_t.
*
* @par Available PHY layers
* The following PHY layers are available:
* - @ref ser_phy_spi_page
* - @ref ser_phy_spi_5W_page
* - @ref ser_phy_uart_page
* - @ref ser_phy_uart_hci_page
* <!-- - @ref ser_phy_usb_hci_page -->
*
*/
#ifndef SER_PHY_H__
#define SER_PHY_H__
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**@brief Serialization PHY module event types. */
typedef enum
{
    SER_PHY_EVT_TX_PKT_SENT = 0,   /**< Obligatory to implement. An event indicating that a TX packet
                                    *   has been transmitted. */
    SER_PHY_EVT_RX_BUF_REQUEST,    /**< Obligatory to implement. An event indicating that the PHY layer
                                    *   needs a buffer for an RX packet. The PHY flow should be blocked
                                    *   until the @ref ser_phy_rx_buf_set function is called. */
    SER_PHY_EVT_RX_PKT_RECEIVED,   /**< Obligatory to implement. An event indicating that an RX packet
                                    *   has been successfully received. */
    SER_PHY_EVT_RX_PKT_DROPPED,    /**< Obligatory to implement. An event indicating that the RX packet
                                    *   receiving has been finished but the packet was discarded because
                                    *   it was longer than the available buffer. */
    SER_PHY_EVT_RX_OVERFLOW_ERROR, /**< Optional to implement. An event indicating that more
                                    *   information has been transmitted than the PHY module could
                                    *   handle. */
    SER_PHY_EVT_TX_OVERREAD_ERROR, /**< Optional to implement. An event indicating that the PHY module
                                    *   was forced to transmit more information than possessed. */
    SER_PHY_EVT_HW_ERROR,          /**< Optional to implement. An event indicating a hardware error
                                    *   in the PHY module. */
    SER_PHY_EVT_TYPE_MAX           /**< Enumeration upper bound. */
} ser_phy_evt_type_t;
/**@brief A struct containing parameters of event of type @ref SER_PHY_EVT_RX_BUF_REQUEST. */
typedef struct
{
uint16_t num_of_bytes; /**< Length of a buffer in octets that the layer above the PHY module should
* deliver, so that the PHY module can receive a packet. */
} ser_phy_evt_rx_buf_request_params_t;
/**@brief A struct containing parameters of event of type @ref SER_PHY_EVT_RX_PKT_RECEIVED. */
typedef struct
{
uint8_t * p_buffer; /**< Pointer to a buffer containing the received packet. */
uint16_t num_of_bytes; /**< Length of the received packet in octets. */
} ser_phy_evt_rx_pkt_received_params_t;
/**@brief A struct containing parameters of event of type @ref SER_PHY_EVT_HW_ERROR. */
typedef struct
{
uint32_t error_code; /**< Hardware error code - specific for a microcontroller. */
uint8_t * p_buffer; /**< Pointer to the buffer that was processed when error occured. */
} ser_phy_evt_hw_error_params_t;
/**@brief A struct containing events from a Serialization PHY module.
 *
 * @note Some events do not have parameters; in that case, all information is
 *       carried by evt_type.
 */
typedef struct
{
    ser_phy_evt_type_t evt_type; /**< Type of event. */

    union /**< Union alternative identified by evt_type in the enclosing struct. */
    {
        /** Parameters of event of type @ref SER_PHY_EVT_RX_BUF_REQUEST. */
        ser_phy_evt_rx_buf_request_params_t rx_buf_request;
        /** Parameters of event of type @ref SER_PHY_EVT_RX_PKT_RECEIVED. */
        ser_phy_evt_rx_pkt_received_params_t rx_pkt_received;
        /** Parameters of event of type @ref SER_PHY_EVT_HW_ERROR. */
        ser_phy_evt_hw_error_params_t hw_error;
    } evt_params;
} ser_phy_evt_t;
/**@brief A type of generic callback function handler to be used by all PHY module events.
*
* @param[in] event Serialization PHY module event.
*/
typedef void (*ser_phy_events_handler_t)(ser_phy_evt_t event);
/**@brief Function for opening and initializing the PHY module.
*
* @note The function initializes hardware and internal module states, and registers callback
* function to be used by all PHY module events.
*
* @warning If the function has been already called, the function @ref ser_phy_close has to be
* called before ser_phy_open can be called again.
*
* @param[in] events_handler Generic callback function handler to be used by all PHY module
* events.
*
* @retval NRF_SUCCESS Operation success.
* @retval NRF_ERROR_INVALID_STATE Operation failure. The function has been already called.
* To call it again, the function @ref ser_phy_close has to be
* called first.
* @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
* @retval NRF_ERROR_INVALID_PARAM Operation failure. Hardware initialization parameters are not
* supported.
*/
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler);
/**@brief Function for transmitting a packet.
*
* @note The function adds a packet pointed by p_buffer parameter to a transmission queue and
* schedules generation of an event of type @ref SER_PHY_EVT_TX_PKT_SENT upon transmission
* completion.
*
* @param[in] p_buffer Pointer to a buffer to transmit.
* @param[in] num_of_bytes Number of octets to transmit. Must be more than 0.
*
* @retval NRF_SUCCESS Operation success. Packet was added to the transmission queue
* and event will be send upon transmission completion.
* @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
* @retval NRF_ERROR_INVALID_PARAM Operation failure. The num_of_bytes parameter equal to 0.
* @retval NRF_ERROR_BUSY Operation failure. Transmitting of a packet in progress.
*/
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes);
/**@brief Function for setting an RX buffer and enabling reception of data (the PHY flow).
*
* @note The function has to be called as a response to an event of type
* @ref SER_PHY_EVT_RX_BUF_REQUEST. The function sets an RX buffer and enables reception of
* data (enables the PHY flow).
* Size of a buffer pointed by the p_buffer parameter should be at least equal to the
* num_of_bytes parameter passed within the event (@ref ser_phy_evt_rx_buf_request_params_t),
* or p_buffer should be equal to NULL if there is not enough memory.
* When p_buffer is different from NULL and num_of_bytes octets have been received, an event of
* type @ref SER_PHY_EVT_RX_PKT_RECEIVED is generated
* (@ref ser_phy_evt_rx_pkt_received_params_t).
* When p_buffer is equal to NULL, data is received to dummy location to ensure continuous
* communication. Then, if num_of_bytes octets have been received, an event of type
* @ref SER_PHY_EVT_RX_PKT_DROPPED is generated.
*
* @param[in] p_buffer Pointer to an RX buffer in which to receive.
*
* @retval NRF_SUCCESS Operation success.
* @retval NRF_ERROR_INVALID_STATE Operation failure. A buffer was set without request.
*/
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer);
/**@brief Function for closing the PHY module.
*
* @note The function disables hardware, resets internal module states, and unregisters the events
* callback function.
*/
void ser_phy_close(void);
/**@brief Function for enabling the PHY module interrupts.
*
* @note The function enables all interrupts that are used by the PHY module (and only those).
*/
void ser_phy_interrupts_enable(void);
/**@brief Function for disabling the PHY module interrupts.
*
* @note The function disables all interrupts that are used by the PHY module (and only those).
*/
void ser_phy_interrupts_disable(void);
#ifdef __cplusplus
}
#endif
#endif /* SER_PHY_H__ */
/** @} */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,185 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/** @file
*
* @defgroup ser_phy_hci HCI Serialization PHY
* @{
* @ingroup ble_sdk_lib_serialization
*
* @brief HCI PHY layer for serialization.
*
* @details This file contains declarations of functions and definitions of data structures and
* identifiers (typedef enum) used as API of the serialization HCI PHY layer.
*
*
*/
#ifndef SER_PHY_HCI_H__
#define SER_PHY_HCI_H__
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**@brief Serialization PHY HCI module event types. */
typedef enum
{
    SER_PHY_HCI_SLIP_EVT_PKT_SENT = 0, /**< An event indicating that a packet has been transmitted. */
    SER_PHY_HCI_SLIP_EVT_ACK_SENT,     /**< An event indicating that an ACK packet has been transmitted. */
    SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED, /**< An event indicating that a packet has been received. */
    SER_PHY_HCI_SLIP_EVT_HW_ERROR,     /**< An event indicating a hardware error in the PHY HCI module. */
    SER_PHY_HCI_SLIP_EVT_TYPE_MAX      /**< Enumeration upper bound. */
} ser_phy_hci_slip_evt_type_t;
/**@brief Struct representing a PHY HCI packet. */
typedef struct
{
uint8_t * p_buffer; /**< Pointer to a buffer containing a packet. */
uint16_t num_of_bytes; /**< Length of a packet in octets. */
} ser_phy_hci_pkt_params_t;
/**@brief Struct containing parameters of event of type @ref SER_PHY_HCI_SLIP_EVT_HW_ERROR. */
typedef struct
{
uint32_t error_code; /**< Hardware error code - specific for a microcontroller. */
} ser_phy_hci_evt_hw_error_params_t;
/**@brief Struct containing events from the Serialization PHY module.
 *
 * @note Some events do not have parameters; in that case, all information is
 *       carried by evt_type.
 */
typedef struct
{
    ser_phy_hci_slip_evt_type_t evt_type; /**< Type of an event. */

    union /**< Union alternative identified by evt_type in the enclosing struct. */
    {
        /** Parameters of event of type @ref SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED. */
        ser_phy_hci_pkt_params_t received_pkt;
        /** Parameters of event of type @ref SER_PHY_HCI_SLIP_EVT_HW_ERROR. */
        ser_phy_hci_evt_hw_error_params_t hw_error;
    } evt_params;
} ser_phy_hci_slip_evt_t;
/**@brief Type of generic callback function handler to be used by all PHY HCI events.
*
* @param[in] event Serialization PHY HCI module event.
*/
typedef void (*ser_phy_hci_slip_event_handler_t)(ser_phy_hci_slip_evt_t *p_event);
/**@brief Function for opening and initializing a HCI SLIP PHY module.
*
* @note The function initializes hardware and internal module states and registers callback
* function to be used by all PHY HCI module events.
*
* @warning If the function has been already called, the function @ref ser_phy_hci_slip_close has to be
* called before ser_phy_hci_slip_open can be called again.
*
* @param[in] events_handler Generic callback function handler to be used by all PHY HCI module
* events.
*
* @retval NRF_SUCCESS Operation success.
* @retval NRF_ERROR_INVALID_STATE Operation failure. The function has been already called.
* To call it again, the function @ref ser_phy_hci_slip_close has to
* be called first.
* @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
* @retval NRF_ERROR_INVALID_PARAM Operation failure. Hardware initialization parameters are not
* supported.
*/
uint32_t ser_phy_hci_slip_open(ser_phy_hci_slip_event_handler_t events_handler);
/**@brief Function for resetting the module.*/
void ser_phy_hci_slip_reset(void);
/**@brief A function for transmitting a HCI SLIP packet.
*
* @note The function adds a packet pointed by p_buffer parameter to a transmission queue and
* schedules generation of an event of type @ref SER_PHY_HCI_SLIP_EVT_PKT_SENT upon transmission
* completion.
*
* @param[in] p_header Pointer to ser_phy_hci_pkt_params_t structure representing packet header.
* @param[in] p_payload Pointer to ser_phy_hci_pkt_params_t structure representing packet payload.
* @param[in] p_crc Pointer to ser_phy_hci_pkt_params_t structure representing packet crc.
*
* @retval NRF_SUCCESS Operation success. Packet was added to the transmission queue
* and event will be sent upon transmission completion.
* @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied in p_header parameter.
* NULL pointer is allowed for p_payload and p_crc parameters.
* @retval NRF_ERROR_INVALID_PARAM Operation failure. Number of bytes to be sent equals 0.
* @retval NRF_ERROR_BUSY Operation failure. Transmitting of a packet in progress.
*/
uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header,
const ser_phy_hci_pkt_params_t * p_payload,
const ser_phy_hci_pkt_params_t * p_crc);
/**@brief A function for freeing an RX buffer.
*
* @note The function has to be called as a response to event @ref SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED
* when an RX packet has been processed. The function frees the RX buffer and therefore enables
* reception of next incoming data.
* @param[in] p_buffer Pointer to an RX buffer which must be freed.
*
* @retval NRF_SUCCESS Operation success.
* @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
* @retval NRF_ERROR_INVALID_STATE Operation failure. A buffer was already free.
*/
uint32_t ser_phy_hci_slip_rx_buf_free(uint8_t * p_buffer);
/**@brief A function for closing a PHY HCI module.
*
* @note The function disables hardware, resets internal module states, and unregisters the events
* callback function.
*/
void ser_phy_hci_slip_close(void);
#ifdef __cplusplus
}
#endif
#endif /* SER_PHY_HCI_H__ */
/** @} */

View File

@@ -0,0 +1,702 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <string.h>
#include "ser_phy_hci.h"
#include "ser_config.h"
#ifdef SER_CONNECTIVITY
#include "ser_phy_config_conn.h"
#else
#include "ser_phy_config_app.h"
#endif
#include "nrf_drv_uart.h"
#include "app_error.h"
#include "app_util_platform.h"
#define APP_SLIP_END 0xC0 /**< SLIP code for identifying the beginning and end of a packet frame.. */
#define APP_SLIP_ESC 0xDB /**< SLIP escape code. This code is used to specify that the following character is specially encoded. */
#define APP_SLIP_ESC_END 0xDC /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xC0.. */
#define APP_SLIP_ESC_ESC 0xDD /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xDB. */
#define HDR_SIZE 4
#define CRC_SIZE 2
#define PKT_SIZE (SER_HAL_TRANSPORT_MAX_PKT_SIZE + HDR_SIZE + CRC_SIZE)
static const nrf_drv_uart_t m_uart = NRF_DRV_UART_INSTANCE(0);
static const nrf_drv_uart_config_t m_uart_config = {
.pseltxd = SER_PHY_UART_TX,
.pselrxd = SER_PHY_UART_RX,
.pselrts = SER_PHY_UART_RTS,
.pselcts = SER_PHY_UART_CTS,
.p_context = NULL,
.interrupt_priority = UART_IRQ_PRIORITY,
#if defined(UARTE_PRESENT) && defined(UART_PRESENT)
.use_easy_dma = true,
#endif
// These values are common for application and connectivity, they are
// defined in "ser_config.h".
.hwfc = SER_PHY_UART_FLOW_CTRL,
.parity = SER_PHY_UART_PARITY,
.baudrate = (nrf_uart_baudrate_t)SER_PHY_UART_BAUDRATE
};
typedef struct {
ser_phy_hci_pkt_params_t header;
ser_phy_hci_pkt_params_t payload;
ser_phy_hci_pkt_params_t crc;
} ser_phy_hci_slip_pkt_t;
static ser_phy_hci_slip_pkt_t m_tx_curr_packet;
static ser_phy_hci_slip_pkt_t m_tx_next_packet;
static ser_phy_hci_slip_evt_t m_ser_phy_hci_slip_event;
static ser_phy_hci_slip_event_handler_t m_ser_phy_hci_slip_event_handler; /**< Event handler for upper layer */
static uint8_t m_tx_buf0[SER_PHY_HCI_SLIP_TX_BUF_SIZE];
static uint8_t m_tx_buf1[SER_PHY_HCI_SLIP_TX_BUF_SIZE];
static uint8_t * mp_tx_buf;
static uint8_t m_tx_bytes;
static enum {
PHASE_BEGIN,
PHASE_HEADER,
PHASE_PAYLOAD,
PHASE_CRC,
PHASE_ACK_END,
// The following three elements have to have consecutive values,
// 'tx_buf_fill()' relies on this.
PHASE_PACKET_END,
PHASE_PRE_IDLE = PHASE_PACKET_END + 1,
PHASE_IDLE = PHASE_PRE_IDLE + 1
} volatile m_tx_phase;
static bool volatile m_tx_in_progress;
static bool volatile m_tx_pending;
#define NO_EVENT SER_PHY_HCI_SLIP_EVT_TYPE_MAX
static ser_phy_hci_slip_evt_type_t m_tx_evt_type;
static ser_phy_hci_slip_evt_type_t m_tx_pending_evt_type;
static uint8_t m_small_buffer[HDR_SIZE];
static uint8_t m_big_buffer[PKT_SIZE];
static uint8_t * mp_small_buffer = NULL;
static uint8_t * mp_big_buffer = NULL;
static uint8_t * mp_buffer = NULL;
static uint8_t m_rx_buf[1];
static bool m_rx_escape;
// Appends one byte to the currently filled TX buffer and, when the buffer must
// be flushed (end of a packet, or buffer full), hands it to the UART driver
// and switches filling to the other buffer of the double-buffer pair.
//
// The function returns false to signal that no more bytes can be passed to be
// sent (put into the TX buffer) until UART transmission is done.
static bool tx_buf_put(uint8_t data_byte)
{
    ASSERT(m_tx_bytes < SER_PHY_HCI_SLIP_TX_BUF_SIZE);

    mp_tx_buf[m_tx_bytes] = data_byte;
    ++m_tx_bytes;

    bool flush = false;
    // Event to report once this buffer has been fully transmitted
    // (NO_EVENT when the flush is only an intermediate chunk).
    ser_phy_hci_slip_evt_type_t slip_evt_type = NO_EVENT;

    if (m_tx_phase == PHASE_ACK_END)
    {
        // Send buffer, then signal that an acknowledge packet has been sent.
        flush         = true;
        slip_evt_type = SER_PHY_HCI_SLIP_EVT_ACK_SENT;
    }
    else if (m_tx_phase == PHASE_PACKET_END)
    {
        // Send buffer, then signal that a packet with payload has been sent.
        flush         = true;
        slip_evt_type = SER_PHY_HCI_SLIP_EVT_PKT_SENT;
    }
    else if (m_tx_bytes >= SER_PHY_HCI_SLIP_TX_BUF_SIZE)
    {
        // Send buffer (because it is filled up), but don't signal anything,
        // since the packet sending is not complete yet.
        flush = true;
    }

    if (flush)
    {
        // If some TX transfer is being done at the moment, a new one cannot be
        // started, it must be scheduled to be performed later (see the
        // NRF_DRV_UART_EVT_TX_DONE handling in uart_event_handler).
        if (m_tx_in_progress)
        {
            m_tx_pending_evt_type = slip_evt_type;
            m_tx_pending          = true;

            // No more buffers available, can't continue filling.
            return false;
        }

        m_tx_in_progress = true;
        m_tx_evt_type    = slip_evt_type;
        APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buf, m_tx_bytes));

        // Switch to the second buffer.
        mp_tx_buf  = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
        m_tx_bytes = 0;
    }

    return true;
}
// Drives the SLIP TX state machine (m_tx_phase): SLIP-encodes the current
// packet's header, payload, and CRC fragments into the TX buffers via
// tx_buf_put(), until either the whole packet has been written or tx_buf_put()
// reports that no buffer space is left (in which case the UART TX-done event
// re-enters this function to continue).
static void tx_buf_fill(void)
{
    // Fragment (header/payload/crc) currently being encoded, and the index
    // of the next byte within it. Static: state persists across the re-entries
    // triggered by UART TX-done events.
    static ser_phy_hci_pkt_params_t * mp_tx_data = NULL;
    static uint32_t m_tx_index;

    bool can_continue = true;
    do {
        // Second byte of a two-byte SLIP escape sequence, still to be written.
        static uint8_t tx_escaped_data = 0;
        if (tx_escaped_data != 0)
        {
            // Emit the pending escaped byte before advancing in the fragment.
            can_continue    = tx_buf_put(tx_escaped_data);
            tx_escaped_data = 0;
            ++m_tx_index;
        }
        else switch (m_tx_phase)
        {
            case PHASE_BEGIN:
                // Leading frame delimiter; start encoding from the header.
                can_continue    = tx_buf_put(APP_SLIP_END);
                mp_tx_data      = &m_tx_curr_packet.header;
                m_tx_index      = 0;
                m_tx_phase      = PHASE_HEADER;
                tx_escaped_data = 0;
                break;

            case PHASE_ACK_END:
            case PHASE_PACKET_END:
                // Trailing frame delimiter for either packet kind.
                can_continue = tx_buf_put(APP_SLIP_END);
                // [this is needed for the '++m_tx_phase;' below]
                m_tx_phase = PHASE_PACKET_END;
                // no break, intentional fall-through

            case PHASE_PRE_IDLE:
                // In PHASE_PRE_IDLE the sending process is almost finished, only
                // the NRF_DRV_UART_EVT_TX_DONE event is needed before it can switch
                // to PHASE_IDLE. But during this waiting a new packet may appear
                // (i.e. 'ser_phy_hci_slip_tx_pkt_send()' may be called), hence
                // the following pointer must be checked before switching the phase,
                // just like right after writing whole packet to buffer (i.e. in
                // PHASE_PACKET_END). Therefore, the following code is common for
                // these two cases.
                if (m_tx_next_packet.header.p_buffer != NULL)
                {
                    // A queued packet is waiting - make it current and restart.
                    m_tx_curr_packet = m_tx_next_packet;
                    m_tx_next_packet.header.p_buffer = NULL;
                    m_tx_phase = PHASE_BEGIN;
                    break;
                }
                // Go to the next phase:
                //   PHASE_PACKET_END -> PHASE_PRE_IDLE
                //   PHASE_PRE_IDLE   -> PHASE_IDLE
                // (relies on these enumerators having consecutive values).
                ++m_tx_phase;
                return;

            default:
                // PHASE_HEADER / PHASE_PAYLOAD / PHASE_CRC: encode the current
                // fragment byte by byte, applying SLIP escaping.
                if (m_tx_index < mp_tx_data->num_of_bytes)
                {
                    ASSERT(mp_tx_data->p_buffer != NULL);
                    uint8_t data = mp_tx_data->p_buffer[m_tx_index];
                    if (data == APP_SLIP_END)
                    {
                        // 0xC0 in payload -> ESC, ESC_END pair.
                        data            = APP_SLIP_ESC;
                        tx_escaped_data = APP_SLIP_ESC_END;
                    }
                    else if (data == APP_SLIP_ESC)
                    {
                        // 0xDB in payload -> ESC, ESC_ESC pair.
                        tx_escaped_data = APP_SLIP_ESC_ESC;
                    }
                    else
                    {
                        // Plain byte - advance now; for escaped bytes the index
                        // is advanced when the second byte is written (above).
                        ++m_tx_index;
                    }
                    can_continue = tx_buf_put(data);
                }
                else
                {
                    // Fragment finished - pick the next one, or wrap up.
                    mp_tx_data->p_buffer = NULL;
                    if (m_tx_phase == PHASE_HEADER)
                    {
                        if (m_tx_curr_packet.payload.p_buffer == NULL)
                        {
                            // No payload -> ACK packet.
                            m_tx_phase = PHASE_ACK_END;
                        }
                        else
                        {
                            mp_tx_data = &m_tx_curr_packet.payload;
                            m_tx_index = 0;
                            m_tx_phase = PHASE_PAYLOAD;
                        }
                    }
                    else if (m_tx_phase == PHASE_PAYLOAD)
                    {
                        if (m_tx_curr_packet.crc.p_buffer == NULL)
                        {
                            // Packet without CRC.
                            m_tx_phase = PHASE_PACKET_END;
                        }
                        else
                        {
                            mp_tx_data = &m_tx_curr_packet.crc;
                            m_tx_index = 0;
                            m_tx_phase = PHASE_CRC;
                        }
                    }
                    else
                    {
                        ASSERT(m_tx_phase == PHASE_CRC);
                        m_tx_phase = PHASE_PACKET_END;
                    }
                }
                break;
        }
    } while (can_continue);
}
uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header,
const ser_phy_hci_pkt_params_t * p_payload,
const ser_phy_hci_pkt_params_t * p_crc)
{
if (p_header == NULL)
{
return NRF_ERROR_NULL;
}
CRITICAL_REGION_ENTER();
// If some packet is already transmitted, schedule this new one to be sent
// as next. A critical region is needed here to ensure that the transmission
// won't finish before the following assignments are done.
if (m_tx_phase != PHASE_IDLE)
{
m_tx_next_packet.header = *p_header;
if (p_payload == NULL)
{
m_tx_next_packet.payload.p_buffer = NULL;
}
else
{
m_tx_next_packet.payload = *p_payload;
}
if (p_crc == NULL)
{
m_tx_next_packet.crc.p_buffer = NULL;
}
else
{
m_tx_next_packet.crc = *p_crc;
}
}
else
{
m_tx_curr_packet.header = *p_header;
if (p_payload == NULL)
{
m_tx_curr_packet.payload.p_buffer = NULL;
}
else
{
m_tx_curr_packet.payload = *p_payload;
}
if (p_crc == NULL)
{
m_tx_curr_packet.crc.p_buffer = NULL;
}
else
{
m_tx_curr_packet.crc = *p_crc;
}
m_tx_phase = PHASE_BEGIN;
tx_buf_fill();
}
CRITICAL_REGION_EXIT();
return NRF_SUCCESS;
}
/* Decodes one SLIP-encoded byte in place.
 *
 * Returns false when the byte is the frame delimiter (0xC0), i.e. the last
 * byte of the packet was detected; returns true otherwise. Escape-sequence
 * state is kept in the file-scope flag m_rx_escape, which the caller also
 * checks to skip writing the ESC marker byte itself.
 */
static bool slip_decode(uint8_t * p_received_byte)
{
    uint8_t byte = *p_received_byte;

    if (byte == APP_SLIP_END)
    {
        // Frame delimiter - end of packet.
        return false;
    }

    if (byte == APP_SLIP_ESC)
    {
        // Escape marker - the next byte carries the encoded value.
        m_rx_escape = true;
    }
    else if (m_rx_escape && (byte == APP_SLIP_ESC_END))
    {
        // ESC, ESC_END decodes to a literal 0xC0 data byte.
        m_rx_escape      = false;
        *p_received_byte = APP_SLIP_END;
    }
    else if (m_rx_escape && (byte == APP_SLIP_ESC_ESC))
    {
        // ESC, ESC_ESC decodes to a literal 0xDB data byte.
        m_rx_escape      = false;
        *p_received_byte = APP_SLIP_ESC;
    }
    // Any other byte is plain payload data - no decoding needed.

    return true;
}
// Feeds one received byte into the SLIP de-framing state machine.
//
// Bytes are first collected into the small (header/ACK-sized) buffer; when a
// packet outgrows it and the big buffer is free, the data collected so far is
// moved there and reception continues. On the closing 0xC0 delimiter the used
// buffer is locked (its mp_* pointer is set to NULL until the upper layer
// frees it via ser_phy_hci_slip_rx_buf_free) and
// SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED is reported. Oversized packets and
// packets arriving while no buffer is available are silently discarded by
// dropping frame synchronization.
static void ser_phi_hci_rx_byte(uint8_t rx_byte)
{
    static bool rx_sync = false;           // True once a frame delimiter has been seen.
    uint8_t received_byte = rx_byte;
    static bool big_buff_in_use = false;   // True while the current packet continues in the big buffer.
    static uint32_t m_rx_index;            // Write position within the current RX buffer.

    /* Test received byte for SLIP packet start: 0xC0 */
    if (!rx_sync)
    {
        if (received_byte == APP_SLIP_END)
        {
            m_rx_index = 0;
            rx_sync    = true;
        }
        return;
    }

    /* Additional check needed in case rx_sync flag was set by end of previous packet */
    if ((m_rx_index) == 0 && (received_byte == APP_SLIP_END))
    {
        return;
    }

    /* Check if small (ACK) buffer is available */
    if ((mp_small_buffer != NULL) && (big_buff_in_use == false))
    {
        if (m_rx_index == 0)
        {
            // Start of a new packet - begin collecting into the small buffer.
            mp_buffer = mp_small_buffer;
        }

        /* Check if switch between small and big buffer is needed */
        if (m_rx_index == sizeof (m_small_buffer) && received_byte != APP_SLIP_END)
        {
            /* Check if big (PKT) buffer is available */
            if (mp_big_buffer != NULL)
            {
                /* Switch to big buffer */
                memcpy(m_big_buffer, m_small_buffer, sizeof (m_small_buffer));
                mp_buffer = m_big_buffer;
            }
            else
            {
                /* Small buffer is too small and big buffer not available - cannot continue reception */
                rx_sync = false;
                return;
            }
        }

        /* Check if big buffer is full */
        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
        {
            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
            rx_sync = false;
            return;
        }

        /* Decode byte. Will return false when it is 0xC0 - end of packet */
        if (slip_decode(&received_byte))
        {
            /* Write Rx byte only if it is not escape char */
            if (!m_rx_escape)
            {
                mp_buffer[m_rx_index++] = received_byte;
            }
        }
        else
        {
            /* Reset pointers to signalise buffers are locked waiting for upper layer */
            if (mp_buffer == mp_small_buffer)
            {
                mp_small_buffer = NULL;
            }
            else
            {
                mp_big_buffer = NULL;
            }
            /* Report packet reception end */
            m_ser_phy_hci_slip_event.evt_type =
                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer     = mp_buffer;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);

            // Ready for the next frame delimiter.
            rx_sync = false;
        }
    }
    else if (mp_big_buffer != NULL)
    {
        // The small buffer is locked by the upper layer (or this packet already
        // spilled into the big buffer) - continue in the big buffer only.
        big_buff_in_use = true;
        mp_buffer       = mp_big_buffer;

        /* Check if big buffer is full */
        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
        {
            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
            rx_sync = false;
            return;
        }

        /* Decode byte */
        if (slip_decode(&received_byte))
        {
            /* Write Rx byte only if it is not escape char */
            if (!m_rx_escape)
            {
                mp_buffer[m_rx_index++] = received_byte;
            }
        }
        else
        {
            // Mark the big buffer as locked (it should be freed by the upper
            // layer).
            mp_big_buffer   = NULL;
            big_buff_in_use = false;

            /* Report packet reception end */
            m_ser_phy_hci_slip_event.evt_type =
                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer     = mp_buffer;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);

            rx_sync = false;
        }
    }
    else
    {
        /* Both buffers are not available - cannot continue reception */
        rx_sync = false;
        return;
    }
}
uint32_t ser_phy_hci_slip_rx_buf_free(uint8_t * p_buffer)
{
    if (p_buffer == NULL)
    {
        return NRF_ERROR_NULL;
    }

    if (p_buffer == m_small_buffer)
    {
        /* Release the small (ACK) buffer, unless it is already free. */
        if (mp_small_buffer != NULL)
        {
            return NRF_ERROR_INVALID_STATE;
        }
        mp_small_buffer = m_small_buffer;
    }
    else if (p_buffer == m_big_buffer)
    {
        /* Release the big (packet) buffer, unless it is already free. */
        if (mp_big_buffer != NULL)
        {
            return NRF_ERROR_INVALID_STATE;
        }
        mp_big_buffer = m_big_buffer;
    }
    // NOTE(review): a pointer that matches neither buffer is silently accepted
    // (NRF_SUCCESS) - this mirrors the original behavior.

    return NRF_SUCCESS;
}
// UART driver event handler (runs in interrupt context).
//
// ERROR:   reports parity/overrun errors upward as SER_PHY_HCI_SLIP_EVT_HW_ERROR
//          and re-arms single-byte reception.
// TX_DONE: hands the second (already filled) buffer to the driver if one is
//          pending, notifies the upper layer about the completed packet/ACK,
//          and resumes tx_buf_fill() if the packet is not finished yet.
// RX_DONE: re-arms reception and pushes the received byte into the SLIP
//          de-framing state machine.
static void uart_event_handler(nrf_drv_uart_event_t * p_event,
                               void *                 p_context)
{
    (void)p_context;

    switch (p_event->type)
    {
        case NRF_DRV_UART_EVT_ERROR:
            // Process the error only if this is a parity or overrun error.
            // Break and framing errors will always occur before the other
            // side becomes active.
            if (p_event->data.error.error_mask &
                (NRF_UART_ERROR_PARITY_MASK | NRF_UART_ERROR_OVERRUN_MASK))
            {
                // Pass error source to upper layer
                m_ser_phy_hci_slip_event.evt_type =
                    SER_PHY_HCI_SLIP_EVT_HW_ERROR;
                m_ser_phy_hci_slip_event.evt_params.hw_error.error_code =
                    p_event->data.error.error_mask;
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }
            // Keep reception running despite the error.
            APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
            break;

        case NRF_DRV_UART_EVT_TX_DONE:
            // If there is a pending transfer (the second buffer is ready to
            // be sent), start it immediately.
            if (m_tx_pending)
            {
                APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buf, m_tx_bytes));

                // Switch to the buffer that has just been sent completely
                // and now can be filled again.
                mp_tx_buf  = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
                m_tx_bytes = 0;

                // Report the event for the buffer that just finished; the
                // pending event becomes the one for the transfer started above.
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
                m_tx_evt_type = m_tx_pending_evt_type;

                m_tx_pending = false;
            }
            else
            {
                m_tx_in_progress = false;
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
            }

            // If needed, notify the upper layer that the packet transfer is
            // complete (note that this notification may result in another
            // packet send request, so everything must be cleaned up above).
            if (m_ser_phy_hci_slip_event.evt_type != NO_EVENT)
            {
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }

            // And if the sending process is not yet finished, look what is
            // to be done next.
            if (m_tx_phase != PHASE_IDLE)
            {
                tx_buf_fill();
            }
            break;

        case NRF_DRV_UART_EVT_RX_DONE:
            {
                uint8_t rx_byte = m_rx_buf[0];
                // Re-arm reception before processing, so no gap in listening.
                APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
                if (p_event->data.rxtx.bytes)
                {
                    ser_phi_hci_rx_byte(rx_byte);
                }
            }
            break;

        default:
            // No other event types are expected from the driver.
            APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
    }
}
void ser_phy_hci_slip_reset(void)
{
    /* TX path: start filling buffer 0 from scratch, state machine idle,
     * no transfer running and nothing scheduled. */
    mp_tx_buf        = m_tx_buf0;
    m_tx_bytes       = 0;
    m_tx_phase       = PHASE_IDLE;
    m_tx_in_progress = false;
    m_tx_pending     = false;

    /* RX path: no escape sequence in progress, both buffers available. */
    m_rx_escape     = false;
    mp_small_buffer = m_small_buffer;
    mp_big_buffer   = m_big_buffer;
}
/**@brief Opens the HCI SLIP PHY module.
 *
 * Registers the event handler, initializes the UART driver, resets the
 * internal TX/RX state, and arms reception of the first byte.
 *
 * @param[in] events_handler  Callback invoked for all PHY HCI module events.
 *
 * @retval NRF_SUCCESS              Module opened.
 * @retval NRF_ERROR_NULL           events_handler is NULL.
 * @retval NRF_ERROR_INVALID_STATE  Module already open (close it first).
 * @retval NRF_ERROR_INVALID_PARAM  UART driver initialization failed.
 */
uint32_t ser_phy_hci_slip_open(ser_phy_hci_slip_event_handler_t events_handler)
{
    uint32_t err_code;

    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }

    // Check if function was not called before. A registered handler means
    // the module is already open.
    if (m_ser_phy_hci_slip_event_handler != NULL)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    m_ser_phy_hci_slip_event_handler = events_handler;

    err_code = nrf_drv_uart_init(&m_uart, &m_uart_config, uart_event_handler);
    if (err_code != NRF_SUCCESS)
    {
        // Roll back the handler registration; otherwise a failed init would
        // leave the module permanently "open" and every subsequent call would
        // return NRF_ERROR_INVALID_STATE.
        m_ser_phy_hci_slip_event_handler = NULL;
        return NRF_ERROR_INVALID_PARAM;
    }

    ser_phy_hci_slip_reset();

    // Arm reception of the first byte.
    APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));

    return NRF_SUCCESS;
}
// Closes the module: disables the UART driver, then clears the handler
// registration so that ser_phy_hci_slip_open() can be called again.
void ser_phy_hci_slip_close(void)
{
    nrf_drv_uart_uninit(&m_uart);
    m_ser_phy_hci_slip_event_handler = NULL;
}

View File

@@ -0,0 +1,737 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <string.h>
#include "ser_phy_hci.h"
#include "ser_config.h"
#ifdef SER_CONNECTIVITY
#include "ser_phy_config_conn.h"
#else
#include "ser_phy_config_app.h"
#endif
#include "app_usbd_cdc_acm.h"
#include "nrf_drv_clock.h"
#include "app_error.h"
#include "app_util_platform.h"
#define NRF_LOG_MODULE_NAME sphy_cdc
#include "nrf_log.h"
NRF_LOG_MODULE_REGISTER();
#define APP_SLIP_END 0xC0 /**< SLIP code for identifying the beginning and end of a packet frame.. */
#define APP_SLIP_ESC 0xDB /**< SLIP escape code. This code is used to specify that the following character is specially encoded. */
#define APP_SLIP_ESC_END 0xDC /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xC0.. */
#define APP_SLIP_ESC_ESC 0xDD /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xDB. */
#define HDR_SIZE 4
#define CRC_SIZE 2
#define PKT_SIZE (SER_HAL_TRANSPORT_MAX_PKT_SIZE + HDR_SIZE + CRC_SIZE)
static void cdc_acm_user_ev_handler(app_usbd_class_inst_t const * p_inst,
app_usbd_cdc_acm_user_event_t event);
#define CDC_ACM_COMM_INTERFACE 0
#define CDC_ACM_COMM_EPIN NRF_DRV_USBD_EPIN2
#define CDC_ACM_DATA_INTERFACE 1
#define CDC_ACM_DATA_EPIN NRF_DRV_USBD_EPIN1
#define CDC_ACM_DATA_EPOUT NRF_DRV_USBD_EPOUT1
APP_USBD_CDC_ACM_GLOBAL_DEF(m_app_cdc_acm,
cdc_acm_user_ev_handler,
CDC_ACM_COMM_INTERFACE,
CDC_ACM_DATA_INTERFACE,
CDC_ACM_COMM_EPIN,
CDC_ACM_DATA_EPIN,
CDC_ACM_DATA_EPOUT,
APP_USBD_CDC_COMM_PROTOCOL_NONE
);
// True while the USB CDC ACM port is opened by the host.
static bool volatile m_port_open;

// Descriptors of the three parts (header, payload, CRC) of one SLIP packet.
typedef struct {
    ser_phy_hci_pkt_params_t header;
    ser_phy_hci_pkt_params_t payload;
    ser_phy_hci_pkt_params_t crc;
} ser_phy_hci_slip_pkt_t;

static ser_phy_hci_slip_pkt_t m_tx_curr_packet; // Packet currently being encoded and sent.
static ser_phy_hci_slip_pkt_t m_tx_next_packet; // Packet queued while the current one is in flight.

static ser_phy_hci_slip_evt_t           m_ser_phy_hci_slip_event;
static ser_phy_hci_slip_event_handler_t m_ser_phy_hci_slip_event_handler; /**< Event handler for upper layer */

// Double-buffered TX staging area: one buffer is filled by tx_buf_fill()
// while the other is transferred over the CDC ACM endpoint.
static uint8_t   m_tx_buf0[NRF_DRV_USBD_EPSIZE];
static uint8_t   m_tx_buf1[NRF_DRV_USBD_EPSIZE];
static uint8_t * mp_tx_buf;  // Buffer currently being filled.
static uint8_t   m_tx_bytes; // Number of bytes already placed in mp_tx_buf.

// Phase of the TX encoding state machine (see tx_buf_fill()).
static enum {
    PHASE_BEGIN,   // Emit the opening SLIP delimiter.
    PHASE_HEADER,  // Emit (escaped) header bytes.
    PHASE_PAYLOAD, // Emit (escaped) payload bytes.
    PHASE_CRC,     // Emit (escaped) CRC bytes.
    PHASE_ACK_END, // Emit the closing delimiter of a header-only (ACK) packet.
    // The following three elements have to have consecutive values,
    // 'tx_buf_fill()' relies on this.
    PHASE_PACKET_END,
    PHASE_PRE_IDLE = PHASE_PACKET_END + 1,
    PHASE_IDLE = PHASE_PRE_IDLE + 1
} volatile m_tx_phase;

static bool volatile m_tx_in_progress; // A CDC ACM write is currently active.
static bool volatile m_tx_pending;     // The other buffer is full and waits for TX-done.

// Sentinel meaning "nothing to report to the upper layer".
#define NO_EVENT SER_PHY_HCI_SLIP_EVT_TYPE_MAX

static ser_phy_hci_slip_evt_type_t m_tx_evt_type;         // Event to emit when the active transfer completes.
static ser_phy_hci_slip_evt_type_t m_tx_pending_evt_type; // Event for the scheduled (pending) transfer.

static ser_phy_hci_pkt_params_t * mp_tx_data = NULL; // Packet part currently being encoded.
static uint32_t                   m_tx_index;        // Read index within mp_tx_data.

// RX buffers: a small one for ACK-sized packets, a big one for full packets.
// The mp_* pointer is NULL while the buffer is locked by the upper layer.
static uint8_t   m_small_buffer[HDR_SIZE];
static uint8_t   m_big_buffer[PKT_SIZE];
static uint8_t * mp_small_buffer = NULL;
static uint8_t * mp_big_buffer = NULL;
static uint8_t * mp_buffer = NULL; // Buffer receiving the current packet.
static uint32_t  m_rx_index;       // Write index within mp_buffer.
static uint8_t   m_rx_byte;        // One-byte target of app_usbd_cdc_acm_read().
static bool      m_rx_escape;      // SLIP escape sequence in progress on RX.
// The function returns false to signal that no more bytes can be passed to be
// sent (put into the TX buffer) until UART transmission is done.
// Appends one byte to the active TX buffer and flushes the buffer over the
// CDC ACM endpoint when it fills up or when a packet boundary (ACK/packet-end
// phase) is reached.
// The function returns false to signal that no more bytes can be passed to be
// sent (put into the TX buffer) until UART transmission is done.
static bool tx_buf_put(uint8_t data_byte)
{
    ASSERT(m_tx_bytes < SER_PHY_HCI_SLIP_TX_BUF_SIZE);

    mp_tx_buf[m_tx_bytes] = data_byte;
    ++m_tx_bytes;

    bool flush = false;
    ser_phy_hci_slip_evt_type_t slip_evt_type = NO_EVENT;
    if (m_tx_phase == PHASE_ACK_END)
    {
        // Send buffer, then signal that an acknowledge packet has been sent.
        flush = true;
        slip_evt_type = SER_PHY_HCI_SLIP_EVT_ACK_SENT;
    }
    else if (m_tx_phase == PHASE_PACKET_END)
    {
        // Send buffer, then signal that a packet with payload has been sent.
        flush = true;
        slip_evt_type = SER_PHY_HCI_SLIP_EVT_PKT_SENT;
    }
    else if (m_tx_bytes >= SER_PHY_HCI_SLIP_TX_BUF_SIZE)
    {
        // Send buffer (because it is filled up), but don't signal anything,
        // since the packet sending is not complete yet.
        flush = true;
    }

    if (flush)
    {
        // If some TX transfer is being done at the moment, a new one cannot be
        // started, it must be scheduled to be performed later.
        if (m_tx_in_progress)
        {
            m_tx_pending_evt_type = slip_evt_type;
            m_tx_pending = true;
            // No more buffers available, can't continue filling.
            return false;
        }

        if (m_port_open)
        {
            m_tx_in_progress = true;
            m_tx_evt_type = slip_evt_type;
            APP_ERROR_CHECK(app_usbd_cdc_acm_write(&m_app_cdc_acm,
                                                   mp_tx_buf, m_tx_bytes));
        }
        // NOTE(review): when the port is closed the buffer content is
        // silently discarded (no write is started) and filling continues.

        // Switch to the second buffer.
        mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
        m_tx_bytes = 0;
    }

    return true;
}
/**@brief Encodes the current packet into the active TX buffer, driving the
 *        TX phase machine (BEGIN -> HEADER -> PAYLOAD -> CRC ->
 *        PACKET_END/ACK_END -> PRE_IDLE -> IDLE).
 *
 * Bytes are emitted through tx_buf_put(), which performs the actual flush
 * over USB; filling stops as soon as tx_buf_put() reports that no buffer is
 * available, and resumes from the TX-done event handler.
 */
static void tx_buf_fill(void)
{
    bool can_continue = true;

    do {
        // Second byte of a two-byte SLIP escape sequence carried over to the
        // next iteration (0 means no escape byte is pending).
        static uint8_t tx_escaped_data = 0;

        if (tx_escaped_data != 0)
        {
            can_continue = tx_buf_put(tx_escaped_data);
            tx_escaped_data = 0;
            ++m_tx_index;
        }
        else switch (m_tx_phase)
        {
            case PHASE_BEGIN:
                // Start of frame: emit the SLIP delimiter, then the header.
                can_continue = tx_buf_put(APP_SLIP_END);
                mp_tx_data = &m_tx_curr_packet.header;
                m_tx_index = 0;
                m_tx_phase = PHASE_HEADER;
                tx_escaped_data = 0;
                break;

            case PHASE_ACK_END:
            case PHASE_PACKET_END:
                can_continue = tx_buf_put(APP_SLIP_END);
                // [this is needed for the '++m_tx_phase;' below]
                m_tx_phase = PHASE_PACKET_END;
                // no break, intentional fall-through

            case PHASE_PRE_IDLE:
                // In PHASE_PRE_IDLE the sending process is almost finished,
                // only the transfer-done event (here:
                // APP_USBD_CDC_ACM_USER_EVT_TX_DONE) is needed before it can
                // switch to PHASE_IDLE. But during this waiting a new packet
                // may appear (i.e. 'ser_phy_hci_slip_tx_pkt_send()' may be
                // called), hence the following pointer must be checked before
                // switching the phase, just like right after writing a whole
                // packet to the buffer (i.e. in PHASE_PACKET_END). Therefore,
                // the following code is common for these two cases.
                if (m_tx_next_packet.header.p_buffer != NULL)
                {
                    m_tx_curr_packet = m_tx_next_packet;
                    m_tx_next_packet.header.p_buffer = NULL;
                    m_tx_phase = PHASE_BEGIN;
                    break;
                }
                // Go to the next phase:
                // PHASE_PACKET_END -> PHASE_PRE_IDLE
                // PHASE_PRE_IDLE   -> PHASE_IDLE
                ++m_tx_phase;
                return;

            default:
                // PHASE_HEADER / PHASE_PAYLOAD / PHASE_CRC: emit the current
                // packet part, SLIP-escaping 0xC0 and 0xDB on the fly.
                ASSERT(mp_tx_data->p_buffer != NULL);
                if (m_tx_index < mp_tx_data->num_of_bytes)
                {
                    uint8_t data = mp_tx_data->p_buffer[m_tx_index];
                    if (data == APP_SLIP_END)
                    {
                        data = APP_SLIP_ESC;
                        tx_escaped_data = APP_SLIP_ESC_END;
                    }
                    else if (data == APP_SLIP_ESC)
                    {
                        tx_escaped_data = APP_SLIP_ESC_ESC;
                    }
                    else
                    {
                        // Not escaped: the index advances immediately; for
                        // escaped bytes it advances after the second byte.
                        ++m_tx_index;
                    }
                    can_continue = tx_buf_put(data);
                }
                else
                {
                    // Current part exhausted - advance to the next one.
                    mp_tx_data->p_buffer = NULL;
                    if (m_tx_phase == PHASE_HEADER)
                    {
                        if (m_tx_curr_packet.payload.p_buffer == NULL)
                        {
                            // No payload -> ACK packet.
                            m_tx_phase = PHASE_ACK_END;
                        }
                        else
                        {
                            mp_tx_data = &m_tx_curr_packet.payload;
                            m_tx_index = 0;
                            m_tx_phase = PHASE_PAYLOAD;
                        }
                    }
                    else if (m_tx_phase == PHASE_PAYLOAD)
                    {
                        if (m_tx_curr_packet.crc.p_buffer == NULL)
                        {
                            // Packet without CRC.
                            m_tx_phase = PHASE_PACKET_END;
                        }
                        else
                        {
                            mp_tx_data = &m_tx_curr_packet.crc;
                            m_tx_index = 0;
                            m_tx_phase = PHASE_CRC;
                        }
                    }
                    else
                    {
                        ASSERT(m_tx_phase == PHASE_CRC);
                        m_tx_phase = PHASE_PACKET_END;
                    }
                }
                break;
        }
    } while (can_continue);
}
uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header,
const ser_phy_hci_pkt_params_t * p_payload,
const ser_phy_hci_pkt_params_t * p_crc)
{
if (p_header == NULL)
{
return NRF_ERROR_NULL;
}
if (!m_port_open)
{
return NRF_SUCCESS;
}
CRITICAL_REGION_ENTER();
// If some packet is already transmitted, schedule this new one to be sent
// as next. A critical region is needed here to ensure that the transmission
// won't finish before the following assignments are done.
if (m_tx_phase != PHASE_IDLE)
{
m_tx_next_packet.header = *p_header;
if (p_payload == NULL)
{
m_tx_next_packet.payload.p_buffer = NULL;
}
else
{
m_tx_next_packet.payload = *p_payload;
}
if (p_crc == NULL)
{
m_tx_next_packet.crc.p_buffer = NULL;
}
else
{
m_tx_next_packet.crc = *p_crc;
}
}
else
{
m_tx_curr_packet.header = *p_header;
if (p_payload == NULL)
{
m_tx_curr_packet.payload.p_buffer = NULL;
}
else
{
m_tx_curr_packet.payload = *p_payload;
}
if (p_crc == NULL)
{
m_tx_curr_packet.crc.p_buffer = NULL;
}
else
{
m_tx_curr_packet.crc = *p_crc;
}
m_tx_phase = PHASE_BEGIN;
tx_buf_fill();
}
CRITICAL_REGION_EXIT();
return NRF_SUCCESS;
}
/* Function returns false when last byte in packet is detected.*/
/* Performs in-place SLIP decoding of a single received byte.
 *
 * Returns false when the byte is the SLIP frame delimiter (0xC0), i.e. end
 * of packet; true otherwise. Escape handling uses the file-scope m_rx_escape
 * flag: APP_SLIP_ESC arms it, and the following APP_SLIP_ESC_END /
 * APP_SLIP_ESC_ESC byte is translated back to the value it escaped. */
static bool slip_decode(uint8_t * p_received_byte)
{
    uint8_t rx = *p_received_byte;

    if (rx == APP_SLIP_END)
    {
        /* Frame delimiter - signal end of packet. */
        return false;
    }

    if (rx == APP_SLIP_ESC)
    {
        /* Arm escape decoding for the next byte. */
        m_rx_escape = true;
    }
    else if (m_rx_escape && (rx == APP_SLIP_ESC_END))
    {
        m_rx_escape = false;
        *p_received_byte = APP_SLIP_END;
    }
    else if (m_rx_escape && (rx == APP_SLIP_ESC_ESC))
    {
        m_rx_escape = false;
        *p_received_byte = APP_SLIP_ESC;
    }
    /* Any other byte is plain payload - no translation needed. */

    return true;
}
/**@brief Consumes one byte received over CDC ACM and runs the SLIP RX state
 *        machine.
 *
 * A packet is first collected into the small (HDR_SIZE) buffer; if it turns
 * out to be longer, reception transparently switches to the big (PKT_SIZE)
 * buffer. On the closing 0xC0 the packet is reported to the upper layer and
 * the used buffer stays locked until ser_phy_hci_slip_rx_buf_free().
 */
static void ser_phi_hci_rx_byte(uint8_t rx_byte)
{
    // True after an opening SLIP delimiter was seen (inside a frame).
    static bool rx_sync = false;
    uint8_t received_byte = rx_byte;
    // True while the current packet is being received into the big buffer
    // because the small one was locked at packet start.
    static bool big_buff_in_use = false;

    /* Test received byte for SLIP packet start: 0xC0*/
    if (!rx_sync)
    {
        if (received_byte == APP_SLIP_END)
        {
            m_rx_index = 0;
            rx_sync = true;
        }
        return;
    }

    /* Additional check needed in case rx_sync flag was set by end of previous packet*/
    if ((m_rx_index) == 0 && (received_byte == APP_SLIP_END))
    {
        return;
    }

    /* Check if small (ACK) buffer is available*/
    if ((mp_small_buffer != NULL) && (big_buff_in_use == false))
    {
        if (m_rx_index == 0)
        {
            mp_buffer = mp_small_buffer;
        }

        /* Check if switch between small and big buffer is needed:
         * the small buffer is full and this byte is not the packet end. */
        if (m_rx_index == sizeof (m_small_buffer) && received_byte != APP_SLIP_END)
        {
            /* Check if big (PKT) buffer is available*/
            if (mp_big_buffer != NULL)
            {
                /* Switch to big buffer, carrying over what was collected. */
                memcpy(m_big_buffer, m_small_buffer, sizeof (m_small_buffer));
                mp_buffer = m_big_buffer;
            }
            else
            {
                /* Small buffer is too small and big buffer not available - cannot continue reception*/
                rx_sync = false;
                return;
            }
        }

        /* Check if big buffer is full */
        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
        {
            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
            rx_sync = false;
            return;
        }

        /* Decode byte. Will return false when it is 0xC0 - end of packet*/
        if (slip_decode(&received_byte))
        {
            /* Write Rx byte only if it is not escape char */
            if (!m_rx_escape)
            {
                mp_buffer[m_rx_index++] = received_byte;
            }
        }
        else
        {
            /* Reset pointers to signalise buffers are locked waiting for upper layer */
            if (mp_buffer == mp_small_buffer)
            {
                mp_small_buffer = NULL;
            }
            else
            {
                mp_big_buffer = NULL;
            }
            /* Report packet reception end*/
            m_ser_phy_hci_slip_event.evt_type =
                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer = mp_buffer;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);

            rx_sync = false;
        }
    }
    else if (mp_big_buffer != NULL)
    {
        /* Small buffer is locked - receive the whole packet into the big one. */
        big_buff_in_use = true;
        mp_buffer = mp_big_buffer;

        /* Check if big buffer is full */
        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
        {
            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
            rx_sync = false;
            return;
        }

        /* Decode byte*/
        if (slip_decode(&received_byte))
        {
            /* Write Rx byte only if it is not escape char */
            if (!m_rx_escape)
            {
                mp_buffer[m_rx_index++] = received_byte;
            }
        }
        else
        {
            // Mark the big buffer as locked (it should be freed by the upper
            // layer).
            mp_big_buffer = NULL;
            big_buff_in_use = false;

            /* Report packet reception end*/
            m_ser_phy_hci_slip_event.evt_type =
                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer = mp_buffer;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);

            rx_sync = false;
        }
    }
    else
    {
        /* Both buffers are not available - cannot continue reception*/
        rx_sync = false;
        return;
    }
}
/**@brief Returns an RX buffer previously handed out in a PKT_RECEIVED event.
 *
 * @param[in] p_buffer Pointer to the buffer to release (must be one of the
 *                     module's internal RX buffers).
 *
 * @retval NRF_ERROR_NULL          when @p p_buffer is NULL.
 * @retval NRF_ERROR_INVALID_STATE when the buffer was not locked.
 * @retval NRF_SUCCESS             otherwise.
 */
uint32_t ser_phy_hci_slip_rx_buf_free(uint8_t * p_buffer)
{
    if (p_buffer == NULL)
    {
        return NRF_ERROR_NULL;
    }

    if (p_buffer == m_small_buffer)
    {
        if (mp_small_buffer != NULL)
        {
            // Small buffer is not locked - releasing it again is an error.
            return NRF_ERROR_INVALID_STATE;
        }
        mp_small_buffer = m_small_buffer;
        return NRF_SUCCESS;
    }

    if (p_buffer == m_big_buffer)
    {
        if (mp_big_buffer != NULL)
        {
            // Big buffer is not locked - releasing it again is an error.
            return NRF_ERROR_INVALID_STATE;
        }
        mp_big_buffer = m_big_buffer;
        return NRF_SUCCESS;
    }

    // Unknown pointer: kept as a silent success, matching the original code.
    return NRF_SUCCESS;
}
/**@brief app_usbd CDC ACM user event handler.
 *
 * Drives both directions of the PHY:
 * - PORT_OPEN starts the one-byte RX pump;
 * - RX_DONE feeds each received byte into the SLIP RX machine and re-arms
 *   the read;
 * - TX_DONE implements the double-buffer hand-off and resumes encoding;
 * - PORT_CLOSE aborts a transfer in progress, reporting it as sent.
 */
static void cdc_acm_user_ev_handler(app_usbd_class_inst_t const * p_inst,
                                    app_usbd_cdc_acm_user_event_t event)
{
    app_usbd_cdc_acm_t const * p_cdc_acm = app_usbd_cdc_acm_class_get(p_inst);

    switch (event)
    {
        case APP_USBD_CDC_ACM_USER_EVT_PORT_OPEN:
            NRF_LOG_DEBUG("EVT_PORT_OPEN");
            if (!m_port_open)
            {
                ret_code_t ret_code;

                m_port_open = true;

                // Drain any bytes already buffered, then leave a pending
                // one-byte read armed (NRF_ERROR_IO_PENDING).
                do {
                    ret_code = app_usbd_cdc_acm_read(p_cdc_acm, &m_rx_byte, 1);
                    if (ret_code == NRF_SUCCESS)
                    {
                        ser_phi_hci_rx_byte(m_rx_byte);
                    }
                    else if (ret_code != NRF_ERROR_IO_PENDING)
                    {
                        APP_ERROR_CHECK(ret_code);
                    }
                } while (ret_code == NRF_SUCCESS);
            }
            break;

        case APP_USBD_CDC_ACM_USER_EVT_PORT_CLOSE:
            NRF_LOG_DEBUG("EVT_PORT_CLOSE");
            if (m_tx_in_progress)
            {
                // NOTE(review): an aborted transfer is reported to the upper
                // layer as PKT_SENT even if it was an ACK - confirm the upper
                // layer tolerates this on port close.
                m_ser_phy_hci_slip_event.evt_type = SER_PHY_HCI_SLIP_EVT_PKT_SENT;
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
                m_tx_in_progress = false;
            }
            m_port_open = false;
            break;

        case APP_USBD_CDC_ACM_USER_EVT_TX_DONE:
            // If there is a pending transfer (the second buffer is ready to
            // be sent), start it immediately.
            if (m_tx_pending)
            {
                APP_ERROR_CHECK(app_usbd_cdc_acm_write(p_cdc_acm,
                                                       mp_tx_buf, m_tx_bytes));

                // Switch to the buffer that has just been sent completely
                // and now can be filled again.
                mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
                m_tx_bytes = 0;

                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
                m_tx_evt_type = m_tx_pending_evt_type;

                m_tx_pending = false;
            }
            else
            {
                m_tx_in_progress = false;
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
            }

            // If needed, notify the upper layer that the packet transfer is
            // complete (note that this notification may result in another
            // packet send request, so everything must be cleaned up above).
            if (m_ser_phy_hci_slip_event.evt_type != NO_EVENT)
            {
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }

            // And if the sending process is not yet finished, look what is
            // to be done next.
            if (m_tx_phase != PHASE_IDLE)
            {
                tx_buf_fill();
            }
            break;

        case APP_USBD_CDC_ACM_USER_EVT_RX_DONE:
        {
            ret_code_t ret_code;

            // Process the byte just received, then keep reading until the
            // driver switches back to pending (no more buffered bytes).
            do
            {
                ser_phi_hci_rx_byte(m_rx_byte);
                ret_code = app_usbd_cdc_acm_read(p_cdc_acm, &m_rx_byte, 1);
            } while (ret_code == NRF_SUCCESS);
        }
            break;

        default:
            break;
    }
}
/**@brief Restores the SLIP PHY to its initial state: TX idle, both RX
 *        buffers available, no escape sequence pending. */
void ser_phy_hci_slip_reset(void)
{
    // TX path: start filling buffer 0; nothing queued or in flight.
    mp_tx_buf        = m_tx_buf0;
    m_tx_bytes       = 0;
    m_tx_phase       = PHASE_IDLE;
    m_tx_in_progress = false;
    m_tx_pending     = false;

    // RX path: no escape in progress; both buffers unlocked.
    m_rx_escape      = false;
    mp_small_buffer  = m_small_buffer;
    mp_big_buffer    = m_big_buffer;
}
/**@brief Opens the SLIP PHY: registers the upper-layer event handler,
 *        appends the CDC ACM class to the USB stack and resets the module.
 *
 * @retval NRF_ERROR_NULL          when @p events_handler is NULL.
 * @retval NRF_ERROR_INVALID_STATE when already opened.
 * @retval NRF_SUCCESS             on success, or the error returned by
 *                                 app_usbd_class_append() otherwise.
 */
uint32_t ser_phy_hci_slip_open(ser_phy_hci_slip_event_handler_t events_handler)
{
    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }

    // Check if function was not called before.
    if (m_ser_phy_hci_slip_event_handler != NULL)
    {
        return NRF_ERROR_INVALID_STATE;
    }

    ret_code_t err = app_usbd_class_append(
        app_usbd_cdc_acm_class_inst_get(&m_app_cdc_acm));
    if (err == NRF_SUCCESS)
    {
        m_ser_phy_hci_slip_event_handler = events_handler;
        ser_phy_hci_slip_reset();
    }
    return err;
}
/**@brief Closes the SLIP PHY.
 *
 * Only clears the registered event handler so a subsequent
 * ser_phy_hci_slip_open() succeeds again. NOTE(review): the CDC ACM class
 * appended in ser_phy_hci_slip_open() is not detached here - confirm this is
 * intentional for this SDK's app_usbd lifecycle.
 */
void ser_phy_hci_slip_close(void)
{
    m_ser_phy_hci_slip_event_handler = NULL;
}

View File

@@ -0,0 +1,382 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
*
* @defgroup ser_phy_spi_phy_driver_slave ser_phy_nrf51_spi_slave.c
* @{
* @ingroup ser_phy_spi_phy_driver_slave
*
* @brief SPI_RAW PHY slave driver.
*/
#include <stddef.h>
#include <string.h>
#include "app_error.h"
#include "app_util.h"
#include "app_util_platform.h"
#include "app_timer.h"
#include "ser_phy.h"
#include "ser_phy_hci.h"
#include "crc16.h"
#include "nrf_soc.h"
#include "ser_phy_debug_comm.h"
// True after ser_phy_open() completed successfully (guards re-init).
static bool m_flag_nohci_init = false;
// True while the packet in flight went through the SLIP "ACK" path (packets
// no longer than header + CRC, see ser_phy_tx_pkt_send()).
static bool m_flag_expect_ack;
// True while the upper layer owes this module a ser_phy_rx_buf_set() call.
static bool m_flag_buffer_reqested = false;

static uint16_t  m_rx_packet_length;           // Length of the packet being serviced.
static uint8_t * m_p_rx_packet;                // SLIP buffer of the packet being serviced.
static uint16_t  m_rx_pending_packet_length;   // Length of a parked second packet.
static uint8_t * m_p_rx_pending_packet;        // SLIP buffer of a parked second packet.
static uint16_t  m_rx_allocated_packet_length; // Length announced in the last buffer request.
static uint8_t * m_p_rx_allocated_packet;      // Source of the next copy to the upper layer.

static uint8_t * m_p_tx_packet = NULL;         // TX packet in flight (NULL when idle).
static uint16_t  m_tx_packet_length;

static ser_phy_events_handler_t m_ser_phy_callback = NULL; // Upper-layer event handler.

#define PKT_HDR_SIZE 4 /**< Packet header size in number of bytes. */
#define PKT_CRC_SIZE 2 /**< Packet CRC size in number of bytes. */
/**@brief Module assert: funnels fatal protocol violations into
 *        APP_ERROR_CHECK_BOOL (error handler is invoked when @p cond is false). */
static void ser_phy_nohci_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
/* Forwards a PHY event to the registered upper-layer handler; a no-op while
 * no handler is registered. */
static void ser_phy_event_callback(ser_phy_evt_t event)
{
    if (m_ser_phy_callback == NULL)
    {
        return;
    }
    m_ser_phy_callback(event);
}
/* Reports SER_PHY_EVT_RX_BUF_REQUEST for 'size' bytes to the upper layer. */
static void memory_request_callback(uint16_t size)
{
    ser_phy_evt_t evt;

    DEBUG_EVT_HCI_PHY_EVT_BUF_REQUEST(0);

    evt.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
    evt.evt_params.rx_buf_request.num_of_bytes = size;
    ser_phy_event_callback(evt);
}
/* Reports SER_PHY_EVT_RX_PKT_RECEIVED with the given buffer and length. */
static void packet_received_callback(uint8_t * pBuffer, uint16_t size)
{
    ser_phy_evt_t evt;

    DEBUG_EVT_HCI_PHY_EVT_RX_PKT_RECEIVED(0);

    evt.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
    evt.evt_params.rx_pkt_received.num_of_bytes = size;
    evt.evt_params.rx_pkt_received.p_buffer     = pBuffer;
    ser_phy_event_callback(evt);
}
/* Reports SER_PHY_EVT_RX_PKT_DROPPED (no parameters) to the upper layer. */
static void packet_dropped_callback(void)
{
    ser_phy_evt_t evt;

    DEBUG_EVT_HCI_PHY_EVT_RX_PKT_DROPPED(0);

    evt.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
    ser_phy_event_callback(evt);
}
/* Reports SER_PHY_EVT_TX_PKT_SENT (no parameters) to the upper layer. */
static void packet_transmitted_callback(void)
{
    ser_phy_evt_t evt;

    DEBUG_EVT_HCI_PHY_EVT_TX_PKT_SENT(0);

    evt.evt_type = SER_PHY_EVT_TX_PKT_SENT;
    ser_phy_event_callback(evt);
}
/* Translates SLIP-layer events into ser_phy events.
 *
 * TX completions are validated against m_flag_expect_ack (short packets are
 * sent through the SLIP ACK path); received packets are adopted into the
 * current/pending slots and trigger a buffer request to the upper layer. */
static void hci_slip_event_handler(ser_phy_hci_slip_evt_t * p_event)
{
    switch (p_event->evt_type)
    {
        case SER_PHY_HCI_SLIP_EVT_PKT_SENT:
            DEBUG_EVT_SLIP_PACKET_TXED(0);
            if (m_flag_expect_ack)
            {
                // packet was send as a ACK packet, callback should be with ACK_SENT
                ser_phy_nohci_assert(false);
            }
            else
            {
                m_p_tx_packet = NULL;
                packet_transmitted_callback();
            }
            break;

        case SER_PHY_HCI_SLIP_EVT_ACK_SENT:
            DEBUG_EVT_SLIP_ACK_TXED(0);
            if (m_flag_expect_ack)
            {
                m_p_tx_packet = NULL;
                packet_transmitted_callback();
            }
            else
            {
                // packet was send as a normal packet, callback should be with PKT_SENT
                ser_phy_nohci_assert(false);
            }
            break;

        case SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED:
            CRITICAL_REGION_ENTER();
            if (m_p_rx_packet == NULL)
            {
                // First slot free: adopt the packet and immediately request
                // memory from the upper layer to copy it into.
                m_p_rx_packet                = p_event->evt_params.received_pkt.p_buffer;
                m_rx_packet_length           = p_event->evt_params.received_pkt.num_of_bytes;
                m_p_rx_allocated_packet      = m_p_rx_packet;
                m_rx_allocated_packet_length = m_rx_packet_length;
                m_flag_buffer_reqested       = true;
                memory_request_callback(m_rx_allocated_packet_length);
            }
            else if (m_p_rx_pending_packet == NULL)
            {
                // Park the packet until the current one has been serviced.
                m_p_rx_pending_packet      = p_event->evt_params.received_pkt.p_buffer;
                m_rx_pending_packet_length = p_event->evt_params.received_pkt.num_of_bytes;
            }
            else
            {
                // both buffers are not released; this is fault
                ser_phy_nohci_assert(false);
            }
            CRITICAL_REGION_EXIT();
            break;

        default:
            // no other callbacks are expected
            ser_phy_nohci_assert(false);
            break;
    }
}
/* ser_phy API function */
void ser_phy_interrupts_enable(void)
{
    // NOTE(review): the IRQ line is hard-coded to UART0_IRQn although this
    // build pairs the nohci layer with a SLIP transport that may not be
    // UART0 - confirm the IRQ matches the compiled PHY.
    NVIC_EnableIRQ(UART0_IRQn);
}
/* ser_phy API function */
void ser_phy_interrupts_disable(void)
{
    // Counterpart of ser_phy_interrupts_enable(); same hard-coded IRQ line.
    NVIC_DisableIRQ(UART0_IRQn);
}
/* ser_phy API function */
/**@brief Supplies (or refuses, with NULL) the buffer requested via
 *        SER_PHY_EVT_RX_BUF_REQUEST.
 *
 * Copies the allocated packet into @p p_buffer (or drops it when @p p_buffer
 * is NULL), releases the underlying SLIP buffer(s), and - if a second packet
 * was parked meanwhile - immediately issues the next buffer request.
 *
 * @retval NRF_ERROR_BUSY when no buffer request is outstanding.
 * @retval NRF_SUCCESS    otherwise.
 */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    uint32_t status = NRF_SUCCESS;

    if (m_flag_buffer_reqested)
    {
        m_flag_buffer_reqested = false;

        if (p_buffer)
        {
            memcpy(p_buffer, m_p_rx_allocated_packet, m_rx_allocated_packet_length);
            packet_received_callback(p_buffer, m_rx_allocated_packet_length);
        }
        else
        {
            // Upper layer refused memory - drop the packet.
            packet_dropped_callback();
        }

        CRITICAL_REGION_ENTER();
        if (m_p_rx_allocated_packet == m_p_rx_packet && (m_p_rx_pending_packet == NULL))
        {
            // packet is copied and there is no pending packet
            (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet);
            m_p_rx_packet = NULL;
            m_p_rx_allocated_packet = NULL;
        }
        else if (m_p_rx_allocated_packet == m_p_rx_packet && (m_p_rx_pending_packet != NULL))
        {
            // there is a pending packet - request memory for it
            m_p_rx_allocated_packet = m_p_rx_pending_packet;
            m_rx_allocated_packet_length = m_rx_pending_packet_length;
            m_flag_buffer_reqested = true;
        }
        else if (m_p_rx_allocated_packet == m_p_rx_pending_packet )
        {
            // the pending packet was serviced - release both
            m_p_rx_allocated_packet = NULL;
            (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet);
            m_p_rx_packet = NULL;
            (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_pending_packet);
            m_p_rx_pending_packet = NULL;
        }
        else
        {
            // no other calls are expected
            ser_phy_nohci_assert(false);
        }
        CRITICAL_REGION_EXIT();

        // request memory for a pending packet (done outside the critical
        // region, since the callback may call back into this module)
        if (m_p_rx_allocated_packet)
        {
            memory_request_callback(m_rx_allocated_packet_length);
        }
    }
    else
    {
        status = NRF_ERROR_BUSY;
    }
    return status;
}
/* ser_phy API function */
/**@brief Sends one packet through the SLIP layer without real HCI framing.
 *
 * Packets no longer than PKT_HDR_SIZE + PKT_CRC_SIZE bytes are sent through
 * the SLIP "ACK" path; longer packets are split into fake header / payload /
 * CRC descriptors (first 4 bytes / middle / last 2 bytes) so that the SLIP
 * layer treats them as a normal packet. Only one TX may be in flight.
 *
 * @retval NRF_ERROR_NULL when @p p_buffer is NULL or @p num_of_bytes is 0.
 * @retval NRF_ERROR_BUSY when a previous packet has not completed yet.
 * @retval NRF_SUCCESS    otherwise.
 */
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
    uint32_t status = NRF_SUCCESS;
    uint32_t err_code;

    if ( p_buffer == NULL || num_of_bytes == 0)
    {
        return NRF_ERROR_NULL;
    }

    if ( m_p_tx_packet == NULL)
    {
        m_tx_packet_length = num_of_bytes;
        m_p_tx_packet = (uint8_t *)p_buffer;

        if (m_tx_packet_length <= PKT_HDR_SIZE + PKT_CRC_SIZE)
        {
            ser_phy_hci_pkt_params_t pkt; // all packets smaller than 6 goes as ACK

            m_flag_expect_ack = true;
            pkt.p_buffer = (uint8_t *)m_p_tx_packet;
            pkt.num_of_bytes = m_tx_packet_length;
            DEBUG_EVT_SLIP_ACK_TX(0);
            err_code = ser_phy_hci_slip_tx_pkt_send(&pkt, NULL, NULL); // this will look like ACK for slip
            ser_phy_nohci_assert(err_code == NRF_SUCCESS);
        }
        else
        {
            ser_phy_hci_pkt_params_t header;  // this is fake header - just first 4 bytes
            ser_phy_hci_pkt_params_t crc;     // this is fake header - just last 2 bytes
            ser_phy_hci_pkt_params_t payload; // this is fake payload - all except for header and crc

            m_flag_expect_ack = false;
            header.p_buffer = (uint8_t *)m_p_tx_packet;
            header.num_of_bytes = PKT_HDR_SIZE;
            crc.p_buffer = (uint8_t *)m_p_tx_packet + m_tx_packet_length - PKT_CRC_SIZE;
            crc.num_of_bytes = PKT_CRC_SIZE;
            payload.p_buffer = (uint8_t *)m_p_tx_packet + PKT_HDR_SIZE;
            payload.num_of_bytes = m_tx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE;
            DEBUG_EVT_SLIP_PACKET_TX(0);
            err_code = ser_phy_hci_slip_tx_pkt_send(&header, &payload, &crc); // this will look like normal packet for slip
            ser_phy_nohci_assert(err_code == NRF_SUCCESS);
        }
    }
    else
    {
        status = NRF_ERROR_BUSY;
    }

    return status;
}
/* ser_phy API function */
/**@brief Opens the PHY: registers the upper-layer handler and opens the
 *        underlying SLIP layer.
 *
 * @retval NRF_ERROR_INVALID_STATE when already opened.
 * @retval NRF_ERROR_NULL          when @p events_handler is NULL.
 * @retval NRF_SUCCESS             on success, or the SLIP open error code.
 */
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    if (m_flag_nohci_init)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }

    uint32_t err_code = ser_phy_hci_slip_open(hci_slip_event_handler);
    if (err_code == NRF_SUCCESS)
    {
        m_ser_phy_callback = events_handler;
        m_flag_nohci_init  = true;
    }
    return err_code;
}
/* ser_phy API function */
/**@brief Closes the PHY: unregisters the handler, closes the SLIP layer and
 *        clears the init flag so ser_phy_open() can be called again.
 *
 * The handler is cleared first, so a late SLIP event lands on
 * ser_phy_event_callback()'s NULL check instead of a stale callback.
 */
void ser_phy_close(void)
{
    m_ser_phy_callback = NULL;
    ser_phy_hci_slip_close();
    m_flag_nohci_init = false;
}

View File

@@ -0,0 +1,823 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
*
* @defgroup ser_phy_spi_5W_phy_driver_master ser_phy_nrf51_spi_5W_master.c
* @{
* @ingroup ser_phy_spi_5W_phy_driver_master
*
* @brief SPI_5W_RAW PHY master driver.
*/
#include <stdio.h>
#include <string.h>

#include "app_error.h"
#include "app_util.h"
#include "app_util_platform.h"
#include "boards.h"
#include "nrf_error.h"
#include "nrf_gpio.h"
#include "nrf_drv_gpiote.h"
#include "ser_config.h"
#include "ser_config_5W_app.h"
#include "ser_phy.h"
#include "ser_phy_config_app.h"
#include "spi_5W_master.h"
#include "ser_phy_debug_app.h"
#define notUSE_PendSV
#ifdef USE_PendSV
#define SW_IRQn PendSV_IRQn
#define SW_IRQ_Handler() PendSV_Handler()
#define SET_Pend_SW_IRQ() SCB->ICSR = SCB->ICSR | SCB_ICSR_PENDSVSET_Msk //NVIC_SetPendingIRQ(PendSV_IRQn) - PendSV_IRQn is a negative - does not work with CMSIS
#else
#define SW_IRQn SWI3_IRQn
#define SW_IRQ_Handler() SWI3_IRQHandler()
#define SET_Pend_SW_IRQ() NVIC_SetPendingIRQ(SWI3_IRQn)
#endif
#define SER_PHY_SPI_5W_MTU_SIZE SER_PHY_SPI_MTU_SIZE
/**@brief States of the 5-wire SPI master PHY state machine, driven by
 * ser_phy_switch_state(). Names mirror the transfer stage (TX/RX of the
 * length header vs. the payload, waiting for the slave RDY line); see the
 * UML graph in the SDK documentation referenced at ser_phy_switch_state().
 */
typedef enum
{
    SER_PHY_STATE_IDLE = 0,        // No transfer in progress.
    SER_PHY_STATE_TX_HEADER,
    SER_PHY_STATE_TX_WAIT_FOR_RDY,
    SER_PHY_STATE_TX_PAYLOAD,
    SER_PHY_STATE_RX_WAIT_FOR_RDY,
    SER_PHY_STATE_TX_ZERO_HEADER,  // Entered from IDLE on a slave request (see visible code).
    SER_PHY_STATE_RX_HEADER,
    SER_PHY_STATE_MEMORY_REQUEST,
    SER_PHY_STATE_RX_PAYLOAD,
    SER_PHY_STATE_DISABLED         // Initial state until the PHY is opened.
} ser_phy_spi_master_state_t;
/**@brief Inputs that can advance the state machine. Each value corresponds
 * to one of the m_pend_*_flag flags serviced in SW_IRQ_Handler(), so the
 * state machine always runs in the software-IRQ context.
 */
typedef enum
{
    SER_PHY_EVT_GPIO_RDY = 0,      // Slave RDY line edge (m_pend_rdy_flag).
    SER_PHY_EVT_GPIO_REQ,          // Slave REQ line edge (m_pend_req_flag).
    SER_PHY_EVT_SPI_TRANSFER_DONE, // SPI transfer completed (m_pend_xfer_flag).
    SER_PHY_EVT_TX_API_CALL,       // TX API call pended (m_pend_tx_api_flag) - presumably ser_phy_tx_pkt_send.
    SER_PHY_EVT_RX_API_CALL        // RX API call pended (m_pend_rx_api_flag) - presumably ser_phy_rx_buf_set.
} ser_phy_event_source_t;
#define _static static

// TX/RX buffers handed in by the upper layer.
_static uint8_t * mp_tx_buffer = NULL;
_static uint16_t m_tx_buf_len = 0;
_static uint8_t * mp_rx_buffer = NULL;
_static uint16_t m_rx_buf_len = 0;

// Staging area for one MTU-sized inbound SPI chunk.
_static uint8_t m_recv_buffer[SER_PHY_SPI_5W_MTU_SIZE];
_static uint8_t m_len_buffer[SER_PHY_HEADER_SIZE + 1] = { 0 }; //len is asymmetric for 5W, there is a 1 byte guard when receiving

// Progress counters for chunked TX/RX of a single packet.
_static uint16_t m_tx_packet_length = 0;
_static uint16_t m_accumulated_tx_packet_length = 0; // Bytes of the packet already clocked out.
_static uint16_t m_current_tx_packet_length = 0;     // Size of the chunk currently in flight.
_static uint16_t m_rx_packet_length = 0;
_static uint16_t m_accumulated_rx_packet_length = 0;
_static uint16_t m_current_rx_packet_length = 0;

// Flags pended from GPIO/API context and serviced in SW_IRQ_Handler().
_static volatile bool m_pend_req_flag = 0;
_static volatile bool m_pend_rdy_flag = 0;
_static volatile bool m_pend_xfer_flag = 0;
_static volatile bool m_pend_rx_api_flag = 0;
_static volatile bool m_pend_tx_api_flag = 0;

// Level tracking of the slave's RDY/REQ GPIO lines (true = line low/asserted).
_static volatile bool m_slave_ready_flag = false;
_static volatile bool m_slave_request_flag = false;

_static ser_phy_events_handler_t m_callback_events_handler = NULL; // Upper-layer handler.
_static ser_phy_spi_master_state_t m_spi_master_state = SER_PHY_STATE_DISABLED;
static void ser_phy_switch_state(ser_phy_event_source_t evt_src);
/**@brief Module assert: funnels fatal protocol violations into
 *        APP_ERROR_CHECK_BOOL (error handler is invoked when @p cond is false). */
static void spi_master_raw_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
/**@brief Software-interrupt handler that serializes all state machine input.
 *
 * GPIO callbacks and API calls only pend a flag and trigger this IRQ; here
 * each pending flag is cleared and translated into a ser_phy_switch_state()
 * call, so the state machine always executes in this single context.
 * (SW_IRQ_Handler expands to PendSV_Handler or SWI3_IRQHandler, see the
 * USE_PendSV configuration above.)
 */
void SW_IRQ_Handler()
{
    if (m_pend_req_flag)
    {
        m_pend_req_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_REQUEST(0);
        ser_phy_switch_state(SER_PHY_EVT_GPIO_REQ);
    }

    if (m_pend_rdy_flag)
    {
        m_pend_rdy_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_READY(0);
        ser_phy_switch_state(SER_PHY_EVT_GPIO_RDY);
    }

    if (m_pend_xfer_flag)
    {
        m_pend_xfer_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(0);
        ser_phy_switch_state(SER_PHY_EVT_SPI_TRANSFER_DONE);
    }

    if (m_pend_rx_api_flag)
    {
        m_pend_rx_api_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
        ser_phy_switch_state(SER_PHY_EVT_RX_API_CALL);
    }

    if (m_pend_tx_api_flag)
    {
        m_pend_tx_api_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
        ser_phy_switch_state(SER_PHY_EVT_TX_API_CALL);
    }
}
#ifndef _SPI_5W_
/* GPIOTE callback for the slave RDY line: tracks the line level (low =
 * asserted) and, on assertion, pends a RDY event for the SW IRQ context. */
static void ser_phy_spi_master_ready(nrf_drv_gpiote_pin_t pin,
                                     nrf_gpiote_polarity_t action)
{
    bool line_asserted = (nrf_gpio_pin_read(pin) == 0);

    m_slave_ready_flag = line_asserted;
    if (line_asserted)
    {
        // Defer handling to SW_IRQ_Handler().
        m_pend_rdy_flag = true;
    }

    DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE((uint32_t) !m_slave_ready_flag);
    SET_Pend_SW_IRQ();
}
#endif
/* GPIOTE callback for the slave REQ line: tracks the line level (low =
 * asserted) and, on assertion, pends a REQ event for the SW IRQ context. */
static void ser_phy_spi_master_request(nrf_drv_gpiote_pin_t pin,
                                       nrf_gpiote_polarity_t action)
{
    bool line_asserted = (nrf_gpio_pin_read(pin) == 0);

    m_slave_request_flag = line_asserted;
    if (line_asserted)
    {
        // Defer handling to SW_IRQ_Handler().
        m_pend_req_flag = true;
    }

    DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE((uint32_t) !m_slave_request_flag);
    SET_Pend_SW_IRQ();
}
/* Send event SER_PHY_EVT_TX_PKT_SENT */
/* Notifies the upper layer with SER_PHY_EVT_TX_PKT_SENT. */
static __INLINE void callback_packet_sent()
{
    ser_phy_evt_t evt;

    evt.evt_type = SER_PHY_EVT_TX_PKT_SENT;
    m_callback_events_handler(evt);
}
/* Send event SER_PHY_EVT_RX_PKT_DROPPED */
/* Notifies the upper layer with SER_PHY_EVT_RX_PKT_DROPPED. */
static __INLINE void callback_packet_dropped()
{
    ser_phy_evt_t evt;

    evt.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
    m_callback_events_handler(evt);
}
/* Send event SER_PHY_EVT_RX_PKT_RECEIVED */
/* Notifies the upper layer with SER_PHY_EVT_RX_PKT_RECEIVED, passing the
 * upper layer's buffer and its recorded length. */
static __INLINE void callback_packet_received()
{
    ser_phy_evt_t evt;

    evt.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
    evt.evt_params.rx_pkt_received.p_buffer     = mp_rx_buffer;
    evt.evt_params.rx_pkt_received.num_of_bytes = m_rx_buf_len;
    m_callback_events_handler(evt);
}
/* Send event SER_PHY_EVT_RX_BUF_REQUEST */
/* Notifies the upper layer with SER_PHY_EVT_RX_BUF_REQUEST for the length
 * recorded in m_rx_buf_len. */
static __INLINE void callback_mem_request()
{
    ser_phy_evt_t evt;

    evt.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
    evt.evt_params.rx_buf_request.num_of_bytes = m_rx_buf_len;
    m_callback_events_handler(evt);
}
/**@brief Copies @p len bytes from @p p_src to @p p_dest.
 *
 * Replaces the original hand-rolled byte loop with memcpy (regions used by
 * the callers do not overlap). A zero length is a no-op.
 *
 * @param[out] p_dest Destination buffer (at least @p len bytes).
 * @param[in]  p_src  Source buffer (at least @p len bytes).
 * @param[in]  len    Number of bytes to copy.
 */
static inline void copy_buff(uint8_t * const p_dest, uint8_t const * const p_src, uint16_t len)
{
    if (len > 0)
    {
        memcpy(p_dest, p_src, len);
    }
}
/* Detaches a buffer reference: zeroes the recorded length and clears the
 * pointer so the slot reads as "free". */
static __INLINE void buffer_release(uint8_t * * const pp_buffer, uint16_t * const p_buf_len)
{
    *p_buf_len = 0;
    *pp_buffer = NULL;
}
/* Length of the next SPI chunk: the not-yet-transferred remainder of the
 * packet, clamped to the 5-wire MTU. */
static uint16_t compute_current_packet_length(const uint16_t packet_length,
                                              const uint16_t accumulated_packet_length)
{
    uint16_t remaining = packet_length - accumulated_packet_length;

    return (remaining > SER_PHY_SPI_5W_MTU_SIZE) ? SER_PHY_SPI_5W_MTU_SIZE : remaining;
}
/* Encode length as a little-endian header into m_len_buffer and clock it out
 * to the slave (TX only, nothing received). */
static __INLINE uint32_t header_send(const uint16_t length)
{
    uint16_t encoded_size;

    encoded_size = uint16_encode(length, m_len_buffer);
    return spi_master_send_recv(SER_PHY_SPI_MASTER, m_len_buffer, encoded_size, NULL, 0);
}
/* Transmit the next chunk of the current TX packet (at most one MTU) and
 * advance the accumulated-length bookkeeping. */
static __INLINE uint32_t frame_send()
{
    uint32_t status;

    m_current_tx_packet_length = compute_current_packet_length(m_tx_packet_length,
                                                               m_accumulated_tx_packet_length);
    status = spi_master_send_recv(SER_PHY_SPI_MASTER,
                                  &mp_tx_buffer[m_accumulated_tx_packet_length],
                                  m_current_tx_packet_length,
                                  NULL,
                                  0);
    m_accumulated_tx_packet_length += m_current_tx_packet_length;
    return status;
}
/* Read the slave's length header; one extra byte is clocked in to account for
 * the leading zero guard byte used by the 5-wire protocol. */
static __INLINE uint32_t header_get()
{
    return spi_master_send_recv(SER_PHY_SPI_MASTER, NULL, 0, m_len_buffer, SER_PHY_HEADER_SIZE + 1); //add 0 byte guard when receiving
}
/* Receive the next chunk of the current RX packet into m_recv_buffer.
 * Frames shorter than the MTU are read with one extra byte, because the slave
 * prefixes each frame with a zero guard byte. */
static __INLINE uint32_t frame_get()
{
    m_current_rx_packet_length = compute_current_packet_length(m_rx_packet_length,
                                                               m_accumulated_rx_packet_length);
    if (m_current_rx_packet_length < SER_PHY_SPI_5W_MTU_SIZE)
    {
        m_current_rx_packet_length++; //take into account guard byte when receiving
    }
    return spi_master_send_recv(SER_PHY_SPI_MASTER,
                                NULL,
                                0,
                                m_recv_buffer,
                                m_current_rx_packet_length);
}
/**
 * \brief Master driver main state machine
 * Executed only in the context of PendSV_Handler()
 * For UML graph, please refer to SDK documentation
 */
/* Event sources (evt_src): GPIO /REQ and /RDY edges, SPI transfer completion,
 * and the ser_phy TX/RX API calls. All events are serialized through the SW
 * IRQ, so no extra locking is needed inside this function. The static
 * m_waitForReadyFlag defers an action until the slave raises /RDY again when
 * it was not ready at the moment the action was decided. */
static void ser_phy_switch_state(ser_phy_event_source_t evt_src)
{
    uint32_t err_code = NRF_SUCCESS;
    static bool m_waitForReadyFlag = false; //local scheduling flag to defer RDY events
    switch (m_spi_master_state)
    {
        /* Idle: a slave /REQ starts an RX exchange (zero header first); a TX
         * API call starts a TX exchange. Either way, wait for /RDY if the
         * slave is not ready yet. */
        case SER_PHY_STATE_IDLE:
            if (evt_src == SER_PHY_EVT_GPIO_REQ)
            {
                m_waitForReadyFlag = false;
                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                    err_code = header_send(0);
                }
                else
                {
                    m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
                }
            }
            else if (evt_src == SER_PHY_EVT_TX_API_CALL)
            {
                spi_master_raw_assert(mp_tx_buffer != NULL); //api event with tx_buffer == NULL has no sense
                m_waitForReadyFlag = false;
                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_TX_HEADER;
                    err_code = header_send(m_tx_buf_len);
                }
                else
                {
                    m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
                }
            }
            break;
        /* Slave became ready: send the real length header for a TX exchange. */
        case SER_PHY_STATE_TX_WAIT_FOR_RDY:
            if (evt_src == SER_PHY_EVT_GPIO_RDY)
            {
                m_spi_master_state = SER_PHY_STATE_TX_HEADER;
                err_code = header_send(m_tx_buf_len);
            }
            break;
        /* Slave became ready: send the zero header that opens an RX exchange. */
        case SER_PHY_STATE_RX_WAIT_FOR_RDY:
            if (evt_src == SER_PHY_EVT_GPIO_RDY)
            {
                m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                err_code = header_send(0);
            }
            break;
        /* Length header was clocked out; start sending payload frames, or
         * defer until /RDY if the slave dropped readiness. */
        case SER_PHY_STATE_TX_HEADER:
            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                m_tx_packet_length             = m_tx_buf_len;
                m_accumulated_tx_packet_length = 0;
                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
                    err_code = frame_send();
                }
                else
                {
                    m_waitForReadyFlag = true;
                }
            }
            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag)
            {
                m_waitForReadyFlag = false;
                m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
                err_code = frame_send();
            }
            break;
        /* Send payload frame by frame. When complete, notify the upper layer
         * and either service a pending slave request or go idle. */
        case SER_PHY_STATE_TX_PAYLOAD:
            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                if (m_accumulated_tx_packet_length < m_tx_packet_length)
                {
                    if (m_slave_ready_flag)
                    {
                        err_code = frame_send();
                    }
                    else
                    {
                        m_waitForReadyFlag = true;
                    }
                }
                else
                {
                    spi_master_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
                    //Release TX buffer
                    buffer_release(&mp_tx_buffer, &m_tx_buf_len);
                    callback_packet_sent();
                    if ( m_slave_request_flag)
                    {
                        if (m_slave_ready_flag)
                        {
                            m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                            err_code = header_send(0);
                        }
                        else
                        {
                            m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
                        }
                    }
                    else
                    {
                        m_spi_master_state = SER_PHY_STATE_IDLE; //m_Tx_buffer is NULL - have to wait for API event
                    }
                }
            }
            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag )
            {
                m_waitForReadyFlag = false;
                err_code = frame_send();
            }
            break;
        /* Zero header was clocked out; now read the slave's length header. */
        case SER_PHY_STATE_TX_ZERO_HEADER:
            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_RX_HEADER;
                    err_code = header_get();
                }
                else
                {
                    m_waitForReadyFlag = true;
                }
            }
            else if ( (evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag)
            {
                m_waitForReadyFlag = false;
                m_spi_master_state = SER_PHY_STATE_RX_HEADER;
                err_code = header_get();
            }
            break;
        /* Header received: decode packet length (skipping the guard byte) and
         * ask the upper layer for an RX buffer. */
        case SER_PHY_STATE_RX_HEADER:
            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                m_spi_master_state = SER_PHY_STATE_MEMORY_REQUEST;
                m_rx_buf_len       = uint16_decode(&(m_len_buffer[1])); //skip guard when receiving
                m_rx_packet_length = m_rx_buf_len;
                callback_mem_request();
            }
            break;
        /* Upper layer answered (RX API call, buffer may be NULL); start
         * reading payload frames once the slave is ready. */
        case SER_PHY_STATE_MEMORY_REQUEST:
            if (evt_src == SER_PHY_EVT_RX_API_CALL)
            {
                m_accumulated_rx_packet_length = 0;
                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
                    err_code = frame_get();
                }
                else
                {
                    m_waitForReadyFlag = true;
                }
            }
            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag)
            {
                m_waitForReadyFlag = false;
                m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
                err_code = frame_get();
            }
            break;
        /* Receive frame by frame (dropping data if no buffer was provided).
         * When complete, notify the upper layer, then continue with a pending
         * TX packet, a pending slave request, or go idle. */
        case SER_PHY_STATE_RX_PAYLOAD:
            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                if (mp_rx_buffer)
                {
                    copy_buff(&(mp_rx_buffer[m_accumulated_rx_packet_length]),
                              &(m_recv_buffer[1]),
                              m_current_rx_packet_length - 1); //skip guard byte when receiving
                }
                m_accumulated_rx_packet_length += (m_current_rx_packet_length - 1);
                if (m_accumulated_rx_packet_length < m_rx_packet_length)
                {
                    if (m_slave_ready_flag)
                    {
                        err_code = frame_get();
                    }
                    else
                    {
                        m_waitForReadyFlag = true;
                    }
                }
                else
                {
                    spi_master_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
                    if (mp_rx_buffer == NULL)
                    {
                        callback_packet_dropped();
                    }
                    else
                    {
                        callback_packet_received();
                    }
                    //Release RX buffer
                    buffer_release(&mp_rx_buffer, &m_rx_buf_len);
                    if ((mp_tx_buffer != NULL)) //mp_tx_buffer !=NULL, this means that API_EVT was scheduled
                    {
                        if (m_slave_ready_flag )
                        {
                            err_code = header_send(m_tx_buf_len);
                            m_spi_master_state = SER_PHY_STATE_TX_HEADER;
                        }
                        else
                        {
                            m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
                        }
                    }
                    else if (m_slave_request_flag)
                    {
                        if (m_slave_ready_flag)
                        {
                            m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                            err_code = header_send(0);
                        }
                        else
                        {
                            m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
                        }
                    }
                    else
                    {
                        m_spi_master_state = SER_PHY_STATE_IDLE;
                    }
                }
            }
            else if ( evt_src == SER_PHY_EVT_GPIO_RDY && m_waitForReadyFlag)
            {
                m_waitForReadyFlag = false;
                err_code = frame_get();
            }
            break;
        default:
            break;
    }
    if (err_code != NRF_SUCCESS)
    {
        (void)err_code; // NOTE(review): driver errors are deliberately discarded here — confirm intent
    }
}
/* SPI master driver event handler: defers TRANSFER_COMPLETED events to the
 * SW IRQ by latching a flag and pending the interrupt; all other event types
 * are ignored. */
static void ser_phy_spi_master_event_handler(spi_master_evt_t spi_master_evt)
{
    if (spi_master_evt.type == SPI_MASTER_EVT_TRANSFER_COMPLETED)
    {
        /* Switch state */
        m_pend_xfer_flag = true;
        SET_Pend_SW_IRQ();
    }
}
/* Configure and enable the SW (software-triggered) interrupt used to run the
 * state machine at medium priority. */
static void ser_phy_init_pendSV(void)
{
    NVIC_SetPriority(SW_IRQn, APP_IRQ_PRIORITY_MID);
    NVIC_EnableIRQ(SW_IRQn);
}
/* Initialize GPIOTE inputs for the slave handshake lines.
 * /REQ is always configured with a toggle-sense handler; /RDY is only used in
 * the 3-wire build (_SPI_5W_ not defined) — in 5-wire mode the slave is
 * treated as always ready. Initial flag values are derived from the current
 * (active-low) pin levels. */
static void ser_phy_init_gpiote(void)
{
    if (!nrf_drv_gpiote_is_init())
    {
        (void)nrf_drv_gpiote_init();
    }
    NVIC_SetPriority(GPIOTE_IRQn, APP_IRQ_PRIORITY_HIGH);
    nrf_drv_gpiote_in_config_t config = GPIOTE_CONFIG_IN_SENSE_TOGGLE(true);
    /* Enable pullup to ensure high state while connectivity device is reset */
    config.pull = NRF_GPIO_PIN_PULLUP;
    (void)nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST, &config,
                                 ser_phy_spi_master_request);
    nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST,true);
    m_slave_request_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST));
#ifdef _SPI_5W_
    m_slave_ready_flag = true;
#else
    (void)nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_READY, &config,
                                 ser_phy_spi_master_ready);
    nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_READY,true);
    m_slave_ready_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_READY));
#endif
    NVIC_ClearPendingIRQ(SW_IRQn);
}
/* Release the GPIOTE channels used for the handshake lines (the /RDY channel
 * only exists in the 3-wire build). */
static void ser_phy_deinit_gpiote(void)
{
    nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST);
#ifndef _SPI_5W_
    nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_READY);
#endif
}
/* ser_phy API function */
/* Queue a packet for transmission. Validates arguments, then inside a critical
 * region stores the buffer/length and pends the SW IRQ so the state machine
 * picks up the TX API event.
 * Returns NRF_ERROR_NULL for a NULL buffer, NRF_ERROR_INVALID_PARAM for zero
 * length, NRF_ERROR_BUSY while a previous packet is still pending. */
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
    if (p_buffer == NULL)
    {
        return NRF_ERROR_NULL;
    }
    if (num_of_bytes == 0)
    {
        return NRF_ERROR_INVALID_PARAM;
    }
    if (mp_tx_buffer != NULL)
    {
        return NRF_ERROR_BUSY;
    }
    //ser_phy_interrupts_disable();
    CRITICAL_REGION_ENTER();
    mp_tx_buffer = (uint8_t *)p_buffer;
    m_tx_buf_len = num_of_bytes;
    m_pend_tx_api_flag = true;
    SET_Pend_SW_IRQ();
    //ser_phy_interrupts_enable();
    CRITICAL_REGION_EXIT();
    return NRF_SUCCESS;
}
/* ser_phy API function */
/* Answer a SER_PHY_EVT_RX_BUF_REQUEST. p_buffer may be NULL (packet will be
 * dropped). Only valid while the state machine is in MEMORY_REQUEST; the RX
 * API event is delivered via the pended SW IRQ. */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    if (m_spi_master_state != SER_PHY_STATE_MEMORY_REQUEST)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    //ser_phy_interrupts_disable();
    CRITICAL_REGION_ENTER();
    mp_rx_buffer = p_buffer;
    m_pend_rx_api_flag = true;
    SET_Pend_SW_IRQ();
    //ser_phy_interrupts_enable();
    CRITICAL_REGION_EXIT();
    return NRF_SUCCESS;
}
/* ser_phy API function */
/* Open the PHY: register the events handler, set up the handshake GPIOTE
 * lines, configure and open the SPI master (1 MHz, LSB first, mode 0), hook
 * the driver event handler (5W variant when _SPI_5W_ is defined) and enable
 * the SW IRQ. Returns NRF_ERROR_INVALID_STATE if already open and
 * NRF_ERROR_NULL for a NULL handler. */
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    if (m_spi_master_state != SER_PHY_STATE_DISABLED)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }
    uint32_t err_code = NRF_SUCCESS;
    m_spi_master_state        = SER_PHY_STATE_IDLE;
    m_callback_events_handler = events_handler;
    ser_phy_init_gpiote();
    /* Configure SPI Master driver */
    spi_master_config_t spi_master_config;
    spi_master_config.SPI_Freq     = SPI_FREQUENCY_FREQUENCY_M1;
    spi_master_config.SPI_Pin_SCK  = SER_PHY_SPI_MASTER_PIN_SCK;
    spi_master_config.SPI_Pin_MISO = SER_PHY_SPI_MASTER_PIN_MISO;
    spi_master_config.SPI_Pin_MOSI = SER_PHY_SPI_MASTER_PIN_MOSI;
    spi_master_config.SPI_Pin_SS   = SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT;
    spi_master_config.SPI_ORDER    = SPI_CONFIG_ORDER_LsbFirst;
    spi_master_config.SPI_CPOL     = SPI_CONFIG_CPOL_ActiveHigh;
    spi_master_config.SPI_CPHA     = SPI_CONFIG_CPHA_Leading;
    err_code = spi_master_open(SER_PHY_SPI_MASTER, &spi_master_config);
    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }
#ifdef _SPI_5W_
    spi_5W_master_evt_handler_reg(SER_PHY_SPI_MASTER, ser_phy_spi_master_event_handler);
#else
    spi_master_evt_handler_reg(SER_PHY_SPI_MASTER, ser_phy_spi_master_event_handler);
#endif
    ser_phy_init_pendSV();
    return err_code;
}
/* ser_phy API function */
/* Close the PHY: disable the state machine, drop the handler, release both
 * buffers, reset all transfer bookkeeping, and tear down GPIOTE and the SPI
 * master driver. */
void ser_phy_close(void)
{
    m_spi_master_state = SER_PHY_STATE_DISABLED;
    m_callback_events_handler = NULL;
    buffer_release(&mp_tx_buffer, &m_tx_buf_len);
    buffer_release(&mp_rx_buffer, &m_rx_buf_len);
    m_tx_packet_length             = 0;
    m_accumulated_tx_packet_length = 0;
    m_current_tx_packet_length     = 0;
    m_rx_packet_length             = 0;
    m_accumulated_rx_packet_length = 0;
    m_current_rx_packet_length     = 0;
    ser_phy_deinit_gpiote();
    spi_master_close(SER_PHY_SPI_MASTER);
}
/* ser_phy API function */
/* Enable the SW IRQ that drives the state machine (event delivery resumes). */
void ser_phy_interrupts_enable(void)
{
    NVIC_EnableIRQ(SW_IRQn);
}
/* ser_phy API function */
/* Disable the SW IRQ that drives the state machine (event delivery paused). */
void ser_phy_interrupts_disable(void)
{
    NVIC_DisableIRQ(SW_IRQn);
}
#ifdef SER_PHY_DEBUG_APP_ENABLE
/* Optional debug hook: forwards raw PHY debug events to a user callback. */
static spi_master_raw_callback_t m_spi_master_raw_evt_callback;

/* Emit a debug event (type + data word) to the registered callback, if any. */
void debug_evt(spi_master_raw_evt_type_t evt, uint32_t data)
{
    if (m_spi_master_raw_evt_callback)
    {
        spi_master_raw_evt_t e;
        e.evt  = evt;
        e.data = data;
        m_spi_master_raw_evt_callback(e);
    }
}

/* Register (or clear, with NULL) the debug event callback. */
void debug_init(spi_master_raw_callback_t spi_master_raw_evt_callback)
{
    m_spi_master_raw_evt_callback = spi_master_raw_evt_callback;
}
#endif
/** @} */

View File

@@ -0,0 +1,644 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
*
* @defgroup ser_phy_spi_5W_phy_driver_slave ser_phy_nrf51_spi_5W_slave.c
* @{
* @ingroup ser_phy_spi_5W_phy_driver_slave
*
* @brief SPI_5W_RAW PHY slave driver.
*/
#include <stddef.h>
#include <string.h>
#include "boards.h"
#include "nrf_drv_spis.h"
#include "ser_phy.h"
#include "ser_config.h"
#include "nrf_gpio.h"
#include "nrf_gpiote.h"
#include "nrf_soc.h"
#include "app_error.h"
#include "app_util.h"
#include "ser_phy_config_conn.h"
#include "ser_phy_debug_conn.h"
#include "app_error.h"
#define _static static

/* Slave-side MTU equals the generic serialization SPI MTU. */
#define SER_PHY_SPI_5W_MTU_SIZE SER_PHY_SPI_MTU_SIZE

#define SER_PHY_SPI_DEF_CHARACTER 0xFF //SPI default character. Character clocked out in case of an ignored transaction
#define SER_PHY_SPI_ORC_CHARACTER 0xFF //SPI over-read character. Character clocked out after an over-read of the transmit buffer

static nrf_drv_spis_t m_spis = NRF_DRV_SPIS_INSTANCE(SER_PHY_SPI_SLAVE_INSTANCE);

/* This translation unit is built in 5-wire mode; the #ifndef _SPI_5W_ blocks
 * below (PPI/GPIOTE-driven /RDY line) are compiled out. */
#define _SPI_5W_

//SPI raw peripheral device configuration data
typedef struct
{
    int32_t pin_req;       //SPI /REQ pin. -1 for not using
    int32_t pin_rdy;       //SPI /RDY pin. -1 for not using
    int32_t ppi_rdy_ch;    //SPI /RDY ppi ready channel
    int32_t gpiote_rdy_ch; //SPI /RDY pin ready channel
} spi_slave_raw_trasp_cfg_t;

/**@brief States of the SPI transaction state machine. */
typedef enum
{
    SPI_RAW_STATE_UNKNOWN,
    SPI_RAW_STATE_SETUP_HEADER,
    SPI_RAW_STATE_RX_HEADER,
    SPI_RAW_STATE_MEM_REQUESTED,
    SPI_RAW_STATE_RX_PAYLOAD,
    SPI_RAW_STATE_TX_HEADER,
    SPI_RAW_STATE_TX_PAYLOAD,
} trans_state_t;

_static spi_slave_raw_trasp_cfg_t m_spi_slave_raw_config; // handshake pin/channel configuration
_static uint16_t m_accumulated_rx_packet_length;          // bytes of the current RX packet received so far
_static uint16_t m_rx_packet_length;                      // total length of the current RX packet
_static uint16_t m_current_rx_frame_length;               // length of the RX frame in flight
_static uint16_t m_accumulated_tx_packet_length;          // bytes of the current TX packet sent so far
_static uint16_t m_tx_packet_length;                      // total length of the current TX packet
_static uint16_t m_current_tx_frame_length;               // length of the TX frame in flight
_static uint8_t m_header_rx_buffer[SER_PHY_HEADER_SIZE + 1]; // + 1 for '0' guard in SPI_5W
_static uint8_t m_header_tx_buffer[SER_PHY_HEADER_SIZE + 1]; // + 1 for '0' guard in SPI_5W
_static uint8_t m_tx_frame_buffer[SER_PHY_SPI_5W_MTU_SIZE];  // staging buffer for guard byte + TX payload chunk
_static uint8_t m_rx_frame_buffer[SER_PHY_SPI_5W_MTU_SIZE];  // scratch RX buffer (also used when trashing payload)
_static uint8_t m_zero_buff[SER_PHY_SPI_5W_MTU_SIZE] = { 0 }; //ROM'able declaration - all guard bytes
_static uint8_t * volatile m_p_rx_buffer = NULL;          // upper-layer RX buffer (NULL => drop payload)
_static const uint8_t * volatile m_p_tx_buffer = NULL;    // upper-layer TX buffer (NULL => nothing queued)
_static bool m_trash_payload_flag;                        // true when RX payload is discarded into scratch buffer
_static bool m_buffer_reqested_flag;                      // true between RX_BUF_REQUEST and ser_phy_rx_buf_set()
_static trans_state_t m_trans_state = SPI_RAW_STATE_UNKNOWN; // slave state machine state
_static ser_phy_events_handler_t m_ser_phy_callback = NULL;  // registered upper-layer event handler
/* Internal invariant check: routes a failed condition to APP_ERROR_CHECK_BOOL. */
static void spi_slave_raw_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
/* Forward an event to the registered ser_phy handler, if one is set. */
static void callback_ser_phy_event(ser_phy_evt_t event)
{
    if (m_ser_phy_callback == NULL)
    {
        return;
    }
    m_ser_phy_callback(event);
}
/* Emit SER_PHY_EVT_RX_BUF_REQUEST asking the upper layer for size bytes. */
static void callback_memory_request(uint16_t size)
{
    ser_phy_evt_t evt;

    evt.evt_type                               = SER_PHY_EVT_RX_BUF_REQUEST;
    evt.evt_params.rx_buf_request.num_of_bytes = size;
    callback_ser_phy_event(evt);
}
/* Emit SER_PHY_EVT_RX_PKT_RECEIVED for a fully received packet. */
static void callback_packet_received(uint8_t * pBuffer, uint16_t size)
{
    ser_phy_evt_t evt;

    evt.evt_type                                = SER_PHY_EVT_RX_PKT_RECEIVED;
    evt.evt_params.rx_pkt_received.p_buffer     = pBuffer;
    evt.evt_params.rx_pkt_received.num_of_bytes = size;
    callback_ser_phy_event(evt);
}
/* Emit SER_PHY_EVT_RX_PKT_DROPPED (no RX buffer was provided). */
static void callback_packet_dropped()
{
    ser_phy_evt_t evt;

    evt.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
    callback_ser_phy_event(evt);
}
/* Emit SER_PHY_EVT_TX_PKT_SENT once a queued packet has been clocked out. */
static void callback_packet_transmitted(void)
{
    ser_phy_evt_t evt;

    evt.evt_type = SER_PHY_EVT_TX_PKT_SENT;
    callback_ser_phy_event(evt);
}
/* Byte-wise copy of len bytes from p_src into p_dest; regions must not overlap. */
static void copy_buff(uint8_t * const p_dest, uint8_t const * const p_src, uint16_t len)
{
    uint8_t const * p_from    = p_src;
    uint8_t       * p_to      = p_dest;
    uint16_t        remaining = len;

    while (remaining > 0)
    {
        *p_to++ = *p_from++;
        remaining--;
    }
}
/* Function computes current packet length */
/* Length of the next frame: remaining bytes of the packet, capped at the MTU. */
static uint16_t compute_current_frame_length(const uint16_t packet_length,
                                             const uint16_t accumulated_packet_length)
{
    uint16_t remaining = packet_length - accumulated_packet_length;

    return (remaining > SER_PHY_SPI_5W_MTU_SIZE) ? SER_PHY_SPI_5W_MTU_SIZE : remaining;
}
/* Arm the SPIS buffers to receive the master's length header while clocking
 * out guard (zero) bytes. */
static uint32_t header_get()
{
    return nrf_drv_spis_buffers_set(&m_spis,
                                    (uint8_t *) m_zero_buff,
                                    SER_PHY_HEADER_SIZE,
                                    m_header_rx_buffer,
                                    SER_PHY_HEADER_SIZE);
}
/* Arm the SPIS buffers for the next RX frame. TX side clocks out guard bytes
 * (m_zero_buff); RX lands in the caller-supplied packet buffer, or in the
 * scratch frame buffer when the payload is being trashed. */
static uint32_t frame_get()
{
    uint8_t * p_dest;

    m_current_rx_frame_length = compute_current_frame_length(m_rx_packet_length,
                                                             m_accumulated_rx_packet_length);
    if (m_trash_payload_flag)
    {
        p_dest = m_rx_frame_buffer;
    }
    else
    {
        p_dest = &(m_p_rx_buffer[m_accumulated_rx_packet_length]);
    }
    return nrf_drv_spis_buffers_set(&m_spis,
                                    (uint8_t *) m_zero_buff,
                                    m_current_rx_frame_length,
                                    p_dest,
                                    m_current_rx_frame_length);
}
/* Arm the SPIS buffers with a zero guard byte followed by the 2-byte
 * little-endian length header. */
static uint32_t header_send(uint16_t len)
{
    m_header_tx_buffer[0] = 0; //guard byte expected by the 5W master
    (void)uint16_encode(len, &(m_header_tx_buffer[1]));
    return nrf_drv_spis_buffers_set(&m_spis,
                                    m_header_tx_buffer,
                                    SER_PHY_HEADER_SIZE + 1,
                                    m_header_rx_buffer,
                                    SER_PHY_HEADER_SIZE + 1);
}
/* Stage the next TX frame: a zero guard byte followed by up to one MTU of
 * payload (the guard byte consumes one byte of the MTU-sized frame buffer). */
static uint32_t frame_send()
{
    m_current_tx_frame_length = compute_current_frame_length(m_tx_packet_length,
                                                             m_accumulated_tx_packet_length);
    if (m_current_tx_frame_length == SER_PHY_SPI_5W_MTU_SIZE)
    {
        //extra space for guard byte must be taken into account for MTU
        m_current_tx_frame_length -= 1;
    }
    m_tx_frame_buffer[0] = 0; //guard byte
    copy_buff(&(m_tx_frame_buffer[1]),
              &(m_p_tx_buffer[m_accumulated_tx_packet_length]),
              m_current_tx_frame_length);
    return nrf_drv_spis_buffers_set(&m_spis,
                                    m_tx_frame_buffer,
                                    m_current_tx_frame_length + 1,
                                    m_rx_frame_buffer,
                                    m_current_tx_frame_length + 1);
}
/* Signal "ready" to the master. In the 3-wire build this fires the GPIOTE
 * task that drives the /RDY line; in the 5-wire build (this one) it is a
 * no-op because readiness is conveyed in-band. */
static void set_ready_line(void)
{
#ifndef _SPI_5W_
    //toggle - this should go high - but toggle is unsafe
    uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.gpiote_rdy_ch);
    *(uint32_t *)rdy_task = 1; // write directly to the GPIOTE task register
#endif
    return;
}
/* Assert the /REQ line toward the master (active-low: drive the pin low). */
static void set_request_line(void)
{
    //active low logic - set is 0
    nrf_gpio_pin_clear(m_spi_slave_raw_config.pin_req);
    DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(0);
    return;
}
/* Deassert the /REQ line toward the master (active-low: drive the pin high). */
static void clear_request_line(void)
{
    //active low logic - clear is 1
    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
    DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(0);
    return;
}
/**
 * \brief Slave driver main state machine
 * For UML graph, please refer to SDK documentation
 */
/* Driven by SPIS driver events (BUFFERS_SET_DONE, XFER_DONE) plus a dummy
 * NRF_DRV_SPIS_EVT_TYPE_MAX event injected by the API functions to force a
 * transition. Any error from the driver calls aborts via APP_ERROR_CHECK. */
static void spi_slave_event_handle(nrf_drv_spis_event_t event)
{
    static uint32_t err_code = NRF_SUCCESS;
    static uint16_t packetLength;
    switch (m_trans_state)
    {
        /* Initial dummy transition: arm buffers for the first header. */
        case SPI_RAW_STATE_SETUP_HEADER:
            m_trans_state = SPI_RAW_STATE_RX_HEADER;
            err_code = header_get();
            break;
        /* Waiting for the master's length header. A non-zero length starts an
         * RX packet (buffer requested from the upper layer); a zero length is
         * the master polling for a slave->master packet. */
        case SPI_RAW_STATE_RX_HEADER:
            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }
            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
                spi_slave_raw_assert(event.rx_amount == SER_PHY_HEADER_SIZE);
                packetLength = uint16_decode(m_header_rx_buffer);
                if (packetLength != 0 )
                {
                    m_trans_state          = SPI_RAW_STATE_MEM_REQUESTED;
                    m_buffer_reqested_flag = true;
                    m_rx_packet_length     = packetLength;
                    callback_memory_request(packetLength);
                }
                else
                {
                    if (m_p_tx_buffer)
                    {
                        clear_request_line();
                        m_trans_state = SPI_RAW_STATE_TX_HEADER;
                        err_code = header_send(m_tx_packet_length);
                    }
                    else
                    {
                        //there is nothing to send - zero response facilitates polling - but perhaps, it should be assert
                        err_code = header_send(0);
                    }
                }
            }
            break;
        /* Upper layer was asked for an RX buffer; the dummy API event from
         * ser_phy_rx_buf_set() resumes reception. */
        case SPI_RAW_STATE_MEM_REQUESTED:
            if (event.evt_type == NRF_DRV_SPIS_EVT_TYPE_MAX) //This is API dummy event
            {
                m_buffer_reqested_flag         = false;
                m_trans_state                  = SPI_RAW_STATE_RX_PAYLOAD;
                m_accumulated_rx_packet_length = 0;
                err_code = frame_get();
            }
            break;
        /* Receiving payload frame by frame; when the whole packet is in,
         * notify the upper layer (received or dropped) and rearm for the
         * next header. */
        case SPI_RAW_STATE_RX_PAYLOAD:
            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }
            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
                spi_slave_raw_assert(event.rx_amount == m_current_rx_frame_length);
                m_accumulated_rx_packet_length += m_current_rx_frame_length;
                if (m_accumulated_rx_packet_length < m_rx_packet_length )
                {
                    err_code = frame_get();
                }
                else
                {
                    spi_slave_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
                    m_trans_state = SPI_RAW_STATE_RX_HEADER;
                    err_code = header_get();
                    if (!m_trash_payload_flag)
                    {
                        callback_packet_received(m_p_rx_buffer, m_accumulated_rx_packet_length);
                    }
                    else
                    {
                        callback_packet_dropped();
                    }
                }
            }
            break;
        /* Our guarded length header was clocked out; start sending payload. */
        case SPI_RAW_STATE_TX_HEADER:
            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }
            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
                spi_slave_raw_assert(event.tx_amount == SER_PHY_HEADER_SIZE + 1);
                m_trans_state                  = SPI_RAW_STATE_TX_PAYLOAD;
                m_accumulated_tx_packet_length = 0;
                err_code = frame_send();
            }
            break;
        /* Sending payload frame by frame; when done, notify the upper layer
         * and return to waiting for a header. */
        case SPI_RAW_STATE_TX_PAYLOAD:
            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }
            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
                spi_slave_raw_assert(event.tx_amount == m_current_tx_frame_length + 1);
                m_accumulated_tx_packet_length += m_current_tx_frame_length;
                if ( m_accumulated_tx_packet_length < m_tx_packet_length )
                {
                    err_code = frame_send();
                }
                else
                {
                    spi_slave_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
                    //clear pointer before callback
                    m_p_tx_buffer = NULL;
                    callback_packet_transmitted();
                    //spi slave TX transfer is possible only when RX is ready, so return to waiting for a header
                    m_trans_state = SPI_RAW_STATE_RX_HEADER;
                    err_code = header_get();
                }
            }
            break;
        default:
            err_code = NRF_ERROR_INVALID_STATE;
            break;
    }
    APP_ERROR_CHECK(err_code);
}
#ifndef _SPI_5W_
/* 3-wire build only: set up a GPIOTE toggle task to drive the /RDY line. */
static void spi_slave_gpiote_init(void)
{
    if (!nrf_drv_gpiote_is_init())
    {
        (void)nrf_drv_gpiote_init();
    }
    nrf_drv_gpiote_out_config_t config = GPIOTE_CONFIG_OUT_TASK_TOGGLE(true);
    (void)nrf_drv_gpiote_out_init(m_spi_slave_raw_config.gpiote_rdy_ch, &config);
    return;
}

/* 3-wire build only: wire the SPIS END event to the /RDY GPIOTE task via PPI
 * so the line is cleared automatically at the end of each transfer. */
static void spi_slave_ppi_init(void)
{
    uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.gpiote_rdy_ch);
    //Configure PPI channel to clear /RDY line
    NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].EEP = (uint32_t)(&NRF_SPIS1->EVENTS_END);
    NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].TEP = rdy_task;
    //this works only for channels 0..15 - but soft device is using 8-15 anyway
    NRF_PPI->CHEN |= (1 << m_spi_slave_raw_config.ppi_rdy_ch);
    return;
}
#endif
/* Configure the handshake GPIOs: /REQ as output, deasserted (high, active-low
 * logic). The /RDY pin is only configured in the 3-wire build. */
static void spi_slave_gpio_init(void)
{
    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
    nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_req);
#ifndef _SPI_5W_
    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_rdy);
    nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_rdy);
#endif
    return;
}
/* ser_phy API function */
/* Enable the SPIS peripheral interrupt (resumes PHY event delivery). */
void ser_phy_interrupts_enable(void)
{
    NVIC_EnableIRQ(nrfx_get_irq_number(m_spis.p_reg));
}
/* ser_phy API function */
/* Disable the SPIS peripheral interrupt (pauses PHY event delivery). */
void ser_phy_interrupts_disable(void)
{
    NVIC_DisableIRQ(nrfx_get_irq_number(m_spis.p_reg));
}
/* ser_phy API function */
/* Answer a SER_PHY_EVT_RX_BUF_REQUEST. p_buffer may be NULL, in which case
 * the incoming payload is received into a scratch buffer and dropped. Only
 * valid while a buffer request is outstanding; a dummy driver event is
 * injected to advance the state machine with interrupts masked. */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    uint32_t status = NRF_SUCCESS;
    nrf_drv_spis_event_t event;
    ser_phy_interrupts_disable();
    if (m_buffer_reqested_flag && (m_trans_state == SPI_RAW_STATE_MEM_REQUESTED))
    {
        m_p_rx_buffer = p_buffer;
        if (m_p_rx_buffer)
        {
            m_trash_payload_flag = false;
        }
        else
        {
            m_trash_payload_flag = true;
        }
        event.evt_type  = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition with dummy event
        event.rx_amount = 0;
        event.tx_amount = 0;
        spi_slave_event_handle(event);
    }
    else
    {
        status = NRF_ERROR_BUSY;
    }
    ser_phy_interrupts_enable();
    return status;
}
/* ser_phy API function */
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
uint32_t status = NRF_SUCCESS;
if ( p_buffer == NULL || num_of_bytes == 0)
{
return NRF_ERROR_NULL;
}
ser_phy_interrupts_disable();
if ( m_p_tx_buffer == NULL)
{
m_tx_packet_length = num_of_bytes;
m_p_tx_buffer = p_buffer;
set_request_line();
}
else
{
status = NRF_ERROR_BUSY;
}
ser_phy_interrupts_enable();
return status;
}
/* ser_phy API function */
/* Open the slave PHY: configure handshake GPIOs (plus GPIOTE/PPI in the
 * 3-wire build), initialize the SPIS driver (mode 0, LSB first, lowest IRQ
 * priority), and kick the state machine with a dummy event so it arms the
 * first header reception. Returns NRF_ERROR_INVALID_STATE if already open
 * and NRF_ERROR_NULL for a NULL handler. */
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    uint32_t err_code;
    nrf_drv_spis_config_t spi_slave_config;
    nrf_drv_spis_event_t event;
    if (m_trans_state != SPI_RAW_STATE_UNKNOWN)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }
    //one ppi channel and one gpiote channel are used to drive RDY line
    m_spi_slave_raw_config.pin_req       = SER_PHY_SPI_SLAVE_REQ_PIN;
    m_spi_slave_raw_config.pin_rdy       = SER_PHY_SPI_SLAVE_RDY_PIN;
    m_spi_slave_raw_config.ppi_rdy_ch    = SER_PHY_SPI_PPI_RDY_CH;
    m_spi_slave_raw_config.gpiote_rdy_ch = SER_PHY_SPI_GPIOTE_RDY_CH;
    spi_slave_gpio_init();
#ifndef _SPI_5W_
    spi_slave_gpiote_init();
    spi_slave_ppi_init();
#endif
    spi_slave_config.miso_pin     = SER_CON_SPIS_MISO_PIN;
    spi_slave_config.mosi_pin     = SER_CON_SPIS_MOSI_PIN;
    spi_slave_config.sck_pin      = SER_CON_SPIS_SCK_PIN;
    spi_slave_config.csn_pin      = SER_CON_SPIS_CSN_PIN;
    spi_slave_config.mode         = NRF_DRV_SPIS_MODE_0;
    spi_slave_config.bit_order    = NRF_DRV_SPIS_BIT_ORDER_LSB_FIRST;
    spi_slave_config.def          = SER_PHY_SPI_DEF_CHARACTER;
    spi_slave_config.orc          = SER_PHY_SPI_ORC_CHARACTER;
    spi_slave_config.csn_pullup   = NRF_GPIO_PIN_PULLUP;
    spi_slave_config.irq_priority = APP_IRQ_PRIORITY_LOWEST;
    //keep /CS high when init
    nrf_gpio_cfg_input(spi_slave_config.csn_pin, NRF_GPIO_PIN_PULLUP);
    err_code = nrf_drv_spis_init(&m_spis, &spi_slave_config, spi_slave_event_handle);
    APP_ERROR_CHECK(err_code);
    if (err_code == NRF_SUCCESS)
    {
        m_ser_phy_callback = events_handler;
        m_trans_state      = SPI_RAW_STATE_SETUP_HEADER;
        event.evt_type  = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition for dummy event
        event.rx_amount = 0;
        event.tx_amount = 0;
        spi_slave_event_handle(event);
    }
    return err_code;
}
/* ser_phy API function */
/* Close the slave PHY: release the SPIS driver, drop the event handler, and
 * reset the state machine so ser_phy_open() can be called again. */
void ser_phy_close(void)
{
    nrf_drv_spis_uninit(&m_spis);
    m_ser_phy_callback = NULL;
    m_trans_state      = SPI_RAW_STATE_UNKNOWN;
}

View File

@@ -0,0 +1,804 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
*
* @defgroup ser_phy_spi_phy_driver_master ser_phy_nrf51_spi_master.c
* @{
* @ingroup ser_phy_spi_phy_driver_master
*
* @brief SPI_RAW PHY master driver.
*/
#include <stdio.h>
#include "nrf_drv_gpiote.h"
#include "nrf_drv_spi.h"
#include "ser_phy.h"
#include "ser_config.h"
#include "app_util.h"
#include "app_util_platform.h"
#include "app_error.h"
#include "nrf_error.h"
#include "nrf_gpio.h"
#include "nrf_gpiote.h"
#include "boards.h"
#include "app_error.h"
#include "ser_phy_config_app.h"
#include "ser_phy_debug_app.h"
#define notUSE_PendSV
#ifdef USE_PendSV
#define SW_IRQn PendSV_IRQn
#define SW_IRQ_Handler() PendSV_Handler()
#define SET_Pend_SW_IRQ() SCB->ICSR = SCB->ICSR | SCB_ICSR_PENDSVSET_Msk //NVIC_SetPendingIRQ(PendSV_IRQn) - PendSV_IRQn is a negative - does not work with CMSIS
#else
#define SW_IRQn SWI3_IRQn
#define SW_IRQ_Handler() SWI3_IRQHandler()
#define SET_Pend_SW_IRQ() NVIC_SetPendingIRQ(SWI3_IRQn)
#endif /* USE_PendSV */
typedef enum
{
SER_PHY_STATE_IDLE = 0,
SER_PHY_STATE_TX_HEADER,
SER_PHY_STATE_TX_WAIT_FOR_RDY,
SER_PHY_STATE_TX_PAYLOAD,
SER_PHY_STATE_RX_WAIT_FOR_RDY,
SER_PHY_STATE_TX_ZERO_HEADER,
SER_PHY_STATE_RX_HEADER,
SER_PHY_STATE_MEMORY_REQUEST,
SER_PHY_STATE_RX_PAYLOAD,
SER_PHY_STATE_DISABLED
} ser_phy_spi_master_state_t;
typedef enum
{
SER_PHY_EVT_GPIO_RDY = 0,
SER_PHY_EVT_GPIO_REQ,
SER_PHY_EVT_SPI_TRANSFER_DONE,
SER_PHY_EVT_TX_API_CALL,
SER_PHY_EVT_RX_API_CALL
} ser_phy_event_source_t;
#define _static static
_static uint8_t * mp_tx_buffer = NULL;
_static uint16_t m_tx_buf_len = 0;
_static uint8_t * mp_rx_buffer = NULL;
_static uint16_t m_rx_buf_len = 0;
_static uint8_t m_frame_buffer[SER_PHY_SPI_MTU_SIZE];
_static uint8_t m_header_buffer[SER_PHY_HEADER_SIZE] = { 0 };
_static uint16_t m_tx_packet_length = 0;
_static uint16_t m_accumulated_tx_packet_length = 0;
_static uint16_t m_current_tx_packet_length = 0;
_static uint16_t m_rx_packet_length = 0;
_static uint16_t m_accumulated_rx_packet_length = 0;
_static uint16_t m_current_rx_packet_length = 0;
_static volatile bool m_pend_req_flag = 0;
_static volatile bool m_pend_rdy_flag = 0;
_static volatile bool m_pend_xfer_flag = 0;
_static volatile bool m_pend_rx_api_flag = 0;
_static volatile bool m_pend_tx_api_flag = 0;
_static volatile bool m_slave_ready_flag = false;
_static volatile bool m_slave_request_flag = false;
_static ser_phy_events_handler_t m_callback_events_handler = NULL;
_static ser_phy_spi_master_state_t m_spi_master_state = SER_PHY_STATE_DISABLED;
_static const nrf_drv_spi_t m_spi_master = SER_PHY_SPI_MASTER_INSTANCE;
static void ser_phy_switch_state(ser_phy_event_source_t evt_src);

/* Halt via APP_ERROR_CHECK_BOOL when an internal invariant is violated. */
static void spi_master_raw_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
/* Software IRQ handler: drains every pending event flag (set by the GPIOTE
 * handlers, the SPI event handler and the API calls) and dispatches each one
 * to the state machine. A flag is cleared before its dispatch so that a new
 * event of the same kind can be pended while the dispatch runs. */
void SW_IRQ_Handler()
{
    if (m_pend_req_flag)
    {
        m_pend_req_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_REQUEST(0);
        ser_phy_switch_state(SER_PHY_EVT_GPIO_REQ);
    }

    if (m_pend_rdy_flag)
    {
        m_pend_rdy_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_READY(0);
        ser_phy_switch_state(SER_PHY_EVT_GPIO_RDY);
    }

    if (m_pend_xfer_flag)
    {
        m_pend_xfer_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(0);
        ser_phy_switch_state(SER_PHY_EVT_SPI_TRANSFER_DONE);
    }

    if (m_pend_rx_api_flag)
    {
        m_pend_rx_api_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
        ser_phy_switch_state(SER_PHY_EVT_RX_API_CALL);
    }

    if (m_pend_tx_api_flag)
    {
        m_pend_tx_api_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
        ser_phy_switch_state(SER_PHY_EVT_TX_API_CALL);
    }
}
/* GPIOTE toggle handler for the slave ready line (active low).
 * Latches the new line state and, on activation, pends an RDY event
 * for the software IRQ to process. */
static void ser_phy_spi_master_ready(nrf_drv_gpiote_pin_t pin,
                                     nrf_gpiote_polarity_t action)
{
    bool const line_active = (nrf_gpio_pin_read(pin) == 0);

    m_slave_ready_flag = line_active;
    if (line_active)
    {
        m_pend_rdy_flag = true;
    }

    DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE((uint32_t) !m_slave_ready_flag);
    SET_Pend_SW_IRQ();
}
/* GPIOTE toggle handler for the slave request line (active low).
 * Latches the new line state and, on activation, pends a REQ event
 * for the software IRQ to process. */
static void ser_phy_spi_master_request(nrf_drv_gpiote_pin_t pin,
                                       nrf_gpiote_polarity_t action)
{
    bool const line_active = (nrf_gpio_pin_read(pin) == 0);

    m_slave_request_flag = line_active;
    if (line_active)
    {
        m_pend_req_flag = true;
    }

    DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE((uint32_t) !m_slave_request_flag);
    SET_Pend_SW_IRQ();
}
/* Send event SER_PHY_EVT_TX_PKT_SENT */
static __INLINE void callback_packet_sent()
{
ser_phy_evt_t event;
DEBUG_EVT_SPI_MASTER_PHY_TX_PKT_SENT(0);
event.evt_type = SER_PHY_EVT_TX_PKT_SENT;
m_callback_events_handler(event);
}
/* Send event SER_PHY_EVT_RX_PKT_DROPPED */
static __INLINE void callback_packet_dropped()
{
ser_phy_evt_t event;
DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_DROPPED(0);
event.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
m_callback_events_handler(event);
}
/* Send event SER_PHY_EVT_RX_PKT_RECEIVED */
static __INLINE void callback_packet_received()
{
ser_phy_evt_t event;
DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_RECEIVED(0);
event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
event.evt_params.rx_pkt_received.p_buffer = mp_rx_buffer;
event.evt_params.rx_pkt_received.num_of_bytes = m_rx_buf_len;
m_callback_events_handler(event);
}
/* Send event SER_PHY_EVT_RX_BUF_REQUEST */
static __INLINE void callback_mem_request()
{
ser_phy_evt_t event;
DEBUG_EVT_SPI_MASTER_PHY_BUF_REQUEST(0);
event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
event.evt_params.rx_buf_request.num_of_bytes = m_rx_buf_len;
m_callback_events_handler(event);
}
/* Reset a (buffer pointer, length) pair so the slot reads as "no buffer". */
static __INLINE void buffer_release(uint8_t * * const pp_buffer,
                                    uint16_t * const p_buf_len)
{
    *p_buf_len = 0;
    *pp_buffer = NULL;
}
/* Length of the next SPI chunk: what remains of the packet, clipped to the MTU. */
static uint16_t compute_current_packet_length(const uint16_t packet_length,
                                              const uint16_t accumulated_packet_length)
{
    uint16_t const remaining = packet_length - accumulated_packet_length;
    return (remaining > SER_PHY_SPI_MTU_SIZE) ? SER_PHY_SPI_MTU_SIZE : remaining;
}
/* Encode 'length' into the shared 2-byte header buffer and start clocking it
 * out (RX side unused for this transfer). */
static __INLINE uint32_t header_send(const uint16_t length)
{
    uint8_t const encoded_size = uint16_encode(length, m_header_buffer);
    return nrf_drv_spi_transfer(&m_spi_master, m_header_buffer, encoded_size, NULL, 0);
}
/* Start transmission of the next TX chunk and advance the accumulated count.
 * Returns the driver's error code from nrf_drv_spi_transfer(). */
static __INLINE uint32_t frame_send()
{
    m_current_tx_packet_length = compute_current_packet_length(m_tx_packet_length,
                                                               m_accumulated_tx_packet_length);

    uint32_t const err_code = nrf_drv_spi_transfer(&m_spi_master,
                                                   &mp_tx_buffer[m_accumulated_tx_packet_length],
                                                   m_current_tx_packet_length,
                                                   NULL,
                                                   0);
    m_accumulated_tx_packet_length += m_current_tx_packet_length;
    return err_code;
}
/* Start reception of the slave's 2-byte length header (nothing is transmitted). */
static __INLINE uint32_t header_get()
{
    return nrf_drv_spi_transfer(&m_spi_master, NULL, 0, m_header_buffer, SER_PHY_HEADER_SIZE);
}
/* Start reception of the next RX chunk. Data is placed at the right offset in
 * the upper layer's buffer, or into the local trash buffer when no RX buffer
 * was provided (packet will be dropped). */
static __INLINE uint32_t frame_get()
{
    m_current_rx_packet_length = compute_current_packet_length(m_rx_packet_length,
                                                               m_accumulated_rx_packet_length);

    uint8_t * const p_dest = (mp_rx_buffer != NULL)
                             ? &(mp_rx_buffer[m_accumulated_rx_packet_length])
                             : m_frame_buffer;

    return nrf_drv_spi_transfer(&m_spi_master, NULL, 0, p_dest, m_current_rx_packet_length);
}
/**
 * \brief Master driver main state machine
 *        Executed only in the context of PendSV_Handler()
 *        For UML graph, please refer to SDK documentation
 *
 * Protocol summary (as visible in this module): every payload is preceded by
 * a 2-byte length header. A zero-length header sent by the master tells the
 * slave it may transmit; the slave signals pending data via the request line
 * and readiness via the ready line (both latched by the GPIOTE handlers).
 * Whenever the slave is not ready at a decision point, m_wait_for_ready_flag
 * defers the next step until the RDY event arrives.
 */
static void ser_phy_switch_state(ser_phy_event_source_t evt_src)
{
    uint32_t err_code = NRF_SUCCESS;
    static bool m_wait_for_ready_flag = false; //local scheduling flag to defer RDY events

    switch (m_spi_master_state)
    {
        /* Idle: a slave request starts reception, a TX API call starts transmission. */
        case SER_PHY_STATE_IDLE:

            if (evt_src == SER_PHY_EVT_GPIO_REQ)
            {
                m_wait_for_ready_flag = false;

                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                    err_code = header_send(0);
                }
                else
                {
                    m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
                }
            }
            else if (evt_src == SER_PHY_EVT_TX_API_CALL)
            {
                spi_master_raw_assert(mp_tx_buffer != NULL); //api event with tx_buffer == NULL has no sense
                m_wait_for_ready_flag = false;

                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_TX_HEADER;
                    err_code = header_send(m_tx_buf_len);
                }
                else
                {
                    m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
                }
            }
            break;

        /* Slave became ready: send the deferred TX length header. */
        case SER_PHY_STATE_TX_WAIT_FOR_RDY:

            if (evt_src == SER_PHY_EVT_GPIO_RDY)
            {
                m_spi_master_state = SER_PHY_STATE_TX_HEADER;
                err_code = header_send(m_tx_buf_len);
            }
            break;

        /* Slave became ready: send the deferred zero header to start RX. */
        case SER_PHY_STATE_RX_WAIT_FOR_RDY:

            if (evt_src == SER_PHY_EVT_GPIO_RDY)
            {
                m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                err_code = header_send(0);
            }
            break;

        /* TX header went out: start sending payload frames once the slave is ready. */
        case SER_PHY_STATE_TX_HEADER:

            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                m_tx_packet_length = m_tx_buf_len;
                m_accumulated_tx_packet_length = 0;

                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
                    err_code = frame_send();
                }
                else
                {
                    m_wait_for_ready_flag = true;
                }
            }
            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag)
            {
                m_wait_for_ready_flag = false;
                m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
                err_code = frame_send();
            }
            break;

        /* Payload frames in flight: keep sending until the whole packet is out,
         * then report completion and decide what to do next. */
        case SER_PHY_STATE_TX_PAYLOAD:

            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                if (m_accumulated_tx_packet_length < m_tx_packet_length)
                {
                    if (m_slave_ready_flag)
                    {
                        err_code = frame_send();
                    }
                    else
                    {
                        m_wait_for_ready_flag = true;
                    }
                }
                else
                {
                    spi_master_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
                    buffer_release(&mp_tx_buffer, &m_tx_buf_len);
                    callback_packet_sent();

                    /* If the slave has pending data, go straight into reception. */
                    if ( m_slave_request_flag)
                    {
                        if (m_slave_ready_flag)
                        {
                            m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                            err_code = header_send(0);
                        }
                        else
                        {
                            m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
                        }
                    }
                    else
                    {
                        m_spi_master_state = SER_PHY_STATE_IDLE; //m_Tx_buffer is NULL - have to wait for API event
                    }
                }
            }
            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag )
            {
                m_wait_for_ready_flag = false;
                err_code = frame_send();
            }
            break;

        /* Zero header went out: clock in the slave's length header when ready. */
        case SER_PHY_STATE_TX_ZERO_HEADER:

            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_RX_HEADER;
                    err_code = header_get();
                }
                else
                {
                    m_wait_for_ready_flag = true;
                }
            }
            else if ( (evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag)
            {
                m_wait_for_ready_flag = false;
                m_spi_master_state = SER_PHY_STATE_RX_HEADER;
                err_code = header_get();
            }
            break;

        /* Length header received: ask the upper layer for an RX buffer. */
        case SER_PHY_STATE_RX_HEADER:

            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                m_spi_master_state = SER_PHY_STATE_MEMORY_REQUEST;
                m_rx_buf_len = uint16_decode(m_header_buffer);
                m_rx_packet_length = m_rx_buf_len;
                callback_mem_request();
            }
            break;

        /* Upper layer answered with a buffer (or NULL): start receiving payload. */
        case SER_PHY_STATE_MEMORY_REQUEST:

            if (evt_src == SER_PHY_EVT_RX_API_CALL)
            {
                m_accumulated_rx_packet_length = 0;

                if (m_slave_ready_flag)
                {
                    m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
                    err_code = frame_get();
                }
                else
                {
                    m_wait_for_ready_flag = true;
                }
            }
            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag)
            {
                m_wait_for_ready_flag = false;
                m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
                err_code = frame_get();
            }
            break;

        /* Payload frames incoming: keep receiving until the packet is complete,
         * report it up (received or dropped), then pick the next transfer. */
        case SER_PHY_STATE_RX_PAYLOAD:

            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
            {
                m_accumulated_rx_packet_length += m_current_rx_packet_length;

                if (m_accumulated_rx_packet_length < m_rx_packet_length)
                {
                    if (m_slave_ready_flag)
                    {
                        err_code = frame_get();
                    }
                    else
                    {
                        m_wait_for_ready_flag = true;
                    }
                }
                else
                {
                    spi_master_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);

                    if (mp_rx_buffer == NULL)
                    {
                        callback_packet_dropped();
                    }
                    else
                    {
                        callback_packet_received();
                    }
                    buffer_release(&mp_rx_buffer, &m_rx_buf_len);

                    if (mp_tx_buffer != NULL) //mp_tx_buffer !=NULL, this means that API_EVT was scheduled
                    {
                        if (m_slave_ready_flag )
                        {
                            err_code = header_send(m_tx_buf_len);
                            m_spi_master_state = SER_PHY_STATE_TX_HEADER;
                        }
                        else
                        {
                            m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
                        }
                    }
                    else if (m_slave_request_flag)
                    {
                        if (m_slave_ready_flag)
                        {
                            m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
                            err_code = header_send(0);
                        }
                        else
                        {
                            m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
                        }
                    }
                    else
                    {
                        m_spi_master_state = SER_PHY_STATE_IDLE;
                    }
                }
            }
            else if ( evt_src == SER_PHY_EVT_GPIO_RDY && m_wait_for_ready_flag)
            {
                m_wait_for_ready_flag = false;
                err_code = frame_get();
            }
            break;

        default:
            break;
    }

    /* NOTE(review): driver errors are deliberately swallowed here - confirm
     * this best-effort policy is intended before changing it. */
    if (err_code != NRF_SUCCESS)
    {
        (void)err_code;
    }
}
/* SPI master driver callback: on a completed transfer, pend an XFER_DONE
 * event so the state machine runs in the software IRQ context. */
static void ser_phy_spi_master_event_handler(nrf_drv_spi_evt_t const * p_event,
                                             void * p_context)
{
    if (p_event->type == NRF_DRV_SPI_EVENT_DONE)
    {
        m_pend_xfer_flag = true;
        SET_Pend_SW_IRQ();
    }
}
/* Configure and enable the software IRQ used to run the state machine. */
static void ser_phy_init_PendSV(void)
{
    NVIC_SetPriority(SW_IRQn, APP_IRQ_PRIORITY_MID);
    NVIC_EnableIRQ(SW_IRQn);
}
/* Set up GPIOTE sensing on the slave request/ready lines and latch their
 * initial states. Returns the first nrf_drv_gpiote_in_init() error, if any. */
static ret_code_t ser_phy_init_gpiote(void)
{
    if (!nrf_drv_gpiote_is_init())
    {
        (void)nrf_drv_gpiote_init();
    }
    NVIC_SetPriority(GPIOTE_IRQn, APP_IRQ_PRIORITY_HIGH);

    /* Toggle sensing: the handlers read the pin level to tell which edge fired. */
    nrf_drv_gpiote_in_config_t config = GPIOTE_CONFIG_IN_SENSE_TOGGLE(true);
    /* Enable pullup to ensure high state while connectivity device is reset */
    config.pull = NRF_GPIO_PIN_PULLUP;

    ret_code_t err_code = nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST,
                                                 &config, ser_phy_spi_master_request);
    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }
    nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST,true);

    err_code = nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_READY,
                                      &config, ser_phy_spi_master_ready);
    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }
    nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_READY,true);

    /* Sample the current line levels (active low) so no edge is missed. */
    m_slave_request_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST));
    m_slave_ready_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_READY));

    NVIC_ClearPendingIRQ(SW_IRQn);

    return NRF_SUCCESS;
}
/* Release the GPIOTE channels used for the slave handshake lines. */
static void ser_phy_deinit_gpiote(void)
{
    nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST);
    nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_READY);
}
/* ser_phy API function.
 * Queue one packet for transmission. The buffer stays owned by the caller
 * until SER_PHY_EVT_TX_PKT_SENT is delivered; actual transmission is driven
 * from the software IRQ. Returns NRF_ERROR_NULL / NRF_ERROR_INVALID_PARAM on
 * bad arguments and NRF_ERROR_BUSY while a previous packet is pending. */
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
    if (p_buffer == NULL)
    {
        return NRF_ERROR_NULL;
    }
    if (num_of_bytes == 0)
    {
        return NRF_ERROR_INVALID_PARAM;
    }
    if (mp_tx_buffer != NULL)
    {
        return NRF_ERROR_BUSY;
    }

    CRITICAL_REGION_ENTER();
    mp_tx_buffer       = (uint8_t *)p_buffer;
    m_tx_buf_len       = num_of_bytes;
    m_pend_tx_api_flag = true;
    SET_Pend_SW_IRQ();
    CRITICAL_REGION_EXIT();

    return NRF_SUCCESS;
}
/* ser_phy API function.
 * Provide the RX buffer requested via SER_PHY_EVT_RX_BUF_REQUEST (NULL drops
 * the packet). Valid only while the driver waits in the memory-request state. */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    if (m_spi_master_state != SER_PHY_STATE_MEMORY_REQUEST)
    {
        return NRF_ERROR_INVALID_STATE;
    }

    CRITICAL_REGION_ENTER();
    mp_rx_buffer       = p_buffer;
    m_pend_rx_api_flag = true;
    SET_Pend_SW_IRQ();
    CRITICAL_REGION_EXIT();

    return NRF_SUCCESS;
}
/* ser_phy API function */
/* Open the SPI master PHY: register the upper-layer event handler, configure
 * and initialize the SPI master driver, the handshake GPIOTE lines and the
 * software IRQ. Returns NRF_ERROR_INVALID_STATE if already open and
 * NRF_ERROR_NULL for a missing handler. */
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    if (m_spi_master_state != SER_PHY_STATE_DISABLED)
    {
        return NRF_ERROR_INVALID_STATE;
    }

    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }

    uint32_t err_code = NRF_SUCCESS;

    m_spi_master_state = SER_PHY_STATE_IDLE;
    m_callback_events_handler = events_handler;

    nrf_drv_spi_config_t spi_master_config = {
        .sck_pin      = SER_PHY_SPI_MASTER_PIN_SCK,
        .mosi_pin     = SER_PHY_SPI_MASTER_PIN_MOSI,
        .miso_pin     = SER_PHY_SPI_MASTER_PIN_MISO,
        .ss_pin       = SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT,
        .irq_priority = APP_IRQ_PRIORITY_MID,
        .orc          = 0,
        .frequency    = SER_PHY_SPI_FREQUENCY,
        .mode         = NRF_DRV_SPI_MODE_0,
        .bit_order    = NRF_DRV_SPI_BIT_ORDER_LSB_FIRST,
    };
    err_code = nrf_drv_spi_init(&m_spi_master,
                                &spi_master_config,
                                ser_phy_spi_master_event_handler,
                                NULL);
    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }

    err_code = ser_phy_init_gpiote();
    ser_phy_init_PendSV();

    return err_code;
}
/* ser_phy API function */
/* Close the SPI master PHY: drop all state and buffers, release GPIOTE
 * channels and uninitialize the SPI master driver. */
void ser_phy_close(void)
{
    m_spi_master_state = SER_PHY_STATE_DISABLED;

    m_callback_events_handler = NULL;

    buffer_release(&mp_tx_buffer, &m_tx_buf_len);
    buffer_release(&mp_rx_buffer, &m_rx_buf_len);

    m_tx_packet_length             = 0;
    m_accumulated_tx_packet_length = 0;
    m_current_tx_packet_length     = 0;

    m_rx_packet_length             = 0;
    m_accumulated_rx_packet_length = 0;
    m_current_rx_packet_length     = 0;

    ser_phy_deinit_gpiote();
    nrf_drv_spi_uninit(&m_spi_master);
}
/* ser_phy API function */
/* only PendSV may interact with ser_phy layer, other interrupts are internal */
/* Re-enable delivery of ser_phy events (the software IRQ). */
void ser_phy_interrupts_enable(void)
{
    NVIC_EnableIRQ(SW_IRQn);
}
/* ser_phy API function */
/* Block delivery of ser_phy events by masking the software IRQ. */
void ser_phy_interrupts_disable(void)
{
    NVIC_DisableIRQ(SW_IRQn);
}
/** @} */

View File

@@ -0,0 +1,613 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
*
* @defgroup ser_phy_spi_phy_driver_slave ser_phy_nrf51_spi_slave.c
* @{
* @ingroup ser_phy_spi_phy_driver_slave
*
* @brief SPI_RAW PHY slave driver.
*/
#include <stddef.h>
#include <string.h>
#include "app_error.h"
#include "app_util.h"
#include "boards.h"
#include "nrf_gpio.h"
#include "nrf_drv_gpiote.h"
#include "nrf_soc.h"
#include "nrf_drv_spis.h"
#include "ser_config.h"
#include "ser_phy.h"
#include "ser_phy_config_conn.h"
#include "ser_phy_debug_conn.h"
#define SER_PHY_SPI_DEF_CHARACTER 0xFF //SPI default character. Character clocked out in case of an ignored transaction
#define SER_PHY_SPI_ORC_CHARACTER 0xFF //SPI over-read character. Character clocked out after an over-read of the transmit buffer

/* SPIS driver instance used by this PHY. */
static nrf_drv_spis_t m_spis = NRF_DRV_SPIS_INSTANCE(SER_PHY_SPI_SLAVE_INSTANCE);

/* Register block of the SPI slave peripheral, used below as a PPI event source. */
#ifdef NRF_SPIS0
#define SPI_SLAVE_REG NRF_SPIS0
#else
#define SPI_SLAVE_REG NRF_SPIS1
#endif
//SPI raw peripheral device configuration data
/* NOTE(review): "trasp" in the type name looks like a typo for "transp";
 * kept as-is since renaming would break users of the type. */
typedef struct
{
    int32_t pin_req;       //SPI /REQ pin. -1 for not using
    int32_t pin_rdy;       //SPI /RDY pin. -1 for not using
    int32_t ppi_rdy_ch;    //SPI /RDY ppi ready channel
    int32_t gpiote_rdy_ch; //SPI /RDY pin ready channel
} spi_slave_raw_trasp_cfg_t;
/**@brief States of the SPI transaction state machine. */
typedef enum
{
    SPI_RAW_STATE_UNKNOWN,       /* Driver not opened yet. */
    SPI_RAW_STATE_SETUP_HEADER,  /* Initial state: arm the first header reception. */
    SPI_RAW_STATE_RX_HEADER,     /* Waiting for the master's 2-byte length header. */
    SPI_RAW_STATE_MEM_REQUESTED, /* Waiting for the upper layer to supply an RX buffer. */
    SPI_RAW_STATE_RX_PAYLOAD,    /* Receiving payload frames. */
    SPI_RAW_STATE_TX_HEADER,     /* Sending our 2-byte length header. */
    SPI_RAW_STATE_TX_PAYLOAD,    /* Sending payload frames. */
} trans_state_t;
/* '_static' resolves to plain 'static'; kept as a macro so module state could
 * be exposed (e.g. for testing) by redefining it. */
#define _static static

static spi_slave_raw_trasp_cfg_t m_spi_slave_raw_config; /* Pin/PPI configuration, set in ser_phy_open(). */

/* Chunking bookkeeping: packets longer than the MTU are moved in frames. */
_static uint16_t m_accumulated_rx_packet_length; /* RX bytes already received. */
_static uint16_t m_rx_packet_length;             /* Total RX packet length (from header). */
_static uint16_t m_current_rx_frame_length;      /* Size of the RX frame in flight. */

_static uint16_t m_accumulated_tx_packet_length; /* TX bytes already sent. */
_static uint16_t m_tx_packet_length;             /* Total TX packet length. */
_static uint16_t m_current_tx_frame_length;      /* Size of the TX frame in flight. */

_static uint8_t m_header_rx_buffer[SER_PHY_HEADER_SIZE]; /* Incoming length header. */
_static uint8_t m_header_tx_buffer[SER_PHY_HEADER_SIZE]; /* Outgoing length header. */

_static uint8_t m_frame_buffer[SER_PHY_SPI_MTU_SIZE];        //trash storage
_static uint8_t m_zero_buffer[SER_PHY_SPI_MTU_SIZE] = { 0 }; //ROM'able declaration

_static uint8_t * volatile m_p_rx_buffer = NULL;       /* Upper-layer RX buffer; NULL => drop. */
_static const uint8_t * volatile m_p_tx_buffer = NULL; /* Caller-owned TX packet; NULL when idle. */

_static bool m_trash_payload_flag;   /* True when RX payload goes to the trash buffer. */
_static bool m_buffer_reqested_flag; /* True while waiting for ser_phy_rx_buf_set(). */

_static trans_state_t m_trans_state = SPI_RAW_STATE_UNKNOWN;
_static ser_phy_events_handler_t m_ser_phy_callback = NULL; /* Upper-layer event sink. */
/* Halt via APP_ERROR_CHECK_BOOL when an internal invariant is violated. */
static void spi_slave_raw_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
/* Forward an event to the registered upper-layer handler, if one is set. */
static void callback_ser_phy_event(ser_phy_evt_t event)
{
    if (m_ser_phy_callback == NULL)
    {
        return;
    }
    m_ser_phy_callback(event);
}
/* Ask the upper layer for an RX buffer of 'size' bytes. */
static void callback_memory_request(uint16_t size)
{
    DEBUG_EVT_SPI_SLAVE_PHY_BUF_REQUEST(0);

    ser_phy_evt_t event = { .evt_type = SER_PHY_EVT_RX_BUF_REQUEST };
    event.evt_params.rx_buf_request.num_of_bytes = size;
    callback_ser_phy_event(event);
}
/* Notify the upper layer that a complete packet landed in its RX buffer. */
static void callback_packet_received(uint8_t * pBuffer, uint16_t size)
{
    DEBUG_EVT_SPI_SLAVE_PHY_PKT_RECEIVED(0);

    ser_phy_evt_t event = { .evt_type = SER_PHY_EVT_RX_PKT_RECEIVED };
    event.evt_params.rx_pkt_received.num_of_bytes = size;
    event.evt_params.rx_pkt_received.p_buffer     = pBuffer;
    callback_ser_phy_event(event);
}
static void callback_packet_dropped()
{
ser_phy_evt_t event;
DEBUG_EVT_SPI_SLAVE_PHY_PKT_DROPPED(0);
event.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
callback_ser_phy_event(event);
}
static void callback_packet_transmitted(void)
{
ser_phy_evt_t event;
DEBUG_EVT_SPI_SLAVE_PHY_PKT_SENT(0);
event.evt_type = SER_PHY_EVT_TX_PKT_SENT;
callback_ser_phy_event(event);
}
/* Length of the next SPI frame: what remains of the packet, clipped to the MTU. */
static uint16_t compute_current_frame_length(const uint16_t packet_length,
                                             const uint16_t accumulated_packet_length)
{
    uint16_t const remaining = packet_length - accumulated_packet_length;
    return (remaining > SER_PHY_SPI_MTU_SIZE) ? SER_PHY_SPI_MTU_SIZE : remaining;
}
/* Arm SPIS buffers so zeros are clocked out while the master's 2-byte length
 * header is captured. */
static uint32_t header_get()
{
    return nrf_drv_spis_buffers_set(&m_spis,
                                    (uint8_t *) m_zero_buffer,
                                    SER_PHY_HEADER_SIZE,
                                    m_header_rx_buffer,
                                    SER_PHY_HEADER_SIZE);
}
static uint32_t frame_get()
{
uint32_t err_code;
m_current_rx_frame_length = compute_current_frame_length(m_rx_packet_length,
m_accumulated_rx_packet_length);
if (!m_trash_payload_flag)
{
err_code =
nrf_drv_spis_buffers_set(&m_spis,
(uint8_t *) m_zero_buffer,
m_current_rx_frame_length,
&(m_p_rx_buffer[m_accumulated_rx_packet_length]),
m_current_rx_frame_length);
}
else
{
err_code = nrf_drv_spis_buffers_set(&m_spis,
(uint8_t *) m_zero_buffer,
m_current_rx_frame_length,
m_frame_buffer,
m_current_rx_frame_length);
}
return err_code;
}
/* Encode 'len' into the TX header buffer and arm SPIS so the 2-byte header is
 * transmitted while the master's header bytes are captured into the RX header
 * buffer. Returns the driver's error code. */
static uint32_t header_send(uint16_t len)
{
    uint32_t err_code;

    (void) uint16_encode(len, m_header_tx_buffer);
    /* Fix: the RX length was previously taken from sizeof(m_header_tx_buffer);
     * size each buffer from itself so the call stays correct even if the two
     * header buffers ever diverge. Both are SER_PHY_HEADER_SIZE today, so
     * behavior is unchanged. */
    err_code =
        nrf_drv_spis_buffers_set(&m_spis,
                                 m_header_tx_buffer,
                                 sizeof (m_header_tx_buffer),
                                 m_header_rx_buffer,
                                 sizeof (m_header_rx_buffer));
    return err_code;
}
/* Arm SPIS buffers for the next TX frame; anything the master clocks in
 * during the frame goes to the trash buffer. */
static uint32_t frame_send()
{
    m_current_tx_frame_length = compute_current_frame_length(m_tx_packet_length,
                                                             m_accumulated_tx_packet_length);

    return nrf_drv_spis_buffers_set(&m_spis,
                                    (uint8_t *) &(m_p_tx_buffer[m_accumulated_tx_packet_length]),
                                    m_current_tx_frame_length,
                                    m_frame_buffer,
                                    m_current_tx_frame_length);
}
/* Drive the /RDY line via its GPIOTE task register. The task is configured as
 * a toggle; writing the task register directly fires it once. */
static void set_ready_line(void)
{
    //toggle - this should go high - but toggle is unsafe
    uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.pin_rdy);

    *(uint32_t *)rdy_task = 1;
    return;
}
/* Assert the /REQ line (active low) to tell the master we have data to send. */
static void set_request_line(void)
{
    //active low logic - set is 0
    nrf_gpio_pin_clear(m_spi_slave_raw_config.pin_req);
    DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(0);
}
/* Deassert the /REQ line (active low). */
static void clear_request_line(void)
{
    //active low logic - clear is 1
    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
    /* NOTE(review): this logs REQ_SET, same as set_request_line() - looks like
     * a copy-paste (a REQ_CLEARED debug event may have been intended); confirm. */
    DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(0);
}
/**
 * \brief Slave driver main state machine
 *        For UML graph, please refer to SDK documentation
 *
 * Driven by three event kinds: BUFFERS_SET_DONE (the SPIS driver accepted new
 * buffers - time to signal /RDY to the master), XFER_DONE (a transfer
 * finished), and the dummy NRF_DRV_SPIS_EVT_TYPE_MAX events injected from
 * ser_phy_rx_buf_set()/ser_phy_open() to force a transition from API context.
 */
static void spi_slave_event_handle(nrf_drv_spis_event_t event)
{
    uint32_t err_code = NRF_SUCCESS;
    static uint16_t packetLength; /* Length decoded from the last RX header. */

    switch (m_trans_state)
    {
        /* Initial dummy event after open: arm the first header reception. */
        case SPI_RAW_STATE_SETUP_HEADER:
            m_trans_state = SPI_RAW_STATE_RX_HEADER;
            err_code      = header_get();
            break;

        /* Waiting for the master's length header. A non-zero header starts
         * reception; a zero header is the master's permission to transmit. */
        case SPI_RAW_STATE_RX_HEADER:

            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }

            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
                spi_slave_raw_assert(event.rx_amount == SER_PHY_HEADER_SIZE);
                packetLength = uint16_decode(m_header_rx_buffer);

                if (packetLength != 0 )
                {
                    m_trans_state          = SPI_RAW_STATE_MEM_REQUESTED;
                    m_buffer_reqested_flag = true;
                    m_rx_packet_length     = packetLength;
                    callback_memory_request(packetLength);
                }
                else
                {
                    if (m_p_tx_buffer)
                    {
                        clear_request_line();
                        m_trans_state = SPI_RAW_STATE_TX_HEADER;
                        err_code      = header_send(m_tx_packet_length);
                    }
                    else
                    {
                        //there is nothing to send - zero response facilitates pooling - but perhaps, it should be assert
                        err_code = header_send(0);
                    }
                }
            }
            break;

        /* Upper layer answered (dummy event from ser_phy_rx_buf_set()):
         * start receiving payload frames. */
        case SPI_RAW_STATE_MEM_REQUESTED:

            if (event.evt_type == NRF_DRV_SPIS_EVT_TYPE_MAX) //This is API dummy event
            {
                m_buffer_reqested_flag         = false;
                m_trans_state                  = SPI_RAW_STATE_RX_PAYLOAD;
                m_accumulated_rx_packet_length = 0;
                err_code                       = frame_get();
            }
            break;

        /* Receiving payload frames until the whole packet is in, then report
         * it up (received or dropped) and re-arm header reception. */
        case SPI_RAW_STATE_RX_PAYLOAD:

            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }

            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
                spi_slave_raw_assert(event.rx_amount == m_current_rx_frame_length);
                m_accumulated_rx_packet_length += m_current_rx_frame_length;

                if (m_accumulated_rx_packet_length < m_rx_packet_length )
                {
                    err_code = frame_get();
                }
                else
                {
                    spi_slave_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
                    m_trans_state = SPI_RAW_STATE_RX_HEADER;
                    err_code      = header_get();

                    if (!m_trash_payload_flag)
                    {
                        callback_packet_received(m_p_rx_buffer, m_accumulated_rx_packet_length);
                    }
                    else
                    {
                        callback_packet_dropped();
                    }
                }
            }
            break;

        /* Our length header went out: start sending payload frames. */
        case SPI_RAW_STATE_TX_HEADER:

            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }

            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
                spi_slave_raw_assert(event.tx_amount == SER_PHY_HEADER_SIZE);
                m_trans_state                  = SPI_RAW_STATE_TX_PAYLOAD;
                m_accumulated_tx_packet_length = 0;
                err_code                       = frame_send();
            }
            break;

        /* Sending payload frames until the whole packet is out, then report
         * completion and return to header reception. */
        case SPI_RAW_STATE_TX_PAYLOAD:

            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
                set_ready_line();
            }

            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
            {
                DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
                spi_slave_raw_assert(event.tx_amount == m_current_tx_frame_length);
                m_accumulated_tx_packet_length += m_current_tx_frame_length;

                if ( m_accumulated_tx_packet_length < m_tx_packet_length )
                {
                    err_code = frame_send();
                }
                else
                {
                    spi_slave_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
                    //clear pointer before callback
                    m_p_tx_buffer = NULL;
                    callback_packet_transmitted();
                    //spi slave TX transfer is possible only when RX is ready, so return to waiting for a header
                    m_trans_state = SPI_RAW_STATE_RX_HEADER;
                    err_code      = header_get();
                }
            }
            break;

        default:
            err_code = NRF_ERROR_INVALID_STATE;
            break;
    }
    APP_ERROR_CHECK(err_code);
}
/* Configure the /RDY pin as a GPIOTE toggle task output so both software
 * (set_ready_line()) and the PPI channel can toggle it. */
static void spi_slave_gpiote_init(void)
{
    if (!nrf_drv_gpiote_is_init())
    {
        (void)nrf_drv_gpiote_init();
    }
    nrf_drv_gpiote_out_config_t config = GPIOTE_CONFIG_OUT_TASK_TOGGLE(true);

    (void) nrf_drv_gpiote_out_init(m_spi_slave_raw_config.pin_rdy, &config);
    (void) nrf_drv_gpiote_out_task_enable(m_spi_slave_raw_config.pin_rdy);
    return;
}
/* Wire the SPIS END event to the /RDY GPIOTE toggle task through PPI so the
 * line is deasserted in hardware at the end of every transfer. */
static void spi_slave_ppi_init(void)
{
    uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.pin_rdy);
    //Configure PPI channel to clear /RDY line
    NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].EEP = (uint32_t)(&SPI_SLAVE_REG->EVENTS_END);
    NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].TEP = rdy_task;

    //this works only for channels 0..15 - but soft device is using 8-15 anyway
    NRF_PPI->CHEN |= (1 << m_spi_slave_raw_config.ppi_rdy_ch);
    return;
}
/* Drive both handshake lines high (inactive, active-low logic) before
 * switching them to outputs, so no spurious assertion is seen. */
static void spi_slave_gpio_init(void)
{
    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
    nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_req);
    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_rdy);
    nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_rdy);
}
/* ser_phy API function */
/* Re-enable the SPIS peripheral interrupt (through the SoftDevice NVIC API). */
void ser_phy_interrupts_enable(void)
{
    (void)sd_nvic_EnableIRQ(nrfx_get_irq_number(m_spis.p_reg));
}
/* ser_phy API function */
/* Mask the SPIS peripheral interrupt (through the SoftDevice NVIC API). */
void ser_phy_interrupts_disable(void)
{
    (void)sd_nvic_DisableIRQ(nrfx_get_irq_number(m_spis.p_reg));
}
/* ser_phy API function.
 * Provide the RX buffer requested via SER_PHY_EVT_RX_BUF_REQUEST (NULL drops
 * the packet). Valid only while a request is outstanding; the state machine
 * is advanced by injecting a dummy driver event with interrupts masked. */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    uint32_t status = NRF_SUCCESS;

    ser_phy_interrupts_disable();
    if (m_buffer_reqested_flag && (m_trans_state == SPI_RAW_STATE_MEM_REQUESTED))
    {
        m_p_rx_buffer        = p_buffer;
        m_trash_payload_flag = (m_p_rx_buffer == NULL);

        nrf_drv_spis_event_t dummy_event;
        dummy_event.evt_type  = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition with dummy event
        dummy_event.rx_amount = 0;
        dummy_event.tx_amount = 0;
        spi_slave_event_handle(dummy_event);
    }
    else
    {
        status = NRF_ERROR_BUSY;
    }
    ser_phy_interrupts_enable();

    return status;
}
/* ser_phy API function.
 * Queue one packet for transmission and assert /REQ so the master polls us.
 * The buffer stays owned by the caller until SER_PHY_EVT_TX_PKT_SENT. */
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
    if ((p_buffer == NULL) || (num_of_bytes == 0))
    {
        /* NOTE(review): a zero length also returns NRF_ERROR_NULL here, while
         * the SPI master PHY uses NRF_ERROR_INVALID_PARAM for that case -
         * confirm the asymmetry is intended. */
        return NRF_ERROR_NULL;
    }

    uint32_t status = NRF_SUCCESS;

    ser_phy_interrupts_disable();
    if ( m_p_tx_buffer == NULL)
    {
        m_tx_packet_length = num_of_bytes;
        m_p_tx_buffer      = p_buffer;
        set_request_line();
    }
    else
    {
        status = NRF_ERROR_BUSY;
    }
    ser_phy_interrupts_enable();

    return status;
}
/* ser_phy API function */
/* Open the SPI slave PHY: configure handshake pins, GPIOTE, PPI and the SPIS
 * driver, register the upper-layer handler and kick the state machine into
 * waiting for the first header. Returns NRF_ERROR_INVALID_STATE if already
 * open, NRF_ERROR_NULL for a missing handler, or the SPIS init error. */
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    uint32_t err_code;
    nrf_drv_spis_config_t spi_slave_config;
    nrf_drv_spis_event_t event;

    if (m_trans_state != SPI_RAW_STATE_UNKNOWN)
    {
        return NRF_ERROR_INVALID_STATE;
    }

    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }

    //one ppi channel and one gpiote channel are used to drive RDY line
    m_spi_slave_raw_config.pin_req       = SER_PHY_SPI_SLAVE_REQ_PIN;
    m_spi_slave_raw_config.pin_rdy       = SER_PHY_SPI_SLAVE_RDY_PIN;
    m_spi_slave_raw_config.ppi_rdy_ch    = SER_PHY_SPI_PPI_RDY_CH;
    m_spi_slave_raw_config.gpiote_rdy_ch = SER_PHY_SPI_GPIOTE_RDY_CH;

    spi_slave_gpio_init();
    spi_slave_gpiote_init();
    spi_slave_ppi_init();

    spi_slave_config.miso_pin     = SER_PHY_SPI_SLAVE_MISO_PIN;
    spi_slave_config.mosi_pin     = SER_PHY_SPI_SLAVE_MOSI_PIN;
    spi_slave_config.sck_pin      = SER_PHY_SPI_SLAVE_SCK_PIN;
    spi_slave_config.csn_pin      = SER_PHY_SPI_SLAVE_SS_PIN;
    spi_slave_config.mode         = NRF_DRV_SPIS_MODE_0;
    spi_slave_config.bit_order    = NRF_DRV_SPIS_BIT_ORDER_LSB_FIRST;
    spi_slave_config.def          = SER_PHY_SPI_DEF_CHARACTER;
    spi_slave_config.orc          = SER_PHY_SPI_ORC_CHARACTER;
    spi_slave_config.irq_priority = APP_IRQ_PRIORITY_LOWEST;
    spi_slave_config.miso_drive   = NRF_DRV_SPIS_DEFAULT_MISO_DRIVE;

    //use /CS pullup because state of the line might be undefined when master redefines PIO lines
    spi_slave_config.csn_pullup = NRF_GPIO_PIN_PULLUP;

    //keep /CS high when init
    nrf_gpio_cfg_input(spi_slave_config.csn_pin, NRF_GPIO_PIN_PULLUP);

    err_code = nrf_drv_spis_init(&m_spis, &spi_slave_config, spi_slave_event_handle);
    APP_ERROR_CHECK(err_code);

    if (err_code == NRF_SUCCESS)
    {
        m_ser_phy_callback = events_handler;

        m_trans_state   = SPI_RAW_STATE_SETUP_HEADER;
        /* Inject a dummy event so the SETUP_HEADER state arms header reception. */
        event.evt_type  = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition for dummy event
        event.rx_amount = 0;
        event.tx_amount = 0;
        spi_slave_event_handle(event);
    }
    return err_code;
}
/* ser_phy API function */
/* Close the SPI slave PHY: uninitialize the SPIS driver and reset state. */
void ser_phy_close(void)
{
    nrf_drv_spis_uninit(&m_spis);
    m_ser_phy_callback = NULL;
    m_trans_state      = SPI_RAW_STATE_UNKNOWN;
}

View File

@@ -0,0 +1,363 @@
/**
* Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "ser_phy.h"
#include "ser_config.h"
#ifdef SER_CONNECTIVITY
#include "ser_phy_config_conn.h"
#else
#include "ser_phy_config_app.h"
#endif
#include "nrf_drv_uart.h"
#include "app_error.h"
#include "app_util.h"
#include "app_util_platform.h"
/* Maximum bytes per single UART driver transfer (driver limit). */
#define UART_TRANSFER_MAX 255

/* UART driver instance used by this PHY. */
static const nrf_drv_uart_t m_uart = NRF_DRV_UART_INSTANCE(0);

static const nrf_drv_uart_config_t m_uart_config = {
    .pseltxd            = SER_PHY_UART_TX,
    .pselrxd            = SER_PHY_UART_RX,
    .pselrts            = SER_PHY_UART_RTS,
    .pselcts            = SER_PHY_UART_CTS,
    .p_context          = NULL,
    .interrupt_priority = UART_IRQ_PRIORITY,
#if defined(NRF_DRV_UART_WITH_UARTE) && defined(NRF_DRV_UART_WITH_UART)
    .use_easy_dma       = true,
#endif
    // These values are common for application and connectivity, they are
    // defined in "ser_config.h".
    .hwfc               = SER_PHY_UART_FLOW_CTRL,
    .parity             = SER_PHY_UART_PARITY,
    .baudrate           = (nrf_uart_baudrate_t)SER_PHY_UART_BAUDRATE
};

static bool volatile m_tx_in_progress;                   /* True while a TX (header+payload) is ongoing. */
static uint8_t m_tx_header_buf[SER_PHY_HEADER_SIZE];     /* Outgoing 2-byte length header. */
static uint16_t m_bytes_to_transmit;                     /* Remaining TX payload bytes. */
static uint8_t const * mp_tx_buffer;                     /* Caller-owned TX payload. */

static uint8_t m_rx_header_buf[SER_PHY_HEADER_SIZE];     /* Incoming 2-byte length header. */
static uint16_t m_bytes_to_receive;                      /* Remaining RX payload bytes. */
static uint8_t m_rx_drop_buf[1];                         /* One-byte sink used to drain dropped packets. */

static ser_phy_events_handler_t m_ser_phy_event_handler; /* Upper-layer event sink. */
/* Shared RX event: buffer_request_callback() fills it, packet_received_callback()
 * re-sends it once the payload is in. */
static ser_phy_evt_t m_ser_phy_rx_event;
static void packet_sent_callback(void)
{
static ser_phy_evt_t const event = {
.evt_type = SER_PHY_EVT_TX_PKT_SENT,
};
m_ser_phy_event_handler(event);
}
/**@brief Ask the upper layer for an RX buffer of @p num_of_bytes bytes.
 *
 * Fills the shared RX event structure; ser_phy_rx_buf_set() later checks that
 * evt_type is still SER_PHY_EVT_RX_BUF_REQUEST.
 */
static void buffer_request_callback(uint16_t num_of_bytes)
{
    m_ser_phy_rx_event.evt_params.rx_buf_request.num_of_bytes = num_of_bytes;
    m_ser_phy_rx_event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;

    m_ser_phy_event_handler(m_ser_phy_rx_event);
}
/**@brief Report a completely received packet to the upper layer.
 *
 * The shared RX event was already filled in by ser_phy_rx_buf_set()
 * (evt_type = SER_PHY_EVT_RX_PKT_RECEIVED, buffer pointer and length).
 */
static void packet_received_callback(void)
{
    m_ser_phy_event_handler(m_ser_phy_rx_event);
}
static void packet_dropped_callback(void)
{
static ser_phy_evt_t const event = {
.evt_type = SER_PHY_EVT_RX_PKT_DROPPED,
};
m_ser_phy_event_handler(event);
}
static void hardware_error_callback(uint32_t hw_error)
{
ser_phy_evt_t event = {
.evt_type = SER_PHY_EVT_HW_ERROR,
.evt_params.hw_error.error_code = hw_error,
};
m_ser_phy_event_handler(event);
}
/**@brief Start reception of the next packet by reading its length header. */
static void packet_rx_start(void)
{
    uint32_t err_code = nrf_drv_uart_rx(&m_uart, m_rx_header_buf,
                                        SER_PHY_HEADER_SIZE);
    APP_ERROR_CHECK(err_code);
}
/**@brief Receive (and discard) one byte of a packet into the drop buffer. */
static void packet_byte_drop(void)
{
    uint32_t err_code = nrf_drv_uart_rx(&m_uart, m_rx_drop_buf, 1);
    APP_ERROR_CHECK(err_code);
}
/**@brief UART driver event handler (called in interrupt context).
 *
 * Implements the PHY state machine:
 * - ERROR:   forward parity/overrun errors upward, then restart header RX.
 * - TX_DONE: after the 2-byte header, send the payload (in chunks of at most
 *            UART_TRANSFER_MAX bytes when the packet can exceed one transfer);
 *            after the last payload chunk, clear the busy flag and notify.
 * - RX_DONE: after the header, decode the payload length and request a buffer;
 *            while dropping, discard byte by byte; otherwise receive payload
 *            chunks until done, then notify and restart header RX.
 *
 * @param[in] p_event   UART driver event.
 * @param[in] p_context Unused (registered as NULL).
 */
static void uart_event_handler(nrf_drv_uart_event_t * p_event,
                               void * p_context)
{
    (void)p_context;

    switch (p_event->type)
    {
        case NRF_DRV_UART_EVT_ERROR:
            // Process the error only if this is a parity or overrun error.
            // Break and framing errors will always occur before the other
            // side becomes active.
            if (p_event->data.error.error_mask &
                (NRF_UART_ERROR_PARITY_MASK | NRF_UART_ERROR_OVERRUN_MASK))
            {
                // Pass error source to upper layer.
                hardware_error_callback(p_event->data.error.error_mask);
            }
            // Resynchronize: wait for the next packet header.
            packet_rx_start();
            break;

        case NRF_DRV_UART_EVT_TX_DONE:
            if (p_event->data.rxtx.p_data == m_tx_header_buf)
            {
                // Header finished - start transmitting the payload.
#if (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
                // Payload may not fit in one driver transfer; send the first
                // chunk and continue from the payload TX_DONE branch below.
                if (m_bytes_to_transmit > UART_TRANSFER_MAX)
                {
                    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buffer,
                        UART_TRANSFER_MAX));
                }
                else
#endif // (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
                {
                    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buffer,
                        m_bytes_to_transmit));
                }
            }
            else
            {
                // A payload chunk finished.
#if (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
                ASSERT(p_event->data.rxtx.bytes <= m_bytes_to_transmit);
                m_bytes_to_transmit -= p_event->data.rxtx.bytes;
                if (m_bytes_to_transmit != 0)
                {
                    // More payload left - send the next chunk, continuing
                    // right after the bytes that just went out.
                    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart,
                        p_event->data.rxtx.p_data + p_event->data.rxtx.bytes,
                        m_bytes_to_transmit < UART_TRANSFER_MAX ?
                            m_bytes_to_transmit : UART_TRANSFER_MAX));
                }
                else
#endif // (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
                {
                    // Whole packet sent - release the TX path and notify.
                    m_tx_in_progress = false;
                    packet_sent_callback();
                }
            }
            break;

        case NRF_DRV_UART_EVT_RX_DONE:
            if (p_event->data.rxtx.p_data == m_rx_header_buf)
            {
                // Header received - decode the little-endian payload length
                // and ask the upper layer for a buffer of that size.
                m_bytes_to_receive = uint16_decode(m_rx_header_buf);
                buffer_request_callback(m_bytes_to_receive);
            }
            else if (p_event->data.rxtx.p_data == m_rx_drop_buf)
            {
                // Dropping mode (no buffer was provided): one byte discarded.
                --m_bytes_to_receive;
                if (m_bytes_to_receive != 0)
                {
                    packet_byte_drop();
                }
                else
                {
                    // Entire packet drained - report the drop and rearm RX.
                    packet_dropped_callback();
                    packet_rx_start();
                }
            }
            else
            {
                // A payload chunk landed in the caller-provided buffer.
#if (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
                ASSERT(p_event->data.rxtx.bytes <= m_bytes_to_receive);
                m_bytes_to_receive -= p_event->data.rxtx.bytes;
                if (m_bytes_to_receive != 0)
                {
                    // More payload expected - receive the next chunk.
                    APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart,
                        p_event->data.rxtx.p_data + p_event->data.rxtx.bytes,
                        m_bytes_to_receive < UART_TRANSFER_MAX ?
                            m_bytes_to_receive : UART_TRANSFER_MAX));
                }
                else
#endif // (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
                {
                    // Whole packet received - notify and rearm header RX.
                    packet_received_callback();
                    packet_rx_start();
                }
            }
            break;

        default:
            // No other driver events are expected with this configuration.
            APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
    }
}
/** API FUNCTIONS */
/**@brief Open the UART serialization PHY.
 *
 * Initializes the UART driver, registers the upper-layer event handler and
 * starts reception of the first packet header.
 *
 * @param[in] events_handler Handler for PHY events. Must not be NULL.
 *
 * @retval NRF_SUCCESS             PHY opened.
 * @retval NRF_ERROR_NULL          @p events_handler is NULL.
 * @retval NRF_ERROR_INVALID_STATE PHY already opened.
 * @retval NRF_ERROR_INVALID_PARAM UART driver initialization failed.
 */
uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
{
    if (events_handler == NULL)
    {
        return NRF_ERROR_NULL;
    }

    // A non-NULL handler means the PHY was already opened.
    if (m_ser_phy_event_handler != NULL)
    {
        return NRF_ERROR_INVALID_STATE;
    }

    // Any driver failure is reported to the caller as an invalid parameter.
    if (nrf_drv_uart_init(&m_uart, &m_uart_config, uart_event_handler)
        != NRF_SUCCESS)
    {
        return NRF_ERROR_INVALID_PARAM;
    }

    m_ser_phy_event_handler = events_handler;
    packet_rx_start();

    return NRF_SUCCESS;
}
uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
{
if (p_buffer == NULL)
{
return NRF_ERROR_NULL;
}
else if (num_of_bytes == 0)
{
return NRF_ERROR_INVALID_PARAM;
}
bool busy;
CRITICAL_REGION_ENTER();
busy = m_tx_in_progress;
m_tx_in_progress = true;
CRITICAL_REGION_EXIT();
if (busy)
{
return NRF_ERROR_BUSY;
}
(void)uint16_encode(num_of_bytes, m_tx_header_buf);
mp_tx_buffer = p_buffer;
m_bytes_to_transmit = num_of_bytes;
APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, m_tx_header_buf,
SER_PHY_HEADER_SIZE));
return NRF_SUCCESS;
}
/**@brief Provide (or refuse) a buffer for the packet announced by
 *        SER_PHY_EVT_RX_BUF_REQUEST.
 *
 * @param[in] p_buffer Buffer for the incoming payload, or NULL to drop the
 *                     packet byte by byte.
 *
 * @retval NRF_SUCCESS             Reception (or dropping) started.
 * @retval NRF_ERROR_INVALID_STATE No buffer request is pending.
 */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    // Only valid as the response to a pending buffer request.
    if (m_ser_phy_rx_event.evt_type != SER_PHY_EVT_RX_BUF_REQUEST)
    {
        return NRF_ERROR_INVALID_STATE;
    }

    // Pre-fill the event that packet_received_callback() will deliver once
    // the whole payload has arrived.
    m_ser_phy_rx_event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
    m_ser_phy_rx_event.evt_params.rx_pkt_received.p_buffer = p_buffer;
    m_ser_phy_rx_event.evt_params.rx_pkt_received.num_of_bytes =
        m_bytes_to_receive;

    // If there is not enough memory to receive the packet (no buffer was
    // provided), drop its data byte by byte (using an internal 1-byte buffer).
    if (p_buffer == NULL)
    {
        packet_byte_drop();
    }
#if (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
    // Payload may exceed one driver transfer; receive the first chunk and
    // continue from the RX_DONE branch of the UART event handler.
    else if (m_bytes_to_receive > UART_TRANSFER_MAX)
    {
        APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, p_buffer, UART_TRANSFER_MAX));
    }
#endif // (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
    else
    {
        APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, p_buffer, m_bytes_to_receive));
    }

    return NRF_SUCCESS;
}
/**@brief Close the UART serialization PHY.
 *
 * Uninitializes the UART driver first (stopping further events), then clears
 * the handler so the PHY can be opened again.
 */
void ser_phy_close(void)
{
    nrf_drv_uart_uninit(&m_uart);
    m_ser_phy_event_handler = NULL;
}
/**@brief Enable the interrupt of the UART peripheral used by this PHY. */
void ser_phy_interrupts_enable(void)
{
#if defined(NRF_DRV_UART_WITH_UARTE)
    NVIC_EnableIRQ(nrfx_get_irq_number(m_uart.uarte.p_reg));
#else
    NVIC_EnableIRQ(nrfx_get_irq_number(m_uart.uart.p_reg));
#endif
}
/**@brief Disable the interrupt of the UART peripheral used by this PHY. */
void ser_phy_interrupts_disable(void)
{
#if defined(NRF_DRV_UART_WITH_UARTE)
    NVIC_DisableIRQ(nrfx_get_irq_number(m_uart.uarte.p_reg));
#else
    NVIC_DisableIRQ(nrfx_get_irq_number(m_uart.uart.p_reg));
#endif
}