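/*
 * ENET (Ethernet MAC) driver implementation: MAC and buffer-descriptor ring
 * configuration, MDIO/SMI management access, and the frame transmit/receive
 * paths.
 */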
0009 #include "fsl_enet.h"
0010 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
0011 #include "fsl_cache.h"
0012 #endif
0013
0014
0015
0016
0017
0018
0019 #ifndef FSL_COMPONENT_ID
0020 #define FSL_COMPONENT_ID "platform.drivers.enet"
0021 #endif
0022
0023
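/* Driver-internal constants: MAC address length in bytes, target MDC frequency (2.5 MHz), and nanoseconds per second. */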
0024 #define ENET_FRAME_MACLEN 6U
0025
0026 #define ENET_MDC_FREQUENCY 2500000U
0027
0028 #define ENET_NANOSECOND_ONE_SECOND 1000000000U
0029
0030
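/* Buffer descriptor ring (queue) indexes. */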
0031 enum
0032 {
0033 kENET_Ring0 = 0U,
0034 #if FSL_FEATURE_ENET_QUEUE > 1
0035 kENET_Ring1 = 1U,
0036 kENET_Ring2 = 2U
0037 #endif
0038 };
0039
0040
0041
0042
0043
0044
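/* ENET module clock gate(s) for each instance. */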
0045 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
0046 const clock_ip_name_t s_enetClock[] = ENET_CLOCKS;
0047 #if defined(FSL_FEATURE_ENET_HAS_EXTRA_CLOCK_GATE) && FSL_FEATURE_ENET_HAS_EXTRA_CLOCK_GATE
0048 const clock_ip_name_t s_enetExtraClock[] = ENET_EXTRA_CLOCKS;
0049 #endif
0050 #endif
0051
0052
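/* Per-instance IRQ numbers for transmit, receive, error, and (in enhanced buffer descriptor mode) timestamp/1588-timer interrupts. */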
0053 static const IRQn_Type s_enetTxIrqId[] = ENET_Transmit_IRQS;
0054
0055 static const IRQn_Type s_enetRxIrqId[] = ENET_Receive_IRQS;
0056 #if defined(ENET_ENHANCEDBUFFERDESCRIPTOR_MODE) && ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0057
0058 static const IRQn_Type s_enetTsIrqId[] = ENET_Ts_IRQS;
0059
0060 static const IRQn_Type s_enet1588TimerIrqId[] = ENET_1588_Timer_IRQS;
0061 #endif
0062
0063 static const IRQn_Type s_enetErrIrqId[] = ENET_Error_IRQS;
0064
0065
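/* Peripheral base addresses, registered driver handles, and installed ISR callbacks for each ENET instance. */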
0066 static ENET_Type *const s_enetBases[] = ENET_BASE_PTRS;
0067
0068
0069 static enet_handle_t *s_ENETHandle[ARRAY_SIZE(s_enetBases)];
0070
0071
0072 #if FSL_FEATURE_ENET_QUEUE > 1
0073 static enet_isr_ring_t s_enetTxIsr[ARRAY_SIZE(s_enetBases)];
0074 static enet_isr_ring_t s_enetRxIsr[ARRAY_SIZE(s_enetBases)];
0075 #else
0076 static enet_isr_t s_enetTxIsr[ARRAY_SIZE(s_enetBases)];
0077 static enet_isr_t s_enetRxIsr[ARRAY_SIZE(s_enetBases)];
0078 #endif
0079 static enet_isr_t s_enetErrIsr[ARRAY_SIZE(s_enetBases)];
0080 static enet_isr_t s_enetTsIsr[ARRAY_SIZE(s_enetBases)];
0081 static enet_isr_t s_enet1588TimerIsr[ARRAY_SIZE(s_enetBases)];
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
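/* Prototypes of the driver-internal helper functions. */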
0097 static void ENET_SetMacController(ENET_Type *base,
0098 enet_handle_t *handle,
0099 const enet_config_t *config,
0100 const enet_buffer_config_t *bufferConfig,
0101 uint8_t *macAddr,
0102 uint32_t srcClock_Hz);
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112 static void ENET_SetHandler(ENET_Type *base,
0113 enet_handle_t *handle,
0114 const enet_config_t *config,
0115 const enet_buffer_config_t *bufferConfig,
0116 uint32_t srcClock_Hz);
0117
0118
0119
0120
0121
0122
0123
0124
0125 static void ENET_SetTxBufferDescriptors(enet_handle_t *handle,
0126 const enet_config_t *config,
0127 const enet_buffer_config_t *bufferConfig);
0128
0129
0130
0131
0132
0133
0134
0135
0136 static void ENET_SetRxBufferDescriptors(enet_handle_t *handle,
0137 const enet_config_t *config,
0138 const enet_buffer_config_t *bufferConfig);
0139
0140
0141
0142
0143
0144
0145
0146
0147 static void ENET_UpdateReadBuffers(ENET_Type *base, enet_handle_t *handle, uint8_t ringId);
0148
0149
0150
0151
0152 static uint16_t ENET_IncreaseIndex(uint16_t index, uint16_t max);
0153
0154
0155
0156
0157 static status_t ENET_RxBufferAllocAll(ENET_Type *base, enet_handle_t *handle);
0158
0159
0160
0161
0162 static void ENET_RxBufferFreeAll(ENET_Type *base, enet_handle_t *handle);
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
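/* Returns the instance index for the given ENET peripheral base address. */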
0174 uint32_t ENET_GetInstance(ENET_Type *base)
0175 {
0176 uint32_t instance;
0177
0178
0179 for (instance = 0; instance < ARRAY_SIZE(s_enetBases); instance++)
0180 {
0181 if (s_enetBases[instance] == base)
0182 {
0183 break;
0184 }
0185 }
0186
0187 assert(instance < ARRAY_SIZE(s_enetBases));
0188
0189 return instance;
0190 }
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
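/*
 * Fills a configuration structure with defaults: RMII (or RGMII on AVB-capable
 * instances), 100 Mbit/s full duplex, a single ring, and the maximum receive
 * frame length.
 */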
0206 void ENET_GetDefaultConfig(enet_config_t *config)
0207 {
0208
0209 assert(config != NULL);
0210
0211
0212 (void)memset(config, 0, sizeof(enet_config_t));
0213
0214
0215 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
0216 config->miiMode = kENET_RgmiiMode;
0217 #else
0218 config->miiMode = kENET_RmiiMode;
0219 #endif
0220 config->miiSpeed = kENET_MiiSpeed100M;
0221 config->miiDuplex = kENET_MiiFullDuplex;
0222
0223 config->ringNum = 1;
0224
0225
0226 config->rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
0227 }
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
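/*
 * Initializes the handle, buffer descriptors, and MAC controller without
 * touching the module clock or issuing a reset; also allocates receive buffers
 * when an allocator callback is provided in the configuration.
 */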
0257 status_t ENET_Up(ENET_Type *base,
0258 enet_handle_t *handle,
0259 const enet_config_t *config,
0260 const enet_buffer_config_t *bufferConfig,
0261 uint8_t *macAddr,
0262 uint32_t srcClock_Hz)
0263 {
0264
0265 assert(handle != NULL);
0266 assert(config != NULL);
0267 assert(bufferConfig != NULL);
0268 assert(macAddr != NULL);
0269 assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
0270 assert(config->ringNum <= (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
0271
0272 status_t result = kStatus_Success;
0273
0274
0275 ENET_SetTxBufferDescriptors(handle, config, bufferConfig);
0276
0277
0278 ENET_SetRxBufferDescriptors(handle, config, bufferConfig);
0279
0280
0281 ENET_SetMacController(base, handle, config, bufferConfig, macAddr, srcClock_Hz);
0282
0283
0284 ENET_SetHandler(base, handle, config, bufferConfig, srcClock_Hz);
0285
0286
0287 if (handle->rxBuffAlloc != NULL)
0288 {
0289 result = ENET_RxBufferAllocAll(base, handle);
0290 }
0291
0292 return result;
0293 }
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
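/* Ungates the module clock(s), resets the ENET module, and performs the full initialization via ENET_Up(). */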
0323 status_t ENET_Init(ENET_Type *base,
0324 enet_handle_t *handle,
0325 const enet_config_t *config,
0326 const enet_buffer_config_t *bufferConfig,
0327 uint8_t *macAddr,
0328 uint32_t srcClock_Hz)
0329 {
0330 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
0331 uint32_t instance = ENET_GetInstance(base);
0332
0333
0334 (void)CLOCK_EnableClock(s_enetClock[instance]);
0335
0336 #if defined(FSL_FEATURE_ENET_HAS_EXTRA_CLOCK_GATE) && FSL_FEATURE_ENET_HAS_EXTRA_CLOCK_GATE
0337
0338 (void)CLOCK_EnableClock(s_enetExtraClock[instance]);
0339 #endif
0340 #endif
0341
0342 ENET_Reset(base);
0343
0344 return ENET_Up(base, handle, config, bufferConfig, macAddr, srcClock_Hz);
0345 }
0346
0347
0348
0349
0350
0351
0352
0353
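/* Disables all ENET interrupts and the MAC, and frees receive buffers if a free callback was registered. */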
0354 void ENET_Down(ENET_Type *base)
0355 {
0356 uint32_t instance = ENET_GetInstance(base);
0357 enet_handle_t *handle = s_ENETHandle[instance];
0358
0359
0360 base->EIMR = 0;
0361
0362
0363 base->ECR &= ~ENET_ECR_ETHEREN_MASK;
0364
0365 if (handle->rxBuffFree != NULL)
0366 {
0367 ENET_RxBufferFreeAll(base, handle);
0368 }
0369 }
0370
0371
0372
0373
0374
0375
0376
0377
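/* Brings the module down and gates its clock(s). */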
0378 void ENET_Deinit(ENET_Type *base)
0379 {
0380 ENET_Down(base);
0381
0382 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
0383
0384 (void)CLOCK_DisableClock(s_enetClock[ENET_GetInstance(base)]);
0385
0386 #if defined(FSL_FEATURE_ENET_HAS_EXTRA_CLOCK_GATE) && FSL_FEATURE_ENET_HAS_EXTRA_CLOCK_GATE
0387
0388 (void)CLOCK_DisableClock(s_enetExtraClock[ENET_GetInstance(base)]);
0389 #endif
0390 #endif
0391 }
0392
0393
0394
0395
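/* Registers the user callback and callback data in the handle. */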
0396 void ENET_SetCallback(enet_handle_t *handle, enet_callback_t callback, void *userData)
0397 {
0398 assert(handle != NULL);
0399
0400
0401 handle->callback = callback;
0402 handle->userData = userData;
0403 }
0404
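/* Install user ISR handlers for the corresponding events and enable their IRQs; a ring-aware handler signature is used when the device supports multiple queues. */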
0405 #if FSL_FEATURE_ENET_QUEUE > 1
0406 void ENET_SetRxISRHandler(ENET_Type *base, enet_isr_ring_t ISRHandler)
0407 {
0408 uint32_t instance = ENET_GetInstance(base);
0409
0410 s_enetRxIsr[instance] = ISRHandler;
0411 (void)EnableIRQ(s_enetRxIrqId[instance]);
0412 }
0413
0414 void ENET_SetTxISRHandler(ENET_Type *base, enet_isr_ring_t ISRHandler)
0415 {
0416 uint32_t instance = ENET_GetInstance(base);
0417
0418 s_enetTxIsr[instance] = ISRHandler;
0419 (void)EnableIRQ(s_enetTxIrqId[instance]);
0420 }
0421 #else
0422 void ENET_SetRxISRHandler(ENET_Type *base, enet_isr_t ISRHandler)
0423 {
0424 uint32_t instance = ENET_GetInstance(base);
0425
0426 s_enetRxIsr[instance] = ISRHandler;
0427 (void)EnableIRQ(s_enetRxIrqId[instance]);
0428 }
0429
0430 void ENET_SetTxISRHandler(ENET_Type *base, enet_isr_t ISRHandler)
0431 {
0432 uint32_t instance = ENET_GetInstance(base);
0433
0434 s_enetTxIsr[instance] = ISRHandler;
0435 (void)EnableIRQ(s_enetTxIrqId[instance]);
0436 }
0437 #endif
0438
0439 void ENET_SetErrISRHandler(ENET_Type *base, enet_isr_t ISRHandler)
0440 {
0441 uint32_t instance = ENET_GetInstance(base);
0442
0443 s_enetErrIsr[instance] = ISRHandler;
0444 (void)EnableIRQ(s_enetErrIrqId[instance]);
0445 }
0446
0447 #if defined(ENET_ENHANCEDBUFFERDESCRIPTOR_MODE) && ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0448 void ENET_SetTsISRHandler(ENET_Type *base, enet_isr_t ISRHandler)
0449 {
0450 uint32_t instance = ENET_GetInstance(base);
0451
0452 s_enetTsIsr[instance] = ISRHandler;
0453 (void)EnableIRQ(s_enetTsIrqId[instance]);
0454 }
0455
0456 void ENET_Set1588TimerISRHandler(ENET_Type *base, enet_isr_t ISRHandler)
0457 {
0458 uint32_t instance = ENET_GetInstance(base);
0459
0460 s_enet1588TimerIsr[instance] = ISRHandler;
0461 (void)EnableIRQ(s_enet1588TimerIrqId[instance]);
0462 }
0463 #endif
0464
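/*
 * Stores the ring, buffer, and callback information from the configuration in
 * the handle, registers the handle for this instance, and installs the default
 * transmit/receive/error IRQ handlers for the requested interrupt sources.
 */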
0465 static void ENET_SetHandler(ENET_Type *base,
0466 enet_handle_t *handle,
0467 const enet_config_t *config,
0468 const enet_buffer_config_t *bufferConfig,
0469 uint32_t srcClock_Hz)
0470 {
0471 uint8_t count;
0472 uint32_t instance = ENET_GetInstance(base);
0473 const enet_buffer_config_t *buffCfg = bufferConfig;
0474
0475
0476 (void)memset(handle, 0, sizeof(enet_handle_t));
0477
0478 for (count = 0; count < config->ringNum; count++)
0479 {
0480 assert(buffCfg->rxBuffSizeAlign * buffCfg->rxBdNumber > config->rxMaxFrameLen);
0481
0482 handle->rxBdRing[count].rxBdBase = buffCfg->rxBdStartAddrAlign;
0483 handle->rxBuffSizeAlign[count] = buffCfg->rxBuffSizeAlign;
0484 handle->rxBdRing[count].rxRingLen = buffCfg->rxBdNumber;
0485 handle->rxMaintainEnable[count] = buffCfg->rxMaintainEnable;
0486 handle->txBdRing[count].txBdBase = buffCfg->txBdStartAddrAlign;
0487 handle->txBuffSizeAlign[count] = buffCfg->txBuffSizeAlign;
0488 handle->txBdRing[count].txRingLen = buffCfg->txBdNumber;
0489 handle->txMaintainEnable[count] = buffCfg->txMaintainEnable;
0490 handle->txDirtyRing[count].txDirtyBase = buffCfg->txFrameInfo;
0491 handle->txDirtyRing[count].txRingLen = buffCfg->txBdNumber;
0492 buffCfg++;
0493 }
0494
0495 handle->ringNum = config->ringNum;
0496 handle->rxBuffAlloc = config->rxBuffAlloc;
0497 handle->rxBuffFree = config->rxBuffFree;
0498 handle->callback = config->callback;
0499 handle->userData = config->userData;
0500 #if defined(FSL_FEATURE_ENET_TIMESTAMP_CAPTURE_BIT_INVALID) && FSL_FEATURE_ENET_TIMESTAMP_CAPTURE_BIT_INVALID
0501 handle->enetClock = srcClock_Hz;
0502 #endif
0503
0504
0505 s_ENETHandle[instance] = handle;
0506
0507
0508 if (0U != (config->interrupt & (uint32_t)ENET_TX_INTERRUPT))
0509 {
0510 ENET_SetTxISRHandler(base, ENET_TransmitIRQHandler);
0511 }
0512 if (0U != (config->interrupt & (uint32_t)ENET_RX_INTERRUPT))
0513 {
0514 ENET_SetRxISRHandler(base, ENET_ReceiveIRQHandler);
0515 }
0516 if (0U != (config->interrupt & (uint32_t)ENET_ERR_INTERRUPT))
0517 {
0518 ENET_SetErrISRHandler(base, ENET_ErrorIRQHandler);
0519 }
0520 }
0521
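/*
 * Programs the MAC controller: RCR/TCR/ECR, FIFO thresholds and flow control,
 * descriptor base addresses (including ring 1/2 on AVB-capable instances), the
 * MAC address, the SMI clock, and optional interrupt coalescing, then enables
 * the MAC.
 */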
0522 static void ENET_SetMacController(ENET_Type *base,
0523 enet_handle_t *handle,
0524 const enet_config_t *config,
0525 const enet_buffer_config_t *bufferConfig,
0526 uint8_t *macAddr,
0527 uint32_t srcClock_Hz)
0528 {
0529 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
0530 if (FSL_FEATURE_ENET_INSTANCE_HAS_AVBn(base) == 1)
0531 {
0532
0533 if (config->miiSpeed == kENET_MiiSpeed1000M)
0534 {
0535
0536 assert(config->miiMode == kENET_RgmiiMode);
0537 assert(config->miiDuplex == kENET_MiiFullDuplex);
0538 }
0539 }
0540 #endif
0541
0542 uint32_t rcr = 0;
0543 uint32_t tcr = 0;
0544 uint32_t ecr = base->ECR;
0545 uint32_t macSpecialConfig = config->macSpecialConfig;
0546 uint32_t maxFrameLen = config->rxMaxFrameLen;
0547 uint32_t configVal = 0;
0548
0549
0550 if (0U != (macSpecialConfig & (uint32_t)kENET_ControlVLANTagEnable))
0551 {
0552 maxFrameLen = (ENET_FRAME_MAX_FRAMELEN + ENET_FRAME_VLAN_TAGLEN);
0553 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
0554 if (FSL_FEATURE_ENET_INSTANCE_HAS_AVBn(base) == 1)
0555 {
0556 if (0U != (macSpecialConfig & (uint32_t)kENET_ControlSVLANEnable))
0557 {
0558
0559 maxFrameLen += ENET_FRAME_VLAN_TAGLEN;
0560 }
0561 ecr |= (uint32_t)(((macSpecialConfig & (uint32_t)kENET_ControlSVLANEnable) != 0U) ?
0562 (ENET_ECR_SVLANEN_MASK | ENET_ECR_SVLANDBL_MASK) :
0563 0U) |
0564 (uint32_t)(((macSpecialConfig & (uint32_t)kENET_ControlVLANUseSecondTag) != 0U) ?
0565 ENET_ECR_VLANUSE2ND_MASK :
0566 0U);
0567 }
0568 #endif
0569 }
0570
0571
0572 rcr = ((0U != (macSpecialConfig & (uint32_t)kENET_ControlRxPayloadCheckEnable)) ? ENET_RCR_NLC_MASK : 0U) |
0573 ((0U != (macSpecialConfig & (uint32_t)kENET_ControlFlowControlEnable)) ? ENET_RCR_CFEN_MASK : 0U) |
0574 ((0U != (macSpecialConfig & (uint32_t)kENET_ControlFlowControlEnable)) ? ENET_RCR_FCE_MASK : 0U) |
0575 ((0U != (macSpecialConfig & (uint32_t)kENET_ControlRxPadRemoveEnable)) ? ENET_RCR_PADEN_MASK : 0U) |
0576 ((0U != (macSpecialConfig & (uint32_t)kENET_ControlRxBroadCastRejectEnable)) ? ENET_RCR_BC_REJ_MASK : 0U) |
0577 ((0U != (macSpecialConfig & (uint32_t)kENET_ControlPromiscuousEnable)) ? ENET_RCR_PROM_MASK : 0U) |
0578 ENET_RCR_MAX_FL(maxFrameLen) | ENET_RCR_CRCFWD_MASK;
0579
0580
0581 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
0582 if (FSL_FEATURE_ENET_INSTANCE_HAS_AVBn(base) == 1)
0583 {
0584 if (config->miiMode == kENET_RgmiiMode)
0585 {
0586 rcr |= ENET_RCR_RGMII_EN_MASK;
0587 }
0588 else
0589 {
0590 rcr &= ~ENET_RCR_RGMII_EN_MASK;
0591 }
0592
0593 if (config->miiSpeed == kENET_MiiSpeed1000M)
0594 {
0595 ecr |= ENET_ECR_SPEED_MASK;
0596 }
0597 else
0598 {
0599 ecr &= ~ENET_ECR_SPEED_MASK;
0600 }
0601 }
0602 #endif
0603 rcr |= ENET_RCR_MII_MODE_MASK;
0604 if (config->miiMode == kENET_RmiiMode)
0605 {
0606 rcr |= ENET_RCR_RMII_MODE_MASK;
0607 }
0608
0609
0610 if (config->miiSpeed == kENET_MiiSpeed10M)
0611 {
0612 rcr |= ENET_RCR_RMII_10T_MASK;
0613 }
0614
0615
0616 if (config->miiDuplex == kENET_MiiHalfDuplex)
0617 {
0618 rcr |= ENET_RCR_DRT_MASK;
0619 }
0620
0621 if ((0U != (config->macSpecialConfig & (uint32_t)kENET_ControlMIILoopEnable)) &&
0622 (config->miiMode != kENET_RmiiMode))
0623 {
0624 rcr |= ENET_RCR_LOOP_MASK;
0625 rcr &= ~ENET_RCR_DRT_MASK;
0626 }
0627 base->RCR = rcr;
0628
0629
0630 tcr = base->TCR & ~(ENET_TCR_FDEN_MASK | ENET_TCR_ADDINS_MASK);
0631 tcr |= ((kENET_MiiHalfDuplex != config->miiDuplex) ? (uint32_t)ENET_TCR_FDEN_MASK : 0U) |
0632 ((0U != (macSpecialConfig & (uint32_t)kENET_ControlMacAddrInsert)) ? (uint32_t)ENET_TCR_ADDINS_MASK : 0U);
0633 base->TCR = tcr;
0634
0635
0636 base->TACC = config->txAccelerConfig;
0637 base->RACC = config->rxAccelerConfig;
0638
0639
0640 if (0U != (macSpecialConfig & (uint32_t)kENET_ControlFlowControlEnable))
0641 {
0642 uint32_t reemReg;
0643 base->OPD = config->pauseDuration;
0644 reemReg = ENET_RSEM_RX_SECTION_EMPTY(config->rxFifoEmptyThreshold);
0645 #if defined(FSL_FEATURE_ENET_HAS_RECEIVE_STATUS_THRESHOLD) && FSL_FEATURE_ENET_HAS_RECEIVE_STATUS_THRESHOLD
0646 reemReg |= ENET_RSEM_STAT_SECTION_EMPTY(config->rxFifoStatEmptyThreshold);
0647 #endif
0648 base->RSEM = reemReg;
0649 }
0650
0651
0652 if (0U != (macSpecialConfig & (uint32_t)kENET_ControlStoreAndFwdDisable))
0653 {
0654
0655 configVal = ((uint32_t)config->txFifoWatermark) & ENET_TFWR_TFWR_MASK;
0656 base->TFWR = configVal;
0657
0658 configVal = ((uint32_t)config->rxFifoFullThreshold) & ENET_RSFL_RX_SECTION_FULL_MASK;
0659 base->RSFL = configVal;
0660 }
0661 else
0662 {
0663
0664 base->TFWR = ENET_TFWR_STRFWD_MASK;
0665 base->RSFL = 0;
0666 }
0667
0668
0669 if (0U !=
0670 (config->txAccelerConfig & ((uint32_t)kENET_TxAccelIpCheckEnabled | (uint32_t)kENET_TxAccelProtoCheckEnabled)))
0671 {
0672 base->TFWR = ENET_TFWR_STRFWD_MASK;
0673 }
0674 if (0U != ((config->rxAccelerConfig &
0675 ((uint32_t)kENET_RxAccelIpCheckEnabled | (uint32_t)kENET_RxAccelProtoCheckEnabled))))
0676 {
0677 base->RSFL = 0;
0678 }
0679
0680
0681 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
0682 base->TDSR = MEMORY_ConvertMemoryMapAddress((uintptr_t)bufferConfig->txBdStartAddrAlign, kMEMORY_Local2DMA);
0683 base->RDSR = MEMORY_ConvertMemoryMapAddress((uintptr_t)bufferConfig->rxBdStartAddrAlign, kMEMORY_Local2DMA);
0684 #else
0685 base->TDSR = (uint32_t)(uintptr_t)bufferConfig->txBdStartAddrAlign;
0686 base->RDSR = (uint32_t)(uintptr_t)bufferConfig->rxBdStartAddrAlign;
0687 #endif
0688 base->MRBR = (uint32_t)bufferConfig->rxBuffSizeAlign;
0689
0690 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
0691 if (FSL_FEATURE_ENET_INSTANCE_HAS_AVBn(base) == 1)
0692 {
0693 const enet_buffer_config_t *buffCfg = bufferConfig;
0694
0695 if (config->ringNum > 1U)
0696 {
0697
0698 buffCfg++;
0699 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
0700 base->TDSR1 = MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->txBdStartAddrAlign, kMEMORY_Local2DMA);
0701 base->RDSR1 = MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->rxBdStartAddrAlign, kMEMORY_Local2DMA);
0702 #else
0703 base->TDSR1 = (uint32_t)(uintptr_t)buffCfg->txBdStartAddrAlign;
0704 base->RDSR1 = (uint32_t)(uintptr_t)buffCfg->rxBdStartAddrAlign;
0705 #endif
0706 base->MRBR1 = (uint32_t)buffCfg->rxBuffSizeAlign;
0707
0708 base->DMACFG[0] = ENET_DMACFG_DMA_CLASS_EN_MASK;
0709 }
0710 if (config->ringNum > 2U)
0711 {
0712
0713 buffCfg++;
0714 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
0715 base->TDSR2 = MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->txBdStartAddrAlign, kMEMORY_Local2DMA);
0716 base->RDSR2 = MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->rxBdStartAddrAlign, kMEMORY_Local2DMA);
0717 #else
0718 base->TDSR2 = (uint32_t)(uintptr_t)buffCfg->txBdStartAddrAlign;
0719 base->RDSR2 = (uint32_t)(uintptr_t)buffCfg->rxBdStartAddrAlign;
0720 #endif
0721 base->MRBR2 = (uint32_t)buffCfg->rxBuffSizeAlign;
0722
0723 base->DMACFG[1] = ENET_DMACFG_DMA_CLASS_EN_MASK;
0724 }
0725
0726
0727
0728
0729
0730 base->QOS |= ENET_QOS_TX_SCHEME(1);
0731 }
0732 #endif
0733
0734
0735 ENET_SetMacAddr(base, macAddr);
0736
0737
0738 if (!ENET_GetSMI(base))
0739 {
0740 ENET_SetSMI(base, srcClock_Hz,
0741 ((0U != (config->macSpecialConfig & (uint32_t)kENET_ControlSMIPreambleDisable)) ? true : false));
0742 }
0743
0744
0745 #if defined(FSL_FEATURE_ENET_HAS_INTERRUPT_COALESCE) && FSL_FEATURE_ENET_HAS_INTERRUPT_COALESCE
0746 uint8_t queue = 0;
0747
0748 if (NULL != config->intCoalesceCfg)
0749 {
0750 uint32_t intMask = (ENET_EIMR_TXB_MASK | ENET_EIMR_RXB_MASK);
0751
0752 #if FSL_FEATURE_ENET_QUEUE > 1
0753 if (FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) > 1)
0754 {
0755 intMask |= ENET_EIMR_TXB2_MASK | ENET_EIMR_RXB2_MASK | ENET_EIMR_TXB1_MASK | ENET_EIMR_RXB1_MASK;
0756 }
0757 #endif
0758
0759
0760 base->EIMR &= ~intMask;
0761
0762
0763 for (queue = 0; queue < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base); queue++)
0764 {
0765 base->TXIC[queue] = ENET_TXIC_ICFT(config->intCoalesceCfg->txCoalesceFrameCount[queue]) |
0766 config->intCoalesceCfg->txCoalesceTimeCount[queue] | ENET_TXIC_ICCS_MASK |
0767 ENET_TXIC_ICEN_MASK;
0768 base->RXIC[queue] = ENET_RXIC_ICFT(config->intCoalesceCfg->rxCoalesceFrameCount[queue]) |
0769 config->intCoalesceCfg->rxCoalesceTimeCount[queue] | ENET_RXIC_ICCS_MASK |
0770 ENET_RXIC_ICEN_MASK;
0771 }
0772 }
0773 #endif
0774 ENET_EnableInterrupts(base, config->interrupt);
0775
0776 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0777
0778 ecr |= ENET_ECR_EN1588_MASK;
0779 #endif
0780
0781 ecr |= ENET_ECR_ETHEREN_MASK | ENET_ECR_DBSWP_MASK;
0782 base->ECR = ecr;
0783 }
0784
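/*
 * Initializes every transmit descriptor: buffer address (when buffers are
 * statically provided), CRC insertion, the wrap bit on the last descriptor,
 * and the interrupt/frame-type flags in enhanced buffer descriptor mode.
 */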
0785 static void ENET_SetTxBufferDescriptors(enet_handle_t *handle,
0786 const enet_config_t *config,
0787 const enet_buffer_config_t *bufferConfig)
0788 {
0789 assert(config != NULL);
0790 assert(bufferConfig != NULL);
0791
0792 const enet_buffer_config_t *buffCfg = bufferConfig;
0793 uintptr_t txBuffer = 0;
0794 uint32_t txBuffSizeAlign;
0795 uint16_t txBdNumber;
0796 uint8_t ringNum;
0797 uint16_t count;
0798
0799
0800 for (ringNum = 0; ringNum < config->ringNum; ringNum++)
0801 {
0802 if (buffCfg->txBdStartAddrAlign != NULL)
0803 {
0804 volatile enet_tx_bd_struct_t *curBuffDescrip = buffCfg->txBdStartAddrAlign;
0805 txBuffSizeAlign = buffCfg->txBuffSizeAlign;
0806 txBdNumber = buffCfg->txBdNumber;
0807
0808 if (buffCfg->txBufferAlign != NULL)
0809 {
0810 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
0811 txBuffer = MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->txBufferAlign, kMEMORY_Local2DMA);
0812 #else
0813 txBuffer = (uintptr_t)buffCfg->txBufferAlign;
0814 #endif
0815 assert((uint64_t)txBuffer + (uint64_t)txBdNumber * txBuffSizeAlign - 1U <= UINT32_MAX);
0816 }
0817
0818 for (count = 0; count < txBdNumber; count++)
0819 {
0820 if (buffCfg->txBufferAlign != NULL)
0821 {
0822
0823 curBuffDescrip->buffer = (uint32_t)(txBuffer + count * txBuffSizeAlign);
0824 }
0825
0826 curBuffDescrip->length = 0;
0827
0828 curBuffDescrip->control = ENET_BUFFDESCRIPTOR_TX_TRANMITCRC_MASK;
0829
0830 if (count == (txBdNumber - 1U))
0831 {
0832 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_WRAP_MASK;
0833 }
0834
0835 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0836
0837 curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_INTERRUPT_MASK;
0838 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
0839
0840 curBuffDescrip->controlExtend1 |= (uint16_t)(ENET_BD_FTYPE(ringNum));
0841 #endif
0842 #endif
0843
0844 curBuffDescrip++;
0845 }
0846 }
0847 buffCfg++;
0848 }
0849 }
0850
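/*
 * Initializes every receive descriptor: buffer address and EMPTY bit (when no
 * dynamic allocator is used), the wrap bit on the last descriptor, and the
 * receive interrupt flag in enhanced buffer descriptor mode.
 */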
0851 static void ENET_SetRxBufferDescriptors(enet_handle_t *handle,
0852 const enet_config_t *config,
0853 const enet_buffer_config_t *bufferConfig)
0854 {
0855 assert(config != NULL);
0856 assert(bufferConfig != NULL);
0857
0858 const enet_buffer_config_t *buffCfg = bufferConfig;
0859 uint16_t rxBuffSizeAlign;
0860 uint16_t rxBdNumber;
0861 uintptr_t rxBuffer;
0862 uint8_t ringNum;
0863 uint16_t count;
0864
0865 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0866 uint32_t mask = ((uint32_t)kENET_RxFrameInterrupt | (uint32_t)kENET_RxBufferInterrupt);
0867 #endif
0868
0869
0870 for (ringNum = 0; ringNum < config->ringNum; ringNum++)
0871 {
0872 assert(buffCfg->rxBuffSizeAlign >= ENET_RX_MIN_BUFFERSIZE);
0873 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0874 #if FSL_FEATURE_ENET_QUEUE > 1
0875 if (ringNum == 1U)
0876 {
0877 mask = ((uint32_t)kENET_RxFrame1Interrupt | (uint32_t)kENET_RxBuffer1Interrupt);
0878 }
0879 else if (ringNum == 2U)
0880 {
0881 mask = ((uint32_t)kENET_RxFrame2Interrupt | (uint32_t)kENET_RxBuffer2Interrupt);
0882 }
0883 else
0884 {
0885
0886 }
0887 #endif
0888 #endif
0889
0890 if ((buffCfg->rxBdStartAddrAlign != NULL) && ((buffCfg->rxBufferAlign != NULL) || config->rxBuffAlloc != NULL))
0891 {
0892 volatile enet_rx_bd_struct_t *curBuffDescrip = buffCfg->rxBdStartAddrAlign;
0893 rxBuffSizeAlign = buffCfg->rxBuffSizeAlign;
0894 rxBdNumber = buffCfg->rxBdNumber;
0895 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
0896 rxBuffer = MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->rxBufferAlign, kMEMORY_Local2DMA);
0897 #else
0898 rxBuffer = (uintptr_t)buffCfg->rxBufferAlign;
0899 #endif
0900 assert((uint64_t)rxBuffer + (uint64_t)rxBdNumber * rxBuffSizeAlign - 1U <= UINT32_MAX);
0901
0902 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
0903 if (buffCfg->rxMaintainEnable)
0904 {
0905
0906 DCACHE_InvalidateByRange(rxBuffer, ((uint32_t)rxBdNumber * rxBuffSizeAlign));
0907 }
0908 #endif
0909
0910 for (count = 0; count < rxBdNumber; count++)
0911 {
0912
0913 curBuffDescrip->length = 0;
0914 if (config->rxBuffAlloc == NULL)
0915 {
0916 curBuffDescrip->buffer = (uint32_t)(rxBuffer + (uintptr_t)count * rxBuffSizeAlign);
0917
0918 curBuffDescrip->control = ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
0919 }
0920
0921
0922 if (count == (rxBdNumber - 1U))
0923 {
0924 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
0925 }
0926
0927 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
0928 if (0U != (config->interrupt & mask))
0929 {
0930
0931 curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_RX_INTERRUPT_MASK;
0932 }
0933 else
0934 {
0935 curBuffDescrip->controlExtend1 = 0;
0936 }
0937 #endif
0938
0939 curBuffDescrip++;
0940 }
0941 }
0942 buffCfg++;
0943 }
0944 }
0945
0946
0947
0948
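/* Allocates one receive buffer per descriptor from the user allocator; frees everything and fails if any allocation returns NULL. */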
0949 static status_t ENET_RxBufferAllocAll(ENET_Type *base, enet_handle_t *handle)
0950 {
0951 assert(handle->rxBuffAlloc != NULL);
0952
0953 volatile enet_rx_bd_struct_t *curBuffDescrip;
0954 enet_rx_bd_ring_t *rxBdRing;
0955 uintptr_t buffer;
0956 uint16_t ringId;
0957 uint16_t index;
0958
0959
0960 for (ringId = 0; ringId < handle->ringNum; ringId++)
0961 {
0962 assert(handle->rxBdRing[ringId].rxBdBase != NULL);
0963
0964 rxBdRing = &handle->rxBdRing[ringId];
0965 curBuffDescrip = rxBdRing->rxBdBase;
0966 index = 0;
0967
0968 do
0969 {
0970 buffer = (uintptr_t)(uint8_t *)handle->rxBuffAlloc(base, handle->userData, ringId);
0971 if (buffer == 0U)
0972 {
0973 ENET_RxBufferFreeAll(base, handle);
0974 return kStatus_ENET_InitMemoryFail;
0975 }
0976 assert((uint64_t)buffer + handle->rxBuffSizeAlign[ringId] - 1U <= UINT32_MAX);
0977
0978 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
0979 if (handle->rxMaintainEnable[ringId])
0980 {
0981
0982 DCACHE_InvalidateByRange(buffer, handle->rxBuffSizeAlign[ringId]);
0983 }
0984 #endif
0985 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
0986 buffer = MEMORY_ConvertMemoryMapAddress(buffer, kMEMORY_Local2DMA);
0987 #endif
0988 curBuffDescrip->buffer = (uint32_t)buffer;
0989 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
0990
0991
0992 index = ENET_IncreaseIndex(index, rxBdRing->rxRingLen);
0993 curBuffDescrip = rxBdRing->rxBdBase + index;
0994 } while (index != 0U);
0995 }
0996 return kStatus_Success;
0997 }
0998
0999
1000
1001
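/* Returns every attached receive buffer to the user free callback and clears the descriptors. */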
1002 static void ENET_RxBufferFreeAll(ENET_Type *base, enet_handle_t *handle)
1003 {
1004 assert(handle->rxBuffFree != NULL);
1005
1006 uint16_t index;
1007 enet_rx_bd_ring_t *rxBdRing;
1008 volatile enet_rx_bd_struct_t *curBuffDescrip;
1009 uintptr_t buffer;
1010 uint16_t ringId;
1011
1012 for (ringId = 0; ringId < handle->ringNum; ringId++)
1013 {
1014 assert(handle->rxBdRing[ringId].rxBdBase != NULL);
1015
1016 rxBdRing = &handle->rxBdRing[ringId];
1017 curBuffDescrip = rxBdRing->rxBdBase;
1018 index = 0;
1019
1020
1021 do
1022 {
1023 if (curBuffDescrip->buffer != 0U)
1024 {
1025 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1026 buffer = MEMORY_ConvertMemoryMapAddress(curBuffDescrip->buffer, kMEMORY_DMA2Local);
1027 #else
1028 buffer = curBuffDescrip->buffer;
1029 #endif
1030 handle->rxBuffFree(base, (void *)(uint8_t *)buffer, handle->userData, ringId);
1031 curBuffDescrip->buffer = 0;
1032
1033 curBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
1034 }
1035
1036
1037 index = ENET_IncreaseIndex(index, rxBdRing->rxRingLen);
1038 curBuffDescrip = rxBdRing->rxBdBase + index;
1039 } while (index != 0U);
1040 }
1041 }
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
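/* Tells the hardware that receive descriptors of the given ring have been updated (writes RDAR/RDAR1/RDAR2). */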
1054 static inline void ENET_ActiveReadRing(ENET_Type *base, uint8_t ringId)
1055 {
1056 assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
1057
1058
1059 __DSB();
1060
1061
1062 switch (ringId)
1063 {
1064 case kENET_Ring0:
1065 base->RDAR = ENET_RDAR_RDAR_MASK;
1066 break;
1067 #if FSL_FEATURE_ENET_QUEUE > 1
1068 case kENET_Ring1:
1069 base->RDAR1 = ENET_RDAR1_RDAR_MASK;
1070 break;
1071 case kENET_Ring2:
1072 base->RDAR2 = ENET_RDAR2_RDAR_MASK;
1073 break;
1074 #endif
1075 default:
1076 assert(false);
1077 break;
1078 }
1079 }
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
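/* Kicks transmission on the given ring by writing its TDAR register; includes the errata 007885 workaround that polls TDAR a few times before the write. */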
1091 static void ENET_ActiveSendRing(ENET_Type *base, uint8_t ringId)
1092 {
1093 assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
1094
1095 volatile uint32_t *txDesActive = NULL;
1096
1097
1098 __DSB();
1099
1100 switch (ringId)
1101 {
1102 case kENET_Ring0:
1103 txDesActive = &(base->TDAR);
1104 break;
1105 #if FSL_FEATURE_ENET_QUEUE > 1
1106 case kENET_Ring1:
1107 txDesActive = &(base->TDAR1);
1108 break;
1109 case kENET_Ring2:
1110 txDesActive = &(base->TDAR2);
1111 break;
1112 #endif
1113 default:
1114 txDesActive = &(base->TDAR);
1115 break;
1116 }
1117
1118 #if defined(FSL_FEATURE_ENET_HAS_ERRATA_007885) && FSL_FEATURE_ENET_HAS_ERRATA_007885
1119
1120
1121
1122
1123
1124 for (uint8_t i = 0; i < 4U; i++)
1125 {
1126 if (*txDesActive == 0U)
1127 {
1128 break;
1129 }
1130 }
1131 #endif
1132
1133
1134 *txDesActive = 0;
1135 }
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
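/* Reconfigures MAC speed and duplex at run time (RCR/TCR, plus the ECR speed bit for 1 Gbit/s on AVB-capable instances). */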
1146 void ENET_SetMII(ENET_Type *base, enet_mii_speed_t speed, enet_mii_duplex_t duplex)
1147 {
1148 uint32_t rcr = base->RCR;
1149 uint32_t tcr = base->TCR;
1150
1151 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
1152 if (FSL_FEATURE_ENET_INSTANCE_HAS_AVBn(base) == 1)
1153 {
1154 uint32_t ecr = base->ECR;
1155
1156 if (kENET_MiiSpeed1000M == speed)
1157 {
1158 assert(duplex == kENET_MiiFullDuplex);
1159 ecr |= ENET_ECR_SPEED_MASK;
1160 }
1161 else
1162 {
1163 ecr &= ~ENET_ECR_SPEED_MASK;
1164 }
1165
1166 base->ECR = ecr;
1167 }
1168 #endif
1169
1170
1171 if (kENET_MiiSpeed10M == speed)
1172 {
1173 rcr |= ENET_RCR_RMII_10T_MASK;
1174 }
1175 else
1176 {
1177 rcr &= ~ENET_RCR_RMII_10T_MASK;
1178 }
1179
1180 if (duplex == kENET_MiiHalfDuplex)
1181 {
1182 rcr |= ENET_RCR_DRT_MASK;
1183 tcr &= ~ENET_TCR_FDEN_MASK;
1184 }
1185 else
1186 {
1187 rcr &= ~ENET_RCR_DRT_MASK;
1188 tcr |= ENET_TCR_FDEN_MASK;
1189 }
1190
1191 base->RCR = rcr;
1192 base->TCR = tcr;
1193 }
1194
1195
1196
1197
1198
1199
1200
1201
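/* Writes the six-byte MAC address into PALR/PAUR. */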
1202 void ENET_SetMacAddr(ENET_Type *base, uint8_t *macAddr)
1203 {
1204 uint32_t address;
1205
1206
1207 address = (uint32_t)(((uint32_t)macAddr[0] << 24U) | ((uint32_t)macAddr[1] << 16U) | ((uint32_t)macAddr[2] << 8U) |
1208 (uint32_t)macAddr[3]);
1209 base->PALR = address;
1210
1211 address = (uint32_t)(((uint32_t)macAddr[4] << 8U) | ((uint32_t)macAddr[5]));
1212 base->PAUR = address << ENET_PAUR_PADDR2_SHIFT;
1213 }
1214
1215
1216
1217
1218
1219
1220
1221
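/* Reads the six-byte MAC address back from PALR/PAUR. */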
1222 void ENET_GetMacAddr(ENET_Type *base, uint8_t *macAddr)
1223 {
1224 assert(macAddr != NULL);
1225
1226 uint32_t address;
1227
1228
1229 address = base->PALR;
1230 macAddr[0] = 0xFFU & (uint8_t)(address >> 24U);
1231 macAddr[1] = 0xFFU & (uint8_t)(address >> 16U);
1232 macAddr[2] = 0xFFU & (uint8_t)(address >> 8U);
1233 macAddr[3] = 0xFFU & (uint8_t)address;
1234
1235
1236 address = (base->PAUR & ENET_PAUR_PADDR2_MASK) >> ENET_PAUR_PADDR2_SHIFT;
1237 macAddr[4] = 0xFFU & (uint8_t)(address >> 8U);
1238 macAddr[5] = 0xFFU & (uint8_t)address;
1239 }
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
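/* Configures the MII management (MDIO) clock: MII_SPEED keeps MDC at or below 2.5 MHz and HOLDTIME provides at least 10 ns of hold time; optionally disables the preamble. */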
1250 void ENET_SetSMI(ENET_Type *base, uint32_t srcClock_Hz, bool isPreambleDisabled)
1251 {
1252
1253 assert((srcClock_Hz != 0U) && (srcClock_Hz <= 320000000U));
1254
1255 uint32_t clkCycle = 0;
1256 uint32_t speed = 0;
1257 uint32_t mscr = 0;
1258
1259
1260
1261 speed = (srcClock_Hz + 2U * ENET_MDC_FREQUENCY - 1U) / (2U * ENET_MDC_FREQUENCY) - 1U;
1262
1263 clkCycle = (10U + ENET_NANOSECOND_ONE_SECOND / srcClock_Hz - 1U) / (ENET_NANOSECOND_ONE_SECOND / srcClock_Hz) - 1U;
1264
1265 mscr =
1266 ENET_MSCR_MII_SPEED(speed) | ENET_MSCR_HOLDTIME(clkCycle) | (isPreambleDisabled ? ENET_MSCR_DIS_PRE_MASK : 0U);
1267 base->MSCR = mscr;
1268 }
1269
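/* Polls the MII interrupt flag until the SMI transfer completes, with an optional bounded wait when ENET_MDIO_TIMEOUT_COUNT is defined. */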
1270 static status_t ENET_MDIOWaitTransferOver(ENET_Type *base)
1271 {
1272 status_t result = kStatus_Success;
1273 #ifdef ENET_MDIO_TIMEOUT_COUNT
1274 uint32_t counter;
1275 #endif
1276
1277
1278 #ifdef ENET_MDIO_TIMEOUT_COUNT
1279 for (counter = ENET_MDIO_TIMEOUT_COUNT; counter > 0U; counter--)
1280 {
1281 if (ENET_EIR_MII_MASK == (ENET_GetInterruptStatus(base) & ENET_EIR_MII_MASK))
1282 {
1283 break;
1284 }
1285 }
1286
1287 if (0U == counter)
1288 {
1289 result = kStatus_Timeout;
1290 }
1291 #else
1292 while (ENET_EIR_MII_MASK != (ENET_GetInterruptStatus(base) & ENET_EIR_MII_MASK))
1293 {
1294 }
1295 #endif
1296 return result;
1297 }
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
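/* Performs a clause-22 MDIO write to the given PHY register. */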
1309 status_t ENET_MDIOWrite(ENET_Type *base, uint8_t phyAddr, uint8_t regAddr, uint16_t data)
1310 {
1311 status_t result = kStatus_Success;
1312
1313
1314 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1315
1316
1317 ENET_StartSMIWrite(base, phyAddr, regAddr, kENET_MiiWriteValidFrame, data);
1318
1319 result = ENET_MDIOWaitTransferOver(base);
1320 if (result != kStatus_Success)
1321 {
1322 return result;
1323 }
1324
1325
1326 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1327
1328 return result;
1329 }
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
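/* Performs a clause-22 MDIO read from the given PHY register. */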
1341 status_t ENET_MDIORead(ENET_Type *base, uint8_t phyAddr, uint8_t regAddr, uint16_t *pData)
1342 {
1343 assert(pData != NULL);
1344
1345 status_t result = kStatus_Success;
1346
1347
1348 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1349
1350
1351 ENET_StartSMIRead(base, phyAddr, regAddr, kENET_MiiReadValidFrame);
1352
1353 result = ENET_MDIOWaitTransferOver(base);
1354 if (result != kStatus_Success)
1355 {
1356 return result;
1357 }
1358
1359
1360 *pData = (uint16_t)ENET_ReadSMIData(base);
1361
1362
1363 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1364
1365 return result;
1366 }
1367
1368 #if defined(FSL_FEATURE_ENET_HAS_EXTEND_MDIO) && FSL_FEATURE_ENET_HAS_EXTEND_MDIO
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
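/* Performs a clause-45 MDIO write: sends the register address first, then the data. */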
1380 status_t ENET_MDIOC45Write(ENET_Type *base, uint8_t portAddr, uint8_t devAddr, uint16_t regAddr, uint16_t data)
1381 {
1382 status_t result = kStatus_Success;
1383
1384
1385 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1386 ENET_StartExtC45SMIWriteReg(base, portAddr, devAddr, regAddr);
1387 result = ENET_MDIOWaitTransferOver(base);
1388 if (result != kStatus_Success)
1389 {
1390 return result;
1391 }
1392 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1393
1394
1395 ENET_StartExtC45SMIWriteData(base, portAddr, devAddr, data);
1396 result = ENET_MDIOWaitTransferOver(base);
1397 if (result != kStatus_Success)
1398 {
1399 return result;
1400 }
1401 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1402
1403 return result;
1404 }
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
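/* Performs a clause-45 MDIO read: sends the register address first, then reads the data. */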
1416 status_t ENET_MDIOC45Read(ENET_Type *base, uint8_t portAddr, uint8_t devAddr, uint16_t regAddr, uint16_t *pData)
1417 {
1418 assert(pData != NULL);
1419
1420 status_t result = kStatus_Success;
1421
1422
1423 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1424 ENET_StartExtC45SMIWriteReg(base, portAddr, devAddr, regAddr);
1425 result = ENET_MDIOWaitTransferOver(base);
1426 if (result != kStatus_Success)
1427 {
1428 return result;
1429 }
1430
1431
1432 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1433 ENET_StartExtC45SMIReadData(base, portAddr, devAddr);
1434 result = ENET_MDIOWaitTransferOver(base);
1435 if (result != kStatus_Success)
1436 {
1437 return result;
1438 }
1439 ENET_ClearInterruptStatus(base, ENET_EIR_MII_MASK);
1440 *pData = (uint16_t)ENET_ReadSMIData(base);
1441 return result;
1442 }
1443 #endif
1444
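/* Advances a descriptor ring index by one, wrapping back to zero at max. */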
1445 static uint16_t ENET_IncreaseIndex(uint16_t index, uint16_t max)
1446 {
1447 assert(index < max);
1448
1449
1450 index++;
1451 if (index >= max)
1452 {
1453 index = 0;
1454 }
1455 return index;
1456 }
1457
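/* Returns true while the transmit "dirty" (frame-info) ring still has room. */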
1458 static inline bool ENET_TxDirtyRingAvailable(enet_tx_dirty_ring_t *txDirtyRing)
1459 {
1460 return !txDirtyRing->isFull;
1461 }
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
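/* Scans the receive ring up to the last descriptor of the pending frame and accumulates error statistics from its control fields. */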
1482 void ENET_GetRxErrBeforeReadFrame(enet_handle_t *handle, enet_data_error_stats_t *eErrorStatic, uint8_t ringId)
1483 {
1484 assert(handle != NULL);
1485 assert(eErrorStatic != NULL);
1486 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
1487
1488 uint16_t control = 0;
1489 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
1490 volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
1491 volatile enet_rx_bd_struct_t *cmpBuffDescrip = curBuffDescrip;
1492
1493 do
1494 {
1495
1496 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
1497 {
1498 control = curBuffDescrip->control;
1499 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_TRUNC_MASK))
1500 {
1501
1502 eErrorStatic->statsRxTruncateErr++;
1503 }
1504 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_OVERRUN_MASK))
1505 {
1506
1507 eErrorStatic->statsRxOverRunErr++;
1508 }
1509 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_LENVLIOLATE_MASK))
1510 {
1511
1512 eErrorStatic->statsRxLenGreaterErr++;
1513 }
1514 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_NOOCTET_MASK))
1515 {
1516
1517 eErrorStatic->statsRxAlignErr++;
1518 }
1519 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_CRC_MASK))
1520 {
1521
1522 eErrorStatic->statsRxFcsErr++;
1523 }
1524 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
1525 uint16_t controlExt = curBuffDescrip->controlExtend1;
1526 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_RX_MACERR_MASK))
1527 {
1528
1529 eErrorStatic->statsRxMacErr++;
1530 }
1531 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_RX_PHYERR_MASK))
1532 {
1533
1534 eErrorStatic->statsRxPhyErr++;
1535 }
1536 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_RX_COLLISION_MASK))
1537 {
1538
1539 eErrorStatic->statsRxCollisionErr++;
1540 }
1541 #endif
1542
1543 break;
1544 }
1545
1546
1547 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_WRAP_MASK))
1548 {
1549 curBuffDescrip = rxBdRing->rxBdBase;
1550 }
1551 else
1552 {
1553 curBuffDescrip++;
1554 }
1555
1556 } while (curBuffDescrip != cmpBuffDescrip);
1557 }
1558
1559
1560
1561
1562
1563
1564
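/* Copies the transmit/receive statistics counters from the RMON/IEEE MIB registers. */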
1565 void ENET_GetStatistics(ENET_Type *base, enet_transfer_stats_t *statistics)
1566 {
1567
1568 statistics->statsRxFrameCount = base->RMON_R_PACKETS;
1569 statistics->statsRxFrameOk = base->IEEE_R_FRAME_OK;
1570 statistics->statsRxCrcErr = base->IEEE_R_CRC;
1571 statistics->statsRxAlignErr = base->IEEE_R_ALIGN;
1572 statistics->statsRxDropInvalidSFD = base->IEEE_R_DROP;
1573 statistics->statsRxFifoOverflowErr = base->IEEE_R_MACERR;
1574
1575
1576 statistics->statsTxFrameCount = base->RMON_T_PACKETS;
1577 statistics->statsTxFrameOk = base->IEEE_T_FRAME_OK;
1578 statistics->statsTxCrcAlignErr = base->RMON_T_CRC_ALIGN;
1579 statistics->statsTxFifoUnderRunErr = base->IEEE_T_MACERR;
1580 }
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
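/* Returns the length of the next complete received frame, kStatus_ENET_RxFrameEmpty if none is pending, or kStatus_ENET_RxFrameError if the frame is damaged. */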
1599 status_t ENET_GetRxFrameSize(enet_handle_t *handle, uint32_t *length, uint8_t ringId)
1600 {
1601 assert(handle != NULL);
1602 assert(length != NULL);
1603 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
1604
1605
1606 *length = 0;
1607
1608 uint16_t validLastMask = ENET_BUFFDESCRIPTOR_RX_LAST_MASK | ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
1609 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
1610 volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
1611 uint16_t index = rxBdRing->rxGenIdx;
1612 bool isReturn = false;
1613 status_t result = kStatus_Success;
1614
1615
1616
1617
1618 if (0U != (curBuffDescrip->control & (ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK | ENET_BUFFDESCRIPTOR_RX_SOFTOWNER1_MASK)))
1619 {
1620 isReturn = true;
1621 result = kStatus_ENET_RxFrameEmpty;
1622 }
1623 else
1624 {
1625 do
1626 {
1627
1628 if (curBuffDescrip->length == 0U)
1629 {
1630 isReturn = true;
1631 result = kStatus_ENET_RxFrameError;
1632 break;
1633 }
1634
1635
1636 if ((curBuffDescrip->control & validLastMask) == ENET_BUFFDESCRIPTOR_RX_LAST_MASK)
1637 {
1638 isReturn = true;
1639
1640 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_ERR_MASK))
1641 {
1642 result = kStatus_ENET_RxFrameError;
1643 break;
1644 }
1645 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
1646 if (0U != (curBuffDescrip->controlExtend1 & ENET_BUFFDESCRIPTOR_RX_EXT_ERR_MASK))
1647 {
1648 result = kStatus_ENET_RxFrameError;
1649 break;
1650 }
1651 #endif
1652
1653
1654 *length = curBuffDescrip->length;
1655 break;
1656 }
1657
1658 index = ENET_IncreaseIndex(index, rxBdRing->rxRingLen);
1659 curBuffDescrip = rxBdRing->rxBdBase + index;
1660 } while (index != rxBdRing->rxGenIdx);
1661 }
1662
1663 if (isReturn == false)
1664 {
1665
1666 result = kStatus_ENET_RxFrameEmpty;
1667 }
1668
1669 return result;
1670 }
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
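/*
 * Copies a received frame into the user buffer (optionally returning its
 * timestamp) and releases its descriptors; when data is NULL the pending
 * frame is dropped instead.
 */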
1716 status_t ENET_ReadFrame(
1717 ENET_Type *base, enet_handle_t *handle, uint8_t *data, uint32_t length, uint8_t ringId, uint32_t *ts)
1718 {
1719 assert(handle != NULL);
1720 assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
1721 assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
1722
1723 uint32_t len = 0;
1724 uint32_t offset = 0;
1725 uint16_t control;
1726 bool isLastBuff = false;
1727 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
1728 volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
1729 uint16_t index = rxBdRing->rxGenIdx;
1730 status_t result = kStatus_Success;
1731 uintptr_t address;
1732 uintptr_t dest;
1733
1734
1735 if (data == NULL)
1736 {
1737 do
1738 {
1739
1740 control = curBuffDescrip->control;
1741
1742
1743 ENET_UpdateReadBuffers(base, handle, ringId);
1744
1745
1746 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
1747 {
1748 break;
1749 }
1750 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
1751 } while (index != rxBdRing->rxGenIdx);
1752 }
1753 else
1754 {
1755 while (!isLastBuff)
1756 {
1757
1758 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1759 address = MEMORY_ConvertMemoryMapAddress(curBuffDescrip->buffer, kMEMORY_DMA2Local);
1760 #else
1761 address = curBuffDescrip->buffer;
1762 #endif
1763 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
1764 if (handle->rxMaintainEnable[ringId])
1765 {
1766
1767 DCACHE_InvalidateByRange(address, handle->rxBuffSizeAlign[ringId]);
1768 }
1769 #endif
1770
1771 dest = (uintptr_t)data + offset;
1772
1773 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
1774 {
1775
1776 isLastBuff = true;
1777 if (length == curBuffDescrip->length)
1778 {
1779
1780 len = curBuffDescrip->length - offset;
1781 (void)memcpy((void *)(uint8_t *)dest, (void *)(uint8_t *)address, len);
1782 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
1783
1784 if (ts != NULL)
1785 {
1786 *ts = curBuffDescrip->timestamp;
1787 }
1788 #endif
1789
1790
1791 ENET_UpdateReadBuffers(base, handle, ringId);
1792 break;
1793 }
1794 else
1795 {
1796
1797 ENET_UpdateReadBuffers(base, handle, ringId);
1798 }
1799 }
1800 else
1801 {
1802
1803 isLastBuff = false;
1804
1805 if (offset >= length)
1806 {
1807 result = kStatus_ENET_RxFrameFail;
1808 break;
1809 }
1810 (void)memcpy((void *)(uint8_t *)dest, (void *)(uint8_t *)address, handle->rxBuffSizeAlign[ringId]);
1811 offset += handle->rxBuffSizeAlign[ringId];
1812
1813
1814 ENET_UpdateReadBuffers(base, handle, ringId);
1815 }
1816
1817
1818 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
1819 }
1820 }
1821
1822 return result;
1823 }
1824
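/* Hands the current receive descriptor back to hardware (EMPTY bit set), advances the ring index, and re-activates reception. */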
1825 static void ENET_UpdateReadBuffers(ENET_Type *base, enet_handle_t *handle, uint8_t ringId)
1826 {
1827 assert(handle != NULL);
1828 assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
1829 assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
1830
1831 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
1832 volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
1833
1834
1835 curBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
1836
1837 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
1838
1839
1840 rxBdRing->rxGenIdx = ENET_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
1841
1842 ENET_ActiveReadRing(base, ringId);
1843 }
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
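/*
 * Copies a frame into one or more transmit descriptors, records frame info
 * when transmit reclaim is enabled, and triggers transmission; returns
 * kStatus_ENET_TxFrameBusy if no free descriptor is available.
 */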
1866 status_t ENET_SendFrame(ENET_Type *base,
1867 enet_handle_t *handle,
1868 const uint8_t *data,
1869 uint32_t length,
1870 uint8_t ringId,
1871 bool tsFlag,
1872 void *context)
1873 {
1874 assert(handle != NULL);
1875 assert(data != NULL);
1876 assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
1877 assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
1878
1879 volatile enet_tx_bd_struct_t *curBuffDescrip;
1880 enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
1881 enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
1882 enet_frame_info_t *txDirty = NULL;
1883 uint32_t len = 0;
1884 uint32_t sizeleft = 0;
1885 uintptr_t address;
1886 status_t result = kStatus_Success;
1887 uintptr_t src;
1888 uint32_t configVal;
1889 bool isReturn = false;
1890 uint32_t primask;
1891
1892
1893 if (length > ENET_FRAME_TX_LEN_LIMITATION(base))
1894 {
1895 result = kStatus_ENET_TxFrameOverLen;
1896 }
1897 else
1898 {
1899
1900 curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
1901 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
1902 {
1903 result = kStatus_ENET_TxFrameBusy;
1904 }
1905
1906 else if ((handle->txReclaimEnable[ringId]) && !ENET_TxDirtyRingAvailable(txDirtyRing))
1907 {
1908 result = kStatus_ENET_TxFrameBusy;
1909 }
1910 else
1911 {
1912
1913 if (handle->txBuffSizeAlign[ringId] >= length)
1914 {
1915
1916 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1917 address = MEMORY_ConvertMemoryMapAddress(curBuffDescrip->buffer, kMEMORY_DMA2Local);
1918 #else
1919 address = curBuffDescrip->buffer;
1920 #endif
1921 (void)memcpy((void *)(uint8_t *)address, (const void *)data, length);
1922 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
1923 if (handle->txMaintainEnable[ringId])
1924 {
1925 DCACHE_CleanByRange(address, length);
1926 }
1927 #endif
1928
1929 curBuffDescrip->length = (uint16_t)length;
1930 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
1931
1932 if (tsFlag)
1933 {
1934 curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
1935 }
1936 else
1937 {
1938 curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
1939 }
1940
1941 #endif
1942 curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
1943
1944
1945 txBdRing->txGenIdx = ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
1946
1947
1948 if (handle->txReclaimEnable[ringId])
1949 {
1950 txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
1951 txDirty->context = context;
1952 txDirtyRing->txGenIdx = ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
1953 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
1954 {
1955 txDirtyRing->isFull = true;
1956 }
1957 primask = DisableGlobalIRQ();
1958 txBdRing->txDescUsed++;
1959 EnableGlobalIRQ(primask);
1960 }
1961
1962
1963 ENET_ActiveSendRing(base, ringId);
1964 }
1965 else
1966 {
1967
1968 do
1969 {
1970 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
1971
1972 if (tsFlag)
1973 {
1974 curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
1975 }
1976 else
1977 {
1978 curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
1979 }
1980 #endif
1981
1982
1983 sizeleft = length - len;
1984 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1985 address = MEMORY_ConvertMemoryMapAddress(curBuffDescrip->buffer, kMEMORY_DMA2Local);
1986 #else
1987 address = curBuffDescrip->buffer;
1988 #endif
1989 src = (uintptr_t)data + len;
1990
1991
1992 txBdRing->txGenIdx = ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
1993
1994 if (sizeleft > handle->txBuffSizeAlign[ringId])
1995 {
1996
1997 (void)memcpy((void *)(uint8_t *)address, (void *)(uint8_t *)src,
1998 handle->txBuffSizeAlign[ringId]);
1999 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
2000 if (handle->txMaintainEnable[ringId])
2001 {
2002
2003 DCACHE_CleanByRange(address, handle->txBuffSizeAlign[ringId]);
2004 }
2005 #endif
2006
2007 curBuffDescrip->length = handle->txBuffSizeAlign[ringId];
2008 len += handle->txBuffSizeAlign[ringId];
2009
2010 configVal = (uint32_t)curBuffDescrip->control;
2011 configVal &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
2012 configVal |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
2013 curBuffDescrip->control = (uint16_t)configVal;
2014
2015 if (handle->txReclaimEnable[ringId])
2016 {
2017 primask = DisableGlobalIRQ();
2018 txBdRing->txDescUsed++;
2019 EnableGlobalIRQ(primask);
2020 }
2021
2022
2023 ENET_ActiveSendRing(base, ringId);
2024 }
2025 else
2026 {
2027 (void)memcpy((void *)(uint8_t *)address, (void *)(uint8_t *)src, sizeleft);
2028 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
2029 if (handle->txMaintainEnable[ringId])
2030 {
2031
2032 DCACHE_CleanByRange(address, sizeleft);
2033 }
2034 #endif
2035 curBuffDescrip->length = (uint16_t)sizeleft;
2036
2037 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
2038
2039 if (handle->txReclaimEnable[ringId])
2040 {
2041
2042 txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
2043 txDirty->context = context;
2044 txDirtyRing->txGenIdx = ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
2045 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
2046 {
2047 txDirtyRing->isFull = true;
2048 }
2049 primask = DisableGlobalIRQ();
2050 txBdRing->txDescUsed++;
2051 EnableGlobalIRQ(primask);
2052 }
2053
2054
2055 ENET_ActiveSendRing(base, ringId);
2056 isReturn = true;
2057 break;
2058 }
2059
2060 curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
2061 } while (0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
2062
2063 if (isReturn == false)
2064 {
2065 result = kStatus_ENET_TxFrameBusy;
2066 }
2067 }
2068 }
2069 }
2070 return result;
2071 }
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
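/* Enables or disables transmit buffer reclaim for a ring; only allowed while the dirty ring is empty. */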
2084 status_t ENET_SetTxReclaim(enet_handle_t *handle, bool isEnable, uint8_t ringId)
2085 {
2086 assert(handle != NULL);
2087 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
2088
2089 enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
2090 enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
2091
2092 status_t result = kStatus_Success;
2093
2094
2095 if ((txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx) && ENET_TxDirtyRingAvailable(txDirtyRing))
2096 {
2097 if (isEnable)
2098 {
2099 handle->txReclaimEnable[ringId] = true;
2100 txBdRing->txConsumIdx = txBdRing->txGenIdx;
2101 }
2102 else
2103 {
2104 handle->txReclaimEnable[ringId] = false;
2105 }
2106 }
2107 else
2108 {
2109 result = kStatus_Fail;
2110 }
2111 return result;
2112 }
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
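/* Walks the completed transmit descriptors, collects optional timestamps, invokes the user callback for each finished frame, and releases the descriptors. */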
2125 void ENET_ReclaimTxDescriptor(ENET_Type *base, enet_handle_t *handle, uint8_t ringId)
2126 {
2127 assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
2128 assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
2129
2130 enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
2131 volatile enet_tx_bd_struct_t *curBuffDescrip = txBdRing->txBdBase + txBdRing->txConsumIdx;
2132 enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
2133 enet_frame_info_t *txDirty = NULL;
2134 uint32_t primask;
2135
2136
2137 while ((0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)) && (txBdRing->txDescUsed > 0U))
2138 {
2139 if ((curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_LAST_MASK) != 0U)
2140 {
2141 txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txConsumIdx;
2142 txDirtyRing->txConsumIdx = ENET_IncreaseIndex(txDirtyRing->txConsumIdx, txDirtyRing->txRingLen);
2143 txDirtyRing->isFull = false;
2144
2145 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2146 txDirty->isTsAvail = false;
2147 if ((curBuffDescrip->controlExtend1 & ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK) != 0U)
2148 {
2149 enet_ptp_time_t *ts = &txDirty->timeStamp;
2150
2151 txDirty->isTsAvail = true;
2152 ts->second = handle->msTimerSecond;
2153 ts->nanosecond = curBuffDescrip->timestamp;
2154 }
2155 #endif
2156
2157
2158 if (handle->callback != NULL)
2159 {
2160 #if FSL_FEATURE_ENET_QUEUE > 1
2161 handle->callback(base, handle, ringId, kENET_TxEvent, txDirty, handle->userData);
2162 #else
2163 handle->callback(base, handle, kENET_TxEvent, txDirty, handle->userData);
2164 #endif
2165 }
2166 }
2167
2168 primask = DisableGlobalIRQ();
2169 txBdRing->txDescUsed--;
2170 EnableGlobalIRQ(primask);
2171
2172
2173 txBdRing->txConsumIdx = ENET_IncreaseIndex(txBdRing->txConsumIdx, txBdRing->txRingLen);
2174 curBuffDescrip = txBdRing->txBdBase + txBdRing->txConsumIdx;
2175 }
2176 }
2177
2178
2179
2180
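/* Zero-copy receive: returns the address and length of the next filled receive buffer and marks its descriptor as owned by software. */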
2181 status_t ENET_GetRxBuffer(ENET_Type *base,
2182 enet_handle_t *handle,
2183 void **buffer,
2184 uint32_t *length,
2185 uint8_t ringId,
2186 bool *isLastBuff,
2187 uint32_t *ts)
2188 {
2189 assert(handle != NULL);
2190 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
2191 assert(handle->rxBdRing[ringId].rxBdBase != NULL);
2192 assert(handle->rxBuffAlloc == NULL);
2193
2194 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
2195 volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2196 uintptr_t address;
2197
2198
2199
2200 if ((curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_SOFTOWNER1_MASK) == 0U)
2201 {
2202 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_SOFTOWNER1_MASK;
2203 }
2204 else
2205 {
2206 return kStatus_ENET_RxFrameFail;
2207 }
2208
2209
2210 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2211 address = MEMORY_ConvertMemoryMapAddress(curBuffDescrip->buffer, kMEMORY_DMA2Local);
2212 #else
2213 address = curBuffDescrip->buffer;
2214 #endif
2215 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
2216 if (handle->rxMaintainEnable[ringId])
2217 {
2218
2219 DCACHE_InvalidateByRange(address, handle->rxBuffSizeAlign[ringId]);
2220 }
2221 #endif
2222
2223 *buffer = (void *)(uint8_t *)address;
2224 *length = curBuffDescrip->length;
2225
2226
2227 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
2228 {
2229
2230 *isLastBuff = true;
2231 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2232 if (ts != NULL)
2233 {
2234 *ts = curBuffDescrip->timestamp;
2235 }
2236 #endif
2237 }
2238 else
2239 {
2240 *isLastBuff = false;
2241 }
2242
2243
2244 rxBdRing->rxGenIdx = ENET_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2245
2246 return kStatus_Success;
2247 }
2248
2249
2250
2251
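/* Returns a buffer obtained from ENET_GetRxBuffer() to the receive ring, swapping descriptor contents when needed so the ring order seen by hardware is preserved. */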
2252 void ENET_ReleaseRxBuffer(ENET_Type *base, enet_handle_t *handle, void *buffer, uint8_t ringId)
2253 {
2254 assert(handle != NULL);
2255 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
2256
2257 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
2258 enet_rx_bd_struct_t *ownBuffDescrip = (enet_rx_bd_struct_t *)rxBdRing->rxBdBase;
2259 enet_rx_bd_struct_t *blockBuffDescrip = (enet_rx_bd_struct_t *)rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2260 enet_rx_bd_struct_t tempBuffDescrip;
2261 uint16_t index = rxBdRing->rxGenIdx;
2262 bool isReleaseBd = false;
2263
2264 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2265 buffer = (void *)(uint32_t *)MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)buffer, kMEMORY_Local2DMA);
2266 #endif
2267
2268 do
2269 {
2270
2271 if (buffer == (void *)(uint8_t *)ownBuffDescrip->buffer)
2272 {
2273 if (0U != (ownBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_SOFTOWNER1_MASK))
2274 {
2275 isReleaseBd = true;
2276 break;
2277 }
2278 }
2279
2280 if (0U != (ownBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_WRAP_MASK))
2281 {
2282 break;
2283 }
2284 ownBuffDescrip++;
2285 } while (true);
2286
2287 if (isReleaseBd)
2288 {
2289
2290
2291 do
2292 {
2293 if (0U != (blockBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_SOFTOWNER1_MASK))
2294 {
2295 break;
2296 }
2297 if (0U != (blockBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_WRAP_MASK))
2298 {
2299 blockBuffDescrip = (enet_rx_bd_struct_t *)(uint32_t)rxBdRing->rxBdBase;
2300 }
2301 else
2302 {
2303 blockBuffDescrip++;
2304 }
2305 index = ENET_IncreaseIndex(index, rxBdRing->rxRingLen);
2306 } while (index != rxBdRing->rxGenIdx);
2307
/* If the blocking descriptor is not the one that owns the released buffer, swap the two
   descriptors so the freed buffer can be handed back to DMA immediately. */
2310 if (blockBuffDescrip != ownBuffDescrip)
2311 {
2312
2313 tempBuffDescrip = *ownBuffDescrip;
2314 *ownBuffDescrip = *blockBuffDescrip;
2315 *blockBuffDescrip = tempBuffDescrip;
2316
2317
2318 if (0U != (ownBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_WRAP_MASK))
2319 {
2320 ownBuffDescrip->control &= (uint16_t)(~ENET_BUFFDESCRIPTOR_RX_WRAP_MASK);
2321 blockBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2322 }
2323 else if (0U != (blockBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_WRAP_MASK))
2324 {
2325 blockBuffDescrip->control &= (uint16_t)(~ENET_BUFFDESCRIPTOR_RX_WRAP_MASK);
2326 ownBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2327 }
2328 else
2329 {
2330
2331 }
2332
2333
2334 blockBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2335
2336 blockBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
2337 }
2338 else
2339 {
2340
2341 ownBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2342
2343 ownBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
2344 }
2345
2346 ENET_ActiveReadRing(base, ringId);
2347 }
2348 }
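
/*
 * Usage sketch (not part of the driver): a minimal zero-copy receive path built on
 * ENET_GetRxBuffer()/ENET_ReleaseRxBuffer(), called once the receive event (or the usual
 * ENET_GetRxFrameSize() helper) indicates that data is available. EXAMPLE_ENET, g_handle and
 * ProcessBuffer() are hypothetical application names.
 *
 *   void *buf;
 *   uint32_t len;
 *   bool last;
 *   uint32_t ts;
 *   while (ENET_GetRxBuffer(EXAMPLE_ENET, &g_handle, &buf, &len, 0, &last, &ts) == kStatus_Success)
 *   {
 *       ProcessBuffer(buf, len, last);                           // consume the DMA buffer in place
 *       ENET_ReleaseRxBuffer(EXAMPLE_ENET, &g_handle, buf, 0);   // give it back to the BD ring
 *   }
 */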
2349
2350 static inline status_t ENET_GetRxFrameErr(enet_rx_bd_struct_t *rxDesc, enet_rx_frame_error_t *rxFrameError)
2351 {
2352 assert(rxDesc != NULL);
2353 assert(rxFrameError != NULL);
2354
2355 status_t result = kStatus_Success;
2356 uint16_t control = rxDesc->control;
2357 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2358 uint16_t controlExtend1 = rxDesc->controlExtend1;
2359 #endif
2360
2361 union _frame_error
2362 {
2363 uint32_t data;
2364 enet_rx_frame_error_t frameError;
2365 };
2366 union _frame_error error;
2367
2368 (void)memset((void *)&error.frameError, 0, sizeof(enet_rx_frame_error_t));
2369
2370
2371 if (0U != (control & ENET_BUFFDESCRIPTOR_RX_ERR_MASK))
2372 {
2373 result = kStatus_ENET_RxFrameError;
2374 }
2375 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2376 if (0U != (controlExtend1 & ENET_BUFFDESCRIPTOR_RX_EXT_ERR_MASK))
2377 {
2378 result = kStatus_ENET_RxFrameError;
2379 }
2380 #endif
2381
2382 if (result != kStatus_Success)
2383 {
2384 error.data = control;
2385 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2386 error.data |= ((uint32_t)controlExtend1 << 16U);
2387 #endif
2388 }
2389
2390 *rxFrameError = error.frameError;
2391
2392 return result;
2393 }
2394
/*!
 * brief Receives one complete frame with zero copy.
 *
 * The filled buffers are handed to the application through rxFrame->rxBuffArray while the
 * descriptors are refilled with fresh buffers obtained from the rxBuffAlloc callback.
 *
 * retval kStatus_Success           A frame was received successfully.
 * retval kStatus_ENET_RxFrameEmpty No complete frame is available yet.
 * retval kStatus_ENET_RxFrameError The frame had errors; its descriptors were recycled.
 * retval kStatus_ENET_RxFrameDrop  Buffer allocation failed; the frame was dropped.
 */
2416 status_t ENET_GetRxFrame(ENET_Type *base, enet_handle_t *handle, enet_rx_frame_struct_t *rxFrame, uint8_t ringId)
2417 {
2418 assert(handle != NULL);
2419 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
2420 assert(handle->rxBdRing[ringId].rxBdBase != NULL);
2421 assert(rxFrame != NULL);
2422 assert(rxFrame->rxBuffArray != NULL);
2423
2424 status_t result = kStatus_Success;
2425 enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
2426 volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2427 bool isLastBuff = false;
2428 uintptr_t newBuff = 0;
2429 uint16_t buffLen = 0;
2430 enet_buffer_struct_t *rxBuffer;
2431 uintptr_t address;
2432 uintptr_t buffer;
2433 uint16_t index;
/* Check that a complete frame (terminated by a descriptor with the LAST flag) has been
   received before any buffers are touched. */
2436 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK))
2437 {
2438 result = kStatus_ENET_RxFrameEmpty;
2439 }
2440 else
2441 {
2442 index = rxBdRing->rxGenIdx;
2443 do
2444 {
2445
2446 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
2447 {
2448
2449 result = ENET_GetRxFrameErr((enet_rx_bd_struct_t *)(uint32_t)curBuffDescrip, &rxFrame->rxFrameError);
2450 break;
2451 }
2452
2453
2454 index = ENET_IncreaseIndex(index, rxBdRing->rxRingLen);
2455 curBuffDescrip = rxBdRing->rxBdBase + index;
2456 if (index == rxBdRing->rxGenIdx)
2457 {
2458 result = kStatus_ENET_RxFrameEmpty;
2459 break;
2460 }
2461 } while (0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK));
2462 }
2463
2464
2465 if (result == kStatus_ENET_RxFrameError)
2466 {
2467 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2468 do
2469 {
2470
2471 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
2472 {
2473 isLastBuff = true;
2474 }
2475
2476
2477 curBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2478
2479 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
2480
2481
2482 rxBdRing->rxGenIdx = ENET_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2483 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2484 } while (!isLastBuff);
2485
2486 ENET_ActiveReadRing(base, ringId);
2487
2488 return result;
2489 }
2490 else if (result != kStatus_Success)
2491 {
2492 return result;
2493 }
2494 else
2495 {
2496
2497 }
/* A complete frame is available: hand its buffers to the application and refill each
   descriptor with a fresh buffer from the rxBuffAlloc callback. */
2500 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2501 index = 0;
2502 do
2503 {
2504 newBuff = (uintptr_t)(uint8_t *)handle->rxBuffAlloc(base, handle->userData, ringId);
2505 if (newBuff != 0U)
2506 {
2507 assert((uint64_t)newBuff + handle->rxBuffSizeAlign[ringId] - 1U <= UINT32_MAX);
2508 rxBuffer = &rxFrame->rxBuffArray[index];
2509
2510 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2511 address = MEMORY_ConvertMemoryMapAddress(curBuffDescrip->buffer, kMEMORY_DMA2Local);
2512 #else
2513 address = curBuffDescrip->buffer;
2514 #endif
2515 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
2516 if (handle->rxMaintainEnable[ringId])
2517 {
2518 DCACHE_InvalidateByRange(address, handle->rxBuffSizeAlign[ringId]);
2519 }
2520 #endif
2521
2522 rxBuffer->buffer = (void *)(uint8_t *)address;
2523
2524
2525 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
2526 {
2527
2528 isLastBuff = true;
2529 rxFrame->totLen = curBuffDescrip->length;
2530 rxBuffer->length = curBuffDescrip->length - buffLen;
2531
2532 rxFrame->rxAttribute.promiscuous = false;
2533 if (0U != (base->RCR & ENET_RCR_PROM_MASK))
2534 {
2535 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_MISS_MASK))
2536 {
2537 rxFrame->rxAttribute.promiscuous = true;
2538 }
2539 }
2540 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2541 rxFrame->rxAttribute.timestamp = curBuffDescrip->timestamp;
2542 #endif
2543 }
2544 else
2545 {
2546 rxBuffer->length = curBuffDescrip->length;
2547 buffLen += rxBuffer->length;
2548 }
2549
2550
2551
2552 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2553 buffer = MEMORY_ConvertMemoryMapAddress(newBuff, kMEMORY_Local2DMA);
2554 #else
2555 buffer = newBuff;
2556 #endif
2557 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
2558 if (handle->rxMaintainEnable[ringId])
2559 {
2560 DCACHE_InvalidateByRange(buffer, handle->rxBuffSizeAlign[ringId]);
2561 }
2562 #endif
2563
2564 curBuffDescrip->buffer = (uint32_t)buffer;
2565
2566
2567 curBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2568
2569 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
2570
2571
2572 index++;
2573 rxBdRing->rxGenIdx = ENET_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2574 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2575 }
2576 else
2577 {
/* Buffer allocation failed: return the buffers already taken for this frame and recycle
   its descriptors, dropping the frame. */
2581 while (index-- != 0U)
2582 {
2583 handle->rxBuffFree(base, &rxFrame->rxBuffArray[index].buffer, handle->userData, ringId);
2584 }
2585
2586
2587 do
2588 {
2589
2590 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_RX_LAST_MASK))
2591 {
2592 isLastBuff = true;
2593 }
2594
2595
2596 curBuffDescrip->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
2597
2598 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
2599
2600
2601 rxBdRing->rxGenIdx = ENET_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2602 curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
2603 } while (!isLastBuff);
2604
2605 result = kStatus_ENET_RxFrameDrop;
2606 break;
2607 }
2608 } while (!isLastBuff);
2609
2610 ENET_ActiveReadRing(base, ringId);
2611
2612 return result;
2613 }
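
/*
 * Usage sketch (not part of the driver): polling receive with ENET_GetRxFrame(). The handle is
 * assumed to have been set up with the rxBuffAlloc/rxBuffFree callbacks, which this function uses
 * to swap fresh buffers into the descriptor ring. EXAMPLE_ENET, g_handle, APP_BUFFS_PER_FRAME and
 * HandleFrame() are hypothetical names.
 *
 *   enet_buffer_struct_t buffs[APP_BUFFS_PER_FRAME];
 *   enet_rx_frame_struct_t rxFrame = { .rxBuffArray = buffs };
 *   status_t status = ENET_GetRxFrame(EXAMPLE_ENET, &g_handle, &rxFrame, 0);
 *   if (status == kStatus_Success)
 *   {
 *       HandleFrame(&rxFrame);   // rxFrame.totLen, rxFrame.rxBuffArray[n].buffer / .length
 *       // the application now owns these buffers and releases them through its own pool
 *   }
 *   else if (status == kStatus_ENET_RxFrameError)
 *   {
 *       // descriptors were already recycled above; rxFrame.rxFrameError holds the error flags
 *   }
 */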
2614
2615 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2616 static inline void ENET_PrepareTxDesc(volatile enet_tx_bd_struct_t *txDesc, enet_tx_config_struct_t *txConfig)
2617 {
2618 uint16_t controlExtend1 = 0U;
2619
2620
2621 if (txConfig->intEnable)
2622 {
2623 controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_INTERRUPT_MASK;
2624 }
2625 if (txConfig->tsEnable)
2626 {
2627 controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
2628 }
2629 if (txConfig->autoProtocolChecksum)
2630 {
2631 controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_PROTOCHECKSUM_MASK;
2632 }
2633 if (txConfig->autoIPChecksum)
2634 {
2635 controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_IPCHECKSUM_MASK;
2636 }
2637 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
2638 if (txConfig->tltEnable)
2639 {
2640 controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_USETXLAUNCHTIME_MASK;
2641 txDesc->txLaunchTimeLow |= txConfig->tltLow;
2642 txDesc->txLaunchTimeHigh |= txConfig->tltHigh;
2643 }
2644 controlExtend1 |= (uint16_t)ENET_BD_FTYPE(txConfig->AVBFrameType);
2645 #endif
2646
2647 txDesc->controlExtend1 = controlExtend1;
2648 }
2649 #endif
2650
/*!
 * brief Sends one frame described by a scatter-gather buffer list without copying.
 *
 * Requires transmit reclaim to be enabled for the ring. The total frame length, the number
 * of idle descriptors and the dirty ring are checked before the buffers are committed to
 * the descriptor ring; the frame context is stored for the reclaim callback.
 *
 * retval kStatus_Success             The frame was queued for transmission.
 * retval kStatus_ENET_TxFrameOverLen The frame exceeds the MAC transmit length limit.
 * retval kStatus_ENET_TxFrameBusy    Not enough idle descriptors or dirty-ring entries.
 */
2666 status_t ENET_StartTxFrame(ENET_Type *base, enet_handle_t *handle, enet_tx_frame_struct_t *txFrame, uint8_t ringId)
2667 {
2668 assert(handle != NULL);
2669 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
2670 assert(txFrame->txBuffArray != NULL);
2671 assert(txFrame->txBuffNum != 0U);
2672 assert(handle->txReclaimEnable[ringId]);
2673
2674 volatile enet_tx_bd_struct_t *curBuffDescrip;
2675 enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
2676 enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
2677 status_t result = kStatus_Success;
2678 enet_buffer_struct_t *txBuff = txFrame->txBuffArray;
2679 uint32_t txBuffNum = txFrame->txBuffNum;
2680 enet_frame_info_t *txDirty = NULL;
2681 uint32_t frameLen = 0;
2682 uint32_t idleDescNum = 0;
2683 uint16_t index = 0;
2684 uint32_t configVal;
2685 uint32_t primask;
2686 uintptr_t buffer;
2687
2688
2689 do
2690 {
2691 frameLen += txBuff->length;
2692 txBuff++;
2693 } while (--txBuffNum != 0U);
2694 txBuffNum = txFrame->txBuffNum;
2695
2696
2697 curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
2698 index = txBdRing->txGenIdx;
2699 do
2700 {
2701 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
2702 {
2703 break;
2704 }
2705
2706
2707 if (++idleDescNum >= txBuffNum)
2708 {
2709 break;
2710 }
2711 index = ENET_IncreaseIndex(index, txBdRing->txRingLen);
2712 curBuffDescrip = txBdRing->txBdBase + index;
2713 } while (index != txBdRing->txGenIdx);
2714
2715
2716 if (frameLen > ENET_FRAME_TX_LEN_LIMITATION(base))
2717 {
2718 result = kStatus_ENET_TxFrameOverLen;
2719 }
2720
2721 else if (txBuffNum > idleDescNum)
2722 {
2723 result = kStatus_ENET_TxFrameBusy;
2724 }
2725
2726 else if (!ENET_TxDirtyRingAvailable(txDirtyRing))
2727 {
2728 result = kStatus_ENET_TxFrameBusy;
2729 }
2730 else
2731 {
2732 txBuff = txFrame->txBuffArray;
2733 do
2734 {
2735 assert(txBuff->buffer != NULL);
2736 assert((uint64_t)(uintptr_t)(uint8_t *)txBuff->buffer + txBuff->length - 1U <= UINT32_MAX);
2737
2738 #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
2739 if (handle->txMaintainEnable[ringId])
2740 {
2741 DCACHE_CleanByRange((uintptr_t)(uint8_t *)txBuff->buffer, txBuff->length);
2742 }
2743 #endif
2744 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2745
2746 buffer = MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)txBuff->buffer, kMEMORY_Local2DMA);
2747 #else
2748 buffer = (uintptr_t)(uint8_t *)txBuff->buffer;
2749 #endif
2750
2751
2752 curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
2753 curBuffDescrip->buffer = (uint32_t)buffer;
2754 curBuffDescrip->length = txBuff->length;
2755
2756
2757 txBuff++;
2758 txBdRing->txGenIdx = ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
2759
2760 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2761 ENET_PrepareTxDesc(curBuffDescrip, &txFrame->txConfig);
2762 #endif
2763
2764
2765 if (--txBuffNum != 0U)
2766 {
2767
2768 configVal = (uint32_t)curBuffDescrip->control;
2769 configVal &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
2770 configVal |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
2771 curBuffDescrip->control = (uint16_t)configVal;
2772
2773 primask = DisableGlobalIRQ();
2774 txBdRing->txDescUsed++;
2775 EnableGlobalIRQ(primask);
2776 }
2777 else
2778 {
2779 curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
2780
2781
2782 txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
2783 txDirty->context = txFrame->context;
2784 txDirtyRing->txGenIdx = ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
2785 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
2786 {
2787 txDirtyRing->isFull = true;
2788 }
2789 primask = DisableGlobalIRQ();
2790 txBdRing->txDescUsed++;
2791 EnableGlobalIRQ(primask);
2792 }
2793
2794 ENET_ActiveSendRing(base, ringId);
2795 } while (txBuffNum != 0U);
2796 }
2797 return result;
2798 }
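
/*
 * Usage sketch (not part of the driver): sending a header and a payload fragment as one frame
 * through ENET_StartTxFrame(). Transmit reclaim must be enabled for the ring; the stored context
 * is handed back when the frame is reclaimed. EXAMPLE_ENET, g_handle, hdr/hdrLen,
 * payload/payloadLen and myPacket are hypothetical.
 *
 *   enet_buffer_struct_t txBuffs[2] = {
 *       { .buffer = hdr,     .length = hdrLen     },
 *       { .buffer = payload, .length = payloadLen },
 *   };
 *   enet_tx_frame_struct_t txFrame = { .txBuffArray = txBuffs, .txBuffNum = 2, .context = myPacket };
 *   if (ENET_StartTxFrame(EXAMPLE_ENET, &g_handle, &txFrame, 0) == kStatus_ENET_TxFrameBusy)
 *   {
 *       // no idle descriptors or dirty-ring entries; retry after the transmit interrupt reclaims some
 *   }
 */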
2799
/*!
 * brief Transmits a frame from a single application buffer without copying.
 * The buffer is split across several descriptors when it is larger than the configured
 * transmit buffer size of the ring.
 */
2803 status_t ENET_SendFrameZeroCopy(ENET_Type *base,
2804 enet_handle_t *handle,
2805 const uint8_t *data,
2806 uint32_t length,
2807 uint8_t ringId,
2808 bool tsFlag,
2809 void *context)
2810 {
2811 assert(handle != NULL);
2812 assert(data != NULL);
2813 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
2814
2815 volatile enet_tx_bd_struct_t *curBuffDescrip;
2816 enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
2817 enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
2818 enet_frame_info_t *txDirty = NULL;
2819 uint32_t len = 0;
2820 uint32_t sizeleft = 0;
2821 status_t result = kStatus_Success;
2822 uintptr_t data_temp;
2823 uint32_t configVal;
2824 bool isReturn = false;
2825 uint32_t primask;
2826
2827
2828 if (length > ENET_FRAME_TX_LEN_LIMITATION(base))
2829 {
2830 result = kStatus_ENET_TxFrameOverLen;
2831 }
2832 else
2833 {
2834
2835 curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
2836 if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
2837 {
2838 result = kStatus_ENET_TxFrameBusy;
2839 }
2840
2841 else if (handle->txReclaimEnable[ringId] && !ENET_TxDirtyRingAvailable(txDirtyRing))
2842 {
2843 result = kStatus_ENET_TxFrameBusy;
2844 }
2845 else
2846 {
2847 assert((uint64_t)(uintptr_t)data + length - 1U <= UINT32_MAX);
2848
2849 if (handle->txBuffSizeAlign[ringId] >= length)
2850 {
2851
2852 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2853 data = (uint8_t *)MEMORY_ConvertMemoryMapAddress((uintptr_t)data, kMEMORY_Local2DMA);
2854 #endif
2855 curBuffDescrip->buffer = (uint32_t)(uintptr_t)data;
2856
2857 curBuffDescrip->length = (uint16_t)length;
2858 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2859
2860 if (tsFlag)
2861 {
2862 curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
2863 }
2864 else
2865 {
2866 curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
2867 }
2868
2869 #endif
2870 curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
2871
2872
2873 txBdRing->txGenIdx = ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
2874
2875
2876 if (handle->txReclaimEnable[ringId])
2877 {
2878 txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
2879 txDirty->context = context;
2880 txDirtyRing->txGenIdx = ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
2881 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
2882 {
2883 txDirtyRing->isFull = true;
2884 }
2885 primask = DisableGlobalIRQ();
2886 txBdRing->txDescUsed++;
2887 EnableGlobalIRQ(primask);
2888 }
2889
2890
2891 ENET_ActiveSendRing(base, ringId);
2892 }
2893 else
2894 {
/* The frame does not fit into one transmit buffer: split it across several descriptors. */
2896 do
2897 {
2898 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
2899
2900 if (tsFlag)
2901 {
2902 curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
2903 }
2904 else
2905 {
2906 curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
2907 }
2908 #endif
2909
2910
2911 sizeleft = length - len;
2912 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2913 data = (uint8_t *)MEMORY_ConvertMemoryMapAddress((uintptr_t)data, kMEMORY_Local2DMA);
2914 #endif
2915 data_temp = (uintptr_t)data + len;
2916
2917
2918 txBdRing->txGenIdx = ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
2919
2920 if (sizeleft > handle->txBuffSizeAlign[ringId])
2921 {
2922
2923 curBuffDescrip->buffer = (uint32_t)data_temp;
2924
2925 curBuffDescrip->length = handle->txBuffSizeAlign[ringId];
2926 len += handle->txBuffSizeAlign[ringId];
2927
2928 configVal = (uint32_t)curBuffDescrip->control;
2929 configVal &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
2930 configVal |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
2931 curBuffDescrip->control = (uint16_t)configVal;
2932
2933 if (handle->txReclaimEnable[ringId])
2934 {
2935 primask = DisableGlobalIRQ();
2936 txBdRing->txDescUsed++;
2937 EnableGlobalIRQ(primask);
2938 }
2939
2940
2941 ENET_ActiveSendRing(base, ringId);
2942 }
2943 else
2944 {
2945 curBuffDescrip->buffer = (uint32_t)data_temp;
2946 curBuffDescrip->length = (uint16_t)sizeleft;
2947
2948 curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
2949
2950 if (handle->txReclaimEnable[ringId])
2951 {
2952
2953 txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
2954 txDirty->context = context;
2955 txDirtyRing->txGenIdx = ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
2956 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
2957 {
2958 txDirtyRing->isFull = true;
2959 }
2960 primask = DisableGlobalIRQ();
2961 txBdRing->txDescUsed++;
2962 EnableGlobalIRQ(primask);
2963 }
2964
2965
2966 ENET_ActiveSendRing(base, ringId);
2967 isReturn = true;
2968 break;
2969 }
2970
2971 curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
2972
2973 } while (0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
2974
2975 if (isReturn == false)
2976 {
2977 result = kStatus_ENET_TxFrameBusy;
2978 }
2979 }
2980 }
2981 }
2982 return result;
2983 }
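
/*
 * Usage sketch (not part of the driver): zero-copy transmit of one contiguous buffer. The buffer
 * must stay valid until the descriptors are no longer owned by the hardware (or until reclaim
 * reports completion when txReclaimEnable is set). EXAMPLE_ENET, g_handle and frame/frameLen are
 * hypothetical.
 *
 *   status_t status;
 *   do
 *   {
 *       status = ENET_SendFrameZeroCopy(EXAMPLE_ENET, &g_handle, frame, frameLen, 0, false, NULL);
 *   } while (status == kStatus_ENET_TxFrameBusy);   // simple retry; real code should bound the loop
 */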
2984
/*!
 * brief Joins a multicast group: the MAC address is hashed (CRC-32, upper six bits) into the
 * GAUR/GALR group address registers and a per-bucket reference count is kept in the handle.
 */
2991 void ENET_AddMulticastGroup(ENET_Type *base, uint8_t *address)
2992 {
2993 assert(address != NULL);
2994
2995 enet_handle_t *handle = s_ENETHandle[ENET_GetInstance(base)];
2996 uint32_t crc = 0xFFFFFFFFU;
2997 uint32_t count1 = 0;
2998 uint32_t count2 = 0;
2999 uint32_t configVal = 0;
3000
3001
3002 for (count1 = 0; count1 < ENET_FRAME_MACLEN; count1++)
3003 {
3004 uint8_t c = address[count1];
3005 for (count2 = 0; count2 < 0x08U; count2++)
3006 {
3007 if (0U != ((c ^ crc) & 1U))
3008 {
3009 crc >>= 1U;
3010 c >>= 1U;
3011 crc ^= 0xEDB88320U;
3012 }
3013 else
3014 {
3015 crc >>= 1U;
3016 c >>= 1U;
3017 }
3018 }
3019 }
3020
3021 crc = crc >> 26U;
3022
3023 handle->multicastCount[crc]++;
3024
3025
3026 configVal = ((uint32_t)1U << (crc & 0x1FU));
3027
3028 if (0U != (crc & 0x20U))
3029 {
3030 base->GAUR |= configVal;
3031 }
3032 else
3033 {
3034 base->GALR |= configVal;
3035 }
3036 }
3037
/*!
 * brief Leaves a multicast group: the bucket reference count is decremented and the GAUR/GALR
 * hash bit is cleared only when no other joined address maps to the same bucket.
 */
3044 void ENET_LeaveMulticastGroup(ENET_Type *base, uint8_t *address)
3045 {
3046 assert(address != NULL);
3047
3048 enet_handle_t *handle = s_ENETHandle[ENET_GetInstance(base)];
3049 uint32_t crc = 0xFFFFFFFFU;
3050 uint32_t count1 = 0;
3051 uint32_t count2 = 0;
3052 uint32_t configVal = 0;
3053
3054
3055 for (count1 = 0; count1 < ENET_FRAME_MACLEN; count1++)
3056 {
3057 uint8_t c = address[count1];
3058 for (count2 = 0; count2 < 0x08U; count2++)
3059 {
3060 if (0U != ((c ^ crc) & 1U))
3061 {
3062 crc >>= 1U;
3063 c >>= 1U;
3064 crc ^= 0xEDB88320U;
3065 }
3066 else
3067 {
3068 crc >>= 1U;
3069 c >>= 1U;
3070 }
3071 }
3072 }
3073
3074 crc = crc >> 26U;
3075
3076 handle->multicastCount[crc]--;
3077
3078
3079 if (0U == handle->multicastCount[crc])
3080 {
3081 configVal = ~((uint32_t)1U << (crc & 0x1FU));
3082
3083 if (0U != (crc & 0x20U))
3084 {
3085 base->GAUR &= configVal;
3086 }
3087 else
3088 {
3089 base->GALR &= configVal;
3090 }
3091 }
3092 }
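
/*
 * Usage sketch (not part of the driver): joining and leaving a multicast MAC address, here the
 * mapping of the IPv4 group 224.1.2.3. EXAMPLE_ENET is a hypothetical peripheral base.
 *
 *   uint8_t mcastMac[6] = { 0x01, 0x00, 0x5E, 0x01, 0x02, 0x03 };
 *   ENET_AddMulticastGroup(EXAMPLE_ENET, mcastMac);    // sets the GAUR/GALR hash bit, refcount = 1
 *   // ...
 *   ENET_LeaveMulticastGroup(EXAMPLE_ENET, mcastMac);  // refcount drops to 0, hash bit is cleared
 */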
3093
3094 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
/*!
 * brief Collects transmit error statistics from the already processed (dirty) transmit descriptors.
 *
 * retval kStatus_Success          Statistics were gathered up to the last transmitted frame.
 * retval kStatus_ENET_TxFrameBusy The next dirty descriptor is still owned by the hardware.
 * retval kStatus_ENET_TxFrameFail No completed frame was found in the dirty descriptors.
 */
3109 status_t ENET_GetTxErrAfterSendFrame(enet_handle_t *handle, enet_data_error_stats_t *eErrorStatic, uint8_t ringId)
3110 {
3111 assert(handle != NULL);
3112 assert(eErrorStatic != NULL);
3113 assert(ringId < (uint8_t)FSL_FEATURE_ENET_QUEUE);
3114
3115 uint16_t control = 0;
3116 uint16_t controlExt = 0;
3117 status_t result = kStatus_Success;
3118 bool isReturn = false;
3119 enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
3120 volatile enet_tx_bd_struct_t *curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
3121
3122 do
3123 {
3124
3125 control = handle->txBdDirtyStatic[ringId]->control;
3126 controlExt = handle->txBdDirtyStatic[ringId]->controlExtend0;
3127
3128
3129 if (0U != (control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
3130 {
3131 result = kStatus_ENET_TxFrameBusy;
3132 isReturn = true;
3133 break;
3134 }
3135
3136
3137 if (0U != (handle->txBdDirtyStatic[ringId]->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK))
3138 {
3139 handle->txBdDirtyStatic[ringId] = txBdRing->txBdBase;
3140 }
3141 else
3142 {
3143 handle->txBdDirtyStatic[ringId]++;
3144 }
3145
3146
3147 if (0U != (control & ENET_BUFFDESCRIPTOR_TX_LAST_MASK))
3148 {
3149 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_TX_ERR_MASK))
3150 {
3151
3152 eErrorStatic->statsTxErr++;
3153 }
3154 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_TX_EXCCOLLISIONERR_MASK))
3155 {
3156
3157 eErrorStatic->statsTxExcessCollisionErr++;
3158 }
3159 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_TX_LATECOLLISIONERR_MASK))
3160 {
3161
3162 eErrorStatic->statsTxLateCollisionErr++;
3163 }
3164 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_TX_UNDERFLOWERR_MASK))
3165 {
3166
3167 eErrorStatic->statsTxUnderFlowErr++;
3168 }
3169 if (0U != (controlExt & ENET_BUFFDESCRIPTOR_TX_OVERFLOWERR_MASK))
3170 {
3171
3172 eErrorStatic->statsTxOverFlowErr++;
3173 }
3174 isReturn = true;
3175 break;
3176 }
3177
3178 } while (handle->txBdDirtyStatic[ringId] != curBuffDescrip);
3179
3180 if (isReturn == false)
3181 {
3182 result = kStatus_ENET_TxFrameFail;
3183 }
3184 return result;
3185 }
3186
3187 void ENET_Ptp1588ConfigureHandler(ENET_Type *base, enet_handle_t *handle, enet_ptp_config_t *ptpConfig)
3188 {
3189 assert(handle != NULL);
3190 assert(ptpConfig != NULL);
3191 uint8_t count;
3192
3193 uint32_t mask = (uint32_t)kENET_TxBufferInterrupt;
3194 #if FSL_FEATURE_ENET_QUEUE > 1
3195 mask |= (uint32_t)kENET_TxBuffer1Interrupt | (uint32_t)kENET_TxBuffer2Interrupt;
3196 #endif
3197
3198 for (count = 0; count < handle->ringNum; count++)
3199 {
3200 handle->txBdDirtyStatic[count] = handle->txBdRing[count].txBdBase;
3201 }
3202
3203
3204 handle->msTimerSecond = 0;
3205
3206 #if defined(FSL_FEATURE_ENET_TIMESTAMP_CAPTURE_BIT_INVALID) && FSL_FEATURE_ENET_TIMESTAMP_CAPTURE_BIT_INVALID
3207 uint32_t refClock;
3208
3209
3210 if (handle->enetClock <= ptpConfig->ptp1588ClockSrc_Hz)
3211 {
3212
3213
3214 handle->tsDelayCount = 6U * handle->enetClock;
3215 }
3216 else
3217 {
3218 refClock = ptpConfig->ptp1588ClockSrc_Hz;
3219
3220
3221
3222 handle->tsDelayCount = 6U * ((handle->enetClock + refClock - 1U) / refClock);
3223 }
3224
3225 #endif
3226
3227 ENET_DisableInterrupts(base, mask);
3228
3229
3230 ENET_SetTsISRHandler(base, ENET_TimeStampIRQHandler);
3231 ENET_SetTxISRHandler(base, ENET_TransmitIRQHandler);
3232
3233
3234
3235 ENET_EnableInterrupts(base, (ENET_TS_INTERRUPT | ENET_TX_INTERRUPT));
3236 }
3237
/*!
 * brief Configures the IEEE 1588 feature: starts the free-running 1588 timer and then installs
 * the timestamp and transmit interrupt handlers (see ENET_Ptp1588ConfigureHandler()).
 */
3254 void ENET_Ptp1588Configure(ENET_Type *base, enet_handle_t *handle, enet_ptp_config_t *ptpConfig)
3255 {
3256 assert(handle != NULL);
3257 assert(ptpConfig != NULL);
3258
3259
3260 ENET_Ptp1588StartTimer(base, ptpConfig->ptp1588ClockSrc_Hz);
3261
3262 ENET_Ptp1588ConfigureHandler(base, handle, ptpConfig);
3263 }
3264
/*!
 * brief Starts the 1588 timer: restarts the counter, programs the nanosecond increment derived
 * from the timer source clock, sets a one-second rollover period and enables periodic events.
 */
3273 void ENET_Ptp1588StartTimer(ENET_Type *base, uint32_t ptpClkSrc)
3274 {
3275
3276 base->ATCR = ENET_ATCR_RESTART_MASK;
3277
3278
3279 base->ATINC = ENET_ATINC_INC(ENET_NANOSECOND_ONE_SECOND / ptpClkSrc);
3280 base->ATPER = ENET_NANOSECOND_ONE_SECOND;
3281
3282 base->ATCR = ENET_ATCR_PEREN_MASK | ENET_ATCR_PINPER_MASK | ENET_ATCR_EN_MASK;
3283 }
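
/*
 * Worked example (assuming a 50 MHz 1588 timer source clock):
 *   ATINC[INC] = ENET_NANOSECOND_ONE_SECOND / ptpClkSrc = 1000000000 / 50000000 = 20 ns per tick,
 *   ATPER      = 1000000000 ns, so the hardware counter wraps exactly once per second and the wrap
 *   event advances the software seconds counter (handle->msTimerSecond) in ENET_TimeStampIRQHandler().
 */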
3284
/*!
 * brief Reads the current 1588 time without masking interrupts; the caller is responsible for
 * keeping the read atomic.
 */
3293 void ENET_Ptp1588GetTimerNoIrqDisable(ENET_Type *base, enet_handle_t *handle, enet_ptp_time_t *ptpTime)
3294 {
3295
3296 ptpTime->second = handle->msTimerSecond;
3297
3298 base->ATCR |= ENET_ATCR_CAPTURE_MASK;
3299
3300 #if defined(FSL_FEATURE_ENET_TIMESTAMP_CAPTURE_BIT_INVALID) && FSL_FEATURE_ENET_TIMESTAMP_CAPTURE_BIT_INVALID
3301
3302 uint32_t count = (handle->tsDelayCount + 3U - 1U) / 3U;
3303
3304 while (0U != (count--))
3305 {
3306 __NOP();
3307 }
3308 #else
3309
3310 while (0U != (base->ATCR & ENET_ATCR_CAPTURE_MASK))
3311 {
3312 }
3313 #endif
3314
3315
3316 ptpTime->nanosecond = base->ATVR;
3317 }
3318
/*!
 * brief Reads the current 1588 time with interrupts disabled and compensates for a pending
 * timer-wrap event so the returned second/nanosecond pair is consistent.
 */
3326 void ENET_Ptp1588GetTimer(ENET_Type *base, enet_handle_t *handle, enet_ptp_time_t *ptpTime)
3327 {
3328 assert(handle != NULL);
3329 assert(ptpTime != NULL);
3330 uint32_t primask;
3331
3332
3333 primask = DisableGlobalIRQ();
3334
3335 ENET_Ptp1588GetTimerNoIrqDisable(base, handle, ptpTime);
3336
3337
3338 if (0U != (base->EIR & (uint32_t)kENET_TsTimerInterrupt))
3339 {
3340 ptpTime->second++;
3341 }
3342
3343
3344 EnableGlobalIRQ(primask);
3345 }
3346
/*!
 * brief Sets the 1588 timer: the seconds counter is kept in the handle, the nanosecond part
 * is written to the ATVR register.
 */
3354 void ENET_Ptp1588SetTimer(ENET_Type *base, enet_handle_t *handle, enet_ptp_time_t *ptpTime)
3355 {
3356 assert(handle != NULL);
3357 assert(ptpTime != NULL);
3358
3359 uint32_t primask;
3360
3361
3362 primask = DisableGlobalIRQ();
3363
3364
3365 handle->msTimerSecond = ptpTime->second;
3366 base->ATVR = ptpTime->nanosecond;
3367
3368
3369 EnableGlobalIRQ(primask);
3370 }
3371
/*!
 * brief Adjusts the 1588 timer rate.
 *
 * param corrIncrease Correction increment (ATINC[INC_CORR]) applied on every correction cycle.
 * param corrPeriod   Number of timer clock cycles between corrections (ATCOR[COR]).
 */
3383 void ENET_Ptp1588AdjustTimer(ENET_Type *base, uint32_t corrIncrease, uint32_t corrPeriod)
3384 {
3385
3386 base->ATINC = (base->ATINC & ~ENET_ATINC_INC_CORR_MASK) | (corrIncrease << ENET_ATINC_INC_CORR_SHIFT);
3387
3388 base->ATCOR = (base->ATCOR & ~ENET_ATCOR_COR_MASK) | (corrPeriod << ENET_ATCOR_COR_SHIFT);
3389 }
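
/*
 * Worked example (one common way to derive the two arguments; the servo itself is application
 * defined). With a nominal increment of INC nanoseconds per timer clock, corrIncrease = INC + 1
 * (or INC - 1) adds (or removes) one nanosecond every corrPeriod timer clocks, i.e. a rate change
 * of roughly timerClk_Hz / corrPeriod parts per billion. Assuming a 50 MHz timer clock (INC = 20)
 * and a measured offset of ppb parts per billion to speed up:
 *
 *   uint32_t corrPeriod   = 50000000U / ppb;   // corrections per second = ppb
 *   uint32_t corrIncrease = 20U + 1U;          // one extra nanosecond per correction cycle
 *   ENET_Ptp1588AdjustTimer(EXAMPLE_ENET, corrIncrease, corrPeriod);
 */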
3390
3391 #if defined(FSL_FEATURE_ENET_HAS_AVB) && FSL_FEATURE_ENET_HAS_AVB
/*!
 * brief Configures the AVB feature: programs the receive classification match and the idle slope
 * for the additional rings and sets the transmit scheme and ring 0 flush in the QOS register.
 */
3407 void ENET_AVBConfigure(ENET_Type *base, enet_handle_t *handle, const enet_avb_config_t *config)
3408 {
3409 assert(config != NULL);
3410 assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
3411
3412 uint8_t count = 0;
3413
3414 for (count = 0; count < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) - 1U; count++)
3415 {
3416
3417 if (0U != (config->rxClassifyMatch[count]))
3418 {
3419 base->RCMR[count] = ((uint32_t)config->rxClassifyMatch[count] & 0xFFFFU) | ENET_RCMR_MATCHEN_MASK;
3420 }
3421
3422 base->DMACFG[count] |= ENET_DMACFG_IDLE_SLOPE(config->idleSlope[count]);
3423 }
3424
3425
3426 base->QOS &= ~ENET_QOS_TX_SCHEME_MASK;
3427 base->QOS |= ENET_QOS_RX_FLUSH0_MASK;
3428 }
3429 #endif
3430 #endif
3431
3432 #if FSL_FEATURE_ENET_QUEUE > 1
/*!
 * brief Transmit interrupt service routine: clears the pending transmit events for the ring and
 * either reclaims descriptors (when reclaim is enabled) or calls the user callback with kENET_TxEvent.
 */
3439 void ENET_TransmitIRQHandler(ENET_Type *base, enet_handle_t *handle, uint32_t ringId)
3440 #else
3441
3442
3443
3444
3445
3446
3447 void ENET_TransmitIRQHandler(ENET_Type *base, enet_handle_t *handle)
3448 #endif
3449 {
3450 assert(handle != NULL);
3451 uint32_t mask = (uint32_t)kENET_TxBufferInterrupt | (uint32_t)kENET_TxFrameInterrupt;
3452 uint32_t index = 0;
3453 uint32_t irq;
3454
3455
3456 #if FSL_FEATURE_ENET_QUEUE > 1
3457 switch (ringId)
3458 {
3459 case kENET_Ring1:
3460 mask = ((uint32_t)kENET_TxFrame1Interrupt | (uint32_t)kENET_TxBuffer1Interrupt);
3461 break;
3462 case kENET_Ring2:
3463 mask = ((uint32_t)kENET_TxFrame2Interrupt | (uint32_t)kENET_TxBuffer2Interrupt);
3464 break;
3465 default:
3466 mask = (uint32_t)kENET_TxBufferInterrupt | (uint32_t)kENET_TxFrameInterrupt;
3467 break;
3468 }
3469 index = ringId;
3470 #endif
3471
3472 while (0U != (mask & base->EIR))
3473 {
3474 irq = base->EIR;
3475
3476
3477 base->EIR = mask;
3478
3479
3480 if (handle->txReclaimEnable[index] && (0U != (irq & (uint32_t)kENET_TxFrameInterrupt)))
3481 {
3482 ENET_ReclaimTxDescriptor(base, handle, (uint8_t)index);
3483 }
3484 else
3485 {
3486 if (NULL != handle->callback)
3487 {
3488 #if FSL_FEATURE_ENET_QUEUE > 1
3489 handle->callback(base, handle, index, kENET_TxEvent, NULL, handle->userData);
3490 #else
3491 handle->callback(base, handle, kENET_TxEvent, NULL, handle->userData);
3492 #endif
3493 }
3494 }
3495 }
3496 }
3497
/*!
 * brief Receive interrupt service routine: clears the pending receive events for the ring and
 * calls the user callback with kENET_RxEvent.
 */
3504 #if FSL_FEATURE_ENET_QUEUE > 1
3505 void ENET_ReceiveIRQHandler(ENET_Type *base, enet_handle_t *handle, uint32_t ringId)
3506 #else
3507 void ENET_ReceiveIRQHandler(ENET_Type *base, enet_handle_t *handle)
3508 #endif
3509 {
3510 assert(handle != NULL);
3511 uint32_t mask = (uint32_t)kENET_RxFrameInterrupt | (uint32_t)kENET_RxBufferInterrupt;
3512
3513
3514 #if FSL_FEATURE_ENET_QUEUE > 1
3515 switch (ringId)
3516 {
3517 case kENET_Ring1:
3518 mask = ((uint32_t)kENET_RxFrame1Interrupt | (uint32_t)kENET_RxBuffer1Interrupt);
3519 break;
3520 case kENET_Ring2:
3521 mask = ((uint32_t)kENET_RxFrame2Interrupt | (uint32_t)kENET_RxBuffer2Interrupt);
3522 break;
3523 default:
3524 mask = (uint32_t)kENET_RxFrameInterrupt | (uint32_t)kENET_RxBufferInterrupt;
3525 break;
3526 }
3527 #endif
3528
3529 while (0U != (mask & base->EIR))
3530 {
3531
3532 base->EIR = mask;
3533
3534
3535 if (NULL != handle->callback)
3536 {
3537 #if FSL_FEATURE_ENET_QUEUE > 1
3538 handle->callback(base, handle, ringId, kENET_RxEvent, NULL, handle->userData);
3539 #else
3540 handle->callback(base, handle, kENET_RxEvent, NULL, handle->userData);
3541 #endif
3542 }
3543 }
3544 }
3545
/*!
 * brief Error interrupt service routine: a wake-up event clears sleep mode and is reported as
 * kENET_WakeUpEvent, any other error source is cleared and reported as kENET_ErrEvent.
 */
3552 void ENET_ErrorIRQHandler(ENET_Type *base, enet_handle_t *handle)
3553 {
3554 assert(handle != NULL);
3555
3556 uint32_t errMask = (uint32_t)kENET_BabrInterrupt | (uint32_t)kENET_BabtInterrupt | (uint32_t)kENET_EBusERInterrupt |
3557 (uint32_t)kENET_PayloadRxInterrupt | (uint32_t)kENET_LateCollisionInterrupt |
3558 (uint32_t)kENET_RetryLimitInterrupt | (uint32_t)kENET_UnderrunInterrupt;
3559
3560
3561 if (0U != ((uint32_t)kENET_WakeupInterrupt & base->EIR))
3562 {
3563
3564 base->EIR = (uint32_t)kENET_WakeupInterrupt;
3565
3566 ENET_EnableSleepMode(base, false);
3567
3568 if (NULL != handle->callback)
3569 {
3570 #if FSL_FEATURE_ENET_QUEUE > 1
3571 handle->callback(base, handle, 0, kENET_WakeUpEvent, NULL, handle->userData);
3572 #else
3573 handle->callback(base, handle, kENET_WakeUpEvent, NULL, handle->userData);
3574 #endif
3575 }
3576 }
3577 else
3578 {
3579
3580 errMask &= base->EIR;
3581 base->EIR = errMask;
3582
3583 if (NULL != handle->callback)
3584 {
3585 #if FSL_FEATURE_ENET_QUEUE > 1
3586 handle->callback(base, handle, 0, kENET_ErrEvent, NULL, handle->userData);
3587 #else
3588 handle->callback(base, handle, kENET_ErrEvent, NULL, handle->userData);
3589 #endif
3590 }
3591 }
3592 }
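
/*
 * Usage sketch (not part of the driver): a user callback with the shape that the interrupt
 * handlers above expect through handle->callback in a single-queue build (FSL_FEATURE_ENET_QUEUE <= 1).
 * AppRxNotify()/AppTxNotify() are hypothetical.
 *
 *   static void AppEnetCallback(ENET_Type *base, enet_handle_t *handle,
 *                               enet_event_t event, enet_frame_info_t *frameInfo, void *userData)
 *   {
 *       switch (event)
 *       {
 *           case kENET_RxEvent:     AppRxNotify(userData); break;   // frame/buffer received
 *           case kENET_TxEvent:     AppTxNotify(userData); break;   // transmit completed
 *           case kENET_ErrEvent:    break;                          // babbling/collision/underrun errors
 *           case kENET_WakeUpEvent: break;                          // magic-packet wake-up
 *           default:                break;
 *       }
 *   }
 */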
3593
3594 #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
/*!
 * brief 1588 timestamp interrupt service routine: counts timer-wrap events into the software
 * seconds counter and reports kENET_TimeStampEvent / kENET_TimeStampAvailEvent via the callback.
 */
3601 void ENET_TimeStampIRQHandler(ENET_Type *base, enet_handle_t *handle)
3602 {
3603 assert(handle != NULL);
3604
3605
3606 if (0U != ((uint32_t)kENET_TsTimerInterrupt & base->EIR))
3607 {
3608
3609 base->EIR = (uint32_t)kENET_TsTimerInterrupt;
3610
3611
3612 handle->msTimerSecond++;
3613
3614
3615 if (NULL != handle->callback)
3616 {
3617 #if FSL_FEATURE_ENET_QUEUE > 1
3618 handle->callback(base, handle, 0, kENET_TimeStampEvent, NULL, handle->userData);
3619 #else
3620 handle->callback(base, handle, kENET_TimeStampEvent, NULL, handle->userData);
3621 #endif
3622 }
3623 }
3624
3625 if (0U != ((uint32_t)kENET_TsAvailInterrupt & base->EIR))
3626 {
3627
3628 base->EIR = (uint32_t)kENET_TsAvailInterrupt;
3629
3630 if (NULL != handle->callback)
3631 {
3632 #if FSL_FEATURE_ENET_QUEUE > 1
3633 handle->callback(base, handle, 0, kENET_TimeStampAvailEvent, NULL, handle->userData);
3634 #else
3635 handle->callback(base, handle, kENET_TimeStampAvailEvent, NULL, handle->userData);
3636 #endif
3637 }
3638 }
3639 }
3640 #endif
3641
/*!
 * brief Common ring 0 interrupt entry: dispatches transmit, receive, timestamp and error events
 * to the installed sub-handlers.
 */
3649 void ENET_CommonFrame0IRQHandler(ENET_Type *base)
3650 {
3651 uint32_t event = base->EIR;
3652 uint32_t instance = ENET_GetInstance(base);
3653
3654 event &= base->EIMR;
3655 if (0U != (event & ((uint32_t)kENET_TxBufferInterrupt | (uint32_t)kENET_TxFrameInterrupt)))
3656 {
3657 if (s_enetTxIsr[instance] != NULL)
3658 {
3659 #if FSL_FEATURE_ENET_QUEUE > 1
3660 s_enetTxIsr[instance](base, s_ENETHandle[instance], 0);
3661 #else
3662 s_enetTxIsr[instance](base, s_ENETHandle[instance]);
3663 #endif
3664 }
3665 }
3666
3667 if (0U != (event & ((uint32_t)kENET_RxBufferInterrupt | (uint32_t)kENET_RxFrameInterrupt)))
3668 {
3669 if (s_enetRxIsr[instance] != NULL)
3670 {
3671 #if FSL_FEATURE_ENET_QUEUE > 1
3672 s_enetRxIsr[instance](base, s_ENETHandle[instance], 0);
3673 #else
3674 s_enetRxIsr[instance](base, s_ENETHandle[instance]);
3675 #endif
3676 }
3677 }
3678
3679 if (0U != (event & ENET_TS_INTERRUPT) && (NULL != s_enetTsIsr[instance]))
3680 {
3681 s_enetTsIsr[instance](base, s_ENETHandle[instance]);
3682 }
3683 if (0U != (event & ENET_ERR_INTERRUPT) && (NULL != s_enetErrIsr[instance]))
3684 {
3685 s_enetErrIsr[instance](base, s_ENETHandle[instance]);
3686 }
3687 }
3688
3689 #if FSL_FEATURE_ENET_QUEUE > 1
/*!
 * brief Common ring 1 interrupt entry: dispatches ring 1 transmit and receive events.
 */
3697 void ENET_CommonFrame1IRQHandler(ENET_Type *base)
3698 {
3699 uint32_t event = base->EIR;
3700 uint32_t instance = ENET_GetInstance(base);
3701
3702 event &= base->EIMR;
3703 if (0U != (event & ((uint32_t)kENET_TxBuffer1Interrupt | (uint32_t)kENET_TxFrame1Interrupt)))
3704 {
3705 if (s_enetTxIsr[instance] != NULL)
3706 {
3707 s_enetTxIsr[instance](base, s_ENETHandle[instance], 1);
3708 }
3709 }
3710
3711 if (0U != (event & ((uint32_t)kENET_RxBuffer1Interrupt | (uint32_t)kENET_RxFrame1Interrupt)))
3712 {
3713 if (s_enetRxIsr[instance] != NULL)
3714 {
3715 s_enetRxIsr[instance](base, s_ENETHandle[instance], 1);
3716 }
3717 }
3718 }
3719
/*!
 * brief Common ring 2 interrupt entry: dispatches ring 2 transmit and receive events.
 */
3727 void ENET_CommonFrame2IRQHandler(ENET_Type *base)
3728 {
3729 uint32_t event = base->EIR;
3730 uint32_t instance = ENET_GetInstance(base);
3731
3732 event &= base->EIMR;
3733 if (0U != (event & ((uint32_t)kENET_TxBuffer2Interrupt | (uint32_t)kENET_TxFrame2Interrupt)))
3734 {
3735 if (s_enetTxIsr[instance] != NULL)
3736 {
3737 s_enetTxIsr[instance](base, s_ENETHandle[instance], 2);
3738 }
3739 }
3740
3741 if (0U != (event & ((uint32_t)kENET_RxBuffer2Interrupt | (uint32_t)kENET_RxFrame2Interrupt)))
3742 {
3743 if (s_enetRxIsr[instance] != NULL)
3744 {
3745 s_enetRxIsr[instance](base, s_ENETHandle[instance], 2);
3746 }
3747 }
3748 }
3749 #endif
3750
3751 void ENET_Ptp1588IRQHandler(ENET_Type *base)
3752 {
3753 uint32_t instance = ENET_GetInstance(base);
3754
3755 #if defined(ENET_ENHANCEDBUFFERDESCRIPTOR_MODE) && ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
3756
3757 if ((s_enetTsIrqId[instance] == s_enet1588TimerIrqId[instance]) && (s_enetTsIrqId[instance] != NotAvail_IRQn))
3758 {
3759 uint32_t event = base->EIR;
3760 event &= base->EIMR;
3761 if (0U != (event & ((uint32_t)kENET_TsTimerInterrupt | (uint32_t)kENET_TsAvailInterrupt)))
3762 {
3763 if (s_enetTsIsr[instance] != NULL)
3764 {
3765 s_enetTsIsr[instance](base, s_ENETHandle[instance]);
3766 }
3767 }
3768 }
3769 #endif
3770
3771 if (s_enet1588TimerIsr[instance] != NULL)
3772 {
3773 s_enet1588TimerIsr[instance](base, s_ENETHandle[instance]);
3774 }
3775 }
3776
3777 #if defined(ENET)
3778 #if FSL_FEATURE_ENET_QUEUE < 2
3779 void ENET_TxIRQHandler(ENET_Type *base);
3780 void ENET_TxIRQHandler(ENET_Type *base)
3781 {
3782 uint32_t instance = ENET_GetInstance(base);
3783
3784 if (s_enetTxIsr[instance] != NULL)
3785 {
3786 s_enetTxIsr[instance](base, s_ENETHandle[instance]);
3787 }
3788 SDK_ISR_EXIT_BARRIER;
3789 }
3790
3791 void ENET_RxIRQHandler(ENET_Type *base);
3792 void ENET_RxIRQHandler(ENET_Type *base)
3793 {
3794 uint32_t instance = ENET_GetInstance(base);
3795
3796 if (s_enetRxIsr[instance] != NULL)
3797 {
3798 s_enetRxIsr[instance](base, s_ENETHandle[instance]);
3799 }
3800 }
3801
3802 void ENET_ErrIRQHandler(ENET_Type *base);
3803 void ENET_ErrIRQHandler(ENET_Type *base)
3804 {
3805 uint32_t instance = ENET_GetInstance(base);
3806
3807 if (s_enetErrIsr[instance] != NULL)
3808 {
3809 s_enetErrIsr[instance](base, s_ENETHandle[instance]);
3810 }
3811 }
3812
3813 void ENET_Transmit_DriverIRQHandler(void);
3814 void ENET_Transmit_DriverIRQHandler(void)
3815 {
3816 ENET_TxIRQHandler(ENET);
3817 SDK_ISR_EXIT_BARRIER;
3818 }
3819
3820 void ENET_Receive_DriverIRQHandler(void);
3821 void ENET_Receive_DriverIRQHandler(void)
3822 {
3823 ENET_RxIRQHandler(ENET);
3824 SDK_ISR_EXIT_BARRIER;
3825 }
3826
3827 void ENET_Error_DriverIRQHandler(void);
3828 void ENET_Error_DriverIRQHandler(void)
3829 {
3830 ENET_ErrIRQHandler(ENET);
3831 SDK_ISR_EXIT_BARRIER;
3832 }
3833 #else
3834
3835 void ENET_MAC0_Rx_Tx_Done1_DriverIRQHandler(void);
3836 void ENET_MAC0_Rx_Tx_Done1_DriverIRQHandler(void)
3837 {
3838 ENET_CommonFrame1IRQHandler(ENET);
3839 SDK_ISR_EXIT_BARRIER;
3840 }
3841 void ENET_MAC0_Rx_Tx_Done2_DriverIRQHandler(void);
3842 void ENET_MAC0_Rx_Tx_Done2_DriverIRQHandler(void)
3843 {
3844 ENET_CommonFrame2IRQHandler(ENET);
3845 SDK_ISR_EXIT_BARRIER;
3846 }
3847 #endif
3848
3849 void ENET_DriverIRQHandler(void);
3850 void ENET_DriverIRQHandler(void)
3851 {
3852 ENET_CommonFrame0IRQHandler(ENET);
3853 SDK_ISR_EXIT_BARRIER;
3854 }
3855
3856 void ENET_1588_Timer_DriverIRQHandler(void);
3857 void ENET_1588_Timer_DriverIRQHandler(void)
3858 {
3859 ENET_Ptp1588IRQHandler(ENET);
3860 SDK_ISR_EXIT_BARRIER;
3861 }
3862 #endif
3863
3864 #if defined(ENET1)
3865 void ENET1_DriverIRQHandler(void);
3866 void ENET1_DriverIRQHandler(void)
3867 {
3868 ENET_CommonFrame0IRQHandler(ENET1);
3869 SDK_ISR_EXIT_BARRIER;
3870 }
3871 #endif
3872
3873 #if defined(ENET2)
3874 void ENET2_DriverIRQHandler(void);
3875 void ENET2_DriverIRQHandler(void)
3876 {
3877 ENET_CommonFrame0IRQHandler(ENET2);
3878 SDK_ISR_EXIT_BARRIER;
3879 }
3880
3881 void ENET2_1588_Timer_DriverIRQHandler(void);
3882 void ENET2_1588_Timer_DriverIRQHandler(void)
3883 {
3884 ENET_Ptp1588IRQHandler(ENET2);
3885 SDK_ISR_EXIT_BARRIER;
3886 }
3887 #endif
3888
3889 #if defined(CONNECTIVITY__ENET0)
3890 void CONNECTIVITY_ENET0_FRAME0_EVENT_INT_DriverIRQHandler(void);
3891 void CONNECTIVITY_ENET0_FRAME0_EVENT_INT_DriverIRQHandler(void)
3892 {
3893 ENET_CommonFrame0IRQHandler(CONNECTIVITY__ENET0);
3894 SDK_ISR_EXIT_BARRIER;
3895 }
3896 #if FSL_FEATURE_ENET_QUEUE > 1
3897 void CONNECTIVITY_ENET0_FRAME1_INT_DriverIRQHandler(void);
3898 void CONNECTIVITY_ENET0_FRAME1_INT_DriverIRQHandler(void)
3899 {
3900 ENET_CommonFrame1IRQHandler(CONNECTIVITY__ENET0);
3901 SDK_ISR_EXIT_BARRIER;
3902 }
3903 void CONNECTIVITY_ENET0_FRAME2_INT_DriverIRQHandler(void);
3904 void CONNECTIVITY_ENET0_FRAME2_INT_DriverIRQHandler(void)
3905 {
3906 ENET_CommonFrame2IRQHandler(CONNECTIVITY__ENET0);
3907 SDK_ISR_EXIT_BARRIER;
3908 }
3909 void CONNECTIVITY_ENET0_TIMER_INT_DriverIRQHandler(void);
3910 void CONNECTIVITY_ENET0_TIMER_INT_DriverIRQHandler(void)
3911 {
3912 ENET_Ptp1588IRQHandler(CONNECTIVITY__ENET0);
3913 SDK_ISR_EXIT_BARRIER;
3914 }
3915 #endif
3916 #endif
3917 #if defined(CONNECTIVITY__ENET1)
3918 void CONNECTIVITY_ENET1_FRAME0_EVENT_INT_DriverIRQHandler(void);
3919 void CONNECTIVITY_ENET1_FRAME0_EVENT_INT_DriverIRQHandler(void)
3920 {
3921 ENET_CommonFrame0IRQHandler(CONNECTIVITY__ENET1);
3922 SDK_ISR_EXIT_BARRIER;
3923 }
3924 #if FSL_FEATURE_ENET_QUEUE > 1
3925 void CONNECTIVITY_ENET1_FRAME1_INT_DriverIRQHandler(void);
3926 void CONNECTIVITY_ENET1_FRAME1_INT_DriverIRQHandler(void)
3927 {
3928 ENET_CommonFrame1IRQHandler(CONNECTIVITY__ENET1);
3929 SDK_ISR_EXIT_BARRIER;
3930 }
3931 void CONNECTIVITY_ENET1_FRAME2_INT_DriverIRQHandler(void);
3932 void CONNECTIVITY_ENET1_FRAME2_INT_DriverIRQHandler(void)
3933 {
3934 ENET_CommonFrame2IRQHandler(CONNECTIVITY__ENET1);
3935 SDK_ISR_EXIT_BARRIER;
3936 }
3937 void CONNECTIVITY_ENET1_TIMER_INT_DriverIRQHandler(void);
3938 void CONNECTIVITY_ENET1_TIMER_INT_DriverIRQHandler(void)
3939 {
3940 ENET_Ptp1588IRQHandler(CONNECTIVITY__ENET1);
3941 SDK_ISR_EXIT_BARRIER;
3942 }
3943 #endif
3944 #endif
3945 #if FSL_FEATURE_ENET_QUEUE > 1
3946 #if defined(ENET_1G)
3947 void ENET_1G_DriverIRQHandler(void);
3948 void ENET_1G_DriverIRQHandler(void)
3949 {
3950 ENET_CommonFrame0IRQHandler(ENET_1G);
3951 SDK_ISR_EXIT_BARRIER;
3952 }
3953 void ENET_1G_MAC0_Tx_Rx_1_DriverIRQHandler(void);
3954 void ENET_1G_MAC0_Tx_Rx_1_DriverIRQHandler(void)
3955 {
3956 ENET_CommonFrame1IRQHandler(ENET_1G);
3957 SDK_ISR_EXIT_BARRIER;
3958 }
3959 void ENET_1G_MAC0_Tx_Rx_2_DriverIRQHandler(void);
3960 void ENET_1G_MAC0_Tx_Rx_2_DriverIRQHandler(void)
3961 {
3962 ENET_CommonFrame2IRQHandler(ENET_1G);
3963 SDK_ISR_EXIT_BARRIER;
3964 }
3965 void ENET_1G_1588_Timer_DriverIRQHandler(void);
3966 void ENET_1G_1588_Timer_DriverIRQHandler(void)
3967 {
3968 ENET_Ptp1588IRQHandler(ENET_1G);
3969 SDK_ISR_EXIT_BARRIER;
3970 }
3971 #endif
3972
3973 #if defined(ENET1)
3974 void ENET1_MAC0_Rx_Tx_Done1_DriverIRQHandler(void);
3975 void ENET1_MAC0_Rx_Tx_Done1_DriverIRQHandler(void)
3976 {
3977 ENET_CommonFrame1IRQHandler(ENET1);
3978 SDK_ISR_EXIT_BARRIER;
3979 }
3980 void ENET1_MAC0_Rx_Tx_Done2_DriverIRQHandler(void);
3981 void ENET1_MAC0_Rx_Tx_Done2_DriverIRQHandler(void)
3982 {
3983 ENET_CommonFrame2IRQHandler(ENET1);
3984 SDK_ISR_EXIT_BARRIER;
3985 }
3986 void ENET1_1588_Timer_DriverIRQHandler(void);
3987 void ENET1_1588_Timer_DriverIRQHandler(void)
3988 {
3989 ENET_Ptp1588IRQHandler(ENET1);
3990 SDK_ISR_EXIT_BARRIER;
3991 }
3992 #endif
3993 #endif