#include "kern/dma/dma_manager.h" #include "arch/stm32l4xxx/peripherals/dma.h" #include "arch/stm32l4xxx/peripherals/rcc.h" #include "arch/stm32l4xxx/peripherals/spi.h" #include "arch/stm32l4xxx/peripherals/usart.h" /* Bitmask of DMA2 channels in use. */ uint8_t dma_inuse[2]; void (*dma_channel_callbacks[14])(void*); void* callback_args[14]; #define ON_DMA(dma, chan) \ void on_dma##dma##_channel##chan() \ { \ if (dma_channel_callbacks[(dma - 1) * 7 + (chan - 1)]) { \ dma_channel_callbacks[(dma - 1) * 7 + (chan - 1)]( \ callback_args[(dma - 1) * 7 + (chan - 1)]); \ } \ } ON_DMA(1, 1); ON_DMA(1, 2); ON_DMA(1, 3); ON_DMA(1, 4); ON_DMA(1, 5); ON_DMA(1, 6); ON_DMA(1, 7); ON_DMA(2, 1); ON_DMA(2, 2); ON_DMA(2, 3); ON_DMA(2, 4); ON_DMA(2, 5); ON_DMA(2, 6); ON_DMA(2, 7); static inline dma_t* get_dma(int dma) { if (dma) { return &DMA2; } else { return &DMA1; } } static dma_t* get_raw_dma(dma_channel_t chan) { return get_dma(chan.dma); } static dma_channel_config_t* get_raw_channel_config(dma_channel_t chan) { dma_t* dma = get_raw_dma(chan); return &dma->channel_config[chan.chan]; } static uint32_t get_periph_location(dma_peripheral_t operipheral) { #define CASE(p, n) \ case p: \ return ptr2reg(n); switch (operipheral) { CASE(DMA1_PERIPH_USART1_RX, &USART1.rd_r) CASE(DMA1_PERIPH_USART1_TX, &USART1.td_r) CASE(DMA1_PERIPH_USART2_RX, &USART2.rd_r) CASE(DMA1_PERIPH_USART2_TX, &USART2.td_r) CASE(DMA2_PERIPH_SPI1_RX, &SPI1.d_r) CASE(DMA2_PERIPH_SPI1_TX, &SPI1.d_r) CASE(DMA1_PERIPH_SPI1_RX, &SPI1.d_r) CASE(DMA1_PERIPH_SPI1_TX, &SPI1.d_r) CASE(DMA2_PERIPH_SPI3_RX, &SPI3.d_r) CASE(DMA2_PERIPH_SPI3_TX, &SPI3.d_r) default: return 0; }; #undef CASE } static dma_channel_t allocate_dma_channel( dma_peripheral_t operipheral, int* modesel) { dma_peripheral_t peripheral = operipheral & 0xff; int dmasel = peripheral >= DMA2_DMA1_SWITCH__; if (dmasel) { peripheral -= DMA2_DMA1_SWITCH__; } int chan = peripheral % DMA_N_CHANNELS; *modesel = peripheral / 7; return (dma_channel_t){.dma = dmasel, .chan = chan}; } /* * Atomically reserves the DMA channel so other calls * cannot erroneously reserve the same DMA channel. * * Returns 0 if this function was unable to reserve * the channel. */ static int try_reserve_dma_channel(dma_channel_t chan) { int in_use = __sync_fetch_and_or(&dma_inuse[chan.dma], 1 << chan.chan); return !(in_use & (1 << chan.chan)); } void release_dma_channel(dma_channel_t chan) { dma_channel_config_t* config = get_raw_channel_config(chan); regset(config->cc_r, dma_cc_en, 0); /* Disable the register. */ dma_inuse[chan.dma] &= ~(1 << chan.chan); /* Release the DMA. */ if (!dma_inuse[chan.dma]) { /* Power-down the DMA if not in use. 
        if (chan.dma) {
            regset(RCC.ahb1en_r, rcc_dma2en, 0);
        } else {
            regset(RCC.ahb1en_r, rcc_dma1en, 0);
        }
    }
}

void configure_dma_channel(
    dma_channel_t chan,
    dma_peripheral_t operipheral,
    dma_opts_t* opts,
    dma_dir_t dir,
    int selmode,
    bool mem2mem,
    int* error_out)
{
    /* Power up the controller before touching its registers. */
    if (chan.dma) {
        regset(RCC.ahb1en_r, rcc_dma2en, 1);
    } else {
        regset(RCC.ahb1en_r, rcc_dma1en, 1);
    }

    /* Route the requested peripheral to this channel via CSELR. */
    dma_t* dma = get_raw_dma(chan);
    regset(dma->csel_r, 0xF << (4 * chan.chan), selmode);

    dma_channel_config_t* config = &dma->channel_config[chan.chan];

    /* Build the channel configuration, then commit it in one write. */
    uint32_t reg = 0;
    regset(reg, dma_cc_dir, dir);
    regset(reg, dma_cc_tcie, opts->transfer_complete_interrupt_enable);
    regset(reg, dma_cc_htie, opts->half_transfer_interrupt_enable);
    regset(reg, dma_cc_teie, opts->transfer_error_interrupt_enable);
    regset(reg, dma_cc_circ, opts->circular_mode);
    regset(reg, dma_cc_pinc, opts->peripheral_increment);
    regset(reg, dma_cc_minc, opts->memory_increment);
    regset(reg, dma_cc_psize, opts->peripheral_block_size);
    regset(reg, dma_cc_msize, opts->memory_block_size);
    regset(reg, dma_cc_pl, opts->priority);
    regset(reg, dma_cc_mem2mem, mem2mem);
    config->cc_r = reg;

    config->cpa_r = get_periph_location(operipheral);

    *error_out = 0;
}

dma_mem2mem_channel_t select_dma_channel_mem2mem(
    int channel, dma_opts_t* opts, int* error_out)
{
#define WRAP(c) ((dma_mem2mem_channel_t){.c_ = c})
    // TODO: this should probably be in a critical section.
    dma_channel_t chan;
    if (channel == -1) {
        /* Prefer DMA2; fall back to DMA1 if all of DMA2 is taken. */
        chan.dma = 1;
        if ((dma_inuse[chan.dma] & 0x7F) == 0x7F) {
            chan.dma = 0;
        }
        if ((dma_inuse[chan.dma] & 0x7F) == 0x7F) {
            *error_out = DMA_ERROR_CHANNEL_IN_USE;
            return WRAP(DMA_CHAN_ERROR);
        }

        /* Pick the highest-numbered free channel on the chosen controller. */
        uint8_t t = ~(dma_inuse[chan.dma] << 1);
        chan.chan = 6 - (__builtin_clz(t) - 24);
    } else {
        if (channel < 7) {
            chan.dma = 0;
            chan.chan = channel;
        } else {
            /* Channels 7-13 map onto DMA2. */
            chan.dma = 1;
            chan.chan = channel - 7;
        }
    }

    if (!try_reserve_dma_channel(chan)) {
        *error_out = DMA_ERROR_CHANNEL_IN_USE;
        return WRAP(DMA_CHAN_ERROR);
    }

    int ec = 0;
    configure_dma_channel(
        chan,
        -1 /* No peripheral */,
        opts,
        READ_FROM_PERIPHERAL,
        /* selmode = */ 0x8,
        /* mem2mem = */ true,
        &ec);
    if (ec) {
        *error_out = ec;
        return WRAP(DMA_CHAN_ERROR);
    }

    *error_out = 0;
    return WRAP(chan);
#undef WRAP
}

dma_mem2p_channel_t select_dma_channel_mem2p(
    dma_peripheral_t peripheral, dma_opts_t* opts_in, int* error_out)
{
#define WRAP(c) ((dma_mem2p_channel_t){.c_ = c})
    *error_out = 0;

    int modesel;
    dma_channel_t ret = allocate_dma_channel(peripheral, &modesel);
    if (!try_reserve_dma_channel(ret)) {
        *error_out = DMA_ERROR_CHANNEL_IN_USE;
        return WRAP(DMA_CHAN_ERROR);
    }

    configure_dma_channel(
        ret,
        peripheral,
        opts_in,
        READ_FROM_MEMORY,
        modesel,
        /* mem2mem = */ false,
        error_out);
    if (*error_out) {
        return WRAP(DMA_CHAN_ERROR);
    }

    *error_out = 0;
    return WRAP(ret);
#undef WRAP
}

dma_p2mem_channel_t select_dma_channel_p2mem(
    dma_peripheral_t peripheral, dma_opts_t* opts_in, int* error_out)
{
#define WRAP(c) ((dma_p2mem_channel_t){.c_ = c})
    *error_out = 0;

    int modesel;
    dma_channel_t ret = allocate_dma_channel(peripheral, &modesel);
    if (!try_reserve_dma_channel(ret)) {
        *error_out = DMA_ERROR_CHANNEL_IN_USE;
        return WRAP(DMA_CHAN_ERROR);
    }

    configure_dma_channel(
        ret,
        peripheral,
        opts_in,
        READ_FROM_PERIPHERAL,
        modesel,
        /* mem2mem = */ false,
        error_out);
    if (*error_out) {
        return WRAP(DMA_CHAN_ERROR);
    }

    *error_out = 0;
    return WRAP(ret);
#undef WRAP
}

void dma_mem2p_initiate_transfer(
    dma_mem2p_channel_t chan, const void* from_loc, uint16_t nblocks)
{
    dma_channel_config_t* config = get_raw_channel_config(chan.c_);
    config->cma_r = ptr2reg(from_loc);
    config->cndt_r = nblocks;
    regset(config->cc_r, dma_cc_en, 1);
}

void dma_mem2mem_initiate_transfer(
    dma_mem2mem_channel_t chan,
    void* to_loc,
    const void* from_loc,
    uint16_t nblocks)
{
    dma_channel_config_t* config = get_raw_channel_config(chan.c_);
    config->cma_r = ptr2reg(to_loc);
    config->cpa_r = ptr2reg(from_loc);
    config->cndt_r = nblocks;
    regset(config->cc_r, dma_cc_en, 1);
}

void dma_p2mem_initiate_transfer(
    dma_p2mem_channel_t chan, void* to_loc, uint16_t nblocks)
{
    dma_channel_config_t* config = get_raw_channel_config(chan.c_);
    config->cma_r = ptr2reg(to_loc);
    config->cndt_r = nblocks;
    regset(config->cc_r, dma_cc_en, 1);
}

void dma_chan_set_callback(
    dma_channel_t chan, void (*callback)(void*), void* arg)
{
    dma_channel_callbacks[chan.dma * 7 + chan.chan] = callback;
    callback_args[chan.dma * 7 + chan.chan] = arg;
    enable_interrupt(dma_channel_get_interrupt(chan));
}

void dma_channel_interrupt_enable(dma_channel_t chan, bool enabled)
{
    dma_channel_config_t* config = get_raw_channel_config(chan);
    regset(config->cc_r, dma_cc_tcie, !!enabled);
    if (enabled) {
        enable_interrupt(dma_channel_get_interrupt(chan));
    } else {
        disable_interrupt(dma_channel_get_interrupt(chan));
    }
}

interrupt_t dma_channel_get_interrupt(dma_channel_t chan)
{
    if (chan.dma == 0) {
        return IRQ_DMA1_CHANNEL1_IRQ + chan.chan;
    } else {
        /* DMA2 channels 6 and 7 are not contiguous with channels 1-5
         * in the vector table. */
        switch (chan.chan) {
        case 5:
            return IRQ_DMA2_CHANNEL6_IRQ;
        case 6:
            return IRQ_DMA2_CHANNEL7_IRQ;
        default:
            return IRQ_DMA2_CHANNEL1_IRQ + chan.chan;
        }
    }
}
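
/*
 * Usage sketch (compiled out): how a caller might stream a buffer to
 * USART2 over DMA with this API. The function and variable names
 * (usart2_dma_write, tx_done, tx_chan) are hypothetical, and the
 * zero-valued dma_opts_t fields assume 0 encodes 8-bit block sizes and
 * lowest priority; see dma.h for the real encodings.
 */
#if 0
static dma_mem2p_channel_t tx_chan;

static void tx_done(void* arg)
{
    /* Transfer-complete interrupt fired; hand the channel back. */
    (void)arg;
    release_dma_channel(tx_chan.c_);
}

static void usart2_dma_write(const char* tx_buf, uint16_t len)
{
    dma_opts_t opts = {
        .transfer_complete_interrupt_enable = 1,
        .half_transfer_interrupt_enable = 0,
        .transfer_error_interrupt_enable = 0,
        .circular_mode = 0,
        .peripheral_increment = 0, /* Fixed USART data register. */
        .memory_increment = 1,     /* Walk through the buffer. */
        .peripheral_block_size = 0,
        .memory_block_size = 0,
        .priority = 0,
    };

    int error = 0;
    tx_chan = select_dma_channel_mem2p(DMA1_PERIPH_USART2_TX, &opts, &error);
    if (error) {
        return; /* DMA_ERROR_CHANNEL_IN_USE, most likely. */
    }

    dma_chan_set_callback(tx_chan.c_, tx_done, NULL);
    dma_mem2p_initiate_transfer(tx_chan, tx_buf, len);
}
#endif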
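
/*
 * A second sketch (also compiled out): a memory-to-memory copy via the
 * mem2mem selection path. Passing -1 lets the manager pick any free
 * channel, preferring DMA2. memcpy_dma is a hypothetical name, and the
 * opts values make the same encoding assumptions as the sketch above.
 */
#if 0
static void memcpy_dma(void* dst, const void* src, uint16_t nblocks)
{
    dma_opts_t opts = {
        .transfer_complete_interrupt_enable = 1,
        /* The "peripheral" side is the source (cpa_r = from_loc), so
         * both pointers must increment. */
        .peripheral_increment = 1,
        .memory_increment = 1,
    };

    int error = 0;
    dma_mem2mem_channel_t chan = select_dma_channel_mem2mem(-1, &opts, &error);
    if (error) {
        return;
    }

    dma_mem2mem_initiate_transfer(chan, dst, src, nblocks);
    /* Completion would be observed via a callback registered with
     * dma_chan_set_callback(chan.c_, ...). */
}
#endif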