author    | Josh Rahm <joshuarahm@gmail.com> | 2020-11-21 01:25:26 -0700
committer | Josh Rahm <joshuarahm@gmail.com> | 2020-11-21 01:25:26 -0700
commit    | 14a651cda0bd8dfb992d2a6a1544300c39492ca3 (patch)
tree      | a3f4b148fe64736f7bb73784498871009faf1fe0 /02-usart/src
parent    | fd763486d875968941c77386e23936e817856c8e (diff)
download  | stm32l4-14a651cda0bd8dfb992d2a6a1544300c39492ca3.tar.gz stm32l4-14a651cda0bd8dfb992d2a6a1544300c39492ca3.tar.bz2 stm32l4-14a651cda0bd8dfb992d2a6a1544300c39492ca3.zip
Implemented DMA abstraction in the peri/dma.c source file.
This abstraction makes it much more intuitive to use the
DMA features on the STM32L4 boards.
Diffstat (limited to '02-usart/src')
-rw-r--r-- | 02-usart/src/main.c     |  40
-rw-r--r-- | 02-usart/src/peri/dma.c | 312
2 files changed, 329 insertions, 23 deletions
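
The main.c hunk below wires USART2 transmit through the new DMA interface. As a quick orientation before the full diff, here is a minimal sketch of that flow using only the calls visible in this commit (select_dma_channel_mem2p, dma_channel_get_interrupt, dma_mem2p_initiate_transfer, plus the pre-existing usart_enable_dma and enable_interrupt helpers). The wrapper function name and the assumption that USART2 has already been configured elsewhere are illustrative, not part of the commit.

```c
#include "core/irq.h"
#include "core/usart.h"
#include "peri/dma.h"

/* Illustrative only: push a string out of USART2 through the new mem2p DMA
 * path, mirroring the main.c hunk in this commit. Assumes USART2 has already
 * been brought up (setup_usart2 / usart_set_enabled). */
static void usart2_dma_hello(void)
{
    dma_opts_t opts = DEFAULT_DMA_OPTS;
    opts.transfer_complete_interrupt_enable = 1;   /* raise an IRQ when done */

    int ec = 0;
    dma_mem2p_channel_t chan =
        select_dma_channel_mem2p(DMA1_PERIPH_USART2_TX, &opts, &ec);
    if (ec) {
        return;                        /* e.g. DMA_ERROR_CHANNEL_IN_USE */
    }
    enable_interrupt(dma_channel_get_interrupt(chan.c_));

    /* Route USART2 TX requests to the DMA, then hand it the buffer. */
    usart_enable_dma(&USART2, USART_ENABLE_TX);

    static const char msg[] = "HELLO DMA!\r\n";
    dma_mem2p_initiate_transfer(chan, msg, sizeof(msg) - 1);
}
```
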
diff --git a/02-usart/src/main.c b/02-usart/src/main.c
index 8b98cbf..d26dce4 100644
--- a/02-usart/src/main.c
+++ b/02-usart/src/main.c
@@ -8,6 +8,7 @@
 #include "core/nvic.h"
 #include "core/irq.h"
+#include "peri/dma.h"
 #include "delay.h"
 #include "mem.h"
 #include "spin.h"

@@ -64,37 +65,30 @@ int main()
     setup_usart2(115200);
     regset(USART2.c_r1, usart_txeie, 1);
     regset(USART2.c_r1, usart_rxneie, 1);
+    usart_enable_dma(&USART2, USART_ENABLE_TX);
     usart_set_enabled(&USART2, USART_ENABLE_TX | USART_ENABLE_RX);

-    enable_interrupt(IRQ_USART2);
-    USART2.td_r = (uint8_t) 0x61;
+    dma_opts_t opts = DEFAULT_DMA_OPTS;
+    opts.transfer_complete_interrupt_enable = 1;
+    int ec = 0;
+    dma_mem2p_channel_t dma_chan =
+        select_dma_channel_mem2p(DMA1_PERIPH_USART2_TX, &opts, &ec);
+    enable_interrupt(dma_channel_get_interrupt(dma_chan.c_));
+
+    if (ec) {
+        usart_printf(&USART2, "Select DMA channel failed :( %d\n", ec);
+        for (;;);
+    }
+
+    const char* thing = "HELLO DMA!\r\n";
+    regset(USART2.ic_r, usart_tccf, 1);
+    dma_mem2p_initiate_transfer(dma_chan, thing, strlen(thing));

     __IO gpio_port_t* port_b = enable_gpio(GPIO_PORT_B);
     gpio_output_pin_t pin3 = set_gpio_pin_output(port_b, PIN_3);
     pin_on(pin3);

-    usart_printf(&USART2, "\nUSART2.c_r1: %p\n", USART2.c_r1);
-    usart_printf(&USART2, "NVIC intlinesnum: %d\n",
-        regget(NVIC.ict_r, nvic_intlinesnum));
-
-    int off = 1;
-    int last;
-    for(;;) {
-        int next = USART2.is_r & usart_rxne;
-        volatile int y = USART2.rd_r;
-        if (next)
-            USART2.td_r = y;
-        if (last != next) {
-            if (off) {
-                pin_on(pin3);
-            } else {
-                pin_off(pin3);
-            }
-            off = !off;
-        }
-    }
-
     // usart_printf(&USART2, "Start Configuring Countdown!\n");

     /* Set the countdown to start from 1,000,0000. */
diff --git a/02-usart/src/peri/dma.c b/02-usart/src/peri/dma.c
new file mode 100644
index 0000000..ceae2e6
--- /dev/null
+++ b/02-usart/src/peri/dma.c
@@ -0,0 +1,312 @@
+#include "peri/dma.h"
+#include "core/dma.h"
+#include "core/usart.h"
+#include "core/rcc.h"
+
+
+/* Bitmask of DMA2 channels in use. */
+static uint8_t dma_inuse[2];
+
+static inline dma_t* get_dma(int dma)
+{
+    if (dma) {
+        return &DMA2;
+    } else {
+        return &DMA1;
+    }
+}
+
+static dma_t* get_raw_dma(dma_channel_t chan)
+{
+    return get_dma(chan.dma);
+}
+
+static dma_channel_config_t* get_raw_channel_config(dma_channel_t chan)
+{
+    dma_t* dma = get_raw_dma(chan);
+    return &dma->channel_config[chan.chan];
+}
+
+static uint32_t get_periph_location(dma_peripheral_t operipheral)
+{
+#define CASE(p, n) case p: return ptr2reg(n);
+    switch (operipheral) {
+        CASE(DMA1_PERIPH_USART1_RX, &USART1.rd_r)
+        CASE(DMA1_PERIPH_USART1_TX, &USART1.td_r)
+        CASE(DMA1_PERIPH_USART2_RX, &USART2.rd_r)
+        CASE(DMA1_PERIPH_USART2_TX, &USART2.td_r)
+
+        default:
+            return 0;
+    };
+#undef CASE
+}
+
+static dma_channel_t allocate_dma_channel(
+    dma_peripheral_t operipheral, int* modesel)
+{
+    dma_peripheral_t peripheral = operipheral & 0xff;
+    int dmasel = peripheral >= DMA2_DMA1_SWITCH__;
+    if (dmasel) {
+        peripheral -= DMA2_DMA1_SWITCH__;
+    }
+    int chan = peripheral % DMA_N_CHANNELS;
+
+    *modesel = peripheral / 7;
+    return (dma_channel_t) {
+        .dma = dmasel,
+        .chan = chan
+    };
+}
+
+/*
+ * Atomically reserves the DMA channel so other calls
+ * cannot erroneously reserve the same DMA channel.
+ *
+ * Returns 0 if this function was unable to reserve
+ * the channel.
+ */
+static int try_reserve_dma_channel(
+    dma_channel_t chan)
+{
+    int in_use = __sync_fetch_and_or(
+        &dma_inuse[chan.dma], 1 << chan.chan);
+
+    return !(in_use & (1 << chan.chan));
+}
+
+ // int in_use = __sync_fetch_and_or(&dma_inuse[dmasel], 1 << chan);
+void release_dma_channel(dma_channel_t chan)
+{
+    dma_channel_config_t* config = get_raw_channel_config(chan);
+    regset(config->cc_r, dma_cc_en, 0);       /* Disable the register. */
+    dma_inuse[chan.dma] &= ~(1 << chan.chan); /* Release the DMA. */
+
+    if (!dma_inuse[chan.dma]) {
+        /* Power-down the DMA if not in use. */
+        if (chan.dma) {
+            regset(RCC.ahb1en_r, rcc_dma2en, 0);
+        } else {
+            regset(RCC.ahb1en_r, rcc_dma1en, 0);
+        }
+    }
+}
+
+void configure_dma_channel(
+    dma_channel_t chan,
+    dma_peripheral_t operipheral,
+    dma_opts_t* opts,
+    dma_dir_t dir,
+    int selmode,
+    bool mem2mem,
+    int* error_out)
+{
+    if (chan.dma) {
+        regset(RCC.ahb1en_r, rcc_dma2en, 1);
+    } else {
+        regset(RCC.ahb1en_r, rcc_dma1en, 1);
+    }
+
+    dma_t* dma = get_raw_dma(chan);
+    regset(dma->csel_r, 0xF << (4 * chan.chan), selmode);
+    dma_channel_config_t* config =
+        &dma->channel_config[chan.chan];
+
+    uint32_t reg = 0;
+
+    regset(reg, dma_cc_dir, dir);
+    regset(reg, dma_cc_tcie, opts->transfer_complete_interrupt_enable);
+    regset(reg, dma_cc_htie, opts->half_transfer_interrupt_enable);
+    regset(reg, dma_cc_teie, opts->transfer_error_interrupt_enable);
+    regset(reg, dma_cc_circ, opts->circular_mode);
+    regset(reg, dma_cc_pinc, opts->peripheral_increment);
+    regset(reg, dma_cc_minc, opts->memory_increment);
+    regset(reg, dma_cc_psize, opts->peripheral_block_size);
+    regset(reg, dma_cc_msize, opts->memory_block_size);
+    regset(reg, dma_cc_pl, opts->priority);
+    regset(reg, dma_cc_mem2mem, mem2mem);
+
+    config->cc_r = reg;
+    config->cpa_r = get_periph_location(operipheral);
+
+    *error_out = 0;
+}
+
+dma_mem2mem_channel_t select_dma_channel_mem2mem(
+    int channel,
+    dma_opts_t* opts,
+    int* error_out)
+{
+
+#define WRAP(c) ((dma_mem2mem_channel_t) { .c_ = c })
+    // TODO this should probably be in a critical section.
+    dma_channel_t chan;
+    if (channel == -1) {
+        chan.dma = 1;
+        if ((dma_inuse[chan.dma] & 0x7F) == 0x7F) {
+            chan.dma = 0;
+        }
+
+        if ((dma_inuse[chan.dma] & 0x7F) == 0x7F) {
+            *error_out = DMA_ERROR_CHANNEL_IN_USE;
+            return WRAP(DMA_CHAN_ERROR);
+        }
+
+        uint8_t t = ~(dma_inuse[chan.dma] << 1);
+        chan.chan = 6 - (__builtin_clz(t) - 24);
+    } else {
+        if (channel < 7) {
+            chan.dma = 0;
+            chan.chan = channel;
+        } else {
+            chan.dma = 0;
+            chan.chan = channel - 7;
+        }
+    }
+
+    if (!try_reserve_dma_channel(chan)) {
+        *error_out = DMA_ERROR_CHANNEL_IN_USE;
+        return WRAP(DMA_CHAN_ERROR);
+    }
+
+    int ec = 0;
+    configure_dma_channel(
+        chan,
+        -1 /* No peripheral */,
+        opts,
+        READ_FROM_PERIPHERAL,
+        /* selmode = */ 0x8,
+        /* mem2mem = */ true,
+        &ec);
+
+    if (ec) {
+        *error_out = ec;
+        return WRAP(DMA_CHAN_ERROR);
+    }
+
+    *error_out = 0;
+    return WRAP(chan);
+#undef WRAP
+}
+
+dma_mem2p_channel_t select_dma_channel_mem2p(
+    dma_peripheral_t peripheral,
+    dma_opts_t* opts_in,
+    int* error_out)
+{
+#define WRAP(c) ((dma_mem2p_channel_t) { .c_ = c })
+    *error_out = 0;
+
+    int modesel;
+    dma_channel_t ret =
+        allocate_dma_channel(peripheral, &modesel);
+
+    if (!try_reserve_dma_channel(ret)) {
+        *error_out = DMA_ERROR_CHANNEL_IN_USE;
+        return WRAP(DMA_CHAN_ERROR);
+    }
+
+    configure_dma_channel(
+        ret,
+        peripheral,
+        opts_in,
+        READ_FROM_MEMORY,
+        modesel,
+        /* mem2mem = */ false,
+        error_out);
+
+    if (*error_out) {
+        return WRAP(DMA_CHAN_ERROR);
+    }
+
+    *error_out = 0;
+    return WRAP(ret);
+#undef WRAP
+}
+
+dma_p2mem_channel_t select_dma_channel_p2mem(
+    dma_peripheral_t peripheral,
+    dma_opts_t* opts_in,
+    int* error_out)
+{
+#define WRAP(c) ((dma_p2mem_channel_t) { .c_ = c })
+    *error_out = 0;
+
+    int modesel;
+    dma_channel_t ret =
+        allocate_dma_channel(peripheral, &modesel);
+
+    if (!try_reserve_dma_channel(ret)) {
+        *error_out = DMA_ERROR_CHANNEL_IN_USE;
+        return WRAP(DMA_CHAN_ERROR);
+    }
+
+    configure_dma_channel(
+        ret,
+        peripheral,
+        opts_in,
+        READ_FROM_PERIPHERAL,
+        modesel,
+        /* mem2mem = */ false,
+        error_out);
+
+    if (*error_out) {
+        return WRAP(DMA_CHAN_ERROR);
+    }
+
+    *error_out = 0;
+    return WRAP(ret);
+#undef WRAP
+}
+
+
+void dma_mem2p_initiate_transfer(
+    dma_mem2p_channel_t chan, const void* from_loc, uint16_t nblocks)
+{
+    dma_channel_config_t* config = get_raw_channel_config(chan.c_);
+    config->cma_r = ptr2reg(from_loc);
+    config->cndt_r = nblocks;
+
+    regset(config->cc_r, dma_cc_en, 1);
+}
+
+void dma_mem2mem_initiate_transfer(
+    dma_mem2mem_channel_t chan,
+    void* to_loc,
+    const void* from_loc,
+    uint16_t nblocks)
+{
+    dma_channel_config_t* config = get_raw_channel_config(chan.c_);
+    config->cma_r = ptr2reg(to_loc);
+    config->cpa_r = ptr2reg(from_loc);
+    config->cndt_r = nblocks;
+
+    regset(config->cc_r, dma_cc_en, 1);
+}
+
+void dma_p2mem_initiate_transfer(
+    dma_p2mem_channel_t chan, void* to_loc, uint16_t nblocks)
+{
+    dma_channel_config_t* config = get_raw_channel_config(chan.c_);
+
+    config->cma_r = ptr2reg(to_loc);
+    config->cndt_r = nblocks;
+
+    regset(config->cc_r, dma_cc_en, 1);
+}
+
+interrupt_t dma_channel_get_interrupt(dma_channel_t chan)
+{
+    if (chan.dma == 0) {
+        return IRQ_DMA1_CHANNEL1_IRQ + chan.chan;
+    } else {
+        switch (chan.chan) {
+        case 5:
+            return IRQ_DMA1_CHANNEL6_IRQ;
+        case 6:
+            return IRQ_DMA1_CHANNEL7_IRQ;
+        default:
+            return IRQ_DMA2_CHANNEL1_IRQ + chan.chan;
+        }
+    }
+}
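
For completeness, the mem2mem path defined above can be driven the same way. The sketch below is not part of the commit: the dma_copy wrapper name is made up, the increment flags are set on the assumption that dma_opts_t's memory_increment/peripheral_increment fields map onto the channel configuration bits exactly as configure_dma_channel suggests, and the wait for transfer completion is left as a comment.

```c
#include "peri/dma.h"

/* Illustrative only: copy nblocks blocks from src to dst using the mem2mem
 * path above. Passing -1 lets select_dma_channel_mem2mem pick any free
 * channel; both increment flags are enabled so the transfer walks through
 * both buffers (configure_dma_channel feeds them into dma_cc_minc and
 * dma_cc_pinc). */
static int dma_copy(void* dst, const void* src, uint16_t nblocks)
{
    dma_opts_t opts = DEFAULT_DMA_OPTS;
    opts.memory_increment = 1;       /* advance the destination (cma_r side) */
    opts.peripheral_increment = 1;   /* advance the source (cpa_r side) */

    int ec = 0;
    dma_mem2mem_channel_t chan = select_dma_channel_mem2mem(-1, &opts, &ec);
    if (ec) {
        return ec;                   /* e.g. DMA_ERROR_CHANNEL_IN_USE */
    }

    dma_mem2mem_initiate_transfer(chan, dst, src, nblocks);

    /* ... wait for the transfer-complete flag/interrupt here before the
     * buffers are reused ... */

    release_dma_channel(chan.c_);    /* disables the channel and frees it */
    return 0;
}
```
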