bits_per_word = t->bits_per_word ? t->bits_per_word :
tspi->cur_spi->bits_per_word;
- rx_mask = (1 << bits_per_word) -1;
+ rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < rx_full_count; ++count) {
x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
x &= rx_mask;
struct spi_tegra_data *tspi, struct spi_transfer *t)
{
unsigned len;
+
+ /*
+  * Make the DMA buffer CPU-accessible before the CPU fills it.
+  * NOTE(review): this syncs tx_buf_phys with DMA_FROM_DEVICE while the
+  * matching sync_for_device below uses DMA_TO_DEVICE -- the DMA API
+  * requires the sync direction to match the mapping direction; confirm
+  * which direction tx_buf_phys was mapped with.
+  */
+ dma_sync_single_for_cpu(&tspi->pdev->dev, tspi->tx_buf_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
}
}
tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ /* Hand the Tx DMA buffer back to the device */
+ dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
}
static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
struct spi_tegra_data *tspi, struct spi_transfer *t)
{
unsigned len;
+
+ /* Make the DMA buffer CPU-accessible before reading the Rx data */
+ dma_sync_single_for_cpu(&tspi->pdev->dev, tspi->rx_buf_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
bits_per_word = t->bits_per_word ? t->bits_per_word :
tspi->cur_spi->bits_per_word;
- rx_mask = (1 << bits_per_word) -1;
+ rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; ++count) {
x = tspi->rx_buf[count];
x &= rx_mask;
}
}
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /*
+  * Hand the DMA buffer back to the device.
+  * NOTE(review): DMA_TO_DEVICE on an Rx buffer -- confirm this matches
+  * the direction rx_buf_phys was mapped with (likely DMA_FROM_DEVICE).
+  */
+ dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
}
static int spi_tegra_start_dma_based_transfer(
INIT_COMPLETION(tspi->rx_dma_complete);
INIT_COMPLETION(tspi->tx_dma_complete);
+ /* Warn if the Rx/Tx FIFO status bits (23:20) do not read empty (0xA) */
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ if (((test_val >> 20) & 0xF) != 0xA)
+ dev_err(&tspi->pdev->dev,
+ "The Rx and Tx fifo are not empty status 0x%08lx\n",
+ test_val);
+
val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
val |= tspi->packed_size;
if (tspi->is_packed)
if (tspi->cur_direction & DATA_DIR_TX) {
spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
wmb();
+ /* Hand the Tx DMA buffer to the device before starting DMA */
+ dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
tspi->tx_dma_req.size = len;
ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
if (ret < 0) {
- dev_err(&tspi->pdev->dev, "Error in starting tx dma "
- " error = %d\n", ret);
+ dev_err(&tspi->pdev->dev,
+ "Error in starting tx dma error = %d\n", ret);
return ret;
}
}
if (tspi->cur_direction & DATA_DIR_RX) {
+ /*
+  * Hand the Rx DMA buffer to the device before starting DMA.
+  * NOTE(review): DMA_TO_DEVICE on an Rx buffer -- confirm this matches
+  * the direction rx_buf_phys was mapped with.
+  */
+ dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
tspi->rx_dma_req.size = len;
ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
if (ret < 0) {
- dev_err(&tspi->pdev->dev, "Error in starting rx dma "
- " error = %d\n", ret);
+ dev_err(&tspi->pdev->dev,
+ "Error in starting rx dma error = %d\n", ret);
if (tspi->cur_direction & DATA_DIR_TX)
tegra_dma_dequeue_req(tspi->tx_dma,
&tspi->tx_dma_req);
ret = clk_set_parent(tspi->clk,
tspi->parent_clk_list[count].parent_clk);
if (ret < 0) {
- dev_warn(&tspi->pdev->dev, "Error in setting parent "
- " clk src %s\n",
+ dev_warn(&tspi->pdev->dev,
+ "Error in setting parent clk src %s\n",
tspi->parent_clk_list[count].name);
continue;
}
val |= cs_bit;
else
val &= ~cs_bit;
- tspi->def_command_reg |= val;
+ tspi->def_command_reg = val;
if (!tspi->is_clkon_always && !tspi->clk_state) {
spin_unlock_irqrestore(&tspi->lock, flags);
goto exit;
}
- dev_vdbg(&tspi->pdev->dev, " Current direction %x\n",
- tspi->cur_direction);
+ dev_vdbg(&tspi->pdev->dev, "Current direction %x\n",
+ tspi->cur_direction);
if (tspi->cur_direction & DATA_DIR_RX)
spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);
else
WARN_ON(1);
- dev_vdbg(&tspi->pdev->dev, "current position %d and length of the "
- "transfer %d\n", tspi->cur_pos, t->len);
+ dev_vdbg(&tspi->pdev->dev,
+ "current position %d and length of the transfer %d\n",
+ tspi->cur_pos, t->len);
if (tspi->cur_pos == t->len) {
spi_tegra_curr_transfer_complete(tspi,
tspi->tx_status || tspi->rx_status, t->len, &flags);
&tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
if (wait_status <= 0) {
tegra_dma_dequeue_req(tspi->tx_dma,
- &tspi->tx_dma_req);
- dev_err(&tspi->pdev->dev, "Error in Dma Tx "
- "transfer\n");
+ &tspi->tx_dma_req);
+ dev_err(&tspi->pdev->dev,
+ "Error in Dma Tx transfer\n");
err += 1;
}
}
if (wait_status <= 0) {
tegra_dma_dequeue_req(tspi->rx_dma,
&tspi->rx_dma_req);
- dev_err(&tspi->pdev->dev, "Error in Dma Rx "
- "transfer\n");
+ dev_err(&tspi->pdev->dev,
+ "Error in Dma Rx transfer\n");
err += 2;
}
}
goto fail_rx_buf_alloc;
}
+ /* Hand the freshly allocated Rx DMA buffer to the device */
+ dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
tspi->rx_dma_req.to_memory = 1;
goto fail_tx_buf_alloc;
}
+ /* Hand the freshly allocated Tx DMA buffer to the device */
+ dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
tspi->tx_dma_req.to_memory = 0;
}
if (tspi->is_transfer_in_progress) {
- dev_err(&pdev->dev, "Spi transfer is in progress "
- "Avoiding suspend\n");
+ dev_err(&pdev->dev,
+ "Spi transfer is in progress Avoiding suspend\n");
tspi->is_suspended = false;
spin_unlock_irqrestore(&tspi->lock, flags);
return -EBUSY;