// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Analog Devices, Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>

static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
        .postenable = &iio_triggered_buffer_postenable,
        .predisable = &iio_triggered_buffer_predisable,
};

/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev: IIO device structure
 * @h: Function which will be used as pollfunc top half
 * @thread: Function which will be used as pollfunc bottom half
 * @setup_ops: Buffer setup functions to use for this device.
 *             If NULL the default setup functions for triggered
 *             buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer. It will allocate the buffer and the
 * pollfunc.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means that
 * this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
        irqreturn_t (*h)(int irq, void *p),
        irqreturn_t (*thread)(int irq, void *p),
        const struct iio_buffer_setup_ops *setup_ops)
{
        struct iio_buffer *buffer;
        int ret;

        buffer = iio_kfifo_allocate();
        if (!buffer) {
                ret = -ENOMEM;
                goto error_ret;
        }

        iio_device_attach_buffer(indio_dev, buffer);

        indio_dev->pollfunc = iio_alloc_pollfunc(h,
                                                 thread,
                                                 IRQF_ONESHOT,
                                                 indio_dev,
                                                 "%s_consumer%d",
                                                 indio_dev->name,
                                                 indio_dev->id);
        if (indio_dev->pollfunc == NULL) {
                ret = -ENOMEM;
                goto error_kfifo_free;
        }

        /* Ring buffer functions - here trigger setup related */
        if (setup_ops)
                indio_dev->setup_ops = setup_ops;
        else
                indio_dev->setup_ops = &iio_triggered_buffer_setup_ops;

        /* Flag that polled ring buffering is possible */
        indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

        return 0;

error_kfifo_free:
        iio_kfifo_free(indio_dev->buffer);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_triggered_buffer_setup);
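
/*
 * A minimal usage sketch, not taken from an existing driver: the names
 * foo_trigger_handler and foo_setup_buffer below are hypothetical and only
 * illustrate the intended call order. The buffer and pollfunc are set up
 * once the iio_dev is fully initialized, and are torn down again with
 * iio_triggered_buffer_cleanup() on the error and remove paths.
 *
 *        static irqreturn_t foo_trigger_handler(int irq, void *p)
 *        {
 *                struct iio_poll_func *pf = p;
 *                struct iio_dev *indio_dev = pf->indio_dev;
 *
 *                // read the sample data here and push it to the buffer,
 *                // e.g. with iio_push_to_buffers_with_timestamp()
 *
 *                iio_trigger_notify_done(indio_dev->trig);
 *                return IRQ_HANDLED;
 *        }
 *
 *        static int foo_setup_buffer(struct iio_dev *indio_dev)
 *        {
 *                int ret;
 *
 *                ret = iio_triggered_buffer_setup(indio_dev, NULL,
 *                                                 foo_trigger_handler, NULL);
 *                if (ret)
 *                        return ret;
 *
 *                ret = iio_device_register(indio_dev);
 *                if (ret)
 *                        iio_triggered_buffer_cleanup(indio_dev);
 *                return ret;
 *        }
 */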

/**
 * iio_triggered_buffer_cleanup() - Free resources allocated by iio_triggered_buffer_setup()
 * @indio_dev: IIO device structure
 */
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
{
        iio_dealloc_pollfunc(indio_dev->pollfunc);
        iio_kfifo_free(indio_dev->buffer);
}
EXPORT_SYMBOL(iio_triggered_buffer_cleanup);

/* devres release callback: tears down the buffer and pollfunc */
static void devm_iio_triggered_buffer_clean(struct device *dev, void *res)
{
        iio_triggered_buffer_cleanup(*(struct iio_dev **)res);
}

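/**
 * devm_iio_triggered_buffer_setup() - Resource-managed iio_triggered_buffer_setup()
 * @dev: Device object to which to attach the life-time of the resources
 * @indio_dev: IIO device structure
 * @h: Function which will be used as pollfunc top half
 * @thread: Function which will be used as pollfunc bottom half
 * @ops: Buffer setup functions to use for this device.
 *       If NULL the default setup functions for triggered buffers will be used.
 *
 * This function performs the same tasks as iio_triggered_buffer_setup(), but
 * registers the allocated buffer and pollfunc with devres, so they are
 * released automatically when @dev is detached and no explicit call to
 * iio_triggered_buffer_cleanup() is required.
 */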
int devm_iio_triggered_buffer_setup(struct device *dev,
                                    struct iio_dev *indio_dev,
                                    irqreturn_t (*h)(int irq, void *p),
                                    irqreturn_t (*thread)(int irq, void *p),
                                    const struct iio_buffer_setup_ops *ops)
{
        struct iio_dev **ptr;
        int ret;

        ptr = devres_alloc(devm_iio_triggered_buffer_clean, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        *ptr = indio_dev;

        ret = iio_triggered_buffer_setup(indio_dev, h, thread, ops);
        if (!ret)
                devres_add(dev, ptr);
        else
                devres_free(ptr);

        return ret;
}
EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup);

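/**
 * devm_iio_triggered_buffer_cleanup() - Resource-managed iio_triggered_buffer_cleanup()
 * @dev: Device this iio_dev belongs to
 * @indio_dev: IIO device structure
 *
 * Frees the buffer and pollfunc that were allocated with
 * devm_iio_triggered_buffer_setup() and drops the associated devres entry,
 * so the cleanup does not run a second time when @dev is detached.
 */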
void devm_iio_triggered_buffer_cleanup(struct device *dev,
                                       struct iio_dev *indio_dev)
{
        int rc;

        rc = devres_release(dev, devm_iio_triggered_buffer_clean,
                            devm_iio_device_match, indio_dev);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_cleanup);
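
/*
 * A minimal sketch of how a driver might use the resource-managed variants
 * above; foo_probe, foo_trigger_handler and struct foo_state are hypothetical
 * names used only for illustration. Because the buffer and pollfunc are tied
 * to the device via devres, neither the error path nor the remove path needs
 * an explicit cleanup call.
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                struct iio_dev *indio_dev;
 *                int ret;
 *
 *                indio_dev = devm_iio_device_alloc(&pdev->dev,
 *                                                  sizeof(struct foo_state));
 *                if (!indio_dev)
 *                        return -ENOMEM;
 *
 *                // fill in indio_dev->name, ->channels, ->info, ...
 *
 *                ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
 *                                                      NULL, foo_trigger_handler,
 *                                                      NULL);
 *                if (ret)
 *                        return ret;
 *
 *                return devm_iio_device_register(&pdev->dev, indio_dev);
 *        }
 */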

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
MODULE_LICENSE("GPL");