// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Nokia Corporation
 *
 * Based on lirc_serial.c
 */
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/hrtimer.h>

#include <media/rc-core.h>

#define WBUF_LEN 256

struct ir_rx51 {
        struct rc_dev *rcdev;
        struct pwm_device *pwm;
        struct hrtimer timer;
        struct device *dev;
        wait_queue_head_t wqueue;

        unsigned int freq;              /* carrier frequency */
        unsigned int duty_cycle;        /* carrier duty cycle */
        int wbuf[WBUF_LEN];
        int wbuf_index;
        unsigned long device_is_open;
};

static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
        pwm_enable(ir_rx51->pwm);
}

static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
        pwm_disable(ir_rx51->pwm);
}

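/*
 * Convert the requested carrier frequency (Hz) and duty cycle (percent)
 * into a PWM period and duty time in nanoseconds, and program the PWM.
 */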
static int init_timing_params(struct ir_rx51 *ir_rx51)
{
        struct pwm_device *pwm = ir_rx51->pwm;
        int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);

        duty = DIV_ROUND_CLOSEST(ir_rx51->duty_cycle * period, 100);

        pwm_config(pwm, duty, period);

        return 0;
}

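/*
 * hrtimer callback that steps through wbuf: even indices hold carrier-on
 * (pulse) durations and odd indices carrier-off (space) durations, both
 * in microseconds. A value of -1 marks the end of the buffer.
 */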
static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
        struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
        ktime_t now;

        if (ir_rx51->wbuf_index < 0) {
                dev_err_ratelimited(ir_rx51->dev,
                                    "BUG wbuf_index has value of %i\n",
                                    ir_rx51->wbuf_index);
                goto end;
        }

        /*
         * If we happen to hit an odd latency spike, loop through the
         * pulses until we catch up.
         */
        do {
                u64 ns;

                if (ir_rx51->wbuf_index >= WBUF_LEN)
                        goto end;
                if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
                        goto end;

                if (ir_rx51->wbuf_index % 2)
                        ir_rx51_off(ir_rx51);
                else
                        ir_rx51_on(ir_rx51);

                ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
                hrtimer_add_expires_ns(timer, ns);

                ir_rx51->wbuf_index++;

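                /*
                 * Re-read the current time so the loop keeps consuming
                 * entries until the next expiry lies in the future again.
                 */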
                now = timer->base->get_time();

        } while (hrtimer_get_expires_tv64(timer) < now);

        return HRTIMER_RESTART;
end:
        /* Stop TX here */
        ir_rx51_off(ir_rx51);
        ir_rx51->wbuf_index = -1;

        wake_up_interruptible(&ir_rx51->wqueue);

        return HRTIMER_NORESTART;
}

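/*
 * tx_ir callback: queue up to WBUF_LEN alternating pulse/space durations
 * (in microseconds) and block until the whole buffer has been sent.
 */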
static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
                      unsigned int count)
{
        struct ir_rx51 *ir_rx51 = dev->priv;

        if (count > WBUF_LEN)
                return -EINVAL;

        memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));

        /* Wait for any pending transfer to finish */
        wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

        init_timing_params(ir_rx51);
        if (count < WBUF_LEN)
                ir_rx51->wbuf[count] = -1; /* Insert termination mark */

        /*
         * REVISIT: Adjust latency requirements so the device doesn't go
         * into too-deep sleep states with pm_qos_add_request().
         */

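        /*
         * Start the first pulse by hand; the timer callback takes over
         * after wbuf[0] microseconds and toggles from index 1 onwards.
         */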
        ir_rx51_on(ir_rx51);
        ir_rx51->wbuf_index = 1;
        hrtimer_start(&ir_rx51->timer,
                      ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
                      HRTIMER_MODE_REL);
        /*
         * Don't return to userspace until the transfer has
         * finished.
         */
        wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

        /* REVISIT: Remove pm_qos constraint, we can sleep again */

        return count;
}

static int ir_rx51_open(struct rc_dev *dev)
{
        struct ir_rx51 *ir_rx51 = dev->priv;

        if (test_and_set_bit(1, &ir_rx51->device_is_open))
                return -EBUSY;

        ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
        if (IS_ERR(ir_rx51->pwm)) {
                int res = PTR_ERR(ir_rx51->pwm);

                dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
                /* Don't leave the device marked open after a failed open */
                clear_bit(1, &ir_rx51->device_is_open);
                return res;
        }

        return 0;
}

static void ir_rx51_release(struct rc_dev *dev)
{
        struct ir_rx51 *ir_rx51 = dev->priv;

        hrtimer_cancel(&ir_rx51->timer);
        ir_rx51_off(ir_rx51);
        pwm_put(ir_rx51->pwm);

        clear_bit(1, &ir_rx51->device_is_open);
}

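/* Single driver-wide instance; wbuf_index == -1 means the transmitter is idle */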
static struct ir_rx51 ir_rx51 = {
        .duty_cycle = 50,
        .wbuf_index = -1,
};

static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
{
        struct ir_rx51 *ir_rx51 = dev->priv;

        ir_rx51->duty_cycle = duty;

        return 0;
}

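/* Only carrier frequencies between 20 kHz and 500 kHz are accepted */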
static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
        struct ir_rx51 *ir_rx51 = dev->priv;

        if (carrier > 500000 || carrier < 20000)
                return -EINVAL;

        ir_rx51->freq = carrier;

        return 0;
}

#ifdef CONFIG_PM

static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
        /*
         * In case the device is still open, do not suspend. Normally
         * this should not be a problem, as lircd keeps the device
         * open only for short periods of time. We also don't want to
         * get involved with race conditions that might happen if we
         * were in the middle of a transmit. Thus, we defer any suspend
         * actions until the transmit has completed.
         */
        if (test_and_set_bit(1, &ir_rx51.device_is_open))
                return -EAGAIN;

        clear_bit(1, &ir_rx51.device_is_open);

        return 0;
}

static int ir_rx51_resume(struct platform_device *dev)
{
        return 0;
}

#else

#define ir_rx51_suspend NULL
#define ir_rx51_resume NULL

#endif /* CONFIG_PM */

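/*
 * Derive a default carrier frequency from the PWM's configured period so
 * that TX still works if userspace never sets a carrier.
 */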
static int ir_rx51_probe(struct platform_device *dev)
{
        struct pwm_device *pwm;
        struct rc_dev *rcdev;

        pwm = pwm_get(&dev->dev, NULL);
        if (IS_ERR(pwm)) {
                int err = PTR_ERR(pwm);

                if (err != -EPROBE_DEFER)
                        dev_err(&dev->dev, "pwm_get failed: %d\n", err);
                return err;
        }

        /* Use default, in case userspace does not set the carrier */
        ir_rx51.freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_get_period(pwm));
        pwm_put(pwm);

        hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        ir_rx51.timer.function = ir_rx51_timer_cb;

        ir_rx51.dev = &dev->dev;

        rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
        if (!rcdev)
                return -ENOMEM;

        rcdev->priv = &ir_rx51;
        rcdev->open = ir_rx51_open;
        rcdev->close = ir_rx51_release;
        rcdev->tx_ir = ir_rx51_tx;
        rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
        rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
        rcdev->driver_name = KBUILD_MODNAME;

        ir_rx51.rcdev = rcdev;

        return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
}

static int ir_rx51_remove(struct platform_device *dev)
{
        return 0;
}

static const struct of_device_id ir_rx51_match[] = {
        {
                .compatible = "nokia,n900-ir",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ir_rx51_match);

static struct platform_driver ir_rx51_platform_driver = {
        .probe          = ir_rx51_probe,
        .remove         = ir_rx51_remove,
        .suspend        = ir_rx51_suspend,
        .resume         = ir_rx51_resume,
        .driver         = {
                .name   = KBUILD_MODNAME,
                .of_match_table = of_match_ptr(ir_rx51_match),
        },
};
module_platform_driver(ir_rx51_platform_driver);

MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");