// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Nokia Corporation
 *
 * Based on lirc_serial.c
 */
Ivaylo Dimitrov | 4406d52 | 2016-06-22 22:22:17 +0300 | [diff] [blame] | 7 | #include <linux/clk.h> |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 8 | #include <linux/module.h> |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 9 | #include <linux/platform_device.h> |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 10 | #include <linux/wait.h> |
Ivaylo Dimitrov | 3fdd152 | 2016-06-22 22:22:19 +0300 | [diff] [blame] | 11 | #include <linux/pwm.h> |
Ivaylo Dimitrov | b540617 | 2016-06-22 22:22:20 +0300 | [diff] [blame] | 12 | #include <linux/of.h> |
Ivaylo Dimitrov | 79cdad3 | 2016-06-22 22:22:21 +0300 | [diff] [blame] | 13 | #include <linux/hrtimer.h> |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 14 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 15 | #include <media/rc-core.h> |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 16 | |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 17 | #define WBUF_LEN 256 |
| 18 | |
/* Driver state for the Nokia RX51 IR transmitter (one static instance). */
struct ir_rx51 {
	struct rc_dev *rcdev;		/* rc-core device we register */
	struct pwm_device *pwm;		/* PWM generating the IR carrier */
	struct hrtimer timer;		/* paces pulse/space transitions */
	struct device *dev;
	wait_queue_head_t wqueue;	/* woken when a transfer finishes */

	unsigned int freq;		/* carrier frequency */
	unsigned int duty_cycle;	/* carrier duty cycle */
	int wbuf[WBUF_LEN];		/* pulse/space durations in us; -1 terminates */
	int wbuf_index;			/* next slot to send; -1 while idle */
	unsigned long device_is_open;	/* bit 1 set while the device is open */
};
| 32 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 33 | static inline void ir_rx51_on(struct ir_rx51 *ir_rx51) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 34 | { |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 35 | pwm_enable(ir_rx51->pwm); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 36 | } |
| 37 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 38 | static inline void ir_rx51_off(struct ir_rx51 *ir_rx51) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 39 | { |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 40 | pwm_disable(ir_rx51->pwm); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 41 | } |
| 42 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 43 | static int init_timing_params(struct ir_rx51 *ir_rx51) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 44 | { |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 45 | struct pwm_device *pwm = ir_rx51->pwm; |
| 46 | int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 47 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 48 | duty = DIV_ROUND_CLOSEST(ir_rx51->duty_cycle * period, 100); |
Ivaylo Dimitrov | 3fdd152 | 2016-06-22 22:22:19 +0300 | [diff] [blame] | 49 | |
| 50 | pwm_config(pwm, duty, period); |
| 51 | |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 52 | return 0; |
| 53 | } |
| 54 | |
/*
 * hrtimer callback that clocks out the pulse/space sequence in
 * ir_rx51->wbuf: even slots switch the carrier on, odd slots switch it
 * off, and each entry's duration (us) advances the timer expiry.
 *
 * Returns HRTIMER_RESTART while entries remain, HRTIMER_NORESTART once
 * the buffer is exhausted or the -1 termination mark is reached.
 */
static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
	struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
	ktime_t now;

	/* wbuf_index < 0 means "idle"; the timer should never fire then. */
	if (ir_rx51->wbuf_index < 0) {
		dev_err_ratelimited(ir_rx51->dev,
				    "BUG wbuf_index has value of %i\n",
				    ir_rx51->wbuf_index);
		goto end;
	}

	/*
	 * If we happen to hit an odd latency spike, loop through the
	 * pulses until we catch up.
	 */
	do {
		u64 ns;

		if (ir_rx51->wbuf_index >= WBUF_LEN)
			goto end;
		if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
			goto end;

		/* Odd slots are spaces (carrier off), even slots pulses. */
		if (ir_rx51->wbuf_index % 2)
			ir_rx51_off(ir_rx51);
		else
			ir_rx51_on(ir_rx51);

		ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
		hrtimer_add_expires_ns(timer, ns);

		ir_rx51->wbuf_index++;

		now = timer->base->get_time();

	} while (hrtimer_get_expires_tv64(timer) < now);

	return HRTIMER_RESTART;
end:
	/* Stop TX here */
	ir_rx51_off(ir_rx51);
	ir_rx51->wbuf_index = -1;

	/* Let a sleeping ir_rx51_tx() know the transfer is done. */
	wake_up_interruptible(&ir_rx51->wqueue);

	return HRTIMER_NORESTART;
}
| 103 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 104 | static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer, |
| 105 | unsigned int count) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 106 | { |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 107 | struct ir_rx51 *ir_rx51 = dev->priv; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 108 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 109 | if (count > WBUF_LEN) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 110 | return -EINVAL; |
| 111 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 112 | memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int)); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 113 | |
| 114 | /* Wait any pending transfers to finish */ |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 115 | wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 116 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 117 | init_timing_params(ir_rx51); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 118 | if (count < WBUF_LEN) |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 119 | ir_rx51->wbuf[count] = -1; /* Insert termination mark */ |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 120 | |
| 121 | /* |
Tony Lindgren | 44773ba | 2018-04-16 10:22:01 -0700 | [diff] [blame] | 122 | * REVISIT: Adjust latency requirements so the device doesn't go in too |
| 123 | * deep sleep states with pm_qos_add_request(). |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 124 | */ |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 125 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 126 | ir_rx51_on(ir_rx51); |
| 127 | ir_rx51->wbuf_index = 1; |
| 128 | hrtimer_start(&ir_rx51->timer, |
| 129 | ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])), |
Ivaylo Dimitrov | 79cdad3 | 2016-06-22 22:22:21 +0300 | [diff] [blame] | 130 | HRTIMER_MODE_REL); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 131 | /* |
| 132 | * Don't return back to the userspace until the transfer has |
| 133 | * finished |
| 134 | */ |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 135 | wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 136 | |
Tony Lindgren | 44773ba | 2018-04-16 10:22:01 -0700 | [diff] [blame] | 137 | /* REVISIT: Remove pm_qos constraint, we can sleep again */ |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 138 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 139 | return count; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 140 | } |
| 141 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 142 | static int ir_rx51_open(struct rc_dev *dev) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 143 | { |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 144 | struct ir_rx51 *ir_rx51 = dev->priv; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 145 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 146 | if (test_and_set_bit(1, &ir_rx51->device_is_open)) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 147 | return -EBUSY; |
| 148 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 149 | ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL); |
| 150 | if (IS_ERR(ir_rx51->pwm)) { |
| 151 | int res = PTR_ERR(ir_rx51->pwm); |
Ivaylo Dimitrov | 79cdad3 | 2016-06-22 22:22:21 +0300 | [diff] [blame] | 152 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 153 | dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res); |
Ivaylo Dimitrov | 79cdad3 | 2016-06-22 22:22:21 +0300 | [diff] [blame] | 154 | return res; |
| 155 | } |
| 156 | |
| 157 | return 0; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 158 | } |
| 159 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 160 | static void ir_rx51_release(struct rc_dev *dev) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 161 | { |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 162 | struct ir_rx51 *ir_rx51 = dev->priv; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 163 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 164 | hrtimer_cancel(&ir_rx51->timer); |
| 165 | ir_rx51_off(ir_rx51); |
| 166 | pwm_put(ir_rx51->pwm); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 167 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 168 | clear_bit(1, &ir_rx51->device_is_open); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 169 | } |
| 170 | |
/* Single device instance; starts idle (wbuf_index == -1) at 50% duty. */
static struct ir_rx51 ir_rx51 = {
	.duty_cycle = 50,
	.wbuf_index = -1,
};
| 175 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 176 | static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty) |
| 177 | { |
| 178 | struct ir_rx51 *ir_rx51 = dev->priv; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 179 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 180 | ir_rx51->duty_cycle = duty; |
| 181 | |
| 182 | return 0; |
| 183 | } |
| 184 | |
| 185 | static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier) |
| 186 | { |
| 187 | struct ir_rx51 *ir_rx51 = dev->priv; |
| 188 | |
| 189 | if (carrier > 500000 || carrier < 20000) |
| 190 | return -EINVAL; |
| 191 | |
| 192 | ir_rx51->freq = carrier; |
| 193 | |
| 194 | return 0; |
| 195 | } |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 196 | |
| 197 | #ifdef CONFIG_PM |
| 198 | |
static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
	/*
	 * In case the device is still open, do not suspend. Normally
	 * this should not be a problem as lircd only keeps the device
	 * open only for short periods of time. We also don't want to
	 * get involved with race conditions that might happen if we
	 * were in a middle of a transmit. Thus, we defer any suspend
	 * actions until transmit has completed.
	 */
	/*
	 * test_and_set_bit() + clear_bit() is an atomic probe of the
	 * open flag: if the bit was already set someone has the device
	 * open; otherwise we momentarily set it and undo it below.
	 */
	if (test_and_set_bit(1, &ir_rx51.device_is_open))
		return -EAGAIN;

	clear_bit(1, &ir_rx51.device_is_open);

	return 0;
}
| 216 | |
/*
 * Nothing to restore on resume: the PWM is re-acquired on open() and
 * timing is reprogrammed by init_timing_params() on each transmit.
 */
static int ir_rx51_resume(struct platform_device *dev)
{
	return 0;
}
| 221 | |
| 222 | #else |
| 223 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 224 | #define ir_rx51_suspend NULL |
| 225 | #define ir_rx51_resume NULL |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 226 | |
| 227 | #endif /* CONFIG_PM */ |
| 228 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 229 | static int ir_rx51_probe(struct platform_device *dev) |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 230 | { |
Ivaylo Dimitrov | 3fdd152 | 2016-06-22 22:22:19 +0300 | [diff] [blame] | 231 | struct pwm_device *pwm; |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 232 | struct rc_dev *rcdev; |
Ivaylo Dimitrov | 3fdd152 | 2016-06-22 22:22:19 +0300 | [diff] [blame] | 233 | |
Ivaylo Dimitrov | 3fdd152 | 2016-06-22 22:22:19 +0300 | [diff] [blame] | 234 | pwm = pwm_get(&dev->dev, NULL); |
| 235 | if (IS_ERR(pwm)) { |
| 236 | int err = PTR_ERR(pwm); |
| 237 | |
| 238 | if (err != -EPROBE_DEFER) |
| 239 | dev_err(&dev->dev, "pwm_get failed: %d\n", err); |
| 240 | return err; |
| 241 | } |
| 242 | |
| 243 | /* Use default, in case userspace does not set the carrier */ |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 244 | ir_rx51.freq = DIV_ROUND_CLOSEST(pwm_get_period(pwm), NSEC_PER_SEC); |
Ivaylo Dimitrov | 3fdd152 | 2016-06-22 22:22:19 +0300 | [diff] [blame] | 245 | pwm_put(pwm); |
| 246 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 247 | hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 248 | ir_rx51.timer.function = ir_rx51_timer_cb; |
Ivaylo Dimitrov | 79cdad3 | 2016-06-22 22:22:21 +0300 | [diff] [blame] | 249 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 250 | ir_rx51.dev = &dev->dev; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 251 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 252 | rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX); |
| 253 | if (!rcdev) |
| 254 | return -ENOMEM; |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 255 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 256 | rcdev->priv = &ir_rx51; |
| 257 | rcdev->open = ir_rx51_open; |
| 258 | rcdev->close = ir_rx51_release; |
| 259 | rcdev->tx_ir = ir_rx51_tx; |
| 260 | rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle; |
| 261 | rcdev->s_tx_carrier = ir_rx51_set_tx_carrier; |
| 262 | rcdev->driver_name = KBUILD_MODNAME; |
| 263 | |
| 264 | ir_rx51.rcdev = rcdev; |
| 265 | |
| 266 | return devm_rc_register_device(&dev->dev, ir_rx51.rcdev); |
| 267 | } |
| 268 | |
/*
 * Remove: nothing to tear down explicitly — the rc device is
 * devm-managed and the PWM is only held between open() and close().
 */
static int ir_rx51_remove(struct platform_device *dev)
{
	return 0;
}
| 273 | |
/* Device-tree match table: binds to the Nokia N900 IR transmitter node. */
static const struct of_device_id ir_rx51_match[] = {
	{
		.compatible = "nokia,n900-ir",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ir_rx51_match);
Ivaylo Dimitrov | b540617 | 2016-06-22 22:22:20 +0300 | [diff] [blame] | 281 | |
/*
 * Platform driver glue. The legacy suspend/resume hooks are compiled
 * out (defined NULL) when CONFIG_PM is not set.
 */
static struct platform_driver ir_rx51_platform_driver = {
	.probe		= ir_rx51_probe,
	.remove		= ir_rx51_remove,
	.suspend	= ir_rx51_suspend,
	.resume		= ir_rx51_resume,
	.driver	= {
		.name	= KBUILD_MODNAME,
		.of_match_table = of_match_ptr(ir_rx51_match),
	},
};
module_platform_driver(ir_rx51_platform_driver);
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 293 | |
Sean Young | a92def1 | 2016-12-19 18:48:29 -0200 | [diff] [blame] | 294 | MODULE_DESCRIPTION("IR TX driver for Nokia RX51"); |
Timo Kokkonen | c332e84 | 2012-08-10 06:16:36 -0300 | [diff] [blame] | 295 | MODULE_AUTHOR("Nokia Corporation"); |
| 296 | MODULE_LICENSE("GPL"); |