@@ -20,36 +20,14 @@
 /* Define the max number of pulse/space transitions to buffer */
 #define MAX_IR_EVENT_SIZE 512
 
+/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
+static LIST_HEAD(ir_raw_client_list);
+
 /* Used to handle IR raw handler extensions */
 static DEFINE_SPINLOCK(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
 static u64 available_protocols;
 
-/**
- * RUN_DECODER() - runs an operation on all IR decoders
- * @ops: IR raw handler operation to be called
- * @arg: arguments to be passed to the callback
- *
- * Calls ir_raw_handler::ops for all registered IR handlers. It prevents
- * new decode addition/removal while running, by locking ir_raw_handler_lock
- * mutex. If an error occurs, we keep going, as in the decode case, each
- * decoder must have a crack at decoding the data. We return a sum of the
- * return codes, which will be either 0 or negative for current callers.
- */
-#define RUN_DECODER(ops, ...) ({ \
-	struct ir_raw_handler *_ir_raw_handler; \
-	int _sumrc = 0, _rc; \
-	spin_lock(&ir_raw_handler_lock); \
-	list_for_each_entry(_ir_raw_handler, &ir_raw_handler_list, list) { \
-		if (_ir_raw_handler->ops) { \
-			_rc = _ir_raw_handler->ops(__VA_ARGS__); \
-			_sumrc += _rc; \
-		} \
-	} \
-	spin_unlock(&ir_raw_handler_lock); \
-	_sumrc; \
-})
-
 #ifdef MODULE
 /* Used to load the decoders */
 static struct work_struct wq_load;
@@ -58,11 +36,17 @@ static struct work_struct wq_load;
 static void ir_raw_event_work(struct work_struct *work)
 {
 	struct ir_raw_event ev;
+	struct ir_raw_handler *handler;
 	struct ir_raw_event_ctrl *raw =
 		container_of(work, struct ir_raw_event_ctrl, rx_work);
 
-	while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev))
-		RUN_DECODER(decode, raw->input_dev, ev);
+	while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
+		spin_lock(&ir_raw_handler_lock);
+		list_for_each_entry(handler, &ir_raw_handler_list, list)
+			handler->decode(raw->input_dev, ev);
+		spin_unlock(&ir_raw_handler_lock);
+		raw->prev_ev = ev;
+	}
 }
 
 /**
@@ -176,6 +160,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
 {
 	struct ir_input_dev *ir = input_get_drvdata(input_dev);
 	int rc;
+	struct ir_raw_handler *handler;
 
 	ir->raw = kzalloc(sizeof(*ir->raw), GFP_KERNEL);
 	if (!ir->raw)
@@ -192,26 +177,32 @@ int ir_raw_event_register(struct input_dev *input_dev)
 		return rc;
 	}
 
-	rc = RUN_DECODER(raw_register, input_dev);
-	if (rc < 0) {
-		kfifo_free(&ir->raw->kfifo);
-		kfree(ir->raw);
-		ir->raw = NULL;
-		return rc;
-	}
+	spin_lock(&ir_raw_handler_lock);
+	list_add_tail(&ir->raw->list, &ir_raw_client_list);
+	list_for_each_entry(handler, &ir_raw_handler_list, list)
+		if (handler->raw_register)
+			handler->raw_register(ir->raw->input_dev);
+	spin_unlock(&ir_raw_handler_lock);
 
-	return rc;
+	return 0;
 }
 
 void ir_raw_event_unregister(struct input_dev *input_dev)
 {
 	struct ir_input_dev *ir = input_get_drvdata(input_dev);
+	struct ir_raw_handler *handler;
 
 	if (!ir->raw)
 		return;
 
 	cancel_work_sync(&ir->raw->rx_work);
-	RUN_DECODER(raw_unregister, input_dev);
+
+	spin_lock(&ir_raw_handler_lock);
+	list_del(&ir->raw->list);
+	list_for_each_entry(handler, &ir_raw_handler_list, list)
+		if (handler->raw_unregister)
+			handler->raw_unregister(ir->raw->input_dev);
+	spin_unlock(&ir_raw_handler_lock);
 
 	kfifo_free(&ir->raw->kfifo);
 	kfree(ir->raw);
@@ -224,8 +215,13 @@ void ir_raw_event_unregister(struct input_dev *input_dev)
 
 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
 {
+	struct ir_raw_event_ctrl *raw;
+
 	spin_lock(&ir_raw_handler_lock);
 	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
+	if (ir_raw_handler->raw_register)
+		list_for_each_entry(raw, &ir_raw_client_list, list)
+			ir_raw_handler->raw_register(raw->input_dev);
 	available_protocols |= ir_raw_handler->protocols;
 	spin_unlock(&ir_raw_handler_lock);
 
@@ -235,8 +231,13 @@ EXPORT_SYMBOL(ir_raw_handler_register);
 
 void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
 {
+	struct ir_raw_event_ctrl *raw;
+
 	spin_lock(&ir_raw_handler_lock);
 	list_del(&ir_raw_handler->list);
+	if (ir_raw_handler->raw_unregister)
+		list_for_each_entry(raw, &ir_raw_client_list, list)
+			ir_raw_handler->raw_unregister(raw->input_dev);
 	available_protocols &= ~ir_raw_handler->protocols;
 	spin_unlock(&ir_raw_handler_lock);
 }