@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -25,6 +26,8 @@
 #include <linux/mei_cl_bus.h>
 
 #include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
 
 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
 #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
@@ -81,6 +84,11 @@ static int mei_cl_device_remove(struct device *dev)
 	if (!device || !dev->driver)
 		return 0;
 
+	if (device->event_cb) {
+		device->event_cb = NULL;
+		cancel_work_sync(&device->event_work);
+	}
+
 	driver = to_mei_cl_driver(dev->driver);
 	if (!driver->remove) {
 		dev->driver = NULL;
@@ -196,3 +204,221 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver)
 	pr_debug("mei: driver [%s] unregistered\n", driver->driver.name);
 }
 EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
+
+int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length)
+{
+	struct mei_device *dev;
+	struct mei_msg_hdr mei_hdr;
+	struct mei_cl_cb *cb;
+	int me_cl_id, err;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	if (cl->state != MEI_FILE_CONNECTED)
+		return -ENODEV;
+
+	cb = mei_io_cb_init(cl, NULL);
+	if (!cb)
+		return -ENOMEM;
+
+	err = mei_io_cb_alloc_req_buf(cb, length);
+	if (err < 0) {
+		mei_io_cb_free(cb);
+		return err;
+	}
+
+	memcpy(cb->request_buffer.data, buf, length);
+	cb->fop_type = MEI_FOP_WRITE;
+
+	dev = cl->dev;
+
+	mutex_lock(&dev->device_lock);
+
+	/* Check if we have an ME client device */
+	me_cl_id = mei_me_cl_by_id(dev, cl->me_client_id);
+	if (me_cl_id == dev->me_clients_num) {
+		err = -ENODEV;
+		goto out_err;
+	}
+
+	if (length > dev->me_clients[me_cl_id].props.max_msg_length) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	err = mei_cl_flow_ctrl_creds(cl);
+	if (err < 0)
+		goto out_err;
+
+	/* Host buffer is not ready, we queue the request */
+	if (err == 0 || !dev->hbuf_is_ready) {
+		cb->buf_idx = 0;
+		mei_hdr.msg_complete = 0;
+		cl->writing_state = MEI_WRITING;
+		list_add_tail(&cb->list, &dev->write_list.list);
+
+		mutex_unlock(&dev->device_lock);
+
+		return length;
+	}
+
+	dev->hbuf_is_ready = false;
+
+	/* Check for a maximum length */
+	if (length > mei_hbuf_max_len(dev)) {
+		mei_hdr.length = mei_hbuf_max_len(dev);
+		mei_hdr.msg_complete = 0;
+	} else {
+		mei_hdr.length = length;
+		mei_hdr.msg_complete = 1;
+	}
+
+	mei_hdr.host_addr = cl->host_client_id;
+	mei_hdr.me_addr = cl->me_client_id;
+	mei_hdr.reserved = 0;
+
+	if (mei_write_message(dev, &mei_hdr, buf)) {
+		err = -EIO;
+		goto out_err;
+	}
+
+	cl->writing_state = MEI_WRITING;
+	cb->buf_idx = mei_hdr.length;
+
+	if (!mei_hdr.msg_complete) {
+		list_add_tail(&cb->list, &dev->write_list.list);
+	} else {
+		if (mei_cl_flow_ctrl_reduce(cl)) {
+			err = -EIO;
+			goto out_err;
+		}
+
+		list_add_tail(&cb->list, &dev->write_waiting_list.list);
+	}
+
+	mutex_unlock(&dev->device_lock);
+
+	return mei_hdr.length;
+
+out_err:
+	mutex_unlock(&dev->device_lock);
+	mei_io_cb_free(cb);
+
+	return err;
+}
+
+int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+{
+	struct mei_device *dev;
+	struct mei_cl_cb *cb;
+	size_t r_length;
+	int err;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	mutex_lock(&dev->device_lock);
+
+	if (!cl->read_cb) {
+		err = mei_cl_read_start(cl);
+		if (err < 0) {
+			mutex_unlock(&dev->device_lock);
+			return err;
+		}
+	}
+
+	if (cl->reading_state != MEI_READ_COMPLETE &&
+	    !waitqueue_active(&cl->rx_wait)) {
+		mutex_unlock(&dev->device_lock);
+
+		if (wait_event_interruptible(cl->rx_wait,
+				(MEI_READ_COMPLETE == cl->reading_state))) {
+			if (signal_pending(current))
+				return -EINTR;
+			return -ERESTARTSYS;
+		}
+
+		mutex_lock(&dev->device_lock);
+	}
+
+	cb = cl->read_cb;
+
+	if (cl->reading_state != MEI_READ_COMPLETE) {
+		r_length = 0;
+		goto out;
+	}
+
+	r_length = min_t(size_t, length, cb->buf_idx);
+
+	memcpy(buf, cb->response_buffer.data, r_length);
+
+	mei_io_cb_free(cb);
+	cl->reading_state = MEI_IDLE;
+	cl->read_cb = NULL;
+
+out:
+	mutex_unlock(&dev->device_lock);
+
+	return r_length;
+}
+
+int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
+{
+	struct mei_cl *cl = NULL;
+
+	/* TODO: hook between mei_bus_client and mei_cl */
+
+	if (device->ops && device->ops->send)
+		return device->ops->send(device, buf, length);
+
+	return __mei_cl_send(cl, buf, length);
+}
+EXPORT_SYMBOL_GPL(mei_cl_send);
+
+int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
+{
+	struct mei_cl *cl = NULL;
+
+	/* TODO: hook between mei_bus_client and mei_cl */
+
+	if (device->ops && device->ops->recv)
+		return device->ops->recv(device, buf, length);
+
+	return __mei_cl_recv(cl, buf, length);
+}
+EXPORT_SYMBOL_GPL(mei_cl_recv);
+
+static void mei_bus_event_work(struct work_struct *work)
+{
+	struct mei_cl_device *device;
+
+	device = container_of(work, struct mei_cl_device, event_work);
+
+	if (device->event_cb)
+		device->event_cb(device, device->events, device->event_context);
+
+	device->events = 0;
+
+	/* Prepare for the next read */
+	mei_cl_read_start(device->cl);
+}
+
+int mei_cl_register_event_cb(struct mei_cl_device *device,
+			  mei_cl_event_cb_t event_cb, void *context)
+{
+	if (device->event_cb)
+		return -EALREADY;
+
+	device->events = 0;
+	device->event_cb = event_cb;
+	device->event_context = context;
+	INIT_WORK(&device->event_work, mei_bus_event_work);
+
+	mei_cl_read_start(device->cl);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mei_cl_register_event_cb);
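
Not part of the patch itself: a minimal sketch of how a driver bound to the MEI client bus might consume the interfaces added above. The foo_* names, the command payload, the probe prototype, the callback prototype and the BIT(MEI_CL_EVENT_RX) test against the events mask are illustrative assumptions; only mei_cl_send(), mei_cl_recv() and mei_cl_register_event_cb() are defined by the code in this patch.

/* Illustrative sketch only. Assumes a hypothetical "foo" client driver that
 * has already been probed by the MEI client bus, and that the MEI_CL_EVENT_RX
 * bit from <linux/mei_cl_bus.h> flags a completed read.
 */
#include <linux/kernel.h>
#include <linux/mei_cl_bus.h>

#define FOO_MAX_MSG	128	/* hypothetical maximum payload for this client */

/* Runs from the bus event work item once a read has completed */
static void foo_event_cb(struct mei_cl_device *dev, u32 events, void *context)
{
	u8 buf[FOO_MAX_MSG];
	int len;

	if (events & BIT(MEI_CL_EVENT_RX)) {
		len = mei_cl_recv(dev, buf, sizeof(buf));
		if (len > 0)
			pr_info("foo: received %d bytes\n", len);
	}
}

/* Prototype assumed to match struct mei_cl_driver's ->probe hook */
static int foo_probe(struct mei_cl_device *dev,
		     const struct mei_cl_device_id *id)
{
	u8 cmd[] = { 0x01, 0x02 };	/* hypothetical command payload */
	int ret;

	/* Arrange for foo_event_cb() to run when the firmware client answers */
	ret = mei_cl_register_event_cb(dev, foo_event_cb, NULL);
	if (ret)
		return ret;

	/* Write now, or let the bus queue it until the host buffer is ready */
	ret = mei_cl_send(dev, cmd, sizeof(cmd));
	return ret < 0 ? ret : 0;
}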