Merge branch 'greybus' into staging-testing

This merges the greybus branch into staging-testing.  It contains the
drivers/staging/greybus/ subsystem and related drivers and has passed
the 0-day bot tests so no builds should break.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2016-09-19 19:34:33 +02:00
commit 530a70617c
89 changed files with 37600 additions and 0 deletions

@@ -5290,6 +5290,23 @@ L: netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/aeroflex/

GREYBUS SUBSYSTEM
M:	Johan Hovold <johan@kernel.org>
M:	Alex Elder <elder@kernel.org>
M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S:	Maintained
F:	drivers/staging/greybus/

GREYBUS PROTOCOLS DRIVERS
M:	Rui Miguel Silva <rmfrfs@gmail.com>
S:	Maintained
F:	drivers/staging/greybus/sdio.c
F:	drivers/staging/greybus/light.c
F:	drivers/staging/greybus/gpio.c
F:	drivers/staging/greybus/power_supply.c
F:	drivers/staging/greybus/spi.c
F:	drivers/staging/greybus/spilib.c

GSPCA FINEPIX SUBDRIVER
M:	Frank Zago <frank@zago.net>
L:	linux-media@vger.kernel.org

@@ -102,4 +102,6 @@ source "drivers/staging/i4l/Kconfig"
source "drivers/staging/ks7010/Kconfig"
source "drivers/staging/greybus/Kconfig"
endif # STAGING

@@ -40,3 +40,4 @@ obj-$(CONFIG_WILC1000)	+= wilc1000/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/

@@ -0,0 +1,139 @@
/*
* Sample code to test CAP protocol
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "../../greybus_authentication.h"

struct cap_ioc_get_endpoint_uid uid;
struct cap_ioc_get_ims_certificate cert = {
	.certificate_class = 0,
	.certificate_id = 0,
};

struct cap_ioc_authenticate authenticate = {
	.auth_type = 0,
	.challenge = {0},
};

int main(int argc, char *argv[])
{
	char *capdev;
	int fd, ret;

	/* Make sure arguments are correct */
	if (argc != 2) {
		printf("\nUsage: ./authenticate <Path of the gb-cap-X dev>\n");
		return 0;
	}

	capdev = argv[1];

	printf("Opening %s authentication device\n", capdev);

	fd = open(capdev, O_RDWR);
	if (fd < 0) {
		printf("Failed to open: %s\n", capdev);
		return -1;
	}

	/* Get UID */
	printf("Get UID\n");

	ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
	if (ret < 0) {
		printf("Failed to get UID: %s (%d)\n", capdev, ret);
		ret = -1;
		goto close_fd;
	}

	printf("UID received: 0x%llx\n", *(long long unsigned int *)(uid.uid));

	/* Get certificate */
	printf("Get IMS certificate\n");

	ret = ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert);
	if (ret < 0) {
		printf("Failed to get IMS certificate: %s (%d)\n", capdev, ret);
		ret = -1;
		goto close_fd;
	}

	printf("IMS Certificate size: %d\n", cert.cert_size);

	/* Authenticate */
	printf("Authenticate module\n");

	memcpy(authenticate.uid, uid.uid, 8);

	ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &authenticate);
	if (ret < 0) {
		printf("Failed to authenticate module: %s (%d)\n", capdev, ret);
		ret = -1;
		goto close_fd;
	}

	printf("Authenticated, result (%02x), sig-size (%02x)\n",
	       authenticate.result_code, authenticate.signature_size);

close_fd:
	close(fd);

	return ret;
}

@@ -0,0 +1,333 @@
Firmware Management
-------------------
Copyright 2016 Google Inc.
Copyright 2016 Linaro Ltd.

Interface-Manifest
------------------

All firmware packages on the Modules or Interfaces are managed by a special
Firmware Management Protocol. To support Firmware Management by the AP, the
Interface Manifest shall at least contain the Firmware Management Bundle and a
Firmware Management Protocol CPort within it.

The bundle may contain additional CPorts based on the extra functionality
required to manage firmware packages.

For example, this is how the Firmware Management part of the Interface Manifest
may look:

; Firmware Management Bundle (Bundle 1):
[bundle-descriptor 1]
class = 0x16

; (Mandatory) Firmware Management Protocol on CPort 1
[cport-descriptor 1]
bundle = 1
protocol = 0x18

; (Optional) Firmware Download Protocol on CPort 2
[cport-descriptor 2]
bundle = 1
protocol = 0x17

; (Optional) SPI protocol on CPort 3
[cport-descriptor 3]
bundle = 1
protocol = 0x0b

; (Optional) Component Authentication Protocol (CAP) on CPort 4
[cport-descriptor 4]
bundle = 1
protocol = 0x19

Sysfs Interfaces - Firmware Management
--------------------------------------

The Firmware Management Protocol interacts with userspace using the character
device interface. The character device will be present in the /dev/ directory
and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.

Identifying the Character Device
================================

There can be multiple devices present in the /dev/ directory with the name
gb-fw-mgmt-N, and the user first needs to identify the character device used
for firmware management of a particular interface.

The Firmware Management core creates a device of class 'gb_fw_mgmt', which
shall be used by the user to identify the right character device for it. The
class device is created within the Bundle directory for a particular
Interface.

For example, this is how the class device may be present:

/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt/gb-fw-mgmt-0

The last name in this path, gb-fw-mgmt-0, is precisely the name of the
character device, and so the device in this case will be:

/dev/gb-fw-mgmt-0.
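
A userspace helper can discover that name programmatically by listing the
gb_fw_mgmt class directory of the bundle. A minimal sketch (the bundle path
below is just the example path from above):

#include <stdio.h>
#include <dirent.h>

int main(void)
{
	const char *class_dir =
		"/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt";
	struct dirent *ent;
	DIR *dir = opendir(class_dir);

	if (!dir)
		return -1;

	/* Each visible entry names a gb-fw-mgmt-N character device */
	while ((ent = readdir(dir))) {
		if (ent->d_name[0] != '.')
			printf("/dev/%s\n", ent->d_name);
	}

	closedir(dir);
	return 0;
}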

Operations on the Char device
=============================

The character device (gb-fw-mgmt-0 in the example) can be opened by the
userspace application, which can then perform various 'ioctl' operations on
the device. The device doesn't support any read/write operations.

Following are the IOCTLs and their data structures available to the user:

/* IOCTL support */
#define GB_FW_U_LOAD_METHOD_UNIPRO			0x01
#define GB_FW_U_LOAD_METHOD_INTERNAL			0x02

#define GB_FW_U_LOAD_STATUS_FAILED			0x00
#define GB_FW_U_LOAD_STATUS_UNVALIDATED			0x01
#define GB_FW_U_LOAD_STATUS_VALIDATED			0x02
#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED		0x03

#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS		0x01
#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND		0x02
#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH		0x03
#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE		0x04
#define GB_FW_U_BACKEND_FW_STATUS_INT			0x05
#define GB_FW_U_BACKEND_FW_STATUS_RETRY			0x06
#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED		0x07

#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS		0x01
#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE	0x02
#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED	0x03
#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY		0x04
#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT		0x05

struct fw_mgmt_ioc_get_intf_version {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u16 major;
	__u16 minor;
} __attribute__ ((__packed__));

struct fw_mgmt_ioc_get_backend_version {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u16 major;
	__u16 minor;
	__u8 status;
} __attribute__ ((__packed__));

struct fw_mgmt_ioc_intf_load_and_validate {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u8 load_method;
	__u8 status;
	__u16 major;
	__u16 minor;
} __attribute__ ((__packed__));

struct fw_mgmt_ioc_backend_fw_update {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u8 status;
} __attribute__ ((__packed__));

#define FW_MGMT_IOCTL_BASE		'S'
#define FW_MGMT_IOC_GET_INTF_FW		_IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
#define FW_MGMT_IOC_GET_BACKEND_FW	_IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE	_IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE	_IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
#define FW_MGMT_IOC_SET_TIMEOUT_MS	_IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
#define FW_MGMT_IOC_MODE_SWITCH		_IO(FW_MGMT_IOCTL_BASE, 5)

1. FW_MGMT_IOC_GET_INTF_FW:

   This ioctl shall be used by the user to get the version and firmware-tag of
   the currently running Interface Firmware. All the fields of the 'struct
   fw_mgmt_ioc_get_intf_version' are filled by the kernel.

2. FW_MGMT_IOC_GET_BACKEND_FW:

   This ioctl shall be used by the user to get the version of a currently
   running Backend Interface Firmware identified by a firmware-tag. The user
   is required to fill the 'firmware_tag' field of the 'struct
   fw_mgmt_ioc_get_backend_version' in this case. The 'major' and 'minor'
   fields are set by the kernel in response.

3. FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:

   This ioctl shall be used by the user to load an Interface Firmware package
   on an Interface. The user needs to fill the 'firmware_tag' and
   'load_method' fields of the 'struct fw_mgmt_ioc_intf_load_and_validate'.
   The 'status', 'major' and 'minor' fields are set by the kernel in response.

4. FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:

   This ioctl shall be used by the user to request an Interface to update a
   Backend Interface Firmware. The user is required to fill the 'firmware_tag'
   field of the 'struct fw_mgmt_ioc_backend_fw_update' in this case. The
   'status' field is set by the kernel in response.

5. FW_MGMT_IOC_SET_TIMEOUT_MS:

   This ioctl shall be used by the user to increase the timeout interval
   within which the firmware must get loaded by the Module. The default
   timeout is 1 second. The user needs to pass the timeout in milliseconds.

6. FW_MGMT_IOC_MODE_SWITCH:

   This ioctl shall be used by the user to mode-switch the module to the
   previously loaded interface firmware. If the interface firmware hasn't been
   loaded previously, or if another unsuccessful
   FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE operation has been started after loading
   the interface firmware, then the firmware core won't allow the mode-switch.
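
For orientation, here is a minimal sketch of the ioctl call sequence for items
5 and 1 (the device node is an assumption; the complete firmware.c sample
application appears later on this page):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>

#include "../../greybus_firmware.h"

int main(void)
{
	struct fw_mgmt_ioc_get_intf_version info = { };
	unsigned int timeout = 10000;	/* ms */
	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);

	if (fd < 0)
		return -1;

	/* Give the module more time to load firmware (item 5) */
	if (ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout) < 0)
		goto out;

	/* Let the kernel fill in tag and version of the running firmware */
	if (ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &info) == 0)
		printf("tag (%s), major (%d), minor (%d)\n",
		       info.firmware_tag, info.major, info.minor);
out:
	close(fd);
	return 0;
}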

Sysfs Interfaces - Authentication
---------------------------------

The Component Authentication Protocol interacts with userspace using the
character device interface. The character device will be present in the /dev/
directory and will be named gb-authenticate-<N>. The number <N> is assigned at
runtime.

Identifying the Character Device
================================

There can be multiple devices present in the /dev/ directory with the name
gb-authenticate-N, and the user first needs to identify the character device
used for authentication of a particular interface.

The Authentication core creates a device of class 'gb_authenticate', which
shall be used by the user to identify the right character device for it. The
class device is created within the Bundle directory for a particular
Interface.

For example, this is how the class device may be present:

/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_authenticate/gb-authenticate-0

The last name in this path, gb-authenticate-0, is precisely the name of the
character device, and so the device in this case will be:

/dev/gb-authenticate-0.

Operations on the Char device
=============================

The character device (/dev/gb-authenticate-0 in the above example) can be
opened by the userspace application, which can then perform various 'ioctl'
operations on the device. The device doesn't support any read/write
operations.

Following are the IOCTLs and their data structures available to the user:

#define CAP_CERTIFICATE_MAX_SIZE	1600
#define CAP_SIGNATURE_MAX_SIZE		320

/* Certificate class types */
#define CAP_CERT_IMS_EAPC		0x00000001
#define CAP_CERT_IMS_EASC		0x00000002
#define CAP_CERT_IMS_EARC		0x00000003
#define CAP_CERT_IMS_IAPC		0x00000004
#define CAP_CERT_IMS_IASC		0x00000005
#define CAP_CERT_IMS_IARC		0x00000006

/* IMS Certificate response result codes */
#define CAP_IMS_RESULT_CERT_FOUND	0x00
#define CAP_IMS_RESULT_CERT_CLASS_INVAL	0x01
#define CAP_IMS_RESULT_CERT_CORRUPT	0x02
#define CAP_IMS_RESULT_CERT_NOT_FOUND	0x03

/* Authentication types */
#define CAP_AUTH_IMS_PRI		0x00000001
#define CAP_AUTH_IMS_SEC		0x00000002
#define CAP_AUTH_IMS_RSA		0x00000003

/* Authenticate response result codes */
#define CAP_AUTH_RESULT_CR_SUCCESS	0x00
#define CAP_AUTH_RESULT_CR_BAD_TYPE	0x01
#define CAP_AUTH_RESULT_CR_WRONG_EP	0x02
#define CAP_AUTH_RESULT_CR_NO_KEY	0x03
#define CAP_AUTH_RESULT_CR_SIG_FAIL	0x04

/* IOCTL support */
struct cap_ioc_get_endpoint_uid {
	__u8 uid[8];
} __attribute__ ((__packed__));

struct cap_ioc_get_ims_certificate {
	__u32 certificate_class;
	__u32 certificate_id;
	__u8 result_code;
	__u32 cert_size;
	__u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
} __attribute__ ((__packed__));

struct cap_ioc_authenticate {
	__u32 auth_type;
	__u8 uid[8];
	__u8 challenge[32];
	__u8 result_code;
	__u8 response[64];
	__u32 signature_size;
	__u8 signature[CAP_SIGNATURE_MAX_SIZE];
} __attribute__ ((__packed__));

#define CAP_IOCTL_BASE			'C'
#define CAP_IOC_GET_ENDPOINT_UID	_IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
#define CAP_IOC_GET_IMS_CERTIFICATE	_IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
#define CAP_IOC_AUTHENTICATE		_IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)

1. CAP_IOC_GET_ENDPOINT_UID:

   This ioctl shall be used by the user to get the endpoint UID associated
   with the Interface. All the fields of the 'struct cap_ioc_get_endpoint_uid'
   are filled by the kernel.

2. CAP_IOC_GET_IMS_CERTIFICATE:

   This ioctl shall be used by the user to retrieve one of the available
   cryptographic certificates held by the Interface for use in Component
   Authentication. The user is required to fill the 'certificate_class' and
   'certificate_id' fields of the 'struct cap_ioc_get_ims_certificate' in this
   case. The other fields will be set by the kernel in response. The first
   'cert_size' bytes of the 'certificate' shall be read by the user and the
   rest must be discarded.

3. CAP_IOC_AUTHENTICATE:

   This ioctl shall be used by the user to authenticate the Module attached to
   an Interface. The user needs to fill the 'auth_type', 'uid', and
   'challenge' fields of the 'struct cap_ioc_authenticate'. The other fields
   will be set by the kernel in response. The first 'signature_size' bytes of
   the 'signature' shall be read by the user and the rest must be discarded.
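
As a short illustration of the 'cert_size' rule in item 2 (a sketch only; the
device node and certificate class are assumptions, and the complete
authenticate.c sample appears earlier on this page):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>

#include "../../greybus_authentication.h"

static struct cap_ioc_get_ims_certificate cert = {
	.certificate_class = CAP_CERT_IMS_EAPC,
	.certificate_id = 0,
};

int main(void)
{
	int fd = open("/dev/gb-authenticate-0", O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert) == 0 &&
	    cert.result_code == CAP_IMS_RESULT_CERT_FOUND) {
		/*
		 * Only the first cert_size bytes of the fixed-size
		 * certificate buffer are valid; the rest is discarded.
		 */
		fwrite(cert.certificate, 1, cert.cert_size, stdout);
	}

	close(fd);
	return 0;
}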

Sysfs Interfaces - Firmware Download
------------------------------------

The Firmware Download Protocol uses the Linux kernel's existing firmware
class, and the interfaces provided to userspace are described in:
Documentation/firmware_class/.

Sysfs Interfaces - SPI Flash
----------------------------

The SPI flash is exposed in userspace as an MTD device, which is created
within the Bundle directory. For example, this is how the path may look:

$ ls /sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/spi_master/spi32766/spi32766.0/mtd
mtd0 mtd0ro
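
The flash contents can then be read back through the usual MTD character
device, for instance (assuming the default /dev numbering):

$ dd if=/dev/mtd0ro of=flash-backup.bin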

Sample Applications
-------------------

The current directory also provides a firmware.c test application, which can
be referenced while developing userspace applications that talk to the
firmware-management protocol.

The current directory also provides an authenticate.c test application, which
can be referenced while developing userspace applications that talk to the
component authentication protocol.
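
For example, an interface-firmware update using the defaults documented in
firmware.c might be invoked as (the device node is an assumption):

$ ./firmware /dev/gb-fw-mgmt-0 0 s3f 10000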

@@ -0,0 +1,262 @@
/*
* Sample code to test firmware-management protocol
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "../../greybus_firmware.h"

#define FW_DEV_DEFAULT		"/dev/gb-fw-mgmt-0"
#define FW_TAG_INT_DEFAULT	"s3f"
#define FW_TAG_BCND_DEFAULT	"bf_01"
#define FW_UPDATE_TYPE_DEFAULT	0
#define FW_TIMEOUT_DEFAULT	10000

static const char *firmware_tag;
static const char *fwdev = FW_DEV_DEFAULT;
static int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
static int fw_timeout = FW_TIMEOUT_DEFAULT;

static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
static struct fw_mgmt_ioc_intf_load_and_validate intf_load;
static struct fw_mgmt_ioc_backend_fw_update backend_update;

static void usage(void)
{
	printf("\nUsage: ./firmware <gb-fw-mgmt-X (default: gb-fw-mgmt-0)> <interface: 0, backend: 1 (default: 0)> <firmware-tag> (default: \"s3f\"/\"bf_01\") <timeout (default: 10000 ms)>\n");
}

static int update_intf_firmware(int fd)
{
	int ret;

	/* Get Interface Firmware Version */
	printf("Get Interface Firmware Version\n");

	ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
	if (ret < 0) {
		printf("Failed to get interface firmware version: %s (%d)\n",
		       fwdev, ret);
		return -1;
	}

	printf("Interface Firmware tag (%s), major (%d), minor (%d)\n",
	       intf_fw_info.firmware_tag, intf_fw_info.major,
	       intf_fw_info.minor);

	/* Try Interface Firmware load over Unipro */
	printf("Loading Interface Firmware\n");

	intf_load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
	intf_load.status = 0;
	intf_load.major = 0;
	intf_load.minor = 0;

	strncpy((char *)&intf_load.firmware_tag, firmware_tag,
		GB_FIRMWARE_U_TAG_MAX_SIZE);

	ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
	if (ret < 0) {
		printf("Failed to load interface firmware: %s (%d)\n", fwdev,
		       ret);
		return -1;
	}

	if (intf_load.status != GB_FW_U_LOAD_STATUS_VALIDATED &&
	    intf_load.status != GB_FW_U_LOAD_STATUS_UNVALIDATED) {
		printf("Load status says loading failed: %d\n",
		       intf_load.status);
		return -1;
	}

	printf("Interface Firmware (%s) Load done: major: %d, minor: %d, status: %d\n",
	       firmware_tag, intf_load.major, intf_load.minor,
	       intf_load.status);

	/* Initiate Mode-switch to the newly loaded firmware */
	printf("Initiate Mode switch\n");

	ret = ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
	if (ret < 0)
		printf("Failed to initiate mode-switch (%d)\n", ret);

	return ret;
}

static int update_backend_firmware(int fd)
{
	int ret;

	/* Get Backend Firmware Version */
	printf("Getting Backend Firmware Version\n");

	strncpy((char *)&backend_fw_info.firmware_tag, firmware_tag,
		GB_FIRMWARE_U_TAG_MAX_SIZE);

retry_fw_version:
	ret = ioctl(fd, FW_MGMT_IOC_GET_BACKEND_FW, &backend_fw_info);
	if (ret < 0) {
		printf("Failed to get backend firmware version: %s (%d)\n",
		       fwdev, ret);
		return -1;
	}

	printf("Backend Firmware tag (%s), major (%d), minor (%d), status (%d)\n",
	       backend_fw_info.firmware_tag, backend_fw_info.major,
	       backend_fw_info.minor, backend_fw_info.status);

	if (backend_fw_info.status == GB_FW_U_BACKEND_VERSION_STATUS_RETRY)
		goto retry_fw_version;

	if ((backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS)
	    && (backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE)) {
		printf("Failed to get backend firmware version: %s (%d)\n",
		       fwdev, backend_fw_info.status);
		return -1;
	}

	/* Try Backend Firmware Update over Unipro */
	printf("Updating Backend Firmware\n");

	strncpy((char *)&backend_update.firmware_tag, firmware_tag,
		GB_FIRMWARE_U_TAG_MAX_SIZE);

retry_fw_update:
	backend_update.status = 0;

	ret = ioctl(fd, FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE, &backend_update);
	if (ret < 0) {
		printf("Failed to load backend firmware: %s (%d)\n", fwdev, ret);
		return -1;
	}

	if (backend_update.status == GB_FW_U_BACKEND_FW_STATUS_RETRY) {
		printf("Retrying firmware update: %d\n", backend_update.status);
		goto retry_fw_update;
	}

	if (backend_update.status != GB_FW_U_BACKEND_FW_STATUS_SUCCESS) {
		printf("Load status says loading failed: %d\n",
		       backend_update.status);
	} else {
		printf("Backend Firmware (%s) Load done: status: %d\n",
		       firmware_tag, backend_update.status);
	}

	return 0;
}

int main(int argc, char *argv[])
{
	int fd, ret;

	if (argc > 1 &&
	    (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
		usage();
		return -1;
	}

	if (argc > 1)
		fwdev = argv[1];

	if (argc > 2)
		sscanf(argv[2], "%d", &fw_update_type);

	if (argc > 3) {
		firmware_tag = argv[3];
	} else if (!fw_update_type) {
		firmware_tag = FW_TAG_INT_DEFAULT;
	} else {
		firmware_tag = FW_TAG_BCND_DEFAULT;
	}

	if (argc > 4)
		sscanf(argv[4], "%d", &fw_timeout);

	printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %d\n",
	       fwdev, fw_update_type == 0 ? "interface" : "backend",
	       firmware_tag, fw_timeout);

	printf("Opening %s firmware management device\n", fwdev);

	fd = open(fwdev, O_RDWR);
	if (fd < 0) {
		printf("Failed to open: %s\n", fwdev);
		return -1;
	}

	/* Set Timeout */
	printf("Setting timeout to %d ms\n", fw_timeout);

	ret = ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &fw_timeout);
	if (ret < 0) {
		printf("Failed to set timeout: %s (%d)\n", fwdev, ret);
		ret = -1;
		goto close_fd;
	}

	if (!fw_update_type)
		ret = update_intf_firmware(fd);
	else
		ret = update_backend_firmware(fd);

close_fd:
	close(fd);

	return ret;
}

@@ -0,0 +1,275 @@
What: /sys/bus/greybus/devices/greybusN
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The "root" greybus device for the Greybus device tree, or bus,
where N is a dynamically assigned 1-based id.
What: /sys/bus/greybus/devices/greybusN/bus_id
Date: April 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The ID of the "root" greybus device, or bus.
What: /sys/bus/greybus/devices/N-M
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
A Module M on the bus N, where M is the 1-byte interface
ID of the module's primary interface.
What: /sys/bus/greybus/devices/N-M/eject
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Writing a non-zero argument to this attribute disables the
module's interfaces before physically ejecting it.
What: /sys/bus/greybus/devices/N-M/module_id
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The ID of a Greybus module, corresponding to the ID of its
primary interface.
What: /sys/bus/greybus/devices/N-M/num_interfaces
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The number of interfaces of a module.
What: /sys/bus/greybus/devices/N-M.I
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
An Interface I on the bus N and module N-M, where I is the
1-byte interface ID.
What: /sys/bus/greybus/devices/N-M.I/current_now
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Current measurement of the interface in microamps (uA)
What: /sys/bus/greybus/devices/N-M.I/ddbl1_manufacturer_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Unipro Device Descriptor Block Level 1 manufacturer ID for the
greybus Interface.
What: /sys/bus/greybus/devices/N-M.I/ddbl1_product_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Unipro Device Descriptor Block Level 1 product ID for the
greybus Interface.
What: /sys/bus/greybus/devices/N-M.I/interface_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The ID of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I/interface_type
Date: June 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The type of a Greybus interface; "dummy", "unipro", "greybus",
or "unknown".
What: /sys/bus/greybus/devices/N-M.I/power_now
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Power measurement of the interface in microwatts (uW)
What: /sys/bus/greybus/devices/N-M.I/power_state
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
This file reflects the power state of a Greybus interface. If
the value read from it is "on", then power is currently
supplied to the interface. Otherwise it will read "off" and
power is currently not supplied to the interface.
If the value read is "off", then writing "on" (or '1', 'y',
'Y') to this file will enable power to the interface and an
attempt to boot and possibly enumerate it will be made. Note
that on errors, the interface will again be powered down.
If the value read is "on", then writing "off" (or '0', 'n',
'N') to this file will power down the interface.
What: /sys/bus/greybus/devices/N-M.I/product_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Product ID of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I/serial_number
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Serial Number of the Greybus interface, represented by a 64 bit
hexadecimal number.
What: /sys/bus/greybus/devices/N-M.I/vendor_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Vendor ID of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I/voltage_now
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Voltage measurement of the interface in microvolts (uV)
What: /sys/bus/greybus/devices/N-M.I.ctrl
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Abstract control device for interface I that represents the
current mode of an enumerated Greybus interface.
What: /sys/bus/greybus/devices/N-M.I.ctrl/product_string
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Product ID string of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I.ctrl/vendor_string
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Vendor ID string of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I.B
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
A bundle B on the Interface I, where B is a 1-byte
number representing the bundle.
What: /sys/bus/greybus/devices/N-M.I.B/bundle_class
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The greybus class of the bundle B.
What: /sys/bus/greybus/devices/N-M.I.B/bundle_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The interface-unique id of the bundle B.
What: /sys/bus/greybus/devices/N-M.I.B/gpbX
Date: April 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The General Purpose Bridged PHY device of the bundle B,
where X is a dynamically assigned 0-based id.
What: /sys/bus/greybus/devices/N-M.I.B/state
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
A bundle has a state that is managed by the userspace
Endo process. This file allows that Endo to signal
other Android HALs that the state of the bundle has
changed to a specific value. When written to, any
process watching the file will be woken up, and the new
value can be read. It's a "poor-man's IPC", yes, but
simplifies the Android userspace code immensely.
What: /sys/bus/greybus/devices/N-svc
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The singleton SVC device of bus N.
What: /sys/bus/greybus/devices/N-svc/ap_intf_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The AP interface ID, a 1-byte non-zero integer which
defines the position of the AP module on the frame.
The interface positions are defined in the GMP
Module Developer Kit.
What: /sys/bus/greybus/devices/N-svc/endo_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The Endo ID, which is a 2-byte hexadecimal value
defined by the Endo layout scheme, documented in
the GMP Module Developer Kit.
What: /sys/bus/greybus/devices/N-svc/intf_eject
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Write the number of the interface that you wish to
forcibly eject from the system.
What: /sys/bus/greybus/devices/N-svc/version
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The version number of the firmware in the SVC device.
What: /sys/bus/greybus/devices/N-svc/watchdog
Date: October 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Whether the SVC watchdog is enabled or not. Writing 0 to this
file will disable the watchdog; writing 1 will enable it.
What: /sys/bus/greybus/devices/N-svc/watchdog_action
Date: July 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
This attribute indicates the action to be performed upon SVC
watchdog bite.
The action can be either "reset" or "panic". Writing one of
these values changes the behavior upon an SVC watchdog bite.
The default value is "reset".
"reset" means the UniPro subsystem is to be reset.
"panic" means SVC watchdog bite will cause kernel to panic.

@@ -0,0 +1,219 @@
menuconfig GREYBUS
	tristate "Greybus support"
	depends on SYSFS
	---help---
	  This option enables the Greybus driver core. Greybus is a
	  hardware protocol that was designed to provide Unipro with a
	  sane application layer. It was originally designed for the
	  ARA project, a modular phone system, but has shown up in other
	  phones, and can be tunneled over other busses in order to
	  control hardware devices.

	  Say Y here to enable support for these types of drivers.

	  To compile this code as a module, choose M here: the module
	  will be called greybus.ko

if GREYBUS

config GREYBUS_ES2
	tristate "Greybus ES3 USB host controller"
	depends on USB
	---help---
	  Select this option if you have a Toshiba ES3 USB device that
	  acts as a Greybus "host controller". This device is a bridge
	  from a USB device to a Unipro network.

	  To compile this code as a module, choose M here: the module
	  will be called gb-es2.ko

config GREYBUS_AUDIO
	tristate "Greybus Audio Class driver"
	depends on SOUND
	---help---
	  Select this option if you have a device that follows the
	  Greybus Audio Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-audio.ko

config GREYBUS_BOOTROM
	tristate "Greybus Bootrom Class driver"
	---help---
	  Select this option if you have a device that follows the
	  Greybus Bootrom Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-bootrom.ko

config GREYBUS_CAMERA
	tristate "Greybus Camera Class driver"
	depends on MEDIA && LEDS_CLASS_FLASH && BROKEN
	---help---
	  Select this option if you have a device that follows the
	  Greybus Camera Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-camera.ko

config GREYBUS_FIRMWARE
	tristate "Greybus Firmware Download Class driver"
	depends on SPI
	---help---
	  Select this option if you have a device that follows the
	  Greybus Firmware Download Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-firmware.ko

config GREYBUS_HID
	tristate "Greybus HID Class driver"
	depends on HID && INPUT
	---help---
	  Select this option if you have a device that follows the
	  Greybus HID Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-hid.ko

config GREYBUS_LIGHT
	tristate "Greybus LED Class driver"
	depends on LEDS_CLASS
	---help---
	  Select this option if you have a device that follows the
	  Greybus LED Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-light.ko

config GREYBUS_LOG
	tristate "Greybus Debug Log Class driver"
	---help---
	  Select this option if you have a device that follows the
	  Greybus Debug Log Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-log.ko

config GREYBUS_LOOPBACK
	tristate "Greybus Loopback Class driver"
	---help---
	  Select this option if you have a device that follows the
	  Greybus Loopback Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-loopback.ko

config GREYBUS_POWER
	tristate "Greybus Powersupply Class driver"
	depends on POWER_SUPPLY
	---help---
	  Select this option if you have a device that follows the
	  Greybus Powersupply Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-power-supply.ko

config GREYBUS_RAW
	tristate "Greybus Raw Class driver"
	---help---
	  Select this option if you have a device that follows the
	  Greybus Raw Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-raw.ko

config GREYBUS_VIBRATOR
	tristate "Greybus Vibrator Motor Class driver"
	---help---
	  Select this option if you have a device that follows the
	  Greybus Vibrator Motor Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-vibrator.ko

menuconfig GREYBUS_BRIDGED_PHY
	tristate "Greybus Bridged PHY Class drivers"
	---help---
	  Select this option to pick from a variety of Greybus Bridged
	  PHY class drivers. These drivers emulate a number of
	  different "traditional" busses by tunneling them over Greybus.
	  Examples of this include serial, SPI, USB, and others.

	  To compile this code as a module, choose M here: the module
	  will be called gb-phy.ko

if GREYBUS_BRIDGED_PHY

config GREYBUS_GPIO
	tristate "Greybus GPIO Bridged PHY driver"
	depends on GPIOLIB
	---help---
	  Select this option if you have a device that follows the
	  Greybus GPIO Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-gpio.ko

config GREYBUS_I2C
	tristate "Greybus I2C Bridged PHY driver"
	depends on I2C
	---help---
	  Select this option if you have a device that follows the
	  Greybus I2C Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-i2c.ko

config GREYBUS_PWM
	tristate "Greybus PWM Bridged PHY driver"
	depends on PWM
	---help---
	  Select this option if you have a device that follows the
	  Greybus PWM Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-pwm.ko

config GREYBUS_SDIO
	tristate "Greybus SDIO Bridged PHY driver"
	depends on MMC
	---help---
	  Select this option if you have a device that follows the
	  Greybus SDIO Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-sdio.ko

config GREYBUS_SPI
	tristate "Greybus SPI Bridged PHY driver"
	depends on SPI
	---help---
	  Select this option if you have a device that follows the
	  Greybus SPI Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-spi.ko

config GREYBUS_UART
	tristate "Greybus UART Bridged PHY driver"
	depends on TTY
	---help---
	  Select this option if you have a device that follows the
	  Greybus UART Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-uart.ko

config GREYBUS_USB
	tristate "Greybus USB Host Bridged PHY driver"
	depends on USB
	---help---
	  Select this option if you have a device that follows the
	  Greybus USB Host Bridged PHY Class specification.

	  To compile this code as a module, choose M here: the module
	  will be called gb-usb.ko

endif # GREYBUS_BRIDGED_PHY

endif # GREYBUS

@@ -0,0 +1,96 @@
# Greybus core
greybus-y :=	core.o		\
		debugfs.o	\
		hd.o		\
		manifest.o	\
		module.o	\
		interface.o	\
		bundle.o	\
		connection.o	\
		control.o	\
		svc.o		\
		svc_watchdog.o	\
		operation.o	\
		timesync.o	\
		timesync_platform.o
obj-$(CONFIG_GREYBUS) += greybus.o
# needed for trace events
ccflags-y += -I$(src)
# Greybus Host controller drivers
gb-es2-y := es2.o
obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
# Greybus class drivers
gb-bootrom-y := bootrom.o
gb-camera-y := camera.o
gb-firmware-y := fw-core.o fw-download.o fw-management.o authentication.o
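# spilib is shared: it is linked into both the firmware-download (SPI flash)
# and SPI bridged-PHY drivers below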
gb-spilib-y := spilib.o
gb-hid-y := hid.o
gb-light-y := light.o
gb-log-y := log.o
gb-loopback-y := loopback.o
gb-power-supply-y := power_supply.o
gb-raw-y := raw.o
gb-vibrator-y := vibrator.o
obj-$(CONFIG_GREYBUS_BOOTROM) += gb-bootrom.o
obj-$(CONFIG_GREYBUS_CAMERA) += gb-camera.o
obj-$(CONFIG_GREYBUS_FIRMWARE) += gb-firmware.o gb-spilib.o
obj-$(CONFIG_GREYBUS_HID) += gb-hid.o
obj-$(CONFIG_GREYBUS_LIGHT) += gb-light.o
obj-$(CONFIG_GREYBUS_LOG) += gb-log.o
obj-$(CONFIG_GREYBUS_LOOPBACK) += gb-loopback.o
obj-$(CONFIG_GREYBUS_POWER) += gb-power-supply.o
obj-$(CONFIG_GREYBUS_RAW) += gb-raw.o
obj-$(CONFIG_GREYBUS_VIBRATOR) += gb-vibrator.o
# Greybus Audio is a bunch of modules
gb-audio-module-y := audio_module.o audio_topology.o
gb-audio-codec-y := audio_codec.o
gb-audio-gb-y := audio_gb.o
gb-audio-apbridgea-y := audio_apbridgea.o
gb-audio-manager-y := audio_manager.o audio_manager_module.o
# Greybus Audio sysfs helpers can be useful when debugging
#GB_AUDIO_MANAGER_SYSFS ?= true
#ifeq ($(GB_AUDIO_MANAGER_SYSFS),true)
#gb-audio-manager-y += audio_manager_sysfs.o
#ccflags-y += -DGB_AUDIO_MANAGER_SYSFS
#endif
obj-$(CONFIG_GREYBUS_AUDIO_MSM8994) += gb-audio-codec.o
obj-$(CONFIG_GREYBUS_AUDIO_MSM8994) += gb-audio-module.o
obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-gb.o
obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-apbridgea.o
obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-manager.o
# Greybus Bridged PHY drivers
gb-gbphy-y := gbphy.o
gb-gpio-y := gpio.o
gb-i2c-y := i2c.o
gb-pwm-y := pwm.o
gb-sdio-y := sdio.o
gb-spi-y := spi.o
gb-uart-y := uart.o
gb-usb-y := usb.o
obj-$(CONFIG_GREYBUS_BRIDGED_PHY) += gb-gbphy.o
obj-$(CONFIG_GREYBUS_GPIO) += gb-gpio.o
obj-$(CONFIG_GREYBUS_I2C) += gb-i2c.o
obj-$(CONFIG_GREYBUS_PWM) += gb-pwm.o
obj-$(CONFIG_GREYBUS_SDIO) += gb-sdio.o
obj-$(CONFIG_GREYBUS_SPI) += gb-spi.o gb-spilib.o
obj-$(CONFIG_GREYBUS_UART) += gb-uart.o
obj-$(CONFIG_GREYBUS_USB) += gb-usb.o
# Greybus Platform driver
gb-arche-y := arche-platform.o arche-apb-ctrl.o
obj-$(CONFIG_USB_HSIC_USB3613) += gb-arche.o

@@ -0,0 +1,522 @@
/*
* Arche Platform driver to control APB.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include "arche_platform.h"
struct arche_apb_ctrl_drvdata {
/* Control GPIO signals to and from AP <=> AP Bridges */
int resetn_gpio;
int boot_ret_gpio;
int pwroff_gpio;
int wake_in_gpio;
int wake_out_gpio;
int pwrdn_gpio;
enum arche_platform_state state;
bool init_disabled;
struct regulator *vcore;
struct regulator *vio;
int clk_en_gpio;
struct clk *clk;
struct pinctrl *pinctrl;
struct pinctrl_state *pin_default;
/* V2: SPI Bus control */
int spi_en_gpio;
bool spi_en_polarity_high;
};
/*
* Note that these low-level APIs are active-high
*/
static inline void deassert_reset(unsigned int gpio)
{
gpio_set_value(gpio, 1);
}
static inline void assert_reset(unsigned int gpio)
{
gpio_set_value(gpio, 0);
}
/*
* Note: Please do not modify the below sequence, as it is as per the spec
*/
static int coldboot_seq(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
int ret;
if (apb->init_disabled ||
apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
return 0;
/* Hold APB in reset state */
assert_reset(apb->resetn_gpio);
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
gpio_is_valid(apb->spi_en_gpio))
devm_gpio_free(dev, apb->spi_en_gpio);
/* Enable power to APB */
if (!IS_ERR(apb->vcore)) {
ret = regulator_enable(apb->vcore);
if (ret) {
dev_err(dev, "failed to enable core regulator\n");
return ret;
}
}
if (!IS_ERR(apb->vio)) {
ret = regulator_enable(apb->vio);
if (ret) {
dev_err(dev, "failed to enable IO regulator\n");
return ret;
}
}
apb_bootret_deassert(dev);
/* On DB3 clock was not mandatory */
if (gpio_is_valid(apb->clk_en_gpio))
gpio_set_value(apb->clk_en_gpio, 1);
usleep_range(100, 200);
/* deassert reset to APB : Active-low signal */
deassert_reset(apb->resetn_gpio);
apb->state = ARCHE_PLATFORM_STATE_ACTIVE;
return 0;
}
static int fw_flashing_seq(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
int ret;
if (apb->init_disabled ||
apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
return 0;
ret = regulator_enable(apb->vcore);
if (ret) {
dev_err(dev, "failed to enable core regulator\n");
return ret;
}
ret = regulator_enable(apb->vio);
if (ret) {
dev_err(dev, "failed to enable IO regulator\n");
return ret;
}
if (gpio_is_valid(apb->spi_en_gpio)) {
unsigned long flags;
if (apb->spi_en_polarity_high)
flags = GPIOF_OUT_INIT_HIGH;
else
flags = GPIOF_OUT_INIT_LOW;
ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
flags, "apb_spi_en");
if (ret) {
dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
apb->spi_en_gpio);
return ret;
}
}
/* for flashing device should be in reset state */
assert_reset(apb->resetn_gpio);
apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;
return 0;
}
static int standby_boot_seq(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
if (apb->init_disabled)
return 0;
/* Even if it is in OFF state, we do not want to change the state */
if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
apb->state == ARCHE_PLATFORM_STATE_OFF)
return 0;
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
gpio_is_valid(apb->spi_en_gpio))
devm_gpio_free(dev, apb->spi_en_gpio);
/*
* As per WDM spec, do nothing
*
* Pasted from WDM spec,
* - A falling edge on POWEROFF_L is detected (a)
* - WDM enters standby mode, but no output signals are changed
*/
/* TODO: POWEROFF_L is input to WDM module */
apb->state = ARCHE_PLATFORM_STATE_STANDBY;
return 0;
}
static void poweroff_seq(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
return;
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
gpio_is_valid(apb->spi_en_gpio))
devm_gpio_free(dev, apb->spi_en_gpio);
/* disable the clock */
if (gpio_is_valid(apb->clk_en_gpio))
gpio_set_value(apb->clk_en_gpio, 0);
if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
regulator_disable(apb->vcore);
if (!IS_ERR(apb->vio) && regulator_is_enabled(apb->vio) > 0)
regulator_disable(apb->vio);
/* As part of exit, put APB back in reset state */
assert_reset(apb->resetn_gpio);
apb->state = ARCHE_PLATFORM_STATE_OFF;
/* TODO: May have to send an event to SVC about this exit */
}
void apb_bootret_assert(struct device *dev)
{
struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
gpio_set_value(apb->boot_ret_gpio, 1);
}
void apb_bootret_deassert(struct device *dev)
{
struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
gpio_set_value(apb->boot_ret_gpio, 0);
}
int apb_ctrl_coldboot(struct device *dev)
{
return coldboot_seq(to_platform_device(dev));
}
int apb_ctrl_fw_flashing(struct device *dev)
{
return fw_flashing_seq(to_platform_device(dev));
}
int apb_ctrl_standby_boot(struct device *dev)
{
return standby_boot_seq(to_platform_device(dev));
}
void apb_ctrl_poweroff(struct device *dev)
{
poweroff_seq(to_platform_device(dev));
}
static ssize_t state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
int ret = 0;
bool is_disabled;
if (sysfs_streq(buf, "off")) {
if (apb->state == ARCHE_PLATFORM_STATE_OFF)
return count;
poweroff_seq(pdev);
} else if (sysfs_streq(buf, "active")) {
if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
return count;
poweroff_seq(pdev);
is_disabled = apb->init_disabled;
apb->init_disabled = false;
ret = coldboot_seq(pdev);
if (ret)
apb->init_disabled = is_disabled;
} else if (sysfs_streq(buf, "standby")) {
if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
return count;
ret = standby_boot_seq(pdev);
} else if (sysfs_streq(buf, "fw_flashing")) {
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
return count;
/*
* First we want to make sure we power off everything
* and then enter FW flashing state
*/
poweroff_seq(pdev);
ret = fw_flashing_seq(pdev);
} else {
dev_err(dev, "unknown state\n");
ret = -EINVAL;
}
return ret ? ret : count;
}
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
switch (apb->state) {
case ARCHE_PLATFORM_STATE_OFF:
return sprintf(buf, "off%s\n",
apb->init_disabled ? ",disabled" : "");
case ARCHE_PLATFORM_STATE_ACTIVE:
return sprintf(buf, "active\n");
case ARCHE_PLATFORM_STATE_STANDBY:
return sprintf(buf, "standby\n");
case ARCHE_PLATFORM_STATE_FW_FLASHING:
return sprintf(buf, "fw_flashing\n");
default:
return sprintf(buf, "unknown state\n");
}
}
static DEVICE_ATTR_RW(state);
static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
struct arche_apb_ctrl_drvdata *apb)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
apb->resetn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
if (apb->resetn_gpio < 0) {
dev_err(dev, "failed to get reset gpio\n");
return apb->resetn_gpio;
}
ret = devm_gpio_request_one(dev, apb->resetn_gpio,
GPIOF_OUT_INIT_LOW, "apb-reset");
if (ret) {
dev_err(dev, "Failed requesting reset gpio %d\n",
apb->resetn_gpio);
return ret;
}
apb->boot_ret_gpio = of_get_named_gpio(np, "boot-ret-gpios", 0);
if (apb->boot_ret_gpio < 0) {
dev_err(dev, "failed to get boot retention gpio\n");
return apb->boot_ret_gpio;
}
ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
GPIOF_OUT_INIT_LOW, "boot retention");
if (ret) {
dev_err(dev, "Failed requesting bootret gpio %d\n",
apb->boot_ret_gpio);
return ret;
}
/* It's not mandatory to support power management interface */
apb->pwroff_gpio = of_get_named_gpio(np, "pwr-off-gpios", 0);
if (apb->pwroff_gpio < 0) {
dev_err(dev, "failed to get power off gpio\n");
return apb->pwroff_gpio;
}
ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
GPIOF_IN, "pwroff_n");
if (ret) {
dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
apb->pwroff_gpio);
return ret;
}
/* Do not make clock mandatory as of now (for DB3) */
apb->clk_en_gpio = of_get_named_gpio(np, "clock-en-gpio", 0);
if (apb->clk_en_gpio < 0) {
dev_warn(dev, "failed to get clock en gpio\n");
} else if (gpio_is_valid(apb->clk_en_gpio)) {
ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
GPIOF_OUT_INIT_LOW, "apb_clk_en");
if (ret) {
dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
apb->clk_en_gpio);
return ret;
}
}
apb->pwrdn_gpio = of_get_named_gpio(np, "pwr-down-gpios", 0);
if (apb->pwrdn_gpio < 0)
dev_warn(dev, "failed to get power down gpio\n");
/* Regulators are optional, as we may have fixed supply coming in */
apb->vcore = devm_regulator_get(dev, "vcore");
if (IS_ERR(apb->vcore))
dev_warn(dev, "no core regulator found\n");
apb->vio = devm_regulator_get(dev, "vio");
if (IS_ERR(apb->vio))
dev_warn(dev, "no IO regulator found\n");
apb->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(apb->pinctrl)) {
dev_err(&pdev->dev, "could not get pinctrl handle\n");
return PTR_ERR(apb->pinctrl);
}
apb->pin_default = pinctrl_lookup_state(apb->pinctrl, "default");
if (IS_ERR(apb->pin_default)) {
dev_err(&pdev->dev, "could not get default pin state\n");
return PTR_ERR(apb->pin_default);
}
/* Only applicable for platform >= V2 */
apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
if (apb->spi_en_gpio >= 0) {
if (of_property_read_bool(pdev->dev.of_node,
"spi-en-active-high"))
apb->spi_en_polarity_high = true;
}
return 0;
}
static int arche_apb_ctrl_probe(struct platform_device *pdev)
{
int ret;
struct arche_apb_ctrl_drvdata *apb;
struct device *dev = &pdev->dev;
apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
if (!apb)
return -ENOMEM;
ret = apb_ctrl_get_devtree_data(pdev, apb);
if (ret) {
dev_err(dev, "failed to get apb devicetree data %d\n", ret);
return ret;
}
/* Initially set APB to OFF state */
apb->state = ARCHE_PLATFORM_STATE_OFF;
/* Check whether device needs to be enabled on boot */
if (of_property_read_bool(pdev->dev.of_node, "arche,init-disable"))
apb->init_disabled = true;
platform_set_drvdata(pdev, apb);
/* Create sysfs interface to allow user to change state dynamically */
ret = device_create_file(dev, &dev_attr_state);
if (ret) {
dev_err(dev, "failed to create state file in sysfs\n");
return ret;
}
dev_info(&pdev->dev, "Device registered successfully\n");
return 0;
}
static int arche_apb_ctrl_remove(struct platform_device *pdev)
{
device_remove_file(&pdev->dev, &dev_attr_state);
poweroff_seq(pdev);
platform_set_drvdata(pdev, NULL);
return 0;
}
static int arche_apb_ctrl_suspend(struct device *dev)
{
/*
* If timing profile permits, we may shutdown bridge
* completely
*
* TODO: sequence ??
*
* Also, need to make sure we meet precondition for unipro suspend
* Precondition: Definition ???
*/
return 0;
}
static int arche_apb_ctrl_resume(struct device *dev)
{
/*
* At least for ES2 we have to meet the delay requirement between
* unipro switch and AP bridge init, depending on whether bridge is in
* OFF state or standby state.
*
* Based on whether bridge is in standby or OFF state we may have to
* assert multiple signals. Please refer to WDM spec, for more info.
*
*/
return 0;
}
static void arche_apb_ctrl_shutdown(struct platform_device *pdev)
{
apb_ctrl_poweroff(&pdev->dev);
}
static SIMPLE_DEV_PM_OPS(arche_apb_ctrl_pm_ops, arche_apb_ctrl_suspend,
arche_apb_ctrl_resume);
static struct of_device_id arche_apb_ctrl_of_match[] = {
{ .compatible = "usbffff,2", },
{ },
};
static struct platform_driver arche_apb_ctrl_device_driver = {
.probe = arche_apb_ctrl_probe,
.remove = arche_apb_ctrl_remove,
.shutdown = arche_apb_ctrl_shutdown,
.driver = {
.name = "arche-apb-ctrl",
.pm = &arche_apb_ctrl_pm_ops,
.of_match_table = arche_apb_ctrl_of_match,
}
};
int __init arche_apb_init(void)
{
return platform_driver_register(&arche_apb_ctrl_device_driver);
}
void __exit arche_apb_exit(void)
{
platform_driver_unregister(&arche_apb_ctrl_device_driver);
}

@@ -0,0 +1,828 @@
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/time.h>
#include "arche_platform.h"
#include "greybus.h"
#include <linux/usb/usb3613.h>
#define WD_COLDBOOT_PULSE_WIDTH_MS 30
enum svc_wakedetect_state {
WD_STATE_IDLE, /* Default state = pulled high/low */
WD_STATE_BOOT_INIT, /* WD = falling edge (low) */
WD_STATE_COLDBOOT_TRIG, /* WD = rising edge (high), > 30msec */
WD_STATE_STANDBYBOOT_TRIG, /* As of now not used ?? */
WD_STATE_COLDBOOT_START, /* Cold boot process started */
WD_STATE_STANDBYBOOT_START, /* Not used */
WD_STATE_TIMESYNC,
};
struct arche_platform_drvdata {
/* Control GPIO signals to and from AP <=> SVC */
int svc_reset_gpio;
bool is_reset_act_hi;
int svc_sysboot_gpio;
int wake_detect_gpio; /* bi-dir, maps to WAKE_MOD & WAKE_FRAME signals */
enum arche_platform_state state;
int svc_refclk_req;
struct clk *svc_ref_clk;
struct pinctrl *pinctrl;
struct pinctrl_state *pin_default;
int num_apbs;
enum svc_wakedetect_state wake_detect_state;
int wake_detect_irq;
spinlock_t wake_lock; /* Protect wake_detect_state */
struct mutex platform_state_mutex; /* Protect state */
wait_queue_head_t wq; /* WQ for arche_pdata->state */
unsigned long wake_detect_start;
struct notifier_block pm_notifier;
struct device *dev;
struct gb_timesync_svc *timesync_svc_pdata;
};
static int arche_apb_bootret_assert(struct device *dev, void *data)
{
apb_bootret_assert(dev);
return 0;
}
static int arche_apb_bootret_deassert(struct device *dev, void *data)
{
apb_bootret_deassert(dev);
return 0;
}
/* Requires calling context to hold arche_pdata->platform_state_mutex */
static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
enum arche_platform_state state)
{
arche_pdata->state = state;
}
/*
* arche_platform_change_state: Change the operational state
*
* This exported function allows external drivers to change the state
* of the arche-platform driver.
* Note that this function only supports transitions between two states
* with limited functionality.
*
* - ARCHE_PLATFORM_STATE_TIME_SYNC:
* Once set, allows timesync operations between SVC <=> AP and makes
* sure that arche-platform driver ignores any subsequent events/pulses
* from SVC over wake/detect.
*
* - ARCHE_PLATFORM_STATE_ACTIVE:
* Puts back driver to active state, where any pulse from SVC on wake/detect
* line would trigger either cold/standby boot.
* Note: A transition request from this function does not trigger a
* cold/standby boot. It just puts the driver's bookkeeping variable back
* to the ACTIVE state and restores the interrupt.
*
* Returns -ENODEV if the device is not found, -EAGAIN if the driver cannot
* currently satisfy the requested state transition, or -EINVAL for all
* other state-transition requests.
*/
int arche_platform_change_state(enum arche_platform_state state,
struct gb_timesync_svc *timesync_svc_pdata)
{
struct arche_platform_drvdata *arche_pdata;
struct platform_device *pdev;
struct device_node *np;
int ret = -EAGAIN;
unsigned long flags;
np = of_find_compatible_node(NULL, NULL, "google,arche-platform");
if (!np) {
pr_err("google,arche-platform device node not found\n");
return -ENODEV;
}
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("arche-platform device not found\n");
return -ENODEV;
}
arche_pdata = platform_get_drvdata(pdev);
mutex_lock(&arche_pdata->platform_state_mutex);
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
if (arche_pdata->state == state) {
ret = 0;
goto exit;
}
switch (state) {
case ARCHE_PLATFORM_STATE_TIME_SYNC:
if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
ret = -EINVAL;
goto exit;
}
if (arche_pdata->wake_detect_state != WD_STATE_IDLE) {
dev_err(arche_pdata->dev,
"driver busy with wake/detect line ops\n");
goto exit;
}
device_for_each_child(arche_pdata->dev, NULL,
arche_apb_bootret_assert);
arche_pdata->wake_detect_state = WD_STATE_TIMESYNC;
break;
case ARCHE_PLATFORM_STATE_ACTIVE:
if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) {
ret = -EINVAL;
goto exit;
}
device_for_each_child(arche_pdata->dev, NULL,
arche_apb_bootret_deassert);
arche_pdata->wake_detect_state = WD_STATE_IDLE;
break;
case ARCHE_PLATFORM_STATE_OFF:
case ARCHE_PLATFORM_STATE_STANDBY:
case ARCHE_PLATFORM_STATE_FW_FLASHING:
dev_err(arche_pdata->dev, "busy, request to retry later\n");
goto exit;
default:
ret = -EINVAL;
dev_err(arche_pdata->dev,
"invalid state transition request\n");
goto exit;
}
arche_pdata->timesync_svc_pdata = timesync_svc_pdata;
arche_platform_set_state(arche_pdata, state);
if (state == ARCHE_PLATFORM_STATE_ACTIVE)
wake_up(&arche_pdata->wq);
ret = 0;
exit:
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
mutex_unlock(&arche_pdata->platform_state_mutex);
of_node_put(np);
return ret;
}
EXPORT_SYMBOL_GPL(arche_platform_change_state);
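/*
 * Example (hypothetical external caller, shown only for illustration):
 * the timesync core would bracket its wake/detect operations with a pair
 * of calls such as:
 *
 *	ret = arche_platform_change_state(ARCHE_PLATFORM_STATE_TIME_SYNC,
 *					  timesync_svc);
 *	if (ret)
 *		return ret;
 *	... perform timesync over the wake/detect line ...
 *	arche_platform_change_state(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
 */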
/* Requires that arche_pdata->wake_lock is held by the calling context */
static void arche_platform_set_wake_detect_state(
struct arche_platform_drvdata *arche_pdata,
enum svc_wakedetect_state state)
{
arche_pdata->wake_detect_state = state;
}
static inline void svc_reset_onoff(unsigned int gpio, bool onoff)
{
gpio_set_value(gpio, onoff);
}
static int apb_cold_boot(struct device *dev, void *data)
{
int ret;
ret = apb_ctrl_coldboot(dev);
if (ret)
dev_warn(dev, "failed to coldboot\n");
/* Child nodes are independent, so do not abort the coldboot operation */
return 0;
}
static int apb_poweroff(struct device *dev, void *data)
{
apb_ctrl_poweroff(dev);
/* Take the HUB3613 out of HUB mode. */
if (usb3613_hub_mode_ctrl(false))
dev_warn(dev, "failed to control hub device\n");
return 0;
}
static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
{
/* Enable interrupt here, to read event back from SVC */
gpio_direction_input(arche_pdata->wake_detect_gpio);
enable_irq(arche_pdata->wake_detect_irq);
}
static irqreturn_t arche_platform_wd_irq_thread(int irq, void *devid)
{
struct arche_platform_drvdata *arche_pdata = devid;
unsigned long flags;
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_TRIG) {
/* Something is wrong */
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
return IRQ_HANDLED;
}
arche_platform_set_wake_detect_state(arche_pdata,
WD_STATE_COLDBOOT_START);
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
/* A complete power cycle is required, so first make sure everything is powered off */
device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
/* Bring APB out of reset: cold boot sequence */
device_for_each_child(arche_pdata->dev, NULL, apb_cold_boot);
/* Enable HUB3613 into HUB mode. */
if (usb3613_hub_mode_ctrl(true))
dev_warn(arche_pdata->dev, "failed to control hub device\n");
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
{
struct arche_platform_drvdata *arche_pdata = devid;
unsigned long flags;
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) {
gb_timesync_irq(arche_pdata->timesync_svc_pdata);
goto exit;
}
if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
/* wake/detect rising */
/*
* If the wake/detect line goes high again less than 30 msec after
* going low, a standby boot sequence is being initiated, which is
* not yet supported/implemented. So ignore it.
*/
if (arche_pdata->wake_detect_state == WD_STATE_BOOT_INIT) {
if (time_before(jiffies,
arche_pdata->wake_detect_start +
msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS))) {
arche_platform_set_wake_detect_state(arche_pdata,
WD_STATE_IDLE);
} else {
/* Check that we are not already in the middle of the irq thread */
if (arche_pdata->wake_detect_state !=
WD_STATE_COLDBOOT_START) {
arche_platform_set_wake_detect_state(arche_pdata,
WD_STATE_COLDBOOT_TRIG);
spin_unlock_irqrestore(
&arche_pdata->wake_lock,
flags);
return IRQ_WAKE_THREAD;
}
}
}
} else {
/* wake/detect falling */
if (arche_pdata->wake_detect_state == WD_STATE_IDLE) {
arche_pdata->wake_detect_start = jiffies;
/*
* When wake/detect first goes low, we assume it is meant for
* coldboot and set the flag. If the line stays low beyond 30 msec,
* it is a coldboot; otherwise we fall back to standby boot.
*/
arche_platform_set_wake_detect_state(arche_pdata,
WD_STATE_BOOT_INIT);
}
}
exit:
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
return IRQ_HANDLED;
}
/*
* Requires arche_pdata->platform_state_mutex to be held
*/
static int arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
{
int ret;
if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
return 0;
dev_info(arche_pdata->dev, "Booting from cold boot state\n");
svc_reset_onoff(arche_pdata->svc_reset_gpio,
arche_pdata->is_reset_act_hi);
gpio_set_value(arche_pdata->svc_sysboot_gpio, 0);
usleep_range(100, 200);
ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
if (ret) {
dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
ret);
return ret;
}
/* bring SVC out of reset */
svc_reset_onoff(arche_pdata->svc_reset_gpio,
!arche_pdata->is_reset_act_hi);
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);
return 0;
}
/*
* Requires arche_pdata->platform_state_mutex to be held
*/
static int arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
{
int ret;
if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
return 0;
dev_info(arche_pdata->dev, "Switching to FW flashing state\n");
svc_reset_onoff(arche_pdata->svc_reset_gpio,
arche_pdata->is_reset_act_hi);
gpio_set_value(arche_pdata->svc_sysboot_gpio, 1);
usleep_range(100, 200);
ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
if (ret) {
dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
ret);
return ret;
}
svc_reset_onoff(arche_pdata->svc_reset_gpio,
!arche_pdata->is_reset_act_hi);
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);
return 0;
}
/*
* Requires arche_pdata->platform_state_mutex to be held
*/
static void arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
{
unsigned long flags;
if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
return;
/* If in fw_flashing mode, then there is no need to repeat things */
if (arche_pdata->state != ARCHE_PLATFORM_STATE_FW_FLASHING) {
disable_irq(arche_pdata->wake_detect_irq);
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
arche_platform_set_wake_detect_state(arche_pdata,
WD_STATE_IDLE);
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
}
clk_disable_unprepare(arche_pdata->svc_ref_clk);
/* As part of exit, put APB back in reset state */
svc_reset_onoff(arche_pdata->svc_reset_gpio,
arche_pdata->is_reset_act_hi);
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
}
static ssize_t state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
int ret = 0;
retry:
mutex_lock(&arche_pdata->platform_state_mutex);
if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) {
mutex_unlock(&arche_pdata->platform_state_mutex);
ret = wait_event_interruptible(
arche_pdata->wq,
arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC);
if (ret)
return ret;
goto retry;
}
if (sysfs_streq(buf, "off")) {
if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
goto exit;
/* If SVC goes down, bring down APB's as well */
device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
arche_platform_poweroff_seq(arche_pdata);
} else if (sysfs_streq(buf, "active")) {
if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
goto exit;
/*
* First we want to make sure we power off everything and then
* activate it back again.
*/
device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
arche_platform_poweroff_seq(arche_pdata);
arche_platform_wd_irq_en(arche_pdata);
ret = arche_platform_coldboot_seq(arche_pdata);
if (ret)
goto exit;
} else if (sysfs_streq(buf, "standby")) {
if (arche_pdata->state == ARCHE_PLATFORM_STATE_STANDBY)
goto exit;
dev_warn(arche_pdata->dev, "standby state not supported\n");
} else if (sysfs_streq(buf, "fw_flashing")) {
if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
goto exit;
/*
* Here we only control SVC.
*
* In case of FW_FLASHING mode we do not want to control
* APBs, as in case of V2, SPI bus is shared between both
* the APBs. So let the user choose which APB to flash.
*/
arche_platform_poweroff_seq(arche_pdata);
ret = arche_platform_fw_flashing_seq(arche_pdata);
if (ret)
goto exit;
} else {
dev_err(arche_pdata->dev, "unknown state\n");
ret = -EINVAL;
}
exit:
mutex_unlock(&arche_pdata->platform_state_mutex);
return ret ? ret : count;
}
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
switch (arche_pdata->state) {
case ARCHE_PLATFORM_STATE_OFF:
return sprintf(buf, "off\n");
case ARCHE_PLATFORM_STATE_ACTIVE:
return sprintf(buf, "active\n");
case ARCHE_PLATFORM_STATE_STANDBY:
return sprintf(buf, "standby\n");
case ARCHE_PLATFORM_STATE_FW_FLASHING:
return sprintf(buf, "fw_flashing\n");
case ARCHE_PLATFORM_STATE_TIME_SYNC:
return sprintf(buf, "time_sync\n");
default:
return sprintf(buf, "unknown state\n");
}
}
static DEVICE_ATTR_RW(state);
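/*
 * The "state" attribute can be driven from user space, for example (the
 * exact sysfs path is illustrative and depends on the device name):
 *
 *	# echo off > /sys/devices/platform/arche-platform-ctrl/state
 *	# echo active > /sys/devices/platform/arche-platform-ctrl/state
 *	# cat /sys/devices/platform/arche-platform-ctrl/state
 */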
static int arche_platform_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
struct arche_platform_drvdata *arche_pdata =
container_of(notifier, struct arche_platform_drvdata,
pm_notifier);
int ret = NOTIFY_DONE;
mutex_lock(&arche_pdata->platform_state_mutex);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
ret = NOTIFY_STOP;
break;
}
device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
arche_platform_poweroff_seq(arche_pdata);
break;
case PM_POST_SUSPEND:
if (arche_pdata->state != ARCHE_PLATFORM_STATE_OFF)
break;
arche_platform_wd_irq_en(arche_pdata);
arche_platform_coldboot_seq(arche_pdata);
break;
default:
break;
}
mutex_unlock(&arche_pdata->platform_state_mutex);
return ret;
}
static int arche_platform_probe(struct platform_device *pdev)
{
struct arche_platform_drvdata *arche_pdata;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata), GFP_KERNEL);
if (!arche_pdata)
return -ENOMEM;
/* setup svc reset gpio */
arche_pdata->is_reset_act_hi = of_property_read_bool(np,
"svc,reset-active-high");
arche_pdata->svc_reset_gpio = of_get_named_gpio(np, "svc,reset-gpio", 0);
if (arche_pdata->svc_reset_gpio < 0) {
dev_err(dev, "failed to get reset-gpio\n");
return arche_pdata->svc_reset_gpio;
}
ret = devm_gpio_request(dev, arche_pdata->svc_reset_gpio, "svc-reset");
if (ret) {
dev_err(dev, "failed to request svc-reset gpio:%d\n", ret);
return ret;
}
ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
arche_pdata->is_reset_act_hi);
if (ret) {
dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
return ret;
}
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
"svc,sysboot-gpio", 0);
if (arche_pdata->svc_sysboot_gpio < 0) {
dev_err(dev, "failed to get sysboot gpio\n");
return arche_pdata->svc_sysboot_gpio;
}
ret = devm_gpio_request(dev, arche_pdata->svc_sysboot_gpio, "sysboot0");
if (ret) {
dev_err(dev, "failed to request sysboot0 gpio:%d\n", ret);
return ret;
}
ret = gpio_direction_output(arche_pdata->svc_sysboot_gpio, 0);
if (ret) {
dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
return ret;
}
/* setup the clock request gpio first */
arche_pdata->svc_refclk_req = of_get_named_gpio(np,
"svc,refclk-req-gpio", 0);
if (arche_pdata->svc_refclk_req < 0) {
dev_err(dev, "failed to get svc clock-req gpio\n");
return arche_pdata->svc_refclk_req;
}
ret = devm_gpio_request(dev, arche_pdata->svc_refclk_req, "svc-clk-req");
if (ret) {
dev_err(dev, "failed to request svc-clk-req gpio: %d\n", ret);
return ret;
}
ret = gpio_direction_input(arche_pdata->svc_refclk_req);
if (ret) {
dev_err(dev, "failed to set svc-clk-req gpio dir :%d\n", ret);
return ret;
}
/* setup refclk2 to follow the pin */
arche_pdata->svc_ref_clk = devm_clk_get(dev, "svc_ref_clk");
if (IS_ERR(arche_pdata->svc_ref_clk)) {
ret = PTR_ERR(arche_pdata->svc_ref_clk);
dev_err(dev, "failed to get svc_ref_clk: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, arche_pdata);
arche_pdata->num_apbs = of_get_child_count(np);
dev_dbg(dev, "Number of APB's available - %d\n", arche_pdata->num_apbs);
arche_pdata->wake_detect_gpio = of_get_named_gpio(np, "svc,wake-detect-gpio", 0);
if (arche_pdata->wake_detect_gpio < 0) {
dev_err(dev, "failed to get wake detect gpio\n");
ret = arche_pdata->wake_detect_gpio;
return ret;
}
ret = devm_gpio_request(dev, arche_pdata->wake_detect_gpio, "wake detect");
if (ret) {
dev_err(dev, "Failed requesting wake_detect gpio %d\n",
arche_pdata->wake_detect_gpio);
return ret;
}
arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
arche_pdata->dev = &pdev->dev;
spin_lock_init(&arche_pdata->wake_lock);
mutex_init(&arche_pdata->platform_state_mutex);
init_waitqueue_head(&arche_pdata->wq);
arche_pdata->wake_detect_irq =
gpio_to_irq(arche_pdata->wake_detect_gpio);
ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
arche_platform_wd_irq,
arche_platform_wd_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(dev), arche_pdata);
if (ret) {
dev_err(dev, "failed to request wake detect IRQ %d\n", ret);
return ret;
}
disable_irq(arche_pdata->wake_detect_irq);
ret = device_create_file(dev, &dev_attr_state);
if (ret) {
dev_err(dev, "failed to create state file in sysfs\n");
return ret;
}
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
goto err_device_remove;
}
arche_pdata->pm_notifier.notifier_call = arche_platform_pm_notifier;
ret = register_pm_notifier(&arche_pdata->pm_notifier);
if (ret) {
dev_err(dev, "failed to register pm notifier %d\n", ret);
goto err_device_remove;
}
/* Register callback pointer */
arche_platform_change_state_cb = arche_platform_change_state;
/* Stay powered off if "arche,init-off" is set; otherwise cold boot the SVC */
if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
mutex_lock(&arche_pdata->platform_state_mutex);
ret = arche_platform_coldboot_seq(arche_pdata);
if (ret) {
dev_err(dev, "Failed to cold boot svc %d\n", ret);
goto err_coldboot;
}
arche_platform_wd_irq_en(arche_pdata);
mutex_unlock(&arche_pdata->platform_state_mutex);
}
dev_info(dev, "Device registered successfully\n");
return 0;
err_coldboot:
mutex_unlock(&arche_pdata->platform_state_mutex);
err_device_remove:
device_remove_file(&pdev->dev, &dev_attr_state);
return ret;
}
static int arche_remove_child(struct device *dev, void *unused)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static int arche_platform_remove(struct platform_device *pdev)
{
struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
unregister_pm_notifier(&arche_pdata->pm_notifier);
device_remove_file(&pdev->dev, &dev_attr_state);
device_for_each_child(&pdev->dev, NULL, arche_remove_child);
arche_platform_poweroff_seq(arche_pdata);
platform_set_drvdata(pdev, NULL);
if (usb3613_hub_mode_ctrl(false))
dev_warn(arche_pdata->dev, "failed to control hub device\n");
/* TODO: Should we do anything more here ?? */
return 0;
}
static int arche_platform_suspend(struct device *dev)
{
/*
* If the timing profile permits, we may shut down the bridge
* completely.
*
* TODO: sequence ??
*
* Also, we need to make sure we meet the precondition for UniPro
* suspend. Precondition: definition ???
*/
return 0;
}
static int arche_platform_resume(struct device *dev)
{
/*
* At least for ES2 we have to meet the delay requirement between
* the UniPro switch and AP bridge init, depending on whether the
* bridge is in OFF state or standby state.
*
* Based on whether the bridge is in standby or OFF state we may have
* to assert multiple signals. Please refer to the WDM spec for more
* info.
*
*/
return 0;
}
static void arche_platform_shutdown(struct platform_device *pdev)
{
struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
arche_platform_poweroff_seq(arche_pdata);
usb3613_hub_mode_ctrl(false);
}
static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
arche_platform_suspend,
arche_platform_resume);
static const struct of_device_id arche_platform_of_match[] = {
{ .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
{ },
};
static const struct of_device_id arche_combined_id[] = {
{ .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
{ .compatible = "usbffff,2", },
{ },
};
MODULE_DEVICE_TABLE(of, arche_combined_id);
static struct platform_driver arche_platform_device_driver = {
.probe = arche_platform_probe,
.remove = arche_platform_remove,
.shutdown = arche_platform_shutdown,
.driver = {
.name = "arche-platform-ctrl",
.pm = &arche_platform_pm_ops,
.of_match_table = arche_platform_of_match,
}
};
static int __init arche_init(void)
{
int retval;
retval = platform_driver_register(&arche_platform_device_driver);
if (retval)
return retval;
retval = arche_apb_init();
if (retval)
platform_driver_unregister(&arche_platform_device_driver);
return retval;
}
module_init(arche_init);
static void __exit arche_exit(void)
{
arche_apb_exit();
platform_driver_unregister(&arche_platform_device_driver);
}
module_exit(arche_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vaibhav Hiremath <vaibhav.hiremath@linaro.org>");
MODULE_DESCRIPTION("Arche Platform Driver");

View File

@ -0,0 +1,39 @@
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2015-2016 Google Inc.
* Copyright 2015-2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __ARCHE_PLATFORM_H
#define __ARCHE_PLATFORM_H
#include "timesync.h"
enum arche_platform_state {
ARCHE_PLATFORM_STATE_OFF,
ARCHE_PLATFORM_STATE_ACTIVE,
ARCHE_PLATFORM_STATE_STANDBY,
ARCHE_PLATFORM_STATE_FW_FLASHING,
ARCHE_PLATFORM_STATE_TIME_SYNC,
};
int arche_platform_change_state(enum arche_platform_state state,
struct gb_timesync_svc *pdata);
extern int (*arche_platform_change_state_cb)(enum arche_platform_state state,
struct gb_timesync_svc *pdata);
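/*
 * Consumers are expected to go through the callback pointer, which the
 * arche-platform driver installs at probe time, e.g. (sketch only; "tsvc"
 * is a hypothetical timesync handle):
 *
 *	if (arche_platform_change_state_cb)
 *		ret = arche_platform_change_state_cb(
 *				ARCHE_PLATFORM_STATE_TIME_SYNC, tsvc);
 */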
int __init arche_apb_init(void);
void __exit arche_apb_exit(void);
/* Operational states for the APB device */
int apb_ctrl_coldboot(struct device *dev);
int apb_ctrl_fw_flashing(struct device *dev);
int apb_ctrl_standby_boot(struct device *dev);
void apb_ctrl_poweroff(struct device *dev);
void apb_bootret_assert(struct device *dev);
void apb_bootret_deassert(struct device *dev);
#endif /* __ARCHE_PLATFORM_H */

View File

@ -0,0 +1,109 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ARPC_H
#define __ARPC_H
/* APBridgeA RPC (ARPC) */
enum arpc_result {
ARPC_SUCCESS = 0x00,
ARPC_NO_MEMORY = 0x01,
ARPC_INVALID = 0x02,
ARPC_TIMEOUT = 0x03,
ARPC_UNKNOWN_ERROR = 0xff,
};
struct arpc_request_message {
__le16 id; /* RPC unique id */
__le16 size; /* Size in bytes of header + payload */
__u8 type; /* RPC type */
__u8 data[0]; /* ARPC data */
} __packed;
struct arpc_response_message {
__le16 id; /* RPC unique id */
__u8 result; /* Result of RPC */
} __packed;
/* ARPC requests */
#define ARPC_TYPE_CPORT_CONNECTED 0x01
#define ARPC_TYPE_CPORT_QUIESCE 0x02
#define ARPC_TYPE_CPORT_CLEAR 0x03
#define ARPC_TYPE_CPORT_FLUSH 0x04
#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
struct arpc_cport_connected_req {
__le16 cport_id;
} __packed;
struct arpc_cport_quiesce_req {
__le16 cport_id;
__le16 peer_space;
__le16 timeout;
} __packed;
struct arpc_cport_clear_req {
__le16 cport_id;
} __packed;
struct arpc_cport_flush_req {
__le16 cport_id;
} __packed;
struct arpc_cport_shutdown_req {
__le16 cport_id;
__le16 timeout;
__u8 phase;
} __packed;
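/*
 * Wire-format illustration (derived from the structures above; not
 * normative): a CPORT_QUIESCE request for cport 5 would be sent as an
 * arpc_request_message with
 *
 *	id   = <unique RPC id>
 *	size = sizeof(struct arpc_request_message) +
 *	       sizeof(struct arpc_cport_quiesce_req)
 *	type = ARPC_TYPE_CPORT_QUIESCE
 *	data = a struct arpc_cport_quiesce_req with
 *	       cport_id = cpu_to_le16(5)
 *
 * and answered with an arpc_response_message carrying the same id and
 * one of the arpc_result codes.
 */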
#endif /* __ARPC_H */

View File

@ -0,0 +1,207 @@
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_protocols.h"
#include "audio_apbridgea.h"
#include "audio_codec.h"
int gb_audio_apbridgea_set_config(struct gb_connection *connection,
__u16 i2s_port, __u32 format, __u32 rate,
__u32 mclk_freq)
{
struct audio_apbridgea_set_config_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
req.format = cpu_to_le32(format);
req.rate = cpu_to_le32(rate);
req.mclk_freq = cpu_to_le32(mclk_freq);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_config);
int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
__u16 i2s_port, __u16 cportid,
__u8 direction)
{
struct audio_apbridgea_register_cport_request req;
int ret;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
req.cport = cpu_to_le16(cportid);
req.direction = direction;
ret = gb_pm_runtime_get_sync(connection->bundle);
if (ret)
return ret;
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_register_cport);
int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
__u16 i2s_port, __u16 cportid,
__u8 direction)
{
struct audio_apbridgea_unregister_cport_request req;
int ret;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
req.cport = cpu_to_le16(cportid);
req.direction = direction;
ret = gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
gb_pm_runtime_put_autosuspend(connection->bundle);
return ret;
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_unregister_cport);
int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
__u16 i2s_port, __u16 size)
{
struct audio_apbridgea_set_tx_data_size_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
req.size = cpu_to_le16(size);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_tx_data_size);
int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
__u16 i2s_port)
{
struct audio_apbridgea_prepare_tx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_tx);
int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
__u16 i2s_port, __u64 timestamp)
{
struct audio_apbridgea_start_tx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_TX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
req.timestamp = cpu_to_le64(timestamp);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_tx);
int gb_audio_apbridgea_stop_tx(struct gb_connection *connection, __u16 i2s_port)
{
struct audio_apbridgea_stop_tx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_TX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_tx);
int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
__u16 i2s_port)
{
struct audio_apbridgea_shutdown_tx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_tx);
int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
__u16 i2s_port, __u16 size)
{
struct audio_apbridgea_set_rx_data_size_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
req.size = cpu_to_le16(size);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_rx_data_size);
int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
__u16 i2s_port)
{
struct audio_apbridgea_prepare_rx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_rx);
int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
__u16 i2s_port)
{
struct audio_apbridgea_start_rx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_RX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_rx);
int gb_audio_apbridgea_stop_rx(struct gb_connection *connection, __u16 i2s_port)
{
struct audio_apbridgea_stop_rx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_RX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_rx);
int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
__u16 i2s_port)
{
struct audio_apbridgea_shutdown_rx_request req;
req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX;
req.hdr.i2s_port = cpu_to_le16(i2s_port);
return gb_hd_output(connection->hd, &req, sizeof(req),
GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_rx);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("greybus:audio-apbridgea");
MODULE_DESCRIPTION("Greybus Special APBridgeA Audio Protocol library");
MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");

View File

@ -0,0 +1,156 @@
/**
* Copyright (c) 2015-2016 Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This is a special protocol for configuring communication over the
* I2S bus between the DSP on the MSM8994 and APBridgeA. Therefore,
* we can predefine several low-level attributes of the communication
* because we know that they are supported. In particular, the following
* assumptions are made:
* - there are two channels (i.e., stereo)
* - the low-level protocol is I2S as defined by Philips/NXP
* - the DSP on the MSM8994 is the clock master for MCLK, BCLK, and WCLK
* - WCLK changes on the falling edge of BCLK
* - WCLK low for left channel; high for right channel
* - TX data is sent on the falling edge of BCLK
* - RX data is received/latched on the rising edge of BCLK
*/
#ifndef __AUDIO_APBRIDGEA_H
#define __AUDIO_APBRIDGEA_H
#define AUDIO_APBRIDGEA_TYPE_SET_CONFIG 0x01
#define AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT 0x02
#define AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT 0x03
#define AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE 0x04
/* 0x05 unused */
#define AUDIO_APBRIDGEA_TYPE_PREPARE_TX 0x06
#define AUDIO_APBRIDGEA_TYPE_START_TX 0x07
#define AUDIO_APBRIDGEA_TYPE_STOP_TX 0x08
#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX 0x09
#define AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE 0x0a
/* 0x0b unused */
#define AUDIO_APBRIDGEA_TYPE_PREPARE_RX 0x0c
#define AUDIO_APBRIDGEA_TYPE_START_RX 0x0d
#define AUDIO_APBRIDGEA_TYPE_STOP_RX 0x0e
#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX 0x0f
#define AUDIO_APBRIDGEA_PCM_FMT_8 BIT(0)
#define AUDIO_APBRIDGEA_PCM_FMT_16 BIT(1)
#define AUDIO_APBRIDGEA_PCM_FMT_24 BIT(2)
#define AUDIO_APBRIDGEA_PCM_FMT_32 BIT(3)
#define AUDIO_APBRIDGEA_PCM_FMT_64 BIT(4)
#define AUDIO_APBRIDGEA_PCM_RATE_5512 BIT(0)
#define AUDIO_APBRIDGEA_PCM_RATE_8000 BIT(1)
#define AUDIO_APBRIDGEA_PCM_RATE_11025 BIT(2)
#define AUDIO_APBRIDGEA_PCM_RATE_16000 BIT(3)
#define AUDIO_APBRIDGEA_PCM_RATE_22050 BIT(4)
#define AUDIO_APBRIDGEA_PCM_RATE_32000 BIT(5)
#define AUDIO_APBRIDGEA_PCM_RATE_44100 BIT(6)
#define AUDIO_APBRIDGEA_PCM_RATE_48000 BIT(7)
#define AUDIO_APBRIDGEA_PCM_RATE_64000 BIT(8)
#define AUDIO_APBRIDGEA_PCM_RATE_88200 BIT(9)
#define AUDIO_APBRIDGEA_PCM_RATE_96000 BIT(10)
#define AUDIO_APBRIDGEA_PCM_RATE_176400 BIT(11)
#define AUDIO_APBRIDGEA_PCM_RATE_192000 BIT(12)
#define AUDIO_APBRIDGEA_DIRECTION_TX BIT(0)
#define AUDIO_APBRIDGEA_DIRECTION_RX BIT(1)
/* The I2S port is passed in the 'index' parameter of the USB request */
/* The CPort is passed in the 'value' parameter of the USB request */
struct audio_apbridgea_hdr {
__u8 type;
__le16 i2s_port;
__u8 data[0];
} __packed;
struct audio_apbridgea_set_config_request {
struct audio_apbridgea_hdr hdr;
__le32 format; /* AUDIO_APBRIDGEA_PCM_FMT_* */
__le32 rate; /* AUDIO_APBRIDGEA_PCM_RATE_* */
__le32 mclk_freq; /* XXX Remove? */
} __packed;
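/*
 * Example: a 48 kHz, 16-bit configuration would use
 * format = AUDIO_APBRIDGEA_PCM_FMT_16 and
 * rate = AUDIO_APBRIDGEA_PCM_RATE_48000.
 */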
struct audio_apbridgea_register_cport_request {
struct audio_apbridgea_hdr hdr;
__le16 cport;
__u8 direction;
} __packed;
struct audio_apbridgea_unregister_cport_request {
struct audio_apbridgea_hdr hdr;
__le16 cport;
__u8 direction;
} __packed;
struct audio_apbridgea_set_tx_data_size_request {
struct audio_apbridgea_hdr hdr;
__le16 size;
} __packed;
struct audio_apbridgea_prepare_tx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
struct audio_apbridgea_start_tx_request {
struct audio_apbridgea_hdr hdr;
__le64 timestamp;
} __packed;
struct audio_apbridgea_stop_tx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
struct audio_apbridgea_shutdown_tx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
struct audio_apbridgea_set_rx_data_size_request {
struct audio_apbridgea_hdr hdr;
__le16 size;
} __packed;
struct audio_apbridgea_prepare_rx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
struct audio_apbridgea_start_rx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
struct audio_apbridgea_stop_rx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
struct audio_apbridgea_shutdown_rx_request {
struct audio_apbridgea_hdr hdr;
} __packed;
#endif /* __AUDIO_APBRIDGEA_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,283 @@
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __LINUX_GBAUDIO_CODEC_H
#define __LINUX_GBAUDIO_CODEC_H
#include <sound/soc.h>
#include <sound/jack.h>
#include "greybus.h"
#include "greybus_protocols.h"
#define NAME_SIZE 32
#define MAX_DAIS 2 /* APB1, APB2 */
enum {
APB1_PCM = 0,
APB2_PCM,
NUM_CODEC_DAIS,
};
enum gbcodec_reg_index {
GBCODEC_CTL_REG,
GBCODEC_MUTE_REG,
GBCODEC_PB_LVOL_REG,
GBCODEC_PB_RVOL_REG,
GBCODEC_CAP_LVOL_REG,
GBCODEC_CAP_RVOL_REG,
GBCODEC_APB1_MUX_REG,
GBCODEC_APB2_MUX_REG,
GBCODEC_REG_COUNT
};
/* device_type should be the same as defined in audio.h (Android media layer) */
enum {
GBAUDIO_DEVICE_NONE = 0x0,
/* reserved bits */
GBAUDIO_DEVICE_BIT_IN = 0x80000000,
GBAUDIO_DEVICE_BIT_DEFAULT = 0x40000000,
/* output devices */
GBAUDIO_DEVICE_OUT_SPEAKER = 0x2,
GBAUDIO_DEVICE_OUT_WIRED_HEADSET = 0x4,
GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE = 0x8,
/* input devices */
GBAUDIO_DEVICE_IN_BUILTIN_MIC = GBAUDIO_DEVICE_BIT_IN | 0x4,
GBAUDIO_DEVICE_IN_WIRED_HEADSET = GBAUDIO_DEVICE_BIT_IN | 0x10,
};
/* bit 0-SPK, 1-HP, 2-DAC,
* 4-MIC, 5-HSMIC, 6-MIC2
*/
#define GBCODEC_CTL_REG_DEFAULT 0x00
/* bit 0,1 - APB1-PB-L/R
* bit 2,3 - APB2-PB-L/R
* bit 4,5 - APB1-Cap-L/R
* bit 6,7 - APB2-Cap-L/R
*/
#define GBCODEC_MUTE_REG_DEFAULT 0x00
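/* e.g. a mute-register value of 0x03 mutes both APB1 playback channels */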
/* 0-127 steps */
#define GBCODEC_PB_VOL_REG_DEFAULT 0x00
#define GBCODEC_CAP_VOL_REG_DEFAULT 0x00
/* bit 0,1,2 - PB stereo, left, right
* bit 8,9,10 - Cap stereo, left, right
*/
#define GBCODEC_APB1_MUX_REG_DEFAULT 0x00
#define GBCODEC_APB2_MUX_REG_DEFAULT 0x00
#define GBCODEC_JACK_MASK 0x0000FFFF
#define GBCODEC_JACK_BUTTON_MASK 0xFFFF0000
static const u8 gbcodec_reg_defaults[GBCODEC_REG_COUNT] = {
GBCODEC_CTL_REG_DEFAULT,
GBCODEC_MUTE_REG_DEFAULT,
GBCODEC_PB_VOL_REG_DEFAULT,
GBCODEC_PB_VOL_REG_DEFAULT,
GBCODEC_CAP_VOL_REG_DEFAULT,
GBCODEC_CAP_VOL_REG_DEFAULT,
GBCODEC_APB1_MUX_REG_DEFAULT,
GBCODEC_APB2_MUX_REG_DEFAULT,
};
enum gbaudio_codec_state {
GBAUDIO_CODEC_SHUTDOWN = 0,
GBAUDIO_CODEC_STARTUP,
GBAUDIO_CODEC_HWPARAMS,
GBAUDIO_CODEC_PREPARE,
GBAUDIO_CODEC_START,
GBAUDIO_CODEC_STOP,
};
struct gbaudio_stream_params {
int state;
uint8_t sig_bits, channels;
uint32_t format, rate;
};
struct gbaudio_codec_dai {
int id;
/* runtime params for playback/capture streams */
struct gbaudio_stream_params params[2];
struct list_head list;
};
struct gbaudio_codec_info {
struct device *dev;
struct snd_soc_codec *codec;
struct list_head module_list;
/* to maintain runtime stream params for each DAI */
struct list_head dai_list;
struct mutex lock;
u8 reg[GBCODEC_REG_COUNT];
};
struct gbaudio_widget {
__u8 id;
const char *name;
struct list_head list;
};
struct gbaudio_control {
__u8 id;
char *name;
char *wname;
const char * const *texts;
int items;
struct list_head list;
};
struct gbaudio_data_connection {
int id;
__le16 data_cport;
struct gb_connection *connection;
struct list_head list;
/* maintain runtime state for playback/capture stream */
int state[2];
};
/* stream direction */
#define GB_PLAYBACK BIT(0)
#define GB_CAPTURE BIT(1)
enum gbaudio_module_state {
GBAUDIO_MODULE_OFF = 0,
GBAUDIO_MODULE_ON,
};
struct gbaudio_module_info {
/* module info */
struct device *dev;
int dev_id; /* check if it should be bundle_id/hd_cport_id */
int vid;
int pid;
int slot;
int type;
int set_uevent;
char vstr[NAME_SIZE];
char pstr[NAME_SIZE];
struct list_head list;
/* need to share this info with user space */
int manager_id;
char name[NAME_SIZE];
unsigned int ip_devices;
unsigned int op_devices;
/* jack related */
char jack_name[NAME_SIZE];
char button_name[NAME_SIZE];
int jack_type;
int jack_mask;
int button_mask;
int button_status;
struct snd_soc_jack headset_jack;
struct snd_soc_jack button_jack;
/* connection info */
struct gb_connection *mgmt_connection;
size_t num_data_connections;
struct list_head data_list;
/* topology related */
int num_dais;
int num_controls;
int num_dapm_widgets;
int num_dapm_routes;
unsigned long dai_offset;
unsigned long widget_offset;
unsigned long control_offset;
unsigned long route_offset;
struct snd_kcontrol_new *controls;
struct snd_soc_dapm_widget *dapm_widgets;
struct snd_soc_dapm_route *dapm_routes;
struct snd_soc_dai_driver *dais;
struct list_head widget_list;
struct list_head ctl_list;
struct list_head widget_ctl_list;
struct gb_audio_topology *topology;
};
int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
struct gb_audio_topology *tplg_data);
void gbaudio_tplg_release(struct gbaudio_module_info *module);
int gbaudio_module_update(struct gbaudio_codec_info *codec,
struct snd_soc_dapm_widget *w,
struct gbaudio_module_info *module,
int enable);
int gbaudio_register_module(struct gbaudio_module_info *module);
void gbaudio_unregister_module(struct gbaudio_module_info *module);
/* protocol related */
extern int gb_audio_gb_get_topology(struct gb_connection *connection,
struct gb_audio_topology **topology);
extern int gb_audio_gb_get_control(struct gb_connection *connection,
uint8_t control_id, uint8_t index,
struct gb_audio_ctl_elem_value *value);
extern int gb_audio_gb_set_control(struct gb_connection *connection,
uint8_t control_id, uint8_t index,
struct gb_audio_ctl_elem_value *value);
extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
uint8_t widget_id);
extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
uint8_t widget_id);
extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
uint16_t data_cport, uint32_t *format,
uint32_t *rate, uint8_t *channels,
uint8_t *sig_bits);
extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
uint16_t data_cport, uint32_t format,
uint32_t rate, uint8_t channels,
uint8_t sig_bits);
extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
uint16_t data_cport, uint16_t size);
extern int gb_audio_gb_activate_tx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
uint16_t data_cport, uint16_t size);
extern int gb_audio_gb_activate_rx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_apbridgea_set_config(struct gb_connection *connection,
__u16 i2s_port, __u32 format,
__u32 rate, __u32 mclk_freq);
extern int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
__u16 i2s_port, __u16 cportid,
__u8 direction);
extern int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
__u16 i2s_port, __u16 cportid,
__u8 direction);
extern int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
__u16 i2s_port, __u16 size);
extern int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
__u16 i2s_port, __u64 timestamp);
extern int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
__u16 i2s_port, __u16 size);
extern int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
__u16 i2s_port);
#endif /* __LINUX_GBAUDIO_CODEC_H */

View File

@ -0,0 +1,228 @@
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_protocols.h"
#include "operation.h"
#include "audio_codec.h"
/* TODO: Split into separate calls */
int gb_audio_gb_get_topology(struct gb_connection *connection,
struct gb_audio_topology **topology)
{
struct gb_audio_get_topology_size_response size_resp;
struct gb_audio_topology *topo;
uint16_t size;
int ret;
ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE,
NULL, 0, &size_resp, sizeof(size_resp));
if (ret)
return ret;
size = le16_to_cpu(size_resp.size);
if (size < sizeof(*topo))
return -ENODATA;
topo = kzalloc(size, GFP_KERNEL);
if (!topo)
return -ENOMEM;
ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY, NULL, 0,
topo, size);
if (ret) {
kfree(topo);
return ret;
}
*topology = topo;
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_gb_get_topology);
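/*
 * Usage sketch (hypothetical caller): on success the helper allocates
 * *topology, which the caller owns and must eventually kfree():
 *
 *	struct gb_audio_topology *topo;
 *	int ret;
 *
 *	ret = gb_audio_gb_get_topology(connection, &topo);
 *	if (ret)
 *		return ret;
 *	... parse topo ...
 *	kfree(topo);
 */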
int gb_audio_gb_get_control(struct gb_connection *connection,
uint8_t control_id, uint8_t index,
struct gb_audio_ctl_elem_value *value)
{
struct gb_audio_get_control_request req;
struct gb_audio_get_control_response resp;
int ret;
req.control_id = control_id;
req.index = index;
ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_CONTROL,
&req, sizeof(req), &resp, sizeof(resp));
if (ret)
return ret;
memcpy(value, &resp.value, sizeof(*value));
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_gb_get_control);
int gb_audio_gb_set_control(struct gb_connection *connection,
uint8_t control_id, uint8_t index,
struct gb_audio_ctl_elem_value *value)
{
struct gb_audio_set_control_request req;
req.control_id = control_id;
req.index = index;
memcpy(&req.value, value, sizeof(req.value));
return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_CONTROL,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_control);
int gb_audio_gb_enable_widget(struct gb_connection *connection,
uint8_t widget_id)
{
struct gb_audio_enable_widget_request req;
req.widget_id = widget_id;
return gb_operation_sync(connection, GB_AUDIO_TYPE_ENABLE_WIDGET,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_enable_widget);
int gb_audio_gb_disable_widget(struct gb_connection *connection,
uint8_t widget_id)
{
struct gb_audio_disable_widget_request req;
req.widget_id = widget_id;
return gb_operation_sync(connection, GB_AUDIO_TYPE_DISABLE_WIDGET,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_disable_widget);
int gb_audio_gb_get_pcm(struct gb_connection *connection, uint16_t data_cport,
uint32_t *format, uint32_t *rate, uint8_t *channels,
uint8_t *sig_bits)
{
struct gb_audio_get_pcm_request req;
struct gb_audio_get_pcm_response resp;
int ret;
req.data_cport = cpu_to_le16(data_cport);
ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_PCM,
&req, sizeof(req), &resp, sizeof(resp));
if (ret)
return ret;
*format = le32_to_cpu(resp.format);
*rate = le32_to_cpu(resp.rate);
*channels = resp.channels;
*sig_bits = resp.sig_bits;
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_gb_get_pcm);
int gb_audio_gb_set_pcm(struct gb_connection *connection, uint16_t data_cport,
uint32_t format, uint32_t rate, uint8_t channels,
uint8_t sig_bits)
{
struct gb_audio_set_pcm_request req;
req.data_cport = cpu_to_le16(data_cport);
req.format = cpu_to_le32(format);
req.rate = cpu_to_le32(rate);
req.channels = channels;
req.sig_bits = sig_bits;
return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_PCM,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_pcm);
int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
uint16_t data_cport, uint16_t size)
{
struct gb_audio_set_tx_data_size_request req;
req.data_cport = cpu_to_le16(data_cport);
req.size = cpu_to_le16(size);
return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_TX_DATA_SIZE,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_tx_data_size);
int gb_audio_gb_activate_tx(struct gb_connection *connection,
uint16_t data_cport)
{
struct gb_audio_activate_tx_request req;
req.data_cport = cpu_to_le16(data_cport);
return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_TX,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_activate_tx);
int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
uint16_t data_cport)
{
struct gb_audio_deactivate_tx_request req;
req.data_cport = cpu_to_le16(data_cport);
return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_TX,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_tx);
int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
uint16_t data_cport, uint16_t size)
{
struct gb_audio_set_rx_data_size_request req;
req.data_cport = cpu_to_le16(data_cport);
req.size = cpu_to_le16(size);
return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_RX_DATA_SIZE,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_rx_data_size);
int gb_audio_gb_activate_rx(struct gb_connection *connection,
uint16_t data_cport)
{
struct gb_audio_activate_rx_request req;
req.data_cport = cpu_to_le16(data_cport);
return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_RX,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_activate_rx);
int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
uint16_t data_cport)
{
struct gb_audio_deactivate_rx_request req;
req.data_cport = cpu_to_le16(data_cport);
return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_RX,
&req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_rx);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("greybus:audio-gb");
MODULE_DESCRIPTION("Greybus Audio Device Class Protocol library");
MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");

View File

@ -0,0 +1,184 @@
/*
* Greybus Audio Manager
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include "audio_manager.h"
#include "audio_manager_private.h"
static struct kset *manager_kset;
static LIST_HEAD(modules_list);
static DECLARE_RWSEM(modules_rwsem);
static DEFINE_IDA(module_id);
/* helpers */
static struct gb_audio_manager_module *gb_audio_manager_get_locked(int id)
{
struct gb_audio_manager_module *module;
if (id < 0)
return NULL;
list_for_each_entry(module, &modules_list, list) {
if (module->id == id)
return module;
}
return NULL;
}
/* public API */
int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
{
struct gb_audio_manager_module *module;
int id;
int err;
id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
if (id < 0)
return id;
err = gb_audio_manager_module_create(&module, manager_kset,
id, desc);
if (err) {
ida_simple_remove(&module_id, id);
return err;
}
/* Add it to the list */
down_write(&modules_rwsem);
list_add_tail(&module->list, &modules_list);
up_write(&modules_rwsem);
return module->id;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_add);
int gb_audio_manager_remove(int id)
{
struct gb_audio_manager_module *module;
down_write(&modules_rwsem);
module = gb_audio_manager_get_locked(id);
if (!module) {
up_write(&modules_rwsem);
return -EINVAL;
}
list_del(&module->list);
kobject_put(&module->kobj);
up_write(&modules_rwsem);
ida_simple_remove(&module_id, id);
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
void gb_audio_manager_remove_all(void)
{
struct gb_audio_manager_module *module, *next;
int is_empty;
down_write(&modules_rwsem);
list_for_each_entry_safe(module, next, &modules_list, list) {
list_del(&module->list);
kobject_put(&module->kobj);
ida_simple_remove(&module_id, module->id);
}
is_empty = list_empty(&modules_list);
up_write(&modules_rwsem);
if (!is_empty)
pr_warn("Not all nodes were deleted\n");
}
EXPORT_SYMBOL_GPL(gb_audio_manager_remove_all);
struct gb_audio_manager_module *gb_audio_manager_get_module(int id)
{
struct gb_audio_manager_module *module;
down_read(&modules_rwsem);
module = gb_audio_manager_get_locked(id);
if (module)
kobject_get(&module->kobj);
up_read(&modules_rwsem);
return module;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_get_module);
void gb_audio_manager_put_module(struct gb_audio_manager_module *module)
{
kobject_put(&module->kobj);
}
EXPORT_SYMBOL_GPL(gb_audio_manager_put_module);
int gb_audio_manager_dump_module(int id)
{
struct gb_audio_manager_module *module;
down_read(&modules_rwsem);
module = gb_audio_manager_get_locked(id);
up_read(&modules_rwsem);
if (!module)
return -EINVAL;
gb_audio_manager_module_dump(module);
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_dump_module);
void gb_audio_manager_dump_all(void)
{
struct gb_audio_manager_module *module;
int count = 0;
down_read(&modules_rwsem);
list_for_each_entry(module, &modules_list, list) {
gb_audio_manager_module_dump(module);
count++;
}
up_read(&modules_rwsem);
pr_info("Number of connected modules: %d\n", count);
}
EXPORT_SYMBOL_GPL(gb_audio_manager_dump_all);
/*
* module init/deinit
*/
static int __init manager_init(void)
{
manager_kset = kset_create_and_add(GB_AUDIO_MANAGER_NAME, NULL,
kernel_kobj);
if (!manager_kset)
return -ENOMEM;
#ifdef GB_AUDIO_MANAGER_SYSFS
gb_audio_manager_sysfs_init(&manager_kset->kobj);
#endif
return 0;
}
static void __exit manager_exit(void)
{
gb_audio_manager_remove_all();
kset_unregister(manager_kset);
ida_destroy(&module_id);
}
module_init(manager_init);
module_exit(manager_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Svetlin Ankov <ankov_svetlin@projectara.com>");

View File

@ -0,0 +1,83 @@
/*
* Greybus Audio Manager
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_H_
#define _GB_AUDIO_MANAGER_H_
#include <linux/kobject.h>
#include <linux/list.h>
#define GB_AUDIO_MANAGER_NAME "gb_audio_manager"
#define GB_AUDIO_MANAGER_MODULE_NAME_LEN 64
#define GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "63"
struct gb_audio_manager_module_descriptor {
char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
int slot;
int vid;
int pid;
int cport;
unsigned int ip_devices;
unsigned int op_devices;
};
struct gb_audio_manager_module {
struct kobject kobj;
struct list_head list;
int id;
struct gb_audio_manager_module_descriptor desc;
};
/*
* Creates a new gb_audio_manager_module_descriptor, using the specified
* descriptor.
*
* Returns a negative result on error, or the id of the newly created module.
*
*/
int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc);
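/*
 * Example registration (all values purely illustrative; the device
 * bitmasks follow the GBAUDIO_DEVICE_* values in audio_codec.h):
 *
 *	struct gb_audio_manager_module_descriptor desc = {
 *		.name = "module-1",
 *		.slot = 1,
 *		.vid = 0x1234,
 *		.pid = 0x5678,
 *		.cport = 3,
 *		.ip_devices = 0x0,
 *		.op_devices = 0x2,
 *	};
 *	int id = gb_audio_manager_add(&desc);
 *	...
 *	gb_audio_manager_remove(id);
 */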
/*
* Removes a connected gb_audio_manager_module_descriptor for the specified ID.
*
* Returns zero on success, or a negative value on error.
*/
int gb_audio_manager_remove(int id);
/*
* Removes all connected gb_audio_modules
*/
void gb_audio_manager_remove_all(void);
/*
* Retrieves the gb_audio_manager_module for the specified id.
* Returns the gb_audio_manager_module structure,
* or NULL if there is no module with the specified id.
*/
struct gb_audio_manager_module *gb_audio_manager_get_module(int id);
/*
* Decreases the refcount of the module, obtained by the get function.
* Modules are removed via gb_audio_manager_remove
*/
void gb_audio_manager_put_module(struct gb_audio_manager_module *module);
/*
* Dumps the module for the specified id
* Returns 0 on success
*/
int gb_audio_manager_dump_module(int id);
/*
* Dumps all connected modules
*/
void gb_audio_manager_dump_all(void);
#endif /* _GB_AUDIO_MANAGER_H_ */

View File

@ -0,0 +1,258 @@
/*
* Greybus Audio Manager
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/slab.h>
#include "audio_manager.h"
#include "audio_manager_private.h"
#define to_gb_audio_module_attr(x) \
container_of(x, struct gb_audio_manager_module_attribute, attr)
#define to_gb_audio_module(x) \
container_of(x, struct gb_audio_manager_module, kobj)
struct gb_audio_manager_module_attribute {
struct attribute attr;
ssize_t (*show)(struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr,
char *buf);
ssize_t (*store)(struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr,
const char *buf, size_t count);
};
static ssize_t gb_audio_module_attr_show(
struct kobject *kobj, struct attribute *attr, char *buf)
{
struct gb_audio_manager_module_attribute *attribute;
struct gb_audio_manager_module *module;
attribute = to_gb_audio_module_attr(attr);
module = to_gb_audio_module(kobj);
if (!attribute->show)
return -EIO;
return attribute->show(module, attribute, buf);
}
static ssize_t gb_audio_module_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
struct gb_audio_manager_module_attribute *attribute;
struct gb_audio_manager_module *module;
attribute = to_gb_audio_module_attr(attr);
module = to_gb_audio_module(kobj);
if (!attribute->store)
return -EIO;
return attribute->store(module, attribute, buf, len);
}
static const struct sysfs_ops gb_audio_module_sysfs_ops = {
.show = gb_audio_module_attr_show,
.store = gb_audio_module_attr_store,
};
static void gb_audio_module_release(struct kobject *kobj)
{
struct gb_audio_manager_module *module = to_gb_audio_module(kobj);
pr_info("Destroying audio module #%d\n", module->id);
/* TODO -> delete from list */
kfree(module);
}
static ssize_t gb_audio_module_name_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "%s", module->desc.name);
}
static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
__ATTR(name, 0664, gb_audio_module_name_show, NULL);
static ssize_t gb_audio_module_slot_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "%d", module->desc.slot);
}
static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
__ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
static ssize_t gb_audio_module_vid_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "%d", module->desc.vid);
}
static struct gb_audio_manager_module_attribute gb_audio_module_vid_attribute =
__ATTR(vid, 0664, gb_audio_module_vid_show, NULL);
static ssize_t gb_audio_module_pid_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "%d", module->desc.pid);
}
static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
__ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
static ssize_t gb_audio_module_cport_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "%d", module->desc.cport);
}
static struct gb_audio_manager_module_attribute
gb_audio_module_cport_attribute =
__ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
static ssize_t gb_audio_module_ip_devices_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "0x%X", module->desc.ip_devices);
}
static struct gb_audio_manager_module_attribute
gb_audio_module_ip_devices_attribute =
__ATTR(ip_devices, 0664, gb_audio_module_ip_devices_show, NULL);
static ssize_t gb_audio_module_op_devices_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
return sprintf(buf, "0x%X", module->desc.op_devices);
}
static struct gb_audio_manager_module_attribute
gb_audio_module_op_devices_attribute =
__ATTR(op_devices, 0664, gb_audio_module_op_devices_show, NULL);
static struct attribute *gb_audio_module_default_attrs[] = {
&gb_audio_module_name_attribute.attr,
&gb_audio_module_slot_attribute.attr,
&gb_audio_module_vid_attribute.attr,
&gb_audio_module_pid_attribute.attr,
&gb_audio_module_cport_attribute.attr,
&gb_audio_module_ip_devices_attribute.attr,
&gb_audio_module_op_devices_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct kobj_type gb_audio_module_type = {
.sysfs_ops = &gb_audio_module_sysfs_ops,
.release = gb_audio_module_release,
.default_attrs = gb_audio_module_default_attrs,
};
static void send_add_uevent(struct gb_audio_manager_module *module)
{
char name_string[128];
char slot_string[64];
char vid_string[64];
char pid_string[64];
char cport_string[64];
char ip_devices_string[64];
char op_devices_string[64];
char *envp[] = {
name_string,
slot_string,
vid_string,
pid_string,
cport_string,
ip_devices_string,
op_devices_string,
NULL
};
snprintf(name_string, 128, "NAME=%s", module->desc.name);
snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
snprintf(vid_string, 64, "VID=%d", module->desc.vid);
snprintf(pid_string, 64, "PID=%d", module->desc.pid);
snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
module->desc.ip_devices);
snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
module->desc.op_devices);
kobject_uevent_env(&module->kobj, KOBJ_ADD, envp);
}
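
Note that two of the environment keys above ("I/P DEVICES" and "O/P
DEVICES") contain spaces and slashes, which is unusual for uevent variables
and worth remembering when parsing. A hedged userspace sketch that prints
the raw environment strings of incoming uevents (error handling is kept
minimal; this sees every kobject uevent on the system, the KOBJ_ADD events
sent by send_add_uevent() included):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
        struct sockaddr_nl addr = {
                .nl_family = AF_NETLINK,
                .nl_groups = 1, /* kernel uevent multicast group */
        };
        char buf[4096];
        ssize_t len;
        char *p;
        int fd;

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;

        while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
                /* payload is a sequence of NUL-terminated KEY=value strings */
                for (p = buf; p < buf + len; p += strlen(p) + 1)
                        printf("%s\n", p);
                printf("--\n");
        }
        close(fd);
        return 0;
}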
int gb_audio_manager_module_create(
struct gb_audio_manager_module **module,
struct kset *manager_kset,
int id, struct gb_audio_manager_module_descriptor *desc)
{
int err;
struct gb_audio_manager_module *m;
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
return -ENOMEM;
/* Initialize the node */
INIT_LIST_HEAD(&m->list);
/* Set the module id */
m->id = id;
/* Copy the provided descriptor */
memcpy(&m->desc, desc, sizeof(*desc));
/* set the kset */
m->kobj.kset = manager_kset;
/*
* Initialize and add the kobject to the kernel. All the default files
* will be created here. As we have already specified a kset for this
* kobject, we don't have to set a parent for the kobject, the kobject
* will be placed beneath that kset automatically.
*/
err = kobject_init_and_add(&m->kobj, &gb_audio_module_type, NULL, "%d",
id);
if (err) {
pr_err("failed initializing kobject for audio module #%d\n",
id);
kobject_put(&m->kobj);
return err;
}
/*
* Notify the object was created
*/
send_add_uevent(m);
*module = m;
pr_info("Created audio module #%d\n", id);
return 0;
}
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
{
pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
module->id,
module->desc.name,
module->desc.slot,
module->desc.vid,
module->desc.pid,
module->desc.cport,
module->desc.ip_devices,
module->desc.op_devices);
}


@@ -0,0 +1,28 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
#define _GB_AUDIO_MANAGER_PRIVATE_H_
#include <linux/kobject.h>
#include "audio_manager.h"
int gb_audio_manager_module_create(
struct gb_audio_manager_module **module,
struct kset *manager_kset,
int id, struct gb_audio_manager_module_descriptor *desc);
/* module destroyed via kobject_put */
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module);
/* sysfs control */
void gb_audio_manager_sysfs_init(struct kobject *kobj);
#endif /* _GB_AUDIO_MANAGER_PRIVATE_H_ */


@@ -0,0 +1,102 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/string.h>
#include <linux/sysfs.h>
#include "audio_manager.h"
#include "audio_manager_private.h"
static ssize_t manager_sysfs_add_store(
struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct gb_audio_manager_module_descriptor desc = { {0} };
int num = sscanf(buf,
"name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
"slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X"
"o/p devices=0x%X",
desc.name, &desc.slot, &desc.vid, &desc.pid,
&desc.cport, &desc.ip_devices, &desc.op_devices);
if (num != 7)
return -EINVAL;
num = gb_audio_manager_add(&desc);
if (num < 0)
return -EINVAL;
return count;
}
static struct kobj_attribute manager_add_attribute =
__ATTR(add, 0664, NULL, manager_sysfs_add_store);
static ssize_t manager_sysfs_remove_store(
struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int id;
int num = sscanf(buf, "%d", &id);
if (num != 1)
return -EINVAL;
num = gb_audio_manager_remove(id);
if (num)
return num;
return count;
}
static struct kobj_attribute manager_remove_attribute =
__ATTR(remove, 0664, NULL, manager_sysfs_remove_store);
static ssize_t manager_sysfs_dump_store(
struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int id;
int num = sscanf(buf, "%d", &id);
if (num == 1) {
num = gb_audio_manager_dump_module(id);
if (num)
return num;
} else if (!strncmp("all", buf, 3))
gb_audio_manager_dump_all();
else
return -EINVAL;
return count;
}
static struct kobj_attribute manager_dump_attribute =
__ATTR(dump, 0664, NULL, manager_sysfs_dump_store);
static void manager_sysfs_init_attribute(
struct kobject *kobj, struct kobj_attribute *kattr)
{
int err;
err = sysfs_create_file(kobj, &kattr->attr);
if (err) {
pr_warn("creating the sysfs entry for %s failed: %d\n",
kattr->attr.name, err);
}
}
void gb_audio_manager_sysfs_init(struct kobject *kobj)
{
manager_sysfs_init_attribute(kobj, &manager_add_attribute);
manager_sysfs_init_attribute(kobj, &manager_remove_attribute);
manager_sysfs_init_attribute(kobj, &manager_dump_attribute);
}
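
For reference, a hedged userspace sketch of driving the three control files
created above. SYSFS_DIR is an assumption: the real path depends on where
the manager kset is registered by audio_manager.c, which is not part of
this hunk. The field order and the 0x%X formatting must match the sscanf()
in manager_sysfs_add_store() exactly.

#include <stdio.h>

#define SYSFS_DIR "/sys/kernel/gb_audio_manager" /* made-up path */

static int write_ctl(const char *file, const char *what)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), SYSFS_DIR "/%s", file);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs(what, f);
        return fclose(f);
}

int main(void)
{
        write_ctl("add", "name=mod0 slot=1 vid=2 pid=3 cport=4 "
                         "i/p devices=0x1 o/p devices=0x2");
        write_ctl("dump", "all");
        write_ctl("remove", "0"); /* ID assigned at add time */
        return 0;
}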


@@ -0,0 +1,482 @@
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include "audio_codec.h"
#include "audio_apbridgea.h"
#include "audio_manager.h"
/*
* gb_snd management functions
*/
static int gbaudio_request_jack(struct gbaudio_module_info *module,
struct gb_audio_jack_event_request *req)
{
int report;
struct snd_jack *jack = module->headset_jack.jack;
struct snd_jack *btn_jack = module->button_jack.jack;
if (!jack) {
dev_err_ratelimited(module->dev,
"Invalid jack event received:type: %u, event: %u\n",
req->jack_attribute, req->event);
return -EINVAL;
}
dev_warn_ratelimited(module->dev,
"Jack Event received: type: %u, event: %u\n",
req->jack_attribute, req->event);
if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
module->jack_type = 0;
if (btn_jack && module->button_status) {
snd_soc_jack_report(&module->button_jack, 0,
module->button_mask);
module->button_status = 0;
}
snd_soc_jack_report(&module->headset_jack, 0,
module->jack_mask);
return 0;
}
report = req->jack_attribute & module->jack_mask;
if (!report) {
dev_err_ratelimited(module->dev,
"Invalid jack event received:type: %u, event: %u\n",
req->jack_attribute, req->event);
return -EINVAL;
}
if (module->jack_type)
dev_warn_ratelimited(module->dev,
"Modifying jack from %d to %d\n",
module->jack_type, report);
module->jack_type = report;
snd_soc_jack_report(&module->headset_jack, report, module->jack_mask);
return 0;
}
static int gbaudio_request_button(struct gbaudio_module_info *module,
struct gb_audio_button_event_request *req)
{
int soc_button_id, report;
struct snd_jack *btn_jack = module->button_jack.jack;
if (!btn_jack) {
dev_err_ratelimited(module->dev,
"Invalid button event received:type: %u, event: %u\n",
req->button_id, req->event);
return -EINVAL;
}
dev_warn_ratelimited(module->dev,
"Button Event received: id: %u, event: %u\n",
req->button_id, req->event);
/* currently supports 4 buttons only */
if (!module->jack_type) {
dev_err_ratelimited(module->dev,
"Jack not present. Bogus event!!\n");
return -EINVAL;
}
report = module->button_status & module->button_mask;
soc_button_id = 0;
switch (req->button_id) {
case 1:
soc_button_id = SND_JACK_BTN_0 & module->button_mask;
break;
case 2:
soc_button_id = SND_JACK_BTN_1 & module->button_mask;
break;
case 3:
soc_button_id = SND_JACK_BTN_2 & module->button_mask;
break;
case 4:
soc_button_id = SND_JACK_BTN_3 & module->button_mask;
break;
}
if (!soc_button_id) {
dev_err_ratelimited(module->dev,
"Invalid button request received\n");
return -EINVAL;
}
if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
report = report | soc_button_id;
else
report = report & ~soc_button_id;
module->button_status = report;
snd_soc_jack_report(&module->button_jack, report, module->button_mask);
return 0;
}
static int gbaudio_request_stream(struct gbaudio_module_info *module,
struct gb_audio_streaming_event_request *req)
{
dev_warn(module->dev, "Audio Event received: cport: %u, event: %u\n",
req->data_cport, req->event);
return 0;
}
static int gbaudio_codec_request_handler(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct gbaudio_module_info *module =
greybus_get_drvdata(connection->bundle);
struct gb_operation_msg_hdr *header = op->request->header;
struct gb_audio_streaming_event_request *stream_req;
struct gb_audio_jack_event_request *jack_req;
struct gb_audio_button_event_request *button_req;
int ret;
switch (header->type) {
case GB_AUDIO_TYPE_STREAMING_EVENT:
stream_req = op->request->payload;
ret = gbaudio_request_stream(module, stream_req);
break;
case GB_AUDIO_TYPE_JACK_EVENT:
jack_req = op->request->payload;
ret = gbaudio_request_jack(module, jack_req);
break;
case GB_AUDIO_TYPE_BUTTON_EVENT:
button_req = op->request->payload;
ret = gbaudio_request_button(module, button_req);
break;
default:
dev_err_ratelimited(&connection->bundle->dev,
"Invalid Audio Event received\n");
return -EINVAL;
}
return ret;
}
static int gb_audio_add_mgmt_connection(struct gbaudio_module_info *gbmodule,
struct greybus_descriptor_cport *cport_desc,
struct gb_bundle *bundle)
{
struct gb_connection *connection;
/* Management Cport */
if (gbmodule->mgmt_connection) {
dev_err(&bundle->dev,
"Can't have multiple Management connections\n");
return -ENODEV;
}
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
gbaudio_codec_request_handler);
if (IS_ERR(connection))
return PTR_ERR(connection);
greybus_set_drvdata(bundle, gbmodule);
gbmodule->mgmt_connection = connection;
return 0;
}
static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
struct greybus_descriptor_cport *cport_desc,
struct gb_bundle *bundle)
{
struct gb_connection *connection;
struct gbaudio_data_connection *dai;
dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
if (!dai) {
dev_err(gbmodule->dev, "DAI Malloc failure\n");
return -ENOMEM;
}
connection = gb_connection_create_offloaded(bundle,
le16_to_cpu(cport_desc->id),
GB_CONNECTION_FLAG_CSD);
if (IS_ERR(connection)) {
devm_kfree(gbmodule->dev, dai);
return PTR_ERR(connection);
}
greybus_set_drvdata(bundle, gbmodule);
dai->id = 0;
dai->data_cport = connection->intf_cport_id;
dai->connection = connection;
list_add(&dai->list, &gbmodule->data_list);
return 0;
}
/*
 * This is the basic hook that gets things initialized and registered with
 * greybus.
*/
static int gb_audio_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct device *dev = &bundle->dev;
struct gbaudio_module_info *gbmodule;
struct greybus_descriptor_cport *cport_desc;
struct gb_audio_manager_module_descriptor desc;
struct gbaudio_data_connection *dai, *_dai;
int ret, i;
struct gb_audio_topology *topology;
/* There should be at least one Management and one Data cport */
if (bundle->num_cports < 2)
return -ENODEV;
/*
* There can be only one Management connection and any number of data
* connections.
*/
gbmodule = devm_kzalloc(dev, sizeof(*gbmodule), GFP_KERNEL);
if (!gbmodule)
return -ENOMEM;
gbmodule->num_data_connections = bundle->num_cports - 1;
INIT_LIST_HEAD(&gbmodule->data_list);
INIT_LIST_HEAD(&gbmodule->widget_list);
INIT_LIST_HEAD(&gbmodule->ctl_list);
INIT_LIST_HEAD(&gbmodule->widget_ctl_list);
gbmodule->dev = dev;
snprintf(gbmodule->name, NAME_SIZE, "%s.%s", dev->driver->name,
dev_name(dev));
greybus_set_drvdata(bundle, gbmodule);
/* Create all connections */
for (i = 0; i < bundle->num_cports; i++) {
cport_desc = &bundle->cport_desc[i];
switch (cport_desc->protocol_id) {
case GREYBUS_PROTOCOL_AUDIO_MGMT:
ret = gb_audio_add_mgmt_connection(gbmodule, cport_desc,
bundle);
if (ret)
goto destroy_connections;
break;
case GREYBUS_PROTOCOL_AUDIO_DATA:
ret = gb_audio_add_data_connection(gbmodule, cport_desc,
bundle);
if (ret)
goto destroy_connections;
break;
default:
dev_err(dev, "Unsupported protocol: 0x%02x\n",
cport_desc->protocol_id);
ret = -ENODEV;
goto destroy_connections;
}
}
/* There must be a management cport */
if (!gbmodule->mgmt_connection) {
ret = -EINVAL;
dev_err(dev, "Missing management connection\n");
goto destroy_connections;
}
/* Initialize management connection */
ret = gb_connection_enable(gbmodule->mgmt_connection);
if (ret) {
dev_err(dev, "%d: Error while enabling mgmt connection\n", ret);
goto destroy_connections;
}
gbmodule->dev_id = gbmodule->mgmt_connection->intf->interface_id;
/*
* FIXME: malloc for topology happens via audio_gb driver
* should be done within codec driver itself
*/
ret = gb_audio_gb_get_topology(gbmodule->mgmt_connection, &topology);
if (ret) {
dev_err(dev, "%d:Error while fetching topology\n", ret);
goto disable_connection;
}
/* process topology data */
ret = gbaudio_tplg_parse_data(gbmodule, topology);
if (ret) {
dev_err(dev, "%d:Error while parsing topology data\n",
ret);
goto free_topology;
}
gbmodule->topology = topology;
/* Initialize data connections */
list_for_each_entry(dai, &gbmodule->data_list, list) {
ret = gb_connection_enable(dai->connection);
if (ret) {
dev_err(dev,
"%d:Error while enabling %d:data connection\n",
ret, dai->data_cport);
goto disable_data_connection;
}
}
/* register module with gbcodec */
ret = gbaudio_register_module(gbmodule);
if (ret)
goto disable_data_connection;
/* inform above layer for uevent */
dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
/* prepare for the audio manager */
strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
desc.slot = 1; /* todo */
desc.vid = 2; /* todo */
desc.pid = 3; /* todo */
desc.cport = gbmodule->dev_id;
desc.op_devices = gbmodule->op_devices;
desc.ip_devices = gbmodule->ip_devices;
gbmodule->manager_id = gb_audio_manager_add(&desc);
dev_dbg(dev, "Add GB Audio device:%s\n", gbmodule->name);
gb_pm_runtime_put_autosuspend(bundle);
return 0;
disable_data_connection:
list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list)
gb_connection_disable(dai->connection);
gbaudio_tplg_release(gbmodule);
gbmodule->topology = NULL;
free_topology:
kfree(topology);
disable_connection:
gb_connection_disable(gbmodule->mgmt_connection);
destroy_connections:
list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
gb_connection_destroy(dai->connection);
list_del(&dai->list);
devm_kfree(dev, dai);
}
if (gbmodule->mgmt_connection)
gb_connection_destroy(gbmodule->mgmt_connection);
devm_kfree(dev, gbmodule);
return ret;
}
static void gb_audio_disconnect(struct gb_bundle *bundle)
{
struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
struct gbaudio_data_connection *dai, *_dai;
gb_pm_runtime_get_sync(bundle);
/* cleanup module related resources first */
gbaudio_unregister_module(gbmodule);
/* inform uevent to above layers */
gb_audio_manager_remove(gbmodule->manager_id);
gbaudio_tplg_release(gbmodule);
kfree(gbmodule->topology);
gbmodule->topology = NULL;
gb_connection_disable(gbmodule->mgmt_connection);
list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
gb_connection_disable(dai->connection);
gb_connection_destroy(dai->connection);
list_del(&dai->list);
devm_kfree(gbmodule->dev, dai);
}
gb_connection_destroy(gbmodule->mgmt_connection);
gbmodule->mgmt_connection = NULL;
devm_kfree(&bundle->dev, gbmodule);
}
static const struct greybus_bundle_id gb_audio_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_AUDIO) },
{ }
};
MODULE_DEVICE_TABLE(greybus, gb_audio_id_table);
#ifdef CONFIG_PM
static int gb_audio_suspend(struct device *dev)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
struct gbaudio_data_connection *dai;
list_for_each_entry(dai, &gbmodule->data_list, list)
gb_connection_disable(dai->connection);
gb_connection_disable(gbmodule->mgmt_connection);
return 0;
}
static int gb_audio_resume(struct device *dev)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
struct gbaudio_data_connection *dai;
int ret;
ret = gb_connection_enable(gbmodule->mgmt_connection);
if (ret) {
dev_err(dev, "%d:Error while enabling mgmt connection\n", ret);
return ret;
}
list_for_each_entry(dai, &gbmodule->data_list, list) {
ret = gb_connection_enable(dai->connection);
if (ret) {
dev_err(dev,
"%d:Error while enabling %d:data connection\n",
ret, dai->data_cport);
return ret;
}
}
return 0;
}
#endif
static const struct dev_pm_ops gb_audio_pm_ops = {
SET_RUNTIME_PM_OPS(gb_audio_suspend, gb_audio_resume, NULL)
};
static struct greybus_driver gb_audio_driver = {
.name = "gb-audio",
.probe = gb_audio_probe,
.disconnect = gb_audio_disconnect,
.id_table = gb_audio_id_table,
.driver.pm = &gb_audio_pm_ops,
};
module_greybus_driver(gb_audio_driver);
MODULE_DESCRIPTION("Greybus Audio module driver");
MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gbaudio-module");

File diff suppressed because it is too large


@@ -0,0 +1,429 @@
/*
* Greybus Component Authentication Protocol (CAP) Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include "greybus_authentication.h"
#include "firmware.h"
#include "greybus.h"
#define CAP_TIMEOUT_MS 1000
/*
* Number of minor devices this driver supports.
 * Exactly one is required per Interface.
*/
#define NUM_MINORS U8_MAX
struct gb_cap {
struct device *parent;
struct gb_connection *connection;
struct kref kref;
struct list_head node;
bool disabled; /* connection getting disabled */
struct mutex mutex;
struct cdev cdev;
struct device *class_device;
dev_t dev_num;
};
static struct class *cap_class;
static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);
static void cap_kref_release(struct kref *kref)
{
struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
kfree(cap);
}
/*
* All users of cap take a reference (from within list_mutex lock), before
* they get a pointer to play with. And the structure will be freed only after
* the last user has put the reference to it.
*/
static void put_cap(struct gb_cap *cap)
{
kref_put(&cap->kref, cap_kref_release);
}
/* Caller must call put_cap() after using struct gb_cap */
static struct gb_cap *get_cap(struct cdev *cdev)
{
struct gb_cap *cap;
mutex_lock(&list_mutex);
list_for_each_entry(cap, &cap_list, node) {
if (&cap->cdev == cdev) {
kref_get(&cap->kref);
goto unlock;
}
}
cap = NULL;
unlock:
mutex_unlock(&list_mutex);
return cap;
}
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
struct gb_connection *connection = cap->connection;
struct gb_cap_get_endpoint_uid_response response;
int ret;
ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
0, &response, sizeof(response));
if (ret) {
dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
return ret;
}
memcpy(euid, response.uid, sizeof(response.uid));
return 0;
}
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
u8 *certificate, u32 *size, u8 *result)
{
struct gb_connection *connection = cap->connection;
struct gb_cap_get_ims_certificate_request *request;
struct gb_cap_get_ims_certificate_response *response;
size_t max_size = gb_operation_get_payload_size_max(connection);
struct gb_operation *op;
int ret;
op = gb_operation_create_flags(connection,
GB_CAP_TYPE_GET_IMS_CERTIFICATE,
sizeof(*request), max_size,
GB_OPERATION_FLAG_SHORT_RESPONSE,
GFP_KERNEL);
if (!op)
return -ENOMEM;
request = op->request->payload;
request->certificate_class = cpu_to_le32(class);
request->certificate_id = cpu_to_le32(id);
ret = gb_operation_request_send_sync(op);
if (ret) {
dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
goto done;
}
response = op->response->payload;
*result = response->result_code;
*size = op->response->payload_size - sizeof(*response);
memcpy(certificate, response->certificate, *size);
done:
gb_operation_put(op);
return ret;
}
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
u8 *challenge, u8 *result, u8 *auth_response,
u32 *signature_size, u8 *signature)
{
struct gb_connection *connection = cap->connection;
struct gb_cap_authenticate_request *request;
struct gb_cap_authenticate_response *response;
size_t max_size = gb_operation_get_payload_size_max(connection);
struct gb_operation *op;
int ret;
op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
sizeof(*request), max_size,
GB_OPERATION_FLAG_SHORT_RESPONSE,
GFP_KERNEL);
if (!op)
return -ENOMEM;
request = op->request->payload;
request->auth_type = cpu_to_le32(auth_type);
memcpy(request->uid, uid, sizeof(request->uid));
memcpy(request->challenge, challenge, sizeof(request->challenge));
ret = gb_operation_request_send_sync(op);
if (ret) {
dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
goto done;
}
response = op->response->payload;
*result = response->result_code;
*signature_size = op->response->payload_size - sizeof(*response);
memcpy(auth_response, response->response, sizeof(response->response));
memcpy(signature, response->signature, *signature_size);
done:
gb_operation_put(op);
return ret;
}
/* Char device fops */
static int cap_open(struct inode *inode, struct file *file)
{
struct gb_cap *cap = get_cap(inode->i_cdev);
/* cap structure can't get freed until file descriptor is closed */
if (cap) {
file->private_data = cap;
return 0;
}
return -ENODEV;
}
static int cap_release(struct inode *inode, struct file *file)
{
struct gb_cap *cap = file->private_data;
put_cap(cap);
return 0;
}
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
void __user *buf)
{
struct cap_ioc_get_endpoint_uid endpoint_uid;
struct cap_ioc_get_ims_certificate *ims_cert;
struct cap_ioc_authenticate *authenticate;
size_t size;
int ret;
switch (cmd) {
case CAP_IOC_GET_ENDPOINT_UID:
ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
if (ret)
return ret;
if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
return -EFAULT;
return 0;
case CAP_IOC_GET_IMS_CERTIFICATE:
size = sizeof(*ims_cert);
ims_cert = memdup_user(buf, size);
if (IS_ERR(ims_cert))
return PTR_ERR(ims_cert);
ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
ims_cert->certificate_id,
ims_cert->certificate,
&ims_cert->cert_size,
&ims_cert->result_code);
if (!ret && copy_to_user(buf, ims_cert, size))
ret = -EFAULT;
kfree(ims_cert);
return ret;
case CAP_IOC_AUTHENTICATE:
size = sizeof(*authenticate);
authenticate = memdup_user(buf, size);
if (IS_ERR(authenticate))
return PTR_ERR(authenticate);
ret = cap_authenticate(cap, authenticate->auth_type,
authenticate->uid,
authenticate->challenge,
&authenticate->result_code,
authenticate->response,
&authenticate->signature_size,
authenticate->signature);
if (!ret && copy_to_user(buf, authenticate, size))
ret = -EFAULT;
kfree(authenticate);
return ret;
default:
return -ENOTTY;
}
}
static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct gb_cap *cap = file->private_data;
struct gb_bundle *bundle = cap->connection->bundle;
int ret = -ENODEV;
/*
* Serialize ioctls.
*
* We don't want the user to do multiple authentication operations in
* parallel.
*
* This is also used to protect ->disabled, which is used to check if
* the connection is getting disconnected, so that we don't start any
* new operations.
*/
mutex_lock(&cap->mutex);
if (!cap->disabled) {
ret = gb_pm_runtime_get_sync(bundle);
if (!ret) {
ret = cap_ioctl(cap, cmd, (void __user *)arg);
gb_pm_runtime_put_autosuspend(bundle);
}
}
mutex_unlock(&cap->mutex);
return ret;
}
static const struct file_operations cap_fops = {
.owner = THIS_MODULE,
.open = cap_open,
.release = cap_release,
.unlocked_ioctl = cap_ioctl_unlocked,
};
int gb_cap_connection_init(struct gb_connection *connection)
{
struct gb_cap *cap;
int ret, minor;
if (!connection)
return 0;
cap = kzalloc(sizeof(*cap), GFP_KERNEL);
if (!cap)
return -ENOMEM;
cap->parent = &connection->bundle->dev;
cap->connection = connection;
mutex_init(&cap->mutex);
gb_connection_set_data(connection, cap);
kref_init(&cap->kref);
mutex_lock(&list_mutex);
list_add(&cap->node, &cap_list);
mutex_unlock(&list_mutex);
ret = gb_connection_enable(connection);
if (ret)
goto err_list_del;
minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
if (minor < 0) {
ret = minor;
goto err_connection_disable;
}
/* Add a char device to allow userspace to interact with cap */
cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
cdev_init(&cap->cdev, &cap_fops);
ret = cdev_add(&cap->cdev, cap->dev_num, 1);
if (ret)
goto err_remove_ida;
/* Add a soft link to the previously added char-dev within the bundle */
cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
NULL, "gb-authenticate-%d", minor);
if (IS_ERR(cap->class_device)) {
ret = PTR_ERR(cap->class_device);
goto err_del_cdev;
}
return 0;
err_del_cdev:
cdev_del(&cap->cdev);
err_remove_ida:
ida_simple_remove(&cap_minors_map, minor);
err_connection_disable:
gb_connection_disable(connection);
err_list_del:
mutex_lock(&list_mutex);
list_del(&cap->node);
mutex_unlock(&list_mutex);
put_cap(cap);
return ret;
}
void gb_cap_connection_exit(struct gb_connection *connection)
{
struct gb_cap *cap;
if (!connection)
return;
cap = gb_connection_get_data(connection);
device_destroy(cap_class, cap->dev_num);
cdev_del(&cap->cdev);
ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
/*
* Disallow any new ioctl operations on the char device and wait for
* existing ones to finish.
*/
mutex_lock(&cap->mutex);
cap->disabled = true;
mutex_unlock(&cap->mutex);
/* All pending greybus operations should have finished by now */
gb_connection_disable(cap->connection);
/* Disallow new users to get access to the cap structure */
mutex_lock(&list_mutex);
list_del(&cap->node);
mutex_unlock(&list_mutex);
/*
	 * All current users of cap have taken a reference to it by now, so
	 * we can drop our reference; the structure will be freed once the
	 * last user puts theirs.
*/
put_cap(cap);
}
int cap_init(void)
{
int ret;
cap_class = class_create(THIS_MODULE, "gb_authenticate");
if (IS_ERR(cap_class))
return PTR_ERR(cap_class);
ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
"gb_authenticate");
if (ret)
goto err_remove_class;
return 0;
err_remove_class:
class_destroy(cap_class);
return ret;
}
void cap_exit(void)
{
unregister_chrdev_region(cap_dev_num, NUM_MINORS);
class_destroy(cap_class);
ida_destroy(&cap_minors_map);
}
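
For completeness, a hedged userspace sketch of the simplest authentication
ioctl. The CAP_IOC_GET_ENDPOINT_UID number and the layout of struct
cap_ioc_get_endpoint_uid come from greybus_authentication.h, which is not
part of this hunk, and the device index 0 is an assumption:

/*
 * Hedged sketch: read the endpoint UID through the char device created
 * by gb_cap_connection_init(). The kernel side serializes these ioctls
 * on cap->mutex, so concurrent callers simply queue up.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "greybus_authentication.h"

int main(void)
{
        struct cap_ioc_get_endpoint_uid euid;
        unsigned int i;
        int fd;

        fd = open("/dev/gb-authenticate-0", O_RDWR);
        if (fd < 0)
                return 1;

        if (ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &euid) == 0) {
                for (i = 0; i < sizeof(euid.uid); i++)
                        printf("%02x", euid.uid[i]);
                printf("\n");
        }

        close(fd);
        return 0;
}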


@@ -0,0 +1,524 @@
/*
* BOOTROM Greybus driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "greybus.h"
#include "firmware.h"
/* Timeout, in milliseconds, within which the next request must be received */
#define NEXT_REQ_TIMEOUT_MS 1000
/*
* FIXME: Reduce this timeout once svc core handles parallel processing of
* events from the SVC, which are handled sequentially today.
*/
#define MODE_SWITCH_TIMEOUT_MS 10000
enum next_request_type {
NEXT_REQ_FIRMWARE_SIZE,
NEXT_REQ_GET_FIRMWARE,
NEXT_REQ_READY_TO_BOOT,
NEXT_REQ_MODE_SWITCH,
};
struct gb_bootrom {
struct gb_connection *connection;
const struct firmware *fw;
u8 protocol_major;
u8 protocol_minor;
enum next_request_type next_request;
struct delayed_work dwork;
struct mutex mutex; /* Protects bootrom->fw */
};
static void free_firmware(struct gb_bootrom *bootrom)
{
if (!bootrom->fw)
return;
release_firmware(bootrom->fw);
bootrom->fw = NULL;
}
static void gb_bootrom_timedout(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct gb_bootrom *bootrom = container_of(dwork, struct gb_bootrom, dwork);
struct device *dev = &bootrom->connection->bundle->dev;
const char *reason;
switch (bootrom->next_request) {
case NEXT_REQ_FIRMWARE_SIZE:
reason = "Firmware Size Request";
break;
case NEXT_REQ_GET_FIRMWARE:
reason = "Get Firmware Request";
break;
case NEXT_REQ_READY_TO_BOOT:
reason = "Ready to Boot Request";
break;
case NEXT_REQ_MODE_SWITCH:
reason = "Interface Mode Switch";
break;
default:
reason = NULL;
dev_err(dev, "Invalid next-request: %u", bootrom->next_request);
break;
}
dev_err(dev, "Timed out waiting for %s from the Module\n", reason);
mutex_lock(&bootrom->mutex);
free_firmware(bootrom);
mutex_unlock(&bootrom->mutex);
/* TODO: Power-off Module ? */
}
static void gb_bootrom_set_timeout(struct gb_bootrom *bootrom,
enum next_request_type next, unsigned long timeout)
{
bootrom->next_request = next;
schedule_delayed_work(&bootrom->dwork, msecs_to_jiffies(timeout));
}
static void gb_bootrom_cancel_timeout(struct gb_bootrom *bootrom)
{
cancel_delayed_work_sync(&bootrom->dwork);
}
/*
* The es2 chip doesn't have VID/PID programmed into the hardware and we need to
* hack that up to distinguish different modules and their firmware blobs.
*
* This fetches VID/PID (over bootrom protocol) for es2 chip only, when VID/PID
* already sent during hotplug are 0.
*
* Otherwise, we keep intf->vendor_id/product_id same as what's passed
* during hotplug.
*/
static void bootrom_es2_fixup_vid_pid(struct gb_bootrom *bootrom)
{
struct gb_bootrom_get_vid_pid_response response;
struct gb_connection *connection = bootrom->connection;
struct gb_interface *intf = connection->bundle->intf;
int ret;
if (!(intf->quirks & GB_INTERFACE_QUIRK_NO_GMP_IDS))
return;
ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_GET_VID_PID,
NULL, 0, &response, sizeof(response));
if (ret) {
dev_err(&connection->bundle->dev,
"Bootrom get vid/pid operation failed (%d)\n", ret);
return;
}
/*
* NOTE: This is hacked, so that the same values of VID/PID can be used
* by next firmware level as well. The uevent for bootrom will still
* have VID/PID as 0, though after this point the sysfs files will start
* showing the updated values. But yeah, that's a bit racy as the same
* sysfs files would be showing 0 before this point.
*/
intf->vendor_id = le32_to_cpu(response.vendor_id);
intf->product_id = le32_to_cpu(response.product_id);
dev_dbg(&connection->bundle->dev, "Bootrom got vid (0x%x)/pid (0x%x)\n",
intf->vendor_id, intf->product_id);
}
/* This returns path of the firmware blob on the disk */
static int find_firmware(struct gb_bootrom *bootrom, u8 stage)
{
struct gb_connection *connection = bootrom->connection;
struct gb_interface *intf = connection->bundle->intf;
char firmware_name[49];
int rc;
/* Already have a firmware, free it */
free_firmware(bootrom);
/* Bootrom protocol is only supported for loading Stage 2 firmware */
if (stage != 2) {
dev_err(&connection->bundle->dev, "Invalid boot stage: %u\n",
stage);
return -EINVAL;
}
/*
* Create firmware name
*
* XXX Name it properly..
*/
snprintf(firmware_name, sizeof(firmware_name),
FW_NAME_PREFIX "%08x_%08x_%08x_%08x_s2l.tftf",
intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
intf->vendor_id, intf->product_id);
// FIXME:
// Turn to dev_dbg later after everyone has valid bootloaders with good
// ids, but leave this as dev_info for now to make it easier to track
// down "empty" vid/pid modules.
dev_info(&connection->bundle->dev, "Firmware file '%s' requested\n",
firmware_name);
rc = request_firmware(&bootrom->fw, firmware_name,
&connection->bundle->dev);
if (rc) {
dev_err(&connection->bundle->dev,
"failed to find %s firmware (%d)\n", firmware_name, rc);
}
return rc;
}
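
The snprintf() above pins the on-disk naming scheme: FW_NAME_PREFIX
(defined in firmware.h, not shown in this hunk) followed by four
zero-padded hex IDs and an "_s2l.tftf" suffix. A hedged standalone sketch
of the name that would be requested for made-up IDs, assuming the usual
"gmp_" prefix (an assumption which also makes the 49-byte buffer above an
exact fit):

#include <stdio.h>

int main(void)
{
        char name[49];

        /* made-up IDs; "gmp_" is an assumption for FW_NAME_PREFIX */
        snprintf(name, sizeof(name), "gmp_%08x_%08x_%08x_%08x_s2l.tftf",
                 0x126, 0x1000, 0xdead, 0xbeef);

        /* prints: gmp_00000126_00001000_0000dead_0000beef_s2l.tftf */
        printf("%s\n", name);
        return 0;
}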
static int gb_bootrom_firmware_size_request(struct gb_operation *op)
{
struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
struct gb_bootrom_firmware_size_request *size_request = op->request->payload;
struct gb_bootrom_firmware_size_response *size_response;
struct device *dev = &op->connection->bundle->dev;
int ret;
/* Disable timeouts */
gb_bootrom_cancel_timeout(bootrom);
if (op->request->payload_size != sizeof(*size_request)) {
dev_err(dev, "%s: illegal size of firmware size request (%zu != %zu)\n",
__func__, op->request->payload_size,
sizeof(*size_request));
ret = -EINVAL;
goto queue_work;
}
mutex_lock(&bootrom->mutex);
ret = find_firmware(bootrom, size_request->stage);
if (ret)
goto unlock;
if (!gb_operation_response_alloc(op, sizeof(*size_response),
GFP_KERNEL)) {
dev_err(dev, "%s: error allocating response\n", __func__);
free_firmware(bootrom);
ret = -ENOMEM;
goto unlock;
}
size_response = op->response->payload;
size_response->size = cpu_to_le32(bootrom->fw->size);
dev_dbg(dev, "%s: firmware size %d bytes\n", __func__, size_response->size);
unlock:
mutex_unlock(&bootrom->mutex);
queue_work:
if (!ret) {
/* Refresh timeout */
gb_bootrom_set_timeout(bootrom, NEXT_REQ_GET_FIRMWARE,
NEXT_REQ_TIMEOUT_MS);
}
return ret;
}
static int gb_bootrom_get_firmware(struct gb_operation *op)
{
struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
const struct firmware *fw;
struct gb_bootrom_get_firmware_request *firmware_request;
struct gb_bootrom_get_firmware_response *firmware_response;
struct device *dev = &op->connection->bundle->dev;
unsigned int offset, size;
enum next_request_type next_request;
int ret = 0;
/* Disable timeouts */
gb_bootrom_cancel_timeout(bootrom);
if (op->request->payload_size != sizeof(*firmware_request)) {
dev_err(dev, "%s: Illegal size of get firmware request (%zu %zu)\n",
__func__, op->request->payload_size,
sizeof(*firmware_request));
ret = -EINVAL;
goto queue_work;
}
mutex_lock(&bootrom->mutex);
fw = bootrom->fw;
if (!fw) {
dev_err(dev, "%s: firmware not available\n", __func__);
ret = -EINVAL;
goto unlock;
}
firmware_request = op->request->payload;
offset = le32_to_cpu(firmware_request->offset);
size = le32_to_cpu(firmware_request->size);
if (offset >= fw->size || size > fw->size - offset) {
dev_warn(dev, "bad firmware request (offs = %u, size = %u)\n",
offset, size);
ret = -EINVAL;
goto unlock;
}
if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
GFP_KERNEL)) {
dev_err(dev, "%s: error allocating response\n", __func__);
ret = -ENOMEM;
goto unlock;
}
firmware_response = op->response->payload;
memcpy(firmware_response->data, fw->data + offset, size);
dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n", offset,
size);
unlock:
mutex_unlock(&bootrom->mutex);
queue_work:
/* Refresh timeout */
if (!ret && (offset + size == fw->size))
next_request = NEXT_REQ_READY_TO_BOOT;
else
next_request = NEXT_REQ_GET_FIRMWARE;
gb_bootrom_set_timeout(bootrom, next_request, NEXT_REQ_TIMEOUT_MS);
return ret;
}
static int gb_bootrom_ready_to_boot(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct gb_bootrom *bootrom = gb_connection_get_data(connection);
struct gb_bootrom_ready_to_boot_request *rtb_request;
struct device *dev = &connection->bundle->dev;
u8 status;
int ret = 0;
/* Disable timeouts */
gb_bootrom_cancel_timeout(bootrom);
if (op->request->payload_size != sizeof(*rtb_request)) {
dev_err(dev, "%s: Illegal size of ready to boot request (%zu %zu)\n",
__func__, op->request->payload_size,
sizeof(*rtb_request));
ret = -EINVAL;
goto queue_work;
}
rtb_request = op->request->payload;
status = rtb_request->status;
/* Return error if the blob was invalid */
if (status == GB_BOOTROM_BOOT_STATUS_INVALID) {
ret = -EINVAL;
goto queue_work;
}
/*
* XXX Should we return error for insecure firmware?
*/
dev_dbg(dev, "ready to boot: 0x%x, 0\n", status);
queue_work:
/*
* Refresh timeout, the Interface shall load the new personality and
* send a new hotplug request, which shall get rid of the bootrom
* connection. As that can take some time, increase the timeout a bit.
*/
gb_bootrom_set_timeout(bootrom, NEXT_REQ_MODE_SWITCH,
MODE_SWITCH_TIMEOUT_MS);
return ret;
}
static int gb_bootrom_request_handler(struct gb_operation *op)
{
u8 type = op->type;
switch (type) {
case GB_BOOTROM_TYPE_FIRMWARE_SIZE:
return gb_bootrom_firmware_size_request(op);
case GB_BOOTROM_TYPE_GET_FIRMWARE:
return gb_bootrom_get_firmware(op);
case GB_BOOTROM_TYPE_READY_TO_BOOT:
return gb_bootrom_ready_to_boot(op);
default:
dev_err(&op->connection->bundle->dev,
"unsupported request: %u\n", type);
return -EINVAL;
}
}
static int gb_bootrom_get_version(struct gb_bootrom *bootrom)
{
struct gb_bundle *bundle = bootrom->connection->bundle;
struct gb_bootrom_version_request request;
struct gb_bootrom_version_response response;
int ret;
request.major = GB_BOOTROM_VERSION_MAJOR;
request.minor = GB_BOOTROM_VERSION_MINOR;
ret = gb_operation_sync(bootrom->connection,
GB_BOOTROM_TYPE_VERSION,
&request, sizeof(request), &response,
sizeof(response));
if (ret) {
dev_err(&bundle->dev,
"failed to get protocol version: %d\n",
ret);
return ret;
}
if (response.major > request.major) {
dev_err(&bundle->dev,
"unsupported major protocol version (%u > %u)\n",
response.major, request.major);
return -ENOTSUPP;
}
bootrom->protocol_major = response.major;
bootrom->protocol_minor = response.minor;
dev_dbg(&bundle->dev, "%s - %u.%u\n", __func__, response.major,
response.minor);
return 0;
}
static int gb_bootrom_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct greybus_descriptor_cport *cport_desc;
struct gb_connection *connection;
struct gb_bootrom *bootrom;
int ret;
if (bundle->num_cports != 1)
return -ENODEV;
cport_desc = &bundle->cport_desc[0];
if (cport_desc->protocol_id != GREYBUS_PROTOCOL_BOOTROM)
return -ENODEV;
bootrom = kzalloc(sizeof(*bootrom), GFP_KERNEL);
if (!bootrom)
return -ENOMEM;
connection = gb_connection_create(bundle,
le16_to_cpu(cport_desc->id),
gb_bootrom_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto err_free_bootrom;
}
gb_connection_set_data(connection, bootrom);
bootrom->connection = connection;
mutex_init(&bootrom->mutex);
INIT_DELAYED_WORK(&bootrom->dwork, gb_bootrom_timedout);
greybus_set_drvdata(bundle, bootrom);
ret = gb_connection_enable_tx(connection);
if (ret)
goto err_connection_destroy;
ret = gb_bootrom_get_version(bootrom);
if (ret)
goto err_connection_disable;
bootrom_es2_fixup_vid_pid(bootrom);
ret = gb_connection_enable(connection);
if (ret)
goto err_connection_disable;
/* Refresh timeout */
gb_bootrom_set_timeout(bootrom, NEXT_REQ_FIRMWARE_SIZE,
NEXT_REQ_TIMEOUT_MS);
/* Tell bootrom we're ready. */
ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_AP_READY, NULL, 0,
NULL, 0);
if (ret) {
dev_err(&connection->bundle->dev,
"failed to send AP READY: %d\n", ret);
goto err_cancel_timeout;
}
dev_dbg(&bundle->dev, "AP_READY sent\n");
return 0;
err_cancel_timeout:
gb_bootrom_cancel_timeout(bootrom);
err_connection_disable:
gb_connection_disable(connection);
err_connection_destroy:
gb_connection_destroy(connection);
err_free_bootrom:
kfree(bootrom);
return ret;
}
static void gb_bootrom_disconnect(struct gb_bundle *bundle)
{
struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);
dev_dbg(&bundle->dev, "%s\n", __func__);
gb_connection_disable(bootrom->connection);
/* Disable timeouts */
gb_bootrom_cancel_timeout(bootrom);
/*
* Release firmware:
*
* As the connection and the delayed work are already disabled, we don't
* need to lock access to bootrom->fw here.
*/
free_firmware(bootrom);
gb_connection_destroy(bootrom->connection);
kfree(bootrom);
}
static const struct greybus_bundle_id gb_bootrom_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BOOTROM) },
{ }
};
static struct greybus_driver gb_bootrom_driver = {
.name = "bootrom",
.probe = gb_bootrom_probe,
.disconnect = gb_bootrom_disconnect,
.id_table = gb_bootrom_id_table,
};
module_greybus_driver(gb_bootrom_driver);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,253 @@
/*
* Greybus bundles
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_trace.h"
static ssize_t bundle_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);
static ssize_t bundle_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
	if (!bundle->state)
return sprintf(buf, "\n");
return sprintf(buf, "%s\n", bundle->state);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
kfree(bundle->state);
bundle->state = kstrdup(buf, GFP_KERNEL);
if (!bundle->state)
return -ENOMEM;
/* Tell userspace that the file contents changed */
sysfs_notify(&bundle->dev.kobj, NULL, "state");
return size;
}
static DEVICE_ATTR_RW(state);
static struct attribute *bundle_attrs[] = {
&dev_attr_bundle_class.attr,
&dev_attr_bundle_id.attr,
&dev_attr_state.attr,
NULL,
};
ATTRIBUTE_GROUPS(bundle);
static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
u8 bundle_id)
{
struct gb_bundle *bundle;
list_for_each_entry(bundle, &intf->bundles, links) {
if (bundle->id == bundle_id)
return bundle;
}
return NULL;
}
static void gb_bundle_release(struct device *dev)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
trace_gb_bundle_release(bundle);
kfree(bundle->state);
kfree(bundle->cport_desc);
kfree(bundle);
}
#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
struct gb_connection *connection;
list_for_each_entry(connection, &bundle->connections, bundle_links)
gb_connection_disable(connection);
}
static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
struct gb_connection *connection;
list_for_each_entry(connection, &bundle->connections, bundle_links)
gb_connection_enable(connection);
}
static int gb_bundle_suspend(struct device *dev)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
const struct dev_pm_ops *pm = dev->driver->pm;
int ret;
if (pm && pm->runtime_suspend) {
ret = pm->runtime_suspend(&bundle->dev);
if (ret)
return ret;
} else {
gb_bundle_disable_all_connections(bundle);
}
ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
if (ret) {
		/* best-effort rollback; keep reporting the suspend error */
		if (pm && pm->runtime_resume)
			pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);
return ret;
}
return 0;
}
static int gb_bundle_resume(struct device *dev)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
const struct dev_pm_ops *pm = dev->driver->pm;
int ret;
ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
if (ret)
return ret;
if (pm && pm->runtime_resume) {
ret = pm->runtime_resume(dev);
if (ret)
return ret;
} else {
gb_bundle_enable_all_connections(bundle);
}
return 0;
}
static int gb_bundle_idle(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
return 0;
}
#endif
static const struct dev_pm_ops gb_bundle_pm_ops = {
SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};
struct device_type greybus_bundle_type = {
.name = "greybus_bundle",
.release = gb_bundle_release,
.pm = &gb_bundle_pm_ops,
};
/*
* Create a gb_bundle structure to represent a discovered
* bundle. Returns a pointer to the new bundle or a null
* pointer if a failure occurs due to memory exhaustion.
*/
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
u8 class)
{
struct gb_bundle *bundle;
if (bundle_id == BUNDLE_ID_NONE) {
dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
return NULL;
}
/*
* Reject any attempt to reuse a bundle id. We initialize
* these serially, so there's no need to worry about keeping
* the interface bundle list locked here.
*/
if (gb_bundle_find(intf, bundle_id)) {
dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
return NULL;
}
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
if (!bundle)
return NULL;
bundle->intf = intf;
bundle->id = bundle_id;
bundle->class = class;
INIT_LIST_HEAD(&bundle->connections);
bundle->dev.parent = &intf->dev;
bundle->dev.bus = &greybus_bus_type;
bundle->dev.type = &greybus_bundle_type;
bundle->dev.groups = bundle_groups;
bundle->dev.dma_mask = intf->dev.dma_mask;
device_initialize(&bundle->dev);
dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);
list_add(&bundle->links, &intf->bundles);
trace_gb_bundle_create(bundle);
return bundle;
}
int gb_bundle_add(struct gb_bundle *bundle)
{
int ret;
ret = device_add(&bundle->dev);
if (ret) {
dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
return ret;
}
trace_gb_bundle_add(bundle);
return 0;
}
/*
* Tear down a previously set up bundle.
*/
void gb_bundle_destroy(struct gb_bundle *bundle)
{
trace_gb_bundle_destroy(bundle);
if (device_is_registered(&bundle->dev))
device_del(&bundle->dev);
list_del(&bundle->links);
put_device(&bundle->dev);
}
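
Because state_store() calls sysfs_notify(), the "state" attribute is
pollable: userspace can sleep until another writer changes it. A hedged
sketch with a made-up bundle name (bundles are named
"<interface>.<bundle id>" by dev_set_name() above):

/*
 * Hedged sketch: block until the bundle's "state" attribute changes.
 * sysfs_notify() wakes pollers with POLLPRI; the path is an example.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct pollfd pfd;
        char buf[128];
        ssize_t n;

        pfd.fd = open("/sys/bus/greybus/devices/1-2.2/state", O_RDONLY);
        if (pfd.fd < 0)
                return 1;
        pfd.events = POLLPRI;

        read(pfd.fd, buf, sizeof(buf)); /* arm the notification */

        while (poll(&pfd, 1, -1) > 0) {
                lseek(pfd.fd, 0, SEEK_SET);
                n = read(pfd.fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("state: %s", buf);
                }
        }
        close(pfd.fd);
        return 0;
}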


@@ -0,0 +1,90 @@
/*
* Greybus bundles
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __BUNDLE_H
#define __BUNDLE_H
#include <linux/list.h>
#define BUNDLE_ID_NONE U8_MAX
/* Greybus "public" definitions" */
struct gb_bundle {
struct device dev;
struct gb_interface *intf;
u8 id;
u8 class;
u8 class_major;
u8 class_minor;
size_t num_cports;
struct greybus_descriptor_cport *cport_desc;
struct list_head connections;
u8 *state;
struct list_head links; /* interface->bundles */
};
#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
/* Greybus "private" definitions" */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
u8 class);
int gb_bundle_add(struct gb_bundle *bundle);
void gb_bundle_destroy(struct gb_bundle *bundle);
/* Bundle Runtime PM wrappers */
#ifdef CONFIG_PM
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{
int retval;
retval = pm_runtime_get_sync(&bundle->dev);
if (retval < 0) {
dev_err(&bundle->dev,
"pm_runtime_get_sync failed: %d\n", retval);
pm_runtime_put_noidle(&bundle->dev);
return retval;
}
return 0;
}
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{
int retval;
pm_runtime_mark_last_busy(&bundle->dev);
retval = pm_runtime_put_autosuspend(&bundle->dev);
return retval;
}
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
{
pm_runtime_get_noresume(&bundle->dev);
}
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
{
pm_runtime_put_noidle(&bundle->dev);
}
#else
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{ return 0; }
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{ return 0; }
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
#endif
#endif /* __BUNDLE_H */
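
The wrappers above encode the usual runtime-PM bracket for bundle drivers:
a get_sync() before touching the hardware, a put_autosuspend() once done.
A minimal sketch of the intended calling pattern, where gb_do_some_io() is
a made-up placeholder for any real operation:

static int example_bundle_io(struct gb_bundle *bundle)
{
        int ret;

        ret = gb_pm_runtime_get_sync(bundle); /* resume if suspended */
        if (ret)
                return ret;

        ret = gb_do_some_io(bundle); /* hypothetical work */

        gb_pm_runtime_put_autosuspend(bundle); /* re-arm autosuspend */
        return ret;
}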

File diff suppressed because it is too large


@@ -0,0 +1,938 @@
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/workqueue.h>
#include "greybus.h"
#include "greybus_trace.h"
#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000
static void gb_connection_kref_release(struct kref *kref);
static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);
/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
struct gb_host_device *hd = intf->hd;
struct gb_connection *connection;
list_for_each_entry(connection, &hd->connections, hd_links) {
if (connection->intf == intf &&
connection->intf_cport_id == cport_id)
return true;
}
return false;
}
static void gb_connection_get(struct gb_connection *connection)
{
kref_get(&connection->kref);
trace_gb_connection_get(connection);
}
static void gb_connection_put(struct gb_connection *connection)
{
trace_gb_connection_put(connection);
kref_put(&connection->kref, gb_connection_kref_release);
}
/*
* Returns a reference-counted pointer to the connection if found.
*/
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
struct gb_connection *connection;
unsigned long flags;
spin_lock_irqsave(&gb_connections_lock, flags);
list_for_each_entry(connection, &hd->connections, hd_links)
if (connection->hd_cport_id == cport_id) {
gb_connection_get(connection);
goto found;
}
connection = NULL;
found:
spin_unlock_irqrestore(&gb_connections_lock, flags);
return connection;
}
/*
* Callback from the host driver to let us know that data has been
* received on the bundle.
*/
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
u8 *data, size_t length)
{
struct gb_connection *connection;
trace_gb_hd_in(hd);
connection = gb_connection_hd_find(hd, cport_id);
if (!connection) {
dev_err(&hd->dev,
"nonexistent connection (%zu bytes dropped)\n", length);
return;
}
gb_connection_recv(connection, data, length);
gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
static void gb_connection_kref_release(struct kref *kref)
{
struct gb_connection *connection;
connection = container_of(kref, struct gb_connection, kref);
trace_gb_connection_release(connection);
kfree(connection);
}
static void gb_connection_init_name(struct gb_connection *connection)
{
u16 hd_cport_id = connection->hd_cport_id;
u16 cport_id = 0;
u8 intf_id = 0;
if (connection->intf) {
intf_id = connection->intf->interface_id;
cport_id = connection->intf_cport_id;
}
snprintf(connection->name, sizeof(connection->name),
"%u/%u:%u", hd_cport_id, intf_id, cport_id);
}
/*
* _gb_connection_create() - create a Greybus connection
* @hd: host device of the connection
* @hd_cport_id: host-device cport id, or -1 for dynamic allocation
* @intf: remote interface, or NULL for static connections
* @bundle: remote-interface bundle (may be NULL)
* @cport_id: remote-interface cport id, or 0 for static connections
* @handler: request handler (may be NULL)
* @flags: connection flags
*
* Create a Greybus connection, representing the bidirectional link
* between a CPort on a (local) Greybus host device and a CPort on
* another Greybus interface.
*
* A connection also maintains the state of operations sent over the
* connection.
*
* Serialised against concurrent create and destroy using the
* gb_connection_mutex.
*
* Return: A pointer to the new connection if successful, or an ERR_PTR
* otherwise.
*/
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
struct gb_interface *intf,
struct gb_bundle *bundle, int cport_id,
gb_request_handler_t handler,
unsigned long flags)
{
struct gb_connection *connection;
int ret;
mutex_lock(&gb_connection_mutex);
if (intf && gb_connection_cport_in_use(intf, cport_id)) {
dev_err(&intf->dev, "cport %u already in use\n", cport_id);
ret = -EBUSY;
goto err_unlock;
}
ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
if (ret < 0) {
dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
goto err_unlock;
}
hd_cport_id = ret;
connection = kzalloc(sizeof(*connection), GFP_KERNEL);
if (!connection) {
ret = -ENOMEM;
goto err_hd_cport_release;
}
connection->hd_cport_id = hd_cport_id;
connection->intf_cport_id = cport_id;
connection->hd = hd;
connection->intf = intf;
connection->bundle = bundle;
connection->handler = handler;
connection->flags = flags;
if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
connection->state = GB_CONNECTION_STATE_DISABLED;
atomic_set(&connection->op_cycle, 0);
mutex_init(&connection->mutex);
spin_lock_init(&connection->lock);
INIT_LIST_HEAD(&connection->operations);
connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
dev_name(&hd->dev), hd_cport_id);
if (!connection->wq) {
ret = -ENOMEM;
goto err_free_connection;
}
kref_init(&connection->kref);
gb_connection_init_name(connection);
spin_lock_irq(&gb_connections_lock);
list_add(&connection->hd_links, &hd->connections);
if (bundle)
list_add(&connection->bundle_links, &bundle->connections);
else
INIT_LIST_HEAD(&connection->bundle_links);
spin_unlock_irq(&gb_connections_lock);
mutex_unlock(&gb_connection_mutex);
trace_gb_connection_create(connection);
return connection;
err_free_connection:
kfree(connection);
err_hd_cport_release:
gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
mutex_unlock(&gb_connection_mutex);
return ERR_PTR(ret);
}
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
gb_request_handler_t handler)
{
return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
GB_CONNECTION_FLAG_CONTROL |
GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
gb_request_handler_t handler)
{
struct gb_interface *intf = bundle->intf;
return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
gb_request_handler_t handler,
unsigned long flags)
{
struct gb_interface *intf = bundle->intf;
if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
unsigned long flags)
{
flags |= GB_CONNECTION_FLAG_OFFLOADED;
return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
if (!hd->driver->cport_enable)
return 0;
ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
connection->flags);
if (ret) {
dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
connection->name, ret);
return ret;
}
return 0;
}
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
if (!hd->driver->cport_disable)
return;
ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
if (ret) {
dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
connection->name, ret);
}
}
static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
if (!hd->driver->cport_connected)
return 0;
ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
if (ret) {
dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
connection->name, ret);
return ret;
}
return 0;
}
static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
if (!hd->driver->cport_flush)
return 0;
ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
if (ret) {
dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
connection->name, ret);
return ret;
}
return 0;
}
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
size_t peer_space;
int ret;
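/*
 * Reserve peer buffer space for the remaining cport-shutdown request and,
 * when a mode switch follows, for the payload-less mode-switch request
 * header.
 */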
peer_space = sizeof(struct gb_operation_msg_hdr) +
sizeof(struct gb_cport_shutdown_request);
if (connection->mode_switch)
peer_space += sizeof(struct gb_operation_msg_hdr);
ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
peer_space,
GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
if (ret) {
dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
connection->name, ret);
return ret;
}
return 0;
}
static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
if (ret) {
dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
connection->name, ret);
return ret;
}
return 0;
}
/*
* Request the SVC to create a connection from AP's cport to interface's
* cport.
*/
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
struct gb_interface *intf;
u8 cport_flags;
int ret;
if (gb_connection_is_static(connection))
return 0;
intf = connection->intf;
/*
* Enable either E2EFC or CSD, unless no flow control is requested.
*/
cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
if (gb_connection_flow_control_disabled(connection)) {
cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
} else if (gb_connection_e2efc_enabled(connection)) {
cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
GB_SVC_CPORT_FLAG_E2EFC;
}
ret = gb_svc_connection_create(hd->svc,
hd->svc->ap_intf_id,
connection->hd_cport_id,
intf->interface_id,
connection->intf_cport_id,
cport_flags);
if (ret) {
dev_err(&connection->hd->dev,
"%s: failed to create svc connection: %d\n",
connection->name, ret);
return ret;
}
return 0;
}
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
if (gb_connection_is_static(connection))
return;
gb_svc_connection_destroy(connection->hd->svc,
connection->hd->svc->ap_intf_id,
connection->hd_cport_id,
connection->intf->interface_id,
connection->intf_cport_id);
}
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
struct gb_control *control;
u16 cport_id = connection->intf_cport_id;
int ret;
if (gb_connection_is_static(connection))
return 0;
if (gb_connection_is_control(connection))
return 0;
control = connection->intf->control;
ret = gb_control_connected_operation(control, cport_id);
if (ret) {
dev_err(&connection->bundle->dev,
"failed to connect cport: %d\n", ret);
return ret;
}
return 0;
}
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
struct gb_control *control;
u16 cport_id = connection->intf_cport_id;
int ret;
if (gb_connection_is_static(connection))
return;
control = connection->intf->control;
ret = gb_control_disconnecting_operation(control, cport_id);
if (ret) {
dev_err(&connection->hd->dev,
"%s: failed to send disconnecting: %d\n",
connection->name, ret);
}
}
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
struct gb_control *control;
u16 cport_id = connection->intf_cport_id;
int ret;
if (gb_connection_is_static(connection))
return;
control = connection->intf->control;
if (gb_connection_is_control(connection)) {
if (connection->mode_switch) {
ret = gb_control_mode_switch_operation(control);
if (ret) {
/*
* Allow mode switch to time out waiting for
* mailbox event.
*/
return;
}
}
return;
}
ret = gb_control_disconnected_operation(control, cport_id);
if (ret) {
dev_warn(&connection->bundle->dev,
"failed to disconnect cport: %d\n", ret);
}
}
static int gb_connection_shutdown_operation(struct gb_connection *connection,
u8 phase)
{
struct gb_cport_shutdown_request *req;
struct gb_operation *operation;
int ret;
operation = gb_operation_create_core(connection,
GB_REQUEST_TYPE_CPORT_SHUTDOWN,
sizeof(*req), 0, 0,
GFP_KERNEL);
if (!operation)
return -ENOMEM;
req = operation->request->payload;
req->phase = phase;
ret = gb_operation_request_send_sync(operation);
gb_operation_put(operation);
return ret;
}
static int gb_connection_cport_shutdown(struct gb_connection *connection,
u8 phase)
{
struct gb_host_device *hd = connection->hd;
const struct gb_hd_driver *drv = hd->driver;
int ret;
if (gb_connection_is_static(connection))
return 0;
if (gb_connection_is_offloaded(connection)) {
if (!drv->cport_shutdown)
return 0;
ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
GB_OPERATION_TIMEOUT_DEFAULT);
} else {
ret = gb_connection_shutdown_operation(connection, phase);
}
if (ret) {
dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
connection->name, phase, ret);
return ret;
}
return 0;
}
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
return gb_connection_cport_shutdown(connection, 1);
}
static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
return gb_connection_cport_shutdown(connection, 2);
}
/*
* Cancel all active operations on a connection.
*
* Locking: Called with connection lock held and state set to DISABLED or
* DISCONNECTING.
*/
static void gb_connection_cancel_operations(struct gb_connection *connection,
int errno)
__must_hold(&connection->lock)
{
struct gb_operation *operation;
while (!list_empty(&connection->operations)) {
operation = list_last_entry(&connection->operations,
struct gb_operation, links);
gb_operation_get(operation);
spin_unlock_irq(&connection->lock);
if (gb_operation_is_incoming(operation))
gb_operation_cancel_incoming(operation, errno);
else
gb_operation_cancel(operation, errno);
gb_operation_put(operation);
spin_lock_irq(&connection->lock);
}
}
/*
* Cancel all active incoming operations on a connection.
*
* Locking: Called with connection lock held and state set to ENABLED_TX.
*/
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
int errno)
__must_hold(&connection->lock)
{
struct gb_operation *operation;
bool incoming;
while (!list_empty(&connection->operations)) {
incoming = false;
list_for_each_entry(operation, &connection->operations,
links) {
if (gb_operation_is_incoming(operation)) {
gb_operation_get(operation);
incoming = true;
break;
}
}
if (!incoming)
break;
spin_unlock_irq(&connection->lock);
/* FIXME: flush, not cancel? */
gb_operation_cancel_incoming(operation, errno);
gb_operation_put(operation);
spin_lock_irq(&connection->lock);
}
}
/*
* _gb_connection_enable() - enable a connection
* @connection: connection to enable
* @rx: whether to enable incoming requests
*
* Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
* ENABLED_TX->ENABLED state transitions.
*
* Locking: Caller holds connection->mutex.
*/
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
int ret;
/* Handle ENABLED_TX -> ENABLED transitions. */
if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
if (!(connection->handler && rx))
return 0;
spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_ENABLED;
spin_unlock_irq(&connection->lock);
return 0;
}
ret = gb_connection_hd_cport_enable(connection);
if (ret)
return ret;
ret = gb_connection_svc_connection_create(connection);
if (ret)
goto err_hd_cport_clear;
ret = gb_connection_hd_cport_connected(connection);
if (ret)
goto err_svc_connection_destroy;
spin_lock_irq(&connection->lock);
if (connection->handler && rx)
connection->state = GB_CONNECTION_STATE_ENABLED;
else
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
spin_unlock_irq(&connection->lock);
ret = gb_connection_control_connected(connection);
if (ret)
goto err_control_disconnecting;
return 0;
err_control_disconnecting:
spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISCONNECTING;
gb_connection_cancel_operations(connection, -ESHUTDOWN);
spin_unlock_irq(&connection->lock);
/* Transmit queue should already be empty. */
gb_connection_hd_cport_flush(connection);
gb_connection_control_disconnecting(connection);
gb_connection_cport_shutdown_phase_1(connection);
gb_connection_hd_cport_quiesce(connection);
gb_connection_cport_shutdown_phase_2(connection);
gb_connection_control_disconnected(connection);
connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
gb_connection_hd_cport_clear(connection);
gb_connection_hd_cport_disable(connection);
return ret;
}
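/*
 * Connection state transitions driven by the helpers below:
 *
 * DISABLED -> ENABLED or ENABLED_TX (gb_connection_enable{,_tx})
 * ENABLED -> ENABLED_TX (gb_connection_disable_rx)
 * ENABLED or ENABLED_TX -> DISCONNECTING -> DISABLED (gb_connection_disable)
 */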
int gb_connection_enable(struct gb_connection *connection)
{
int ret = 0;
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_ENABLED)
goto out_unlock;
ret = _gb_connection_enable(connection, true);
if (!ret)
trace_gb_connection_enable(connection);
out_unlock:
mutex_unlock(&connection->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
int gb_connection_enable_tx(struct gb_connection *connection)
{
int ret = 0;
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_ENABLED) {
ret = -EINVAL;
goto out_unlock;
}
if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
goto out_unlock;
ret = _gb_connection_enable(connection, false);
if (!ret)
trace_gb_connection_enable(connection);
out_unlock:
mutex_unlock(&connection->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection)
{
mutex_lock(&connection->mutex);
spin_lock_irq(&connection->lock);
if (connection->state != GB_CONNECTION_STATE_ENABLED) {
spin_unlock_irq(&connection->lock);
goto out_unlock;
}
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
spin_unlock_irq(&connection->lock);
trace_gb_connection_disable(connection);
out_unlock:
mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
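/*
 * While a mode switch is pending, gb_connection_disable() defers tearing
 * down the SVC connection and host cport;
 * gb_connection_mode_switch_complete() finishes the teardown once the
 * switch is done.
 */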
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
connection->mode_switch = true;
}
void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
gb_connection_svc_connection_destroy(connection);
gb_connection_hd_cport_clear(connection);
gb_connection_hd_cport_disable(connection);
connection->mode_switch = false;
}
void gb_connection_disable(struct gb_connection *connection)
{
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED)
goto out_unlock;
trace_gb_connection_disable(connection);
spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISCONNECTING;
gb_connection_cancel_operations(connection, -ESHUTDOWN);
spin_unlock_irq(&connection->lock);
gb_connection_hd_cport_flush(connection);
gb_connection_control_disconnecting(connection);
gb_connection_cport_shutdown_phase_1(connection);
gb_connection_hd_cport_quiesce(connection);
gb_connection_cport_shutdown_phase_2(connection);
gb_connection_control_disconnected(connection);
connection->state = GB_CONNECTION_STATE_DISABLED;
/* control-connection tear down is deferred when mode switching */
if (!connection->mode_switch) {
gb_connection_svc_connection_destroy(connection);
gb_connection_hd_cport_clear(connection);
gb_connection_hd_cport_disable(connection);
}
out_unlock:
mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED)
goto out_unlock;
trace_gb_connection_disable(connection);
spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISABLED;
gb_connection_cancel_operations(connection, -ESHUTDOWN);
spin_unlock_irq(&connection->lock);
gb_connection_hd_cport_flush(connection);
gb_connection_svc_connection_destroy(connection);
gb_connection_hd_cport_clear(connection);
gb_connection_hd_cport_disable(connection);
out_unlock:
mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
if (!connection)
return;
if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
gb_connection_disable(connection);
mutex_lock(&gb_connection_mutex);
spin_lock_irq(&gb_connections_lock);
list_del(&connection->bundle_links);
list_del(&connection->hd_links);
spin_unlock_irq(&gb_connections_lock);
destroy_workqueue(connection->wq);
gb_hd_cport_release(connection->hd, connection->hd_cport_id);
connection->hd_cport_id = CPORT_ID_BAD;
mutex_unlock(&gb_connection_mutex);
gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
if (!hd->driver->latency_tag_enable)
return;
ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
if (ret) {
dev_err(&connection->hd->dev,
"%s: failed to enable latency tag: %d\n",
connection->name, ret);
}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
struct gb_host_device *hd = connection->hd;
int ret;
if (!hd->driver->latency_tag_disable)
return;
ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
if (ret) {
dev_err(&connection->hd->dev,
"%s: failed to disable latency tag: %d\n",
connection->name, ret);
}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);


@ -0,0 +1,129 @@
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __CONNECTION_H
#define __CONNECTION_H
#include <linux/list.h>
#include <linux/kfifo.h>
#define GB_CONNECTION_FLAG_CSD BIT(0)
#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
#define GB_CONNECTION_FLAG_CONTROL BIT(4)
#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
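/*
 * Flags in GB_CONNECTION_FLAG_CORE_MASK are managed by core and are
 * stripped, with a one-time warning, by gb_connection_create_flags().
 */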
enum gb_connection_state {
GB_CONNECTION_STATE_DISABLED = 0,
GB_CONNECTION_STATE_ENABLED_TX = 1,
GB_CONNECTION_STATE_ENABLED = 2,
GB_CONNECTION_STATE_DISCONNECTING = 3,
};
struct gb_operation;
typedef int (*gb_request_handler_t)(struct gb_operation *);
struct gb_connection {
struct gb_host_device *hd;
struct gb_interface *intf;
struct gb_bundle *bundle;
struct kref kref;
u16 hd_cport_id;
u16 intf_cport_id;
struct list_head hd_links;
struct list_head bundle_links;
gb_request_handler_t handler;
unsigned long flags;
struct mutex mutex;
spinlock_t lock;
enum gb_connection_state state;
struct list_head operations;
char name[16];
struct workqueue_struct *wq;
atomic_t op_cycle;
void *private;
bool mode_switch;
};
struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
u16 hd_cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
u16 cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
u16 cport_id, gb_request_handler_t handler,
unsigned long flags);
struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
u16 cport_id, unsigned long flags);
void gb_connection_destroy(struct gb_connection *connection);
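/*
 * Typical bundle-driver usage (sketch): create and enable a connection in
 * probe, and tear it down in reverse order on disconnect:
 *
 * conn = gb_connection_create(bundle, cport_id, handler);
 * ret = gb_connection_enable(conn);
 * ...
 * gb_connection_disable(conn);
 * gb_connection_destroy(conn);
 */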
static inline bool gb_connection_is_static(struct gb_connection *connection)
{
return !connection->intf;
}
int gb_connection_enable(struct gb_connection *connection);
int gb_connection_enable_tx(struct gb_connection *connection);
void gb_connection_disable_rx(struct gb_connection *connection);
void gb_connection_disable(struct gb_connection *connection);
void gb_connection_disable_forced(struct gb_connection *connection);
void gb_connection_mode_switch_prepare(struct gb_connection *connection);
void gb_connection_mode_switch_complete(struct gb_connection *connection);
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
u8 *data, size_t length);
void gb_connection_latency_tag_enable(struct gb_connection *connection);
void gb_connection_latency_tag_disable(struct gb_connection *connection);
static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
{
return !(connection->flags & GB_CONNECTION_FLAG_CSD);
}
static inline bool
gb_connection_flow_control_disabled(struct gb_connection *connection)
{
return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
}
static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
{
return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
}
static inline bool gb_connection_is_control(struct gb_connection *connection)
{
return connection->flags & GB_CONNECTION_FLAG_CONTROL;
}
static inline void *gb_connection_get_data(struct gb_connection *connection)
{
return connection->private;
}
static inline void gb_connection_set_data(struct gb_connection *connection,
void *data)
{
connection->private = data;
}
#endif /* __CONNECTION_H */


@ -0,0 +1,635 @@
/*
* Greybus CPort control protocol.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "greybus.h"
/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR 0
#define GB_CONTROL_VERSION_MINOR 1
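/*
 * Negotiate the control-protocol version: the AP offers its highest
 * supported version and the interface replies with the version to use,
 * whose major number must not exceed the one offered.
 */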
static int gb_control_get_version(struct gb_control *control)
{
struct gb_interface *intf = control->connection->intf;
struct gb_control_version_request request;
struct gb_control_version_response response;
int ret;
request.major = GB_CONTROL_VERSION_MAJOR;
request.minor = GB_CONTROL_VERSION_MINOR;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_VERSION,
&request, sizeof(request), &response,
sizeof(response));
if (ret) {
dev_err(&intf->dev,
"failed to get control-protocol version: %d\n",
ret);
return ret;
}
if (response.major > request.major) {
dev_err(&intf->dev,
"unsupported major control-protocol version (%u > %u)\n",
response.major, request.major);
return -ENOTSUPP;
}
control->protocol_major = response.major;
control->protocol_minor = response.minor;
dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
response.minor);
return 0;
}
static int gb_control_get_bundle_version(struct gb_control *control,
struct gb_bundle *bundle)
{
struct gb_interface *intf = control->connection->intf;
struct gb_control_bundle_version_request request;
struct gb_control_bundle_version_response response;
int ret;
request.bundle_id = bundle->id;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_BUNDLE_VERSION,
&request, sizeof(request),
&response, sizeof(response));
if (ret) {
dev_err(&intf->dev,
"failed to get bundle %u class version: %d\n",
bundle->id, ret);
return ret;
}
bundle->class_major = response.major;
bundle->class_minor = response.minor;
dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
response.major, response.minor);
return 0;
}
int gb_control_get_bundle_versions(struct gb_control *control)
{
struct gb_interface *intf = control->connection->intf;
struct gb_bundle *bundle;
int ret;
if (!control->has_bundle_version)
return 0;
list_for_each_entry(bundle, &intf->bundles, links) {
ret = gb_control_get_bundle_version(control, bundle);
if (ret)
return ret;
}
return 0;
}
/* Get Manifest's size from the interface */
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
struct gb_control_get_manifest_size_response response;
struct gb_connection *connection = intf->control->connection;
int ret;
ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
NULL, 0, &response, sizeof(response));
if (ret) {
dev_err(&connection->intf->dev,
"failed to get manifest size: %d\n", ret);
return ret;
}
return le16_to_cpu(response.size);
}
/* Reads Manifest from the interface */
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
size_t size)
{
struct gb_connection *connection = intf->control->connection;
return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
NULL, 0, manifest, size);
}
int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
struct gb_control_connected_request request;
request.cport_id = cpu_to_le16(cport_id);
return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
&request, sizeof(request), NULL, 0);
}
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
struct gb_control_disconnected_request request;
request.cport_id = cpu_to_le16(cport_id);
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_DISCONNECTED, &request,
sizeof(request), NULL, 0);
}
int gb_control_disconnecting_operation(struct gb_control *control,
u16 cport_id)
{
struct gb_control_disconnecting_request *request;
struct gb_operation *operation;
int ret;
operation = gb_operation_create_core(control->connection,
GB_CONTROL_TYPE_DISCONNECTING,
sizeof(*request), 0, 0,
GFP_KERNEL);
if (!operation)
return -ENOMEM;
request = operation->request->payload;
request->cport_id = cpu_to_le16(cport_id);
ret = gb_operation_request_send_sync(operation);
if (ret) {
dev_err(&control->dev, "failed to send disconnecting: %d\n",
ret);
}
gb_operation_put(operation);
return ret;
}
int gb_control_mode_switch_operation(struct gb_control *control)
{
struct gb_operation *operation;
int ret;
operation = gb_operation_create_core(control->connection,
GB_CONTROL_TYPE_MODE_SWITCH,
0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
GFP_KERNEL);
if (!operation)
return -ENOMEM;
ret = gb_operation_request_send_sync(operation);
if (ret)
dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
gb_operation_put(operation);
return ret;
}
int gb_control_timesync_enable(struct gb_control *control, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk)
{
struct gb_control_timesync_enable_request request;
request.count = count;
request.frame_time = cpu_to_le64(frame_time);
request.strobe_delay = cpu_to_le32(strobe_delay);
request.refclk = cpu_to_le32(refclk);
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
sizeof(request), NULL, 0);
}
int gb_control_timesync_disable(struct gb_control *control)
{
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
NULL, 0);
}
int gb_control_timesync_get_last_event(struct gb_control *control,
u64 *frame_time)
{
struct gb_control_timesync_get_last_event_response response;
int ret;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
NULL, 0, &response, sizeof(response));
if (!ret)
*frame_time = le64_to_cpu(response.frame_time);
return ret;
}
int gb_control_timesync_authoritative(struct gb_control *control,
u64 *frame_time)
{
struct gb_control_timesync_authoritative_request request;
int i;
for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
request.frame_time[i] = cpu_to_le64(frame_time[i]);
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
&request, sizeof(request),
NULL, 0);
}
static int gb_control_bundle_pm_status_map(u8 status)
{
switch (status) {
case GB_CONTROL_BUNDLE_PM_INVAL:
return -EINVAL;
case GB_CONTROL_BUNDLE_PM_BUSY:
return -EBUSY;
case GB_CONTROL_BUNDLE_PM_NA:
return -ENOMSG;
case GB_CONTROL_BUNDLE_PM_FAIL:
default:
return -EREMOTEIO;
}
}
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
struct gb_control_bundle_pm_request request;
struct gb_control_bundle_pm_response response;
int ret;
request.bundle_id = bundle_id;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
sizeof(request), &response, sizeof(response));
if (ret) {
dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
bundle_id, ret);
return ret;
}
if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
bundle_id, response.status);
return gb_control_bundle_pm_status_map(response.status);
}
return 0;
}
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
struct gb_control_bundle_pm_request request;
struct gb_control_bundle_pm_response response;
int ret;
request.bundle_id = bundle_id;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
sizeof(request), &response, sizeof(response));
if (ret) {
dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
bundle_id, ret);
return ret;
}
if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
dev_err(&control->dev, "failed to resume bundle %u: %d\n",
bundle_id, response.status);
return gb_control_bundle_pm_status_map(response.status);
}
return 0;
}
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
struct gb_control_bundle_pm_request request;
struct gb_control_bundle_pm_response response;
int ret;
request.bundle_id = bundle_id;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
sizeof(request), &response, sizeof(response));
if (ret) {
dev_err(&control->dev,
"failed to send bundle %u deactivate: %d\n", bundle_id,
ret);
return ret;
}
if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
bundle_id, response.status);
return gb_control_bundle_pm_status_map(response.status);
}
return 0;
}
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
struct gb_control_bundle_pm_request request;
struct gb_control_bundle_pm_response response;
int ret;
if (!control->has_bundle_activate)
return 0;
request.bundle_id = bundle_id;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
sizeof(request), &response, sizeof(response));
if (ret) {
dev_err(&control->dev,
"failed to send bundle %u activate: %d\n", bundle_id,
ret);
return ret;
}
if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
dev_err(&control->dev, "failed to activate bundle %u: %d\n",
bundle_id, response.status);
return gb_control_bundle_pm_status_map(response.status);
}
return 0;
}
static int gb_control_interface_pm_status_map(u8 status)
{
switch (status) {
case GB_CONTROL_INTF_PM_BUSY:
return -EBUSY;
case GB_CONTROL_INTF_PM_NA:
return -ENOMSG;
default:
return -EREMOTEIO;
}
}
int gb_control_interface_suspend_prepare(struct gb_control *control)
{
struct gb_control_intf_pm_response response;
int ret;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
&response, sizeof(response));
if (ret) {
dev_err(&control->dev,
"failed to send interface suspend prepare: %d\n", ret);
return ret;
}
if (response.status != GB_CONTROL_INTF_PM_OK) {
dev_err(&control->dev, "interface error while preparing suspend: %d\n",
response.status);
return gb_control_interface_pm_status_map(response.status);
}
return 0;
}
int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
struct gb_control_intf_pm_response response;
int ret;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
0, &response, sizeof(response));
if (ret) {
dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
ret);
return ret;
}
if (response.status != GB_CONTROL_INTF_PM_OK) {
dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
response.status);
return gb_control_interface_pm_status_map(response.status);
}
return 0;
}
int gb_control_interface_hibernate_abort(struct gb_control *control)
{
struct gb_control_intf_pm_response response;
int ret;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
&response, sizeof(response));
if (ret) {
dev_err(&control->dev,
"failed to send interface aborting hibernate: %d\n",
ret);
return ret;
}
if (response.status != GB_CONTROL_INTF_PM_OK) {
dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
response.status);
return gb_control_interface_pm_status_map(response.status);
}
return 0;
}
static ssize_t vendor_string_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_control *control = to_gb_control(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);
static ssize_t product_string_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_control *control = to_gb_control(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);
static struct attribute *control_attrs[] = {
&dev_attr_vendor_string.attr,
&dev_attr_product_string.attr,
NULL,
};
ATTRIBUTE_GROUPS(control);
static void gb_control_release(struct device *dev)
{
struct gb_control *control = to_gb_control(dev);
gb_connection_destroy(control->connection);
kfree(control->vendor_string);
kfree(control->product_string);
kfree(control);
}
struct device_type greybus_control_type = {
.name = "greybus_control",
.release = gb_control_release,
};
struct gb_control *gb_control_create(struct gb_interface *intf)
{
struct gb_connection *connection;
struct gb_control *control;
control = kzalloc(sizeof(*control), GFP_KERNEL);
if (!control)
return ERR_PTR(-ENOMEM);
control->intf = intf;
connection = gb_connection_create_control(intf);
if (IS_ERR(connection)) {
dev_err(&intf->dev,
"failed to create control connection: %ld\n",
PTR_ERR(connection));
kfree(control);
return ERR_CAST(connection);
}
control->connection = connection;
control->dev.parent = &intf->dev;
control->dev.bus = &greybus_bus_type;
control->dev.type = &greybus_control_type;
control->dev.groups = control_groups;
control->dev.dma_mask = intf->dev.dma_mask;
device_initialize(&control->dev);
dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
gb_connection_set_data(control->connection, control);
return control;
}
int gb_control_enable(struct gb_control *control)
{
int ret;
dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
ret = gb_connection_enable_tx(control->connection);
if (ret) {
dev_err(&control->connection->intf->dev,
"failed to enable control connection: %d\n",
ret);
return ret;
}
ret = gb_control_get_version(control);
if (ret)
goto err_disable_connection;
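/* Bundle-version operations are only available from version 0.2 onward. */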
if (control->protocol_major > 0 || control->protocol_minor > 1)
control->has_bundle_version = true;
/* FIXME: use protocol version instead */
if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
control->has_bundle_activate = true;
return 0;
err_disable_connection:
gb_connection_disable(control->connection);
return ret;
}
void gb_control_disable(struct gb_control *control)
{
dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
if (control->intf->disconnected)
gb_connection_disable_forced(control->connection);
else
gb_connection_disable(control->connection);
}
int gb_control_suspend(struct gb_control *control)
{
gb_connection_disable(control->connection);
return 0;
}
int gb_control_resume(struct gb_control *control)
{
int ret;
ret = gb_connection_enable_tx(control->connection);
if (ret) {
dev_err(&control->connection->intf->dev,
"failed to enable control connection: %d\n", ret);
return ret;
}
return 0;
}
int gb_control_add(struct gb_control *control)
{
int ret;
ret = device_add(&control->dev);
if (ret) {
dev_err(&control->dev,
"failed to register control device: %d\n",
ret);
return ret;
}
return 0;
}
void gb_control_del(struct gb_control *control)
{
if (device_is_registered(&control->dev))
device_del(&control->dev);
}
struct gb_control *gb_control_get(struct gb_control *control)
{
get_device(&control->dev);
return control;
}
void gb_control_put(struct gb_control *control)
{
put_device(&control->dev);
}
void gb_control_mode_switch_prepare(struct gb_control *control)
{
gb_connection_mode_switch_prepare(control->connection);
}
void gb_control_mode_switch_complete(struct gb_control *control)
{
gb_connection_mode_switch_complete(control->connection);
}


@ -0,0 +1,65 @@
/*
* Greybus CPort control protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __CONTROL_H
#define __CONTROL_H
struct gb_control {
struct device dev;
struct gb_interface *intf;
struct gb_connection *connection;
u8 protocol_major;
u8 protocol_minor;
bool has_bundle_activate;
bool has_bundle_version;
char *vendor_string;
char *product_string;
};
#define to_gb_control(d) container_of(d, struct gb_control, dev)
struct gb_control *gb_control_create(struct gb_interface *intf);
int gb_control_enable(struct gb_control *control);
void gb_control_disable(struct gb_control *control);
int gb_control_suspend(struct gb_control *control);
int gb_control_resume(struct gb_control *control);
int gb_control_add(struct gb_control *control);
void gb_control_del(struct gb_control *control);
struct gb_control *gb_control_get(struct gb_control *control);
void gb_control_put(struct gb_control *control);
int gb_control_get_bundle_versions(struct gb_control *control);
int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnecting_operation(struct gb_control *control,
u16 cport_id);
int gb_control_mode_switch_operation(struct gb_control *control);
void gb_control_mode_switch_prepare(struct gb_control *control);
void gb_control_mode_switch_complete(struct gb_control *control);
int gb_control_get_manifest_size_operation(struct gb_interface *intf);
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
size_t size);
int gb_control_timesync_enable(struct gb_control *control, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_control_timesync_disable(struct gb_control *control);
int gb_control_timesync_get_last_event(struct gb_control *control,
u64 *frame_time);
int gb_control_timesync_authoritative(struct gb_control *control,
u64 *frame_time);
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
int gb_control_interface_suspend_prepare(struct gb_control *control);
int gb_control_interface_deactivate_prepare(struct gb_control *control);
int gb_control_interface_hibernate_abort(struct gb_control *control);
#endif /* __CONTROL_H */


@ -0,0 +1,361 @@
/*
* Greybus "Core"
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define CREATE_TRACE_POINTS
#include "greybus.h"
#include "greybus_trace.h"
#define GB_BUNDLE_AUTOSUSPEND_MS 3000
/* Allow greybus to be disabled at boot if needed */
static bool nogreybus;
#ifdef MODULE
module_param(nogreybus, bool, 0444);
#else
core_param(nogreybus, nogreybus, bool, 0444);
#endif
int greybus_disabled(void)
{
return nogreybus;
}
EXPORT_SYMBOL_GPL(greybus_disabled);
static bool greybus_match_one_id(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
(id->vendor != bundle->intf->vendor_id))
return false;
if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
(id->product != bundle->intf->product_id))
return false;
if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
(id->class != bundle->class))
return false;
return true;
}
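/*
 * Walk the driver's bundle id table; the table is terminated by an
 * all-zero sentinel entry.
 */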
static const struct greybus_bundle_id *
greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
{
if (id == NULL)
return NULL;
for (; id->vendor || id->product || id->class || id->driver_info;
id++) {
if (greybus_match_one_id(bundle, id))
return id;
}
return NULL;
}
static int greybus_match_device(struct device *dev, struct device_driver *drv)
{
struct greybus_driver *driver = to_greybus_driver(drv);
struct gb_bundle *bundle;
const struct greybus_bundle_id *id;
if (!is_gb_bundle(dev))
return 0;
bundle = to_gb_bundle(dev);
id = greybus_match_id(bundle, driver->id_table);
if (id)
return 1;
/* FIXME - Dynamic ids? */
return 0;
}
static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct gb_host_device *hd;
struct gb_module *module = NULL;
struct gb_interface *intf = NULL;
struct gb_control *control = NULL;
struct gb_bundle *bundle = NULL;
struct gb_svc *svc = NULL;
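/* Resolve the device type and walk up to the owning host device. */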
if (is_gb_host_device(dev)) {
hd = to_gb_host_device(dev);
} else if (is_gb_module(dev)) {
module = to_gb_module(dev);
hd = module->hd;
} else if (is_gb_interface(dev)) {
intf = to_gb_interface(dev);
module = intf->module;
hd = intf->hd;
} else if (is_gb_control(dev)) {
control = to_gb_control(dev);
intf = control->intf;
module = intf->module;
hd = intf->hd;
} else if (is_gb_bundle(dev)) {
bundle = to_gb_bundle(dev);
intf = bundle->intf;
module = intf->module;
hd = intf->hd;
} else if (is_gb_svc(dev)) {
svc = to_gb_svc(dev);
hd = svc->hd;
} else {
dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
return -EINVAL;
}
if (add_uevent_var(env, "BUS=%u", hd->bus_id))
return -ENOMEM;
if (module) {
if (add_uevent_var(env, "MODULE=%u", module->module_id))
return -ENOMEM;
}
if (intf) {
if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
return -ENOMEM;
if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
intf->vendor_id, intf->product_id))
return -ENOMEM;
}
if (bundle) {
// FIXME
// add a uevent that can "load" a bundle type
// This is what we need to bind a driver to so use the info
// in gmod here as well
if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
return -ENOMEM;
if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
return -ENOMEM;
}
return 0;
}
static void greybus_shutdown(struct device *dev)
{
if (is_gb_host_device(dev)) {
struct gb_host_device *hd;
hd = to_gb_host_device(dev);
gb_hd_shutdown(hd);
}
}
struct bus_type greybus_bus_type = {
.name = "greybus",
.match = greybus_match_device,
.uevent = greybus_uevent,
.shutdown = greybus_shutdown,
};
static int greybus_probe(struct device *dev)
{
struct greybus_driver *driver = to_greybus_driver(dev->driver);
struct gb_bundle *bundle = to_gb_bundle(dev);
const struct greybus_bundle_id *id;
int retval;
/* match id */
id = greybus_match_id(bundle, driver->id_table);
if (!id)
return -ENODEV;
retval = pm_runtime_get_sync(&bundle->intf->dev);
if (retval < 0) {
pm_runtime_put_noidle(&bundle->intf->dev);
return retval;
}
retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
if (retval) {
pm_runtime_put(&bundle->intf->dev);
return retval;
}
/*
* Unbound bundle devices are always deactivated. During probe, the
* Runtime PM is set to enabled and active and the usage count is
* incremented. If the driver supports runtime PM, it should call
* pm_runtime_put() in its probe routine and pm_runtime_get_sync()
* in its remove routine.
*/
pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(dev);
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
retval = driver->probe(bundle, id);
if (retval) {
/*
* Catch buggy drivers that fail to destroy their connections.
*/
WARN_ON(!list_empty(&bundle->connections));
gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put(&bundle->intf->dev);
return retval;
}
gb_timesync_schedule_synchronous(bundle->intf);
pm_runtime_put(&bundle->intf->dev);
return 0;
}
static int greybus_remove(struct device *dev)
{
struct greybus_driver *driver = to_greybus_driver(dev->driver);
struct gb_bundle *bundle = to_gb_bundle(dev);
struct gb_connection *connection;
int retval;
retval = pm_runtime_get_sync(dev);
if (retval < 0)
dev_err(dev, "failed to resume bundle: %d\n", retval);
/*
* Disable (non-offloaded) connections early in case the interface is
* already gone to avoid unnecessary operation timeouts during
* driver disconnect. Otherwise, only disable incoming requests.
*/
list_for_each_entry(connection, &bundle->connections, bundle_links) {
if (gb_connection_is_offloaded(connection))
continue;
if (bundle->intf->disconnected)
gb_connection_disable_forced(connection);
else
gb_connection_disable_rx(connection);
}
driver->disconnect(bundle);
/* Catch buggy drivers that fail to destroy their connections. */
WARN_ON(!list_empty(&bundle->connections));
if (!bundle->intf->disconnected)
gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_noidle(dev);
return 0;
}
int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
const char *mod_name)
{
int retval;
if (greybus_disabled())
return -ENODEV;
driver->driver.bus = &greybus_bus_type;
driver->driver.name = driver->name;
driver->driver.probe = greybus_probe;
driver->driver.remove = greybus_remove;
driver->driver.owner = owner;
driver->driver.mod_name = mod_name;
retval = driver_register(&driver->driver);
if (retval)
return retval;
pr_info("registered new driver %s\n", driver->name);
return 0;
}
EXPORT_SYMBOL_GPL(greybus_register_driver);
void greybus_deregister_driver(struct greybus_driver *driver)
{
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(greybus_deregister_driver);
static int __init gb_init(void)
{
int retval;
if (greybus_disabled())
return -ENODEV;
BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);
gb_debugfs_init();
retval = bus_register(&greybus_bus_type);
if (retval) {
pr_err("bus_register failed (%d)\n", retval);
goto error_bus;
}
retval = gb_hd_init();
if (retval) {
pr_err("gb_hd_init failed (%d)\n", retval);
goto error_hd;
}
retval = gb_operation_init();
if (retval) {
pr_err("gb_operation_init failed (%d)\n", retval);
goto error_operation;
}
retval = gb_timesync_init();
if (retval) {
pr_err("gb_timesync_init failed\n");
goto error_timesync;
}
return 0; /* Success */
error_timesync:
gb_operation_exit();
error_operation:
gb_hd_exit();
error_hd:
bus_unregister(&greybus_bus_type);
error_bus:
gb_debugfs_cleanup();
return retval;
}
module_init(gb_init);
static void __exit gb_exit(void)
{
gb_timesync_exit();
gb_operation_exit();
gb_hd_exit();
bus_unregister(&greybus_bus_type);
gb_debugfs_cleanup();
tracepoint_synchronize_unregister();
}
module_exit(gb_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");


@ -0,0 +1,31 @@
/*
* Greybus debugfs code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
#include "greybus.h"
static struct dentry *gb_debug_root;
void __init gb_debugfs_init(void)
{
gb_debug_root = debugfs_create_dir("greybus", NULL);
}
void gb_debugfs_cleanup(void)
{
debugfs_remove_recursive(gb_debug_root);
gb_debug_root = NULL;
}
struct dentry *gb_debugfs_get(void)
{
return gb_debug_root;
}
EXPORT_SYMBOL_GPL(gb_debugfs_get);

File diff suppressed because it is too large


@ -0,0 +1,42 @@
/*
* Greybus Firmware Management Header
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __FIRMWARE_H
#define __FIRMWARE_H
#include "greybus.h"
#define FW_NAME_PREFIX "gmp_"
/*
* Length of the string in format: "FW_NAME_PREFIX""%08x_%08x_%08x_%08x_%s.tftf"
* (3 + 1 + 4 * (8 + 1) + 10 + 1 + 4 + 1)
*/
#define FW_NAME_SIZE 56
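/*
 * Breakdown: "gmp" (3) + '_' (1) + four 8-digit hex fields each followed
 * by '_' (4 * (8 + 1)) + firmware tag (10) + '.' (1) + "tftf" (4) +
 * terminating NUL (1) = 56.
 */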
/* Firmware Management Protocol specific functions */
int fw_mgmt_init(void);
void fw_mgmt_exit(void);
struct gb_connection *to_fw_mgmt_connection(struct device *dev);
int gb_fw_mgmt_request_handler(struct gb_operation *op);
int gb_fw_mgmt_connection_init(struct gb_connection *connection);
void gb_fw_mgmt_connection_exit(struct gb_connection *connection);
/* Firmware Download Protocol specific functions */
int gb_fw_download_request_handler(struct gb_operation *op);
int gb_fw_download_connection_init(struct gb_connection *connection);
void gb_fw_download_connection_exit(struct gb_connection *connection);
/* CAP Protocol specific functions */
int cap_init(void);
void cap_exit(void);
int gb_cap_connection_init(struct gb_connection *connection);
void gb_cap_connection_exit(struct gb_connection *connection);
#endif /* __FIRMWARE_H */


@ -0,0 +1,312 @@
/*
* Greybus Firmware Core Bundle Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/firmware.h>
#include "firmware.h"
#include "greybus.h"
#include "spilib.h"
struct gb_fw_core {
struct gb_connection *download_connection;
struct gb_connection *mgmt_connection;
struct gb_connection *spi_connection;
struct gb_connection *cap_connection;
};
static struct spilib_ops *spilib_ops;
struct gb_connection *to_fw_mgmt_connection(struct device *dev)
{
struct gb_fw_core *fw_core = dev_get_drvdata(dev);
return fw_core->mgmt_connection;
}
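/* The SPI connection is optional; a NULL connection is silently accepted. */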
static int gb_fw_spi_connection_init(struct gb_connection *connection)
{
int ret;
if (!connection)
return 0;
ret = gb_connection_enable(connection);
if (ret)
return ret;
ret = gb_spilib_master_init(connection, &connection->bundle->dev,
spilib_ops);
if (ret) {
gb_connection_disable(connection);
return ret;
}
return 0;
}
static void gb_fw_spi_connection_exit(struct gb_connection *connection)
{
if (!connection)
return;
gb_spilib_master_exit(connection);
gb_connection_disable(connection);
}
static int gb_fw_core_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct greybus_descriptor_cport *cport_desc;
struct gb_connection *connection;
struct gb_fw_core *fw_core;
int ret, i;
u16 cport_id;
u8 protocol_id;
fw_core = kzalloc(sizeof(*fw_core), GFP_KERNEL);
if (!fw_core)
return -ENOMEM;
/* Parse CPorts and create connections */
for (i = 0; i < bundle->num_cports; i++) {
cport_desc = &bundle->cport_desc[i];
cport_id = le16_to_cpu(cport_desc->id);
protocol_id = cport_desc->protocol_id;
switch (protocol_id) {
case GREYBUS_PROTOCOL_FW_MANAGEMENT:
/* Disallow multiple Firmware Management CPorts */
if (fw_core->mgmt_connection) {
dev_err(&bundle->dev,
"multiple management CPorts found\n");
ret = -EINVAL;
goto err_destroy_connections;
}
connection = gb_connection_create(bundle, cport_id,
gb_fw_mgmt_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
dev_err(&bundle->dev,
"failed to create management connection (%d)\n",
ret);
goto err_destroy_connections;
}
fw_core->mgmt_connection = connection;
break;
case GREYBUS_PROTOCOL_FW_DOWNLOAD:
/* Disallow multiple Firmware Download CPorts */
if (fw_core->download_connection) {
dev_err(&bundle->dev,
"multiple download CPorts found\n");
ret = -EINVAL;
goto err_destroy_connections;
}
connection = gb_connection_create(bundle, cport_id,
gb_fw_download_request_handler);
if (IS_ERR(connection)) {
dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
PTR_ERR(connection));
} else {
fw_core->download_connection = connection;
}
break;
case GREYBUS_PROTOCOL_SPI:
/* Disallow multiple SPI CPorts */
if (fw_core->spi_connection) {
dev_err(&bundle->dev,
"multiple SPI CPorts found\n");
ret = -EINVAL;
goto err_destroy_connections;
}
connection = gb_connection_create(bundle, cport_id,
NULL);
if (IS_ERR(connection)) {
dev_err(&bundle->dev, "failed to create SPI connection (%ld)\n",
PTR_ERR(connection));
} else {
fw_core->spi_connection = connection;
}
break;
case GREYBUS_PROTOCOL_AUTHENTICATION:
/* Disallow multiple CAP CPorts */
if (fw_core->cap_connection) {
dev_err(&bundle->dev, "multiple Authentication CPorts found\n");
ret = -EINVAL;
goto err_destroy_connections;
}
connection = gb_connection_create(bundle, cport_id,
NULL);
if (IS_ERR(connection)) {
dev_err(&bundle->dev, "failed to create Authentication connection (%ld)\n",
PTR_ERR(connection));
} else {
fw_core->cap_connection = connection;
}
break;
default:
dev_err(&bundle->dev, "invalid protocol id (0x%02x)\n",
protocol_id);
ret = -EINVAL;
goto err_destroy_connections;
}
}
/* Firmware Management connection is mandatory */
if (!fw_core->mgmt_connection) {
dev_err(&bundle->dev, "missing management connection\n");
ret = -ENODEV;
goto err_destroy_connections;
}
ret = gb_fw_download_connection_init(fw_core->download_connection);
if (ret) {
/* We may still be able to work with the Interface */
dev_err(&bundle->dev, "failed to initialize firmware download connection, disable it (%d)\n",
ret);
gb_connection_destroy(fw_core->download_connection);
fw_core->download_connection = NULL;
}
ret = gb_fw_spi_connection_init(fw_core->spi_connection);
if (ret) {
/* We may still be able to work with the Interface */
dev_err(&bundle->dev, "failed to initialize SPI connection, disable it (%d)\n",
ret);
gb_connection_destroy(fw_core->spi_connection);
fw_core->spi_connection = NULL;
}
ret = gb_cap_connection_init(fw_core->cap_connection);
if (ret) {
/* We may still be able to work with the Interface */
dev_err(&bundle->dev, "failed to initialize CAP connection, disable it (%d)\n",
ret);
gb_connection_destroy(fw_core->cap_connection);
fw_core->cap_connection = NULL;
}
ret = gb_fw_mgmt_connection_init(fw_core->mgmt_connection);
if (ret) {
/* We may still be able to work with the Interface */
dev_err(&bundle->dev, "failed to initialize firmware management connection, disable it (%d)\n",
ret);
goto err_exit_connections;
}
greybus_set_drvdata(bundle, fw_core);
/* FIXME: Remove this after S2 Loader gets runtime PM support */
if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM))
gb_pm_runtime_put_autosuspend(bundle);
return 0;
err_exit_connections:
gb_cap_connection_exit(fw_core->cap_connection);
gb_fw_spi_connection_exit(fw_core->spi_connection);
gb_fw_download_connection_exit(fw_core->download_connection);
err_destroy_connections:
gb_connection_destroy(fw_core->mgmt_connection);
gb_connection_destroy(fw_core->cap_connection);
gb_connection_destroy(fw_core->spi_connection);
gb_connection_destroy(fw_core->download_connection);
kfree(fw_core);
return ret;
}
static void gb_fw_core_disconnect(struct gb_bundle *bundle)
{
struct gb_fw_core *fw_core = greybus_get_drvdata(bundle);
int ret;
/* FIXME: Remove this after S2 Loader gets runtime PM support */
if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM)) {
ret = gb_pm_runtime_get_sync(bundle);
if (ret)
gb_pm_runtime_get_noresume(bundle);
}
gb_fw_mgmt_connection_exit(fw_core->mgmt_connection);
gb_cap_connection_exit(fw_core->cap_connection);
gb_fw_spi_connection_exit(fw_core->spi_connection);
gb_fw_download_connection_exit(fw_core->download_connection);
gb_connection_destroy(fw_core->mgmt_connection);
gb_connection_destroy(fw_core->cap_connection);
gb_connection_destroy(fw_core->spi_connection);
gb_connection_destroy(fw_core->download_connection);
kfree(fw_core);
}
static const struct greybus_bundle_id gb_fw_core_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
{ }
};
static struct greybus_driver gb_fw_core_driver = {
.name = "gb-firmware",
.probe = gb_fw_core_probe,
.disconnect = gb_fw_core_disconnect,
.id_table = gb_fw_core_id_table,
};
static int fw_core_init(void)
{
int ret;
ret = fw_mgmt_init();
if (ret) {
pr_err("Failed to initialize fw-mgmt core (%d)\n", ret);
return ret;
}
ret = cap_init();
if (ret) {
pr_err("Failed to initialize component authentication core (%d)\n",
ret);
goto fw_mgmt_exit;
}
ret = greybus_register(&gb_fw_core_driver);
if (ret)
goto cap_exit;
return 0;
cap_exit:
cap_exit();
fw_mgmt_exit:
fw_mgmt_exit();
return ret;
}
module_init(fw_core_init);
static void __exit fw_core_exit(void)
{
greybus_deregister(&gb_fw_core_driver);
cap_exit();
fw_mgmt_exit();
}
module_exit(fw_core_exit);
MODULE_ALIAS("greybus:firmware");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Greybus Firmware Bundle Driver");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,465 @@
/*
* Greybus Firmware Download Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "firmware.h"
#include "greybus.h"
/* Estimated minimum buffer size; the actual size can be smaller than this */
#define MIN_FETCH_SIZE 512
/* Timeout, in jiffies, within which fetch or release firmware must be called */
#define NEXT_REQ_TIMEOUT_J msecs_to_jiffies(1000)
struct fw_request {
u8 firmware_id;
bool disabled;
bool timedout;
char name[FW_NAME_SIZE];
const struct firmware *fw;
struct list_head node;
struct delayed_work dwork;
/* Timeout, in jiffies, within which the firmware download must finish */
unsigned long release_timeout_j;
struct kref kref;
struct fw_download *fw_download;
};
struct fw_download {
struct device *parent;
struct gb_connection *connection;
struct list_head fw_requests;
struct ida id_map;
struct mutex mutex;
};
static void fw_req_release(struct kref *kref)
{
struct fw_request *fw_req = container_of(kref, struct fw_request, kref);
dev_dbg(fw_req->fw_download->parent, "firmware %s released\n",
fw_req->name);
release_firmware(fw_req->fw);
/*
* The request timed out and the module may send a fetch-fw or
* release-fw request later. Let's block the id we allocated for this
* request, so that the AP doesn't refer to a later fw-request (with
* same firmware_id) for the old timedout fw-request.
*
* NOTE:
*
* This also means that after 255 timeouts we will fail to service new
* firmware downloads. But what else can we do in that case anyway? Let's
* just hope that it never happens.
*/
if (!fw_req->timedout)
ida_simple_remove(&fw_req->fw_download->id_map,
fw_req->firmware_id);
kfree(fw_req);
}
/*
* Incoming requests are serialized for a connection, and the only race possible
* is between the timeout handler freeing a fw_request and an incoming
* request still using it.
*
* The operations on the fw-request list are protected by the mutex and
* get_fw_req() increments the reference count before returning a fw_req pointer
* to the users.
*
* free_firmware() also takes the mutex while removing an entry from the list,
* it guarantees that every user of fw_req has taken a kref-reference by now and
* we wouldn't have any new users.
*
* Once the last user drops the reference, the fw_req structure is freed.
*/
static void put_fw_req(struct fw_request *fw_req)
{
kref_put(&fw_req->kref, fw_req_release);
}
/* Caller must call put_fw_req() after using struct fw_request */
static struct fw_request *get_fw_req(struct fw_download *fw_download,
u8 firmware_id)
{
struct fw_request *fw_req;
mutex_lock(&fw_download->mutex);
list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
if (fw_req->firmware_id == firmware_id) {
kref_get(&fw_req->kref);
goto unlock;
}
}
fw_req = NULL;
unlock:
mutex_unlock(&fw_download->mutex);
return fw_req;
}
static void free_firmware(struct fw_download *fw_download,
struct fw_request *fw_req)
{
/* Already disabled from timeout handlers */
if (fw_req->disabled)
return;
mutex_lock(&fw_download->mutex);
list_del(&fw_req->node);
mutex_unlock(&fw_download->mutex);
fw_req->disabled = true;
put_fw_req(fw_req);
}
static void fw_request_timedout(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct fw_request *fw_req = container_of(dwork, struct fw_request, dwork);
struct fw_download *fw_download = fw_req->fw_download;
dev_err(fw_download->parent,
"Timed out waiting for fetch / release firmware requests: %u\n",
fw_req->firmware_id);
fw_req->timedout = true;
free_firmware(fw_download, fw_req);
}
static int exceeds_release_timeout(struct fw_request *fw_req)
{
struct fw_download *fw_download = fw_req->fw_download;
if (time_before(jiffies, fw_req->release_timeout_j))
return 0;
dev_err(fw_download->parent,
"Firmware download didn't finish in time, abort: %d\n",
fw_req->firmware_id);
fw_req->timedout = true;
free_firmware(fw_download, fw_req);
return -ETIMEDOUT;
}
/* Locate the firmware blob on disk and track it as a new fw_request */
static struct fw_request *find_firmware(struct fw_download *fw_download,
const char *tag)
{
struct gb_interface *intf = fw_download->connection->bundle->intf;
struct fw_request *fw_req;
int ret, req_count;
fw_req = kzalloc(sizeof(*fw_req), GFP_KERNEL);
if (!fw_req)
return ERR_PTR(-ENOMEM);
/* Allocate ids from 1 to 255 (u8-max); 0 is an invalid id */
ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_download->parent,
"failed to allocate firmware id (%d)\n", ret);
goto err_free_req;
}
fw_req->firmware_id = ret;
snprintf(fw_req->name, sizeof(fw_req->name),
FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
intf->vendor_id, intf->product_id, tag);
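/*
* For illustration (all ids and the tag are hypothetical): an interface
* with ddbl1 manufacturer/product ids 0x00000126/0x00001000 and
* vendor/product ids 0x0000abcd/0x00001234, with tag "s3f", yields:
*
*	<FW_NAME_PREFIX>00000126_00001000_0000abcd_00001234_s3f.tftf
*
* which request_firmware() below looks up in the usual firmware search
* paths.
*/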
dev_info(fw_download->parent, "Requested firmware package '%s'\n",
fw_req->name);
ret = request_firmware(&fw_req->fw, fw_req->name, fw_download->parent);
if (ret) {
dev_err(fw_download->parent,
"firmware request failed for %s (%d)\n", fw_req->name,
ret);
goto err_free_id;
}
fw_req->fw_download = fw_download;
kref_init(&fw_req->kref);
mutex_lock(&fw_download->mutex);
list_add(&fw_req->node, &fw_download->fw_requests);
mutex_unlock(&fw_download->mutex);
/* Timeout, in jiffies, within which firmware should get loaded */
req_count = DIV_ROUND_UP(fw_req->fw->size, MIN_FETCH_SIZE);
fw_req->release_timeout_j = jiffies + req_count * NEXT_REQ_TIMEOUT_J;
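/*
* Worked example with illustrative values only (MIN_FETCH_SIZE and
* NEXT_REQ_TIMEOUT_J are defined earlier in this file): if
* MIN_FETCH_SIZE were 512 bytes, a 64 KiB blob would give a req_count
* of 128, i.e. an overall budget of 128 request-timeout periods for the
* whole download. Independently, the delayed work armed below enforces
* a per-request timeout: it is re-armed after every fetch request and
* fires only if the module stays quiet for a full NEXT_REQ_TIMEOUT_J.
*/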
INIT_DELAYED_WORK(&fw_req->dwork, fw_request_timedout);
schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
return fw_req;
err_free_id:
ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
err_free_req:
kfree(fw_req);
return ERR_PTR(ret);
}
static int fw_download_find_firmware(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct fw_download *fw_download = gb_connection_get_data(connection);
struct gb_fw_download_find_firmware_request *request;
struct gb_fw_download_find_firmware_response *response;
struct fw_request *fw_req;
const char *tag;
if (op->request->payload_size != sizeof(*request)) {
dev_err(fw_download->parent,
"illegal size of find firmware request (%zu != %zu)\n",
op->request->payload_size, sizeof(*request));
return -EINVAL;
}
request = op->request->payload;
tag = (const char *)request->firmware_tag;
/* firmware_tag must be null-terminated */
if (strnlen(tag, GB_FIRMWARE_TAG_MAX_SIZE) == GB_FIRMWARE_TAG_MAX_SIZE) {
dev_err(fw_download->parent,
"firmware-tag is not null-terminated\n");
return -EINVAL;
}
fw_req = find_firmware(fw_download, tag);
if (IS_ERR(fw_req))
return PTR_ERR(fw_req);
if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
dev_err(fw_download->parent, "error allocating response\n");
free_firmware(fw_download, fw_req);
return -ENOMEM;
}
response = op->response->payload;
response->firmware_id = fw_req->firmware_id;
response->size = cpu_to_le32(fw_req->fw->size);
dev_dbg(fw_download->parent,
"firmware size is %zu bytes\n", fw_req->fw->size);
return 0;
}
static int fw_download_fetch_firmware(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct fw_download *fw_download = gb_connection_get_data(connection);
struct gb_fw_download_fetch_firmware_request *request;
struct gb_fw_download_fetch_firmware_response *response;
struct fw_request *fw_req;
const struct firmware *fw;
unsigned int offset, size;
u8 firmware_id;
int ret = 0;
if (op->request->payload_size != sizeof(*request)) {
dev_err(fw_download->parent,
"Illegal size of fetch firmware request (%zu %zu)\n",
op->request->payload_size, sizeof(*request));
return -EINVAL;
}
request = op->request->payload;
offset = le32_to_cpu(request->offset);
size = le32_to_cpu(request->size);
firmware_id = request->firmware_id;
fw_req = get_fw_req(fw_download, firmware_id);
if (!fw_req) {
dev_err(fw_download->parent,
"firmware not available for id: %02u\n", firmware_id);
return -EINVAL;
}
/* Make sure work handler isn't running in parallel */
cancel_delayed_work_sync(&fw_req->dwork);
/* Did we time out before reaching here? */
if (fw_req->disabled) {
ret = -ETIMEDOUT;
goto put_fw;
}
/*
* Firmware download must finish within a limited time interval. If it
* doesn't, then we might have a buggy Module on the other side. Abort
* download.
*/
ret = exceeds_release_timeout(fw_req);
if (ret)
goto put_fw;
fw = fw_req->fw;
if (offset >= fw->size || size > fw->size - offset) {
dev_err(fw_download->parent,
"bad fetch firmware request (offs = %u, size = %u)\n",
offset, size);
ret = -EINVAL;
goto put_fw;
}
if (!gb_operation_response_alloc(op, sizeof(*response) + size,
GFP_KERNEL)) {
dev_err(fw_download->parent,
"error allocating fetch firmware response\n");
ret = -ENOMEM;
goto put_fw;
}
response = op->response->payload;
memcpy(response->data, fw->data + offset, size);
dev_dbg(fw_download->parent,
"responding with firmware (offs = %u, size = %u)\n", offset,
size);
/* Refresh timeout */
schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
put_fw:
put_fw_req(fw_req);
return ret;
}
static int fw_download_release_firmware(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct fw_download *fw_download = gb_connection_get_data(connection);
struct gb_fw_download_release_firmware_request *request;
struct fw_request *fw_req;
u8 firmware_id;
if (op->request->payload_size != sizeof(*request)) {
dev_err(fw_download->parent,
"Illegal size of release firmware request (%zu %zu)\n",
op->request->payload_size, sizeof(*request));
return -EINVAL;
}
request = op->request->payload;
firmware_id = request->firmware_id;
fw_req = get_fw_req(fw_download, firmware_id);
if (!fw_req) {
dev_err(fw_download->parent,
"firmware not available for id: %02u\n", firmware_id);
return -EINVAL;
}
cancel_delayed_work_sync(&fw_req->dwork);
free_firmware(fw_download, fw_req);
put_fw_req(fw_req);
dev_dbg(fw_download->parent, "release firmware\n");
return 0;
}
int gb_fw_download_request_handler(struct gb_operation *op)
{
u8 type = op->type;
switch (type) {
case GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE:
return fw_download_find_firmware(op);
case GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE:
return fw_download_fetch_firmware(op);
case GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE:
return fw_download_release_firmware(op);
default:
dev_err(&op->connection->bundle->dev,
"unsupported request: %u\n", type);
return -EINVAL;
}
}
int gb_fw_download_connection_init(struct gb_connection *connection)
{
struct fw_download *fw_download;
int ret;
if (!connection)
return 0;
fw_download = kzalloc(sizeof(*fw_download), GFP_KERNEL);
if (!fw_download)
return -ENOMEM;
fw_download->parent = &connection->bundle->dev;
INIT_LIST_HEAD(&fw_download->fw_requests);
ida_init(&fw_download->id_map);
gb_connection_set_data(connection, fw_download);
fw_download->connection = connection;
mutex_init(&fw_download->mutex);
ret = gb_connection_enable(connection);
if (ret)
goto err_destroy_id_map;
return 0;
err_destroy_id_map:
ida_destroy(&fw_download->id_map);
kfree(fw_download);
return ret;
}
void gb_fw_download_connection_exit(struct gb_connection *connection)
{
struct fw_download *fw_download;
struct fw_request *fw_req, *tmp;
if (!connection)
return;
fw_download = gb_connection_get_data(connection);
gb_connection_disable(fw_download->connection);
/*
* Make sure we have a reference to the pending requests before they can
* be freed by the timeout handler.
*/
mutex_lock(&fw_download->mutex);
list_for_each_entry(fw_req, &fw_download->fw_requests, node)
kref_get(&fw_req->kref);
mutex_unlock(&fw_download->mutex);
/* Release pending firmware packages */
list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
cancel_delayed_work_sync(&fw_req->dwork);
free_firmware(fw_download, fw_req);
put_fw_req(fw_req);
}
ida_destroy(&fw_download->id_map);
kfree(fw_download);
}


@ -0,0 +1,721 @@
/*
* Greybus Firmware Management Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include "firmware.h"
#include "greybus_firmware.h"
#include "greybus.h"
#define FW_MGMT_TIMEOUT_MS 1000
struct fw_mgmt {
struct device *parent;
struct gb_connection *connection;
struct kref kref;
struct list_head node;
/* Common id-map for interface and backend firmware requests */
struct ida id_map;
struct mutex mutex;
struct completion completion;
struct cdev cdev;
struct device *class_device;
dev_t dev_num;
unsigned int timeout_jiffies;
bool disabled; /* connection getting disabled */
/* Interface Firmware specific fields */
bool mode_switch_started;
bool intf_fw_loaded;
u8 intf_fw_request_id;
u8 intf_fw_status;
u16 intf_fw_major;
u16 intf_fw_minor;
/* Backend Firmware specific fields */
u8 backend_fw_request_id;
u8 backend_fw_status;
};
/*
* Number of minor devices this driver supports.
* There will be exactly one required per Interface.
*/
#define NUM_MINORS U8_MAX
static struct class *fw_mgmt_class;
static dev_t fw_mgmt_dev_num;
static DEFINE_IDA(fw_mgmt_minors_map);
static LIST_HEAD(fw_mgmt_list);
static DEFINE_MUTEX(list_mutex);
static void fw_mgmt_kref_release(struct kref *kref)
{
struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);
ida_destroy(&fw_mgmt->id_map);
kfree(fw_mgmt);
}
/*
* All users of fw_mgmt take a reference (while holding list_mutex) before
* they get a pointer to play with. The structure is freed only after the
* last user has dropped its reference.
*/
static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
{
kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
}
/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
{
struct fw_mgmt *fw_mgmt;
mutex_lock(&list_mutex);
list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
if (&fw_mgmt->cdev == cdev) {
kref_get(&fw_mgmt->kref);
goto unlock;
}
}
fw_mgmt = NULL;
unlock:
mutex_unlock(&list_mutex);
return fw_mgmt;
}
static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
struct fw_mgmt_ioc_get_intf_version *fw_info)
{
struct gb_connection *connection = fw_mgmt->connection;
struct gb_fw_mgmt_interface_fw_version_response response;
int ret;
ret = gb_operation_sync(connection,
GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
&response, sizeof(response));
if (ret) {
dev_err(fw_mgmt->parent,
"failed to get interface firmware version (%d)\n", ret);
return ret;
}
fw_info->major = le16_to_cpu(response.major);
fw_info->minor = le16_to_cpu(response.minor);
strncpy(fw_info->firmware_tag, response.firmware_tag,
GB_FIRMWARE_TAG_MAX_SIZE);
/*
* The firmware-tag should be NULL terminated; otherwise log an error but
* don't fail.
*/
if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
dev_err(fw_mgmt->parent,
"fw-version: firmware-tag is not NULL terminated\n");
fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
}
return 0;
}
static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
u8 load_method, const char *tag)
{
struct gb_fw_mgmt_load_and_validate_fw_request request;
int ret;
if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
load_method != GB_FW_LOAD_METHOD_INTERNAL) {
dev_err(fw_mgmt->parent,
"invalid load-method (%d)\n", load_method);
return -EINVAL;
}
request.load_method = load_method;
strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);
/*
* The firmware-tag should be NULL terminated; otherwise return an error.
*/
if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
return -EINVAL;
}
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
ret);
return ret;
}
fw_mgmt->intf_fw_request_id = ret;
fw_mgmt->intf_fw_loaded = false;
request.request_id = ret;
ret = gb_operation_sync(fw_mgmt->connection,
GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
sizeof(request), NULL, 0);
if (ret) {
ida_simple_remove(&fw_mgmt->id_map,
fw_mgmt->intf_fw_request_id);
fw_mgmt->intf_fw_request_id = 0;
dev_err(fw_mgmt->parent,
"load and validate firmware request failed (%d)\n",
ret);
return ret;
}
return 0;
}
static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
struct gb_fw_mgmt_loaded_fw_request *request;
/* No pending load-and-validate request? */
if (!fw_mgmt->intf_fw_request_id) {
dev_err(fw_mgmt->parent,
"unexpected firmware loaded request received\n");
return -ENODEV;
}
if (op->request->payload_size != sizeof(*request)) {
dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
op->request->payload_size, sizeof(*request));
return -EINVAL;
}
request = op->request->payload;
/* Invalid request id? */
if (request->request_id != fw_mgmt->intf_fw_request_id) {
dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
fw_mgmt->intf_fw_request_id, request->request_id);
return -ENODEV;
}
ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
fw_mgmt->intf_fw_request_id = 0;
fw_mgmt->intf_fw_status = request->status;
fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);
if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
dev_err(fw_mgmt->parent,
"failed to load interface firmware, status:%02x\n",
fw_mgmt->intf_fw_status);
else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
dev_err(fw_mgmt->parent,
"failed to validate interface firmware, status:%02x\n",
fw_mgmt->intf_fw_status);
else
fw_mgmt->intf_fw_loaded = true;
complete(&fw_mgmt->completion);
return 0;
}
static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
struct fw_mgmt_ioc_get_backend_version *fw_info)
{
struct gb_connection *connection = fw_mgmt->connection;
struct gb_fw_mgmt_backend_fw_version_request request;
struct gb_fw_mgmt_backend_fw_version_response response;
int ret;
strncpy(request.firmware_tag, fw_info->firmware_tag,
GB_FIRMWARE_TAG_MAX_SIZE);
/*
* The firmware-tag should be NULL terminated; otherwise return an error.
*/
if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
return -EINVAL;
}
ret = gb_operation_sync(connection,
GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
sizeof(request), &response, sizeof(response));
if (ret) {
dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
fw_info->firmware_tag, ret);
return ret;
}
fw_info->status = response.status;
/* Reset version as that should be non-zero only for success case */
fw_info->major = 0;
fw_info->minor = 0;
switch (fw_info->status) {
case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
fw_info->major = le16_to_cpu(response.major);
fw_info->minor = le16_to_cpu(response.minor);
break;
case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
case GB_FW_BACKEND_VERSION_STATUS_RETRY:
break;
case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
dev_err(fw_mgmt->parent,
"Firmware with tag %s is not supported by Interface\n",
fw_info->firmware_tag);
break;
default:
dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
fw_info->status);
}
return 0;
}
static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
char *tag)
{
struct gb_fw_mgmt_backend_fw_update_request request;
int ret;
strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);
/*
* The firmware-tag should be NULL terminated; otherwise return an error.
*/
if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
return -EINVAL;
}
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
ret);
return ret;
}
fw_mgmt->backend_fw_request_id = ret;
request.request_id = ret;
ret = gb_operation_sync(fw_mgmt->connection,
GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
sizeof(request), NULL, 0);
if (ret) {
ida_simple_remove(&fw_mgmt->id_map,
fw_mgmt->backend_fw_request_id);
fw_mgmt->backend_fw_request_id = 0;
dev_err(fw_mgmt->parent,
"backend %s firmware update request failed (%d)\n", tag,
ret);
return ret;
}
return 0;
}
static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
struct gb_fw_mgmt_backend_fw_updated_request *request;
/* No pending backend firmware update request? */
if (!fw_mgmt->backend_fw_request_id) {
dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
return -ENODEV;
}
if (op->request->payload_size != sizeof(*request)) {
dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
op->request->payload_size, sizeof(*request));
return -EINVAL;
}
request = op->request->payload;
/* Invalid request id? */
if (request->request_id != fw_mgmt->backend_fw_request_id) {
dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
fw_mgmt->backend_fw_request_id, request->request_id);
return -ENODEV;
}
ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
fw_mgmt->backend_fw_request_id = 0;
fw_mgmt->backend_fw_status = request->status;
if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
(fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
dev_err(fw_mgmt->parent,
"failed to load backend firmware: %02x\n",
fw_mgmt->backend_fw_status);
complete(&fw_mgmt->completion);
return 0;
}
/* Char device fops */
static int fw_mgmt_open(struct inode *inode, struct file *file)
{
struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);
/* The fw_mgmt structure can't be freed until the file descriptor is closed */
if (fw_mgmt) {
file->private_data = fw_mgmt;
return 0;
}
return -ENODEV;
}
static int fw_mgmt_release(struct inode *inode, struct file *file)
{
struct fw_mgmt *fw_mgmt = file->private_data;
put_fw_mgmt(fw_mgmt);
return 0;
}
static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
void __user *buf)
{
struct fw_mgmt_ioc_get_intf_version intf_fw_info;
struct fw_mgmt_ioc_get_backend_version backend_fw_info;
struct fw_mgmt_ioc_intf_load_and_validate intf_load;
struct fw_mgmt_ioc_backend_fw_update backend_update;
unsigned int timeout;
int ret;
/* Reject any operations after mode-switch has started */
if (fw_mgmt->mode_switch_started)
return -EBUSY;
switch (cmd) {
case FW_MGMT_IOC_GET_INTF_FW:
ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
&intf_fw_info);
if (ret)
return ret;
if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
return -EFAULT;
return 0;
case FW_MGMT_IOC_GET_BACKEND_FW:
if (copy_from_user(&backend_fw_info, buf,
sizeof(backend_fw_info)))
return -EFAULT;
ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
&backend_fw_info);
if (ret)
return ret;
if (copy_to_user(buf, &backend_fw_info,
sizeof(backend_fw_info)))
return -EFAULT;
return 0;
case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
return -EFAULT;
ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
intf_load.load_method, intf_load.firmware_tag);
if (ret)
return ret;
if (!wait_for_completion_timeout(&fw_mgmt->completion,
fw_mgmt->timeout_jiffies)) {
dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
return -ETIMEDOUT;
}
intf_load.status = fw_mgmt->intf_fw_status;
intf_load.major = fw_mgmt->intf_fw_major;
intf_load.minor = fw_mgmt->intf_fw_minor;
if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
return -EFAULT;
return 0;
case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
if (copy_from_user(&backend_update, buf,
sizeof(backend_update)))
return -EFAULT;
ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
backend_update.firmware_tag);
if (ret)
return ret;
if (!wait_for_completion_timeout(&fw_mgmt->completion,
fw_mgmt->timeout_jiffies)) {
dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
return -ETIMEDOUT;
}
backend_update.status = fw_mgmt->backend_fw_status;
if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
return -EFAULT;
return 0;
case FW_MGMT_IOC_SET_TIMEOUT_MS:
if (get_user(timeout, (unsigned int __user *)buf))
return -EFAULT;
if (!timeout) {
dev_err(fw_mgmt->parent, "timeout can't be zero\n");
return -EINVAL;
}
fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);
return 0;
case FW_MGMT_IOC_MODE_SWITCH:
if (!fw_mgmt->intf_fw_loaded) {
dev_err(fw_mgmt->parent,
"Firmware not loaded for mode-switch\n");
return -EPERM;
}
/*
* Disallow new ioctls as the fw-core bundle driver is going to
* get disconnected soon and the character device will get
* removed.
*/
fw_mgmt->mode_switch_started = true;
ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
if (ret) {
dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
ret);
fw_mgmt->mode_switch_started = false;
return ret;
}
return 0;
default:
return -ENOTTY;
}
}
static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fw_mgmt *fw_mgmt = file->private_data;
struct gb_bundle *bundle = fw_mgmt->connection->bundle;
int ret = -ENODEV;
/*
* Serialize ioctls.
*
* We don't want the user to run multiple operations in parallel, for
* example updating the Interface firmware for the same Interface twice
* at once. There is no need to parallelize for speed, and serializing
* keeps the code simple.
*
* The mutex also protects ->disabled, which is used to check if the
* connection is getting disconnected, so that we don't start any new
* operations.
*/
mutex_lock(&fw_mgmt->mutex);
if (!fw_mgmt->disabled) {
ret = gb_pm_runtime_get_sync(bundle);
if (!ret) {
ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
gb_pm_runtime_put_autosuspend(bundle);
}
}
mutex_unlock(&fw_mgmt->mutex);
return ret;
}
static const struct file_operations fw_mgmt_fops = {
.owner = THIS_MODULE,
.open = fw_mgmt_open,
.release = fw_mgmt_release,
.unlocked_ioctl = fw_mgmt_ioctl_unlocked,
};
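/*
* Illustrative userspace sketch (not part of the driver): loading and
* validating an interface firmware through the char device created
* below. The ioctl numbers and structure layouts come from
* greybus_firmware.h; the device node name and the "s3f" tag are
* hypothetical, and #includes / error handling are elided.
*
*	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
*	struct fw_mgmt_ioc_intf_load_and_validate load = {
*		.load_method = GB_FW_LOAD_METHOD_UNIPRO,
*	};
*
*	snprintf(load.firmware_tag, sizeof(load.firmware_tag), "s3f");
*	if (ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load) == 0)
*		printf("status %u, version %u.%u\n", load.status,
*		       load.major, load.minor);
*	close(fd);
*/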
int gb_fw_mgmt_request_handler(struct gb_operation *op)
{
u8 type = op->type;
switch (type) {
case GB_FW_MGMT_TYPE_LOADED_FW:
return fw_mgmt_interface_fw_loaded_operation(op);
case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
return fw_mgmt_backend_fw_updated_operation(op);
default:
dev_err(&op->connection->bundle->dev,
"unsupported request: %u\n", type);
return -EINVAL;
}
}
int gb_fw_mgmt_connection_init(struct gb_connection *connection)
{
struct fw_mgmt *fw_mgmt;
int ret, minor;
if (!connection)
return 0;
fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
if (!fw_mgmt)
return -ENOMEM;
fw_mgmt->parent = &connection->bundle->dev;
fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
fw_mgmt->connection = connection;
gb_connection_set_data(connection, fw_mgmt);
init_completion(&fw_mgmt->completion);
ida_init(&fw_mgmt->id_map);
mutex_init(&fw_mgmt->mutex);
kref_init(&fw_mgmt->kref);
mutex_lock(&list_mutex);
list_add(&fw_mgmt->node, &fw_mgmt_list);
mutex_unlock(&list_mutex);
ret = gb_connection_enable(connection);
if (ret)
goto err_list_del;
minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
if (minor < 0) {
ret = minor;
goto err_connection_disable;
}
/* Add a char device to allow userspace to interact with fw-mgmt */
fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);
ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
if (ret)
goto err_remove_ida;
/* Create a class device for the char-dev, parented to the bundle */
fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
fw_mgmt->dev_num, NULL,
"gb-fw-mgmt-%d", minor);
if (IS_ERR(fw_mgmt->class_device)) {
ret = PTR_ERR(fw_mgmt->class_device);
goto err_del_cdev;
}
return 0;
err_del_cdev:
cdev_del(&fw_mgmt->cdev);
err_remove_ida:
ida_simple_remove(&fw_mgmt_minors_map, minor);
err_connection_disable:
gb_connection_disable(connection);
err_list_del:
mutex_lock(&list_mutex);
list_del(&fw_mgmt->node);
mutex_unlock(&list_mutex);
put_fw_mgmt(fw_mgmt);
return ret;
}
void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
{
struct fw_mgmt *fw_mgmt;
if (!connection)
return;
fw_mgmt = gb_connection_get_data(connection);
device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
cdev_del(&fw_mgmt->cdev);
ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
/*
* Disallow any new ioctl operations on the char device and wait for
* existing ones to finish.
*/
mutex_lock(&fw_mgmt->mutex);
fw_mgmt->disabled = true;
mutex_unlock(&fw_mgmt->mutex);
/* All pending greybus operations should have finished by now */
gb_connection_disable(fw_mgmt->connection);
/* Disallow new users to get access to the fw_mgmt structure */
mutex_lock(&list_mutex);
list_del(&fw_mgmt->node);
mutex_unlock(&list_mutex);
/*
* All current users of fw_mgmt have taken a reference to it by now.
* Drop our reference; fw_mgmt will be freed once the last user puts
* theirs.
*/
put_fw_mgmt(fw_mgmt);
}
int fw_mgmt_init(void)
{
int ret;
fw_mgmt_class = class_create(THIS_MODULE, "gb_fw_mgmt");
if (IS_ERR(fw_mgmt_class))
return PTR_ERR(fw_mgmt_class);
ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
"gb_fw_mgmt");
if (ret)
goto err_remove_class;
return 0;
err_remove_class:
class_destroy(fw_mgmt_class);
return ret;
}
void fw_mgmt_exit(void)
{
unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
class_destroy(fw_mgmt_class);
ida_destroy(&fw_mgmt_minors_map);
}


@ -0,0 +1,127 @@
/*
* Greybus Camera protocol driver.
*
* Copyright 2015 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef __GB_CAMERA_H
#define __GB_CAMERA_H
#include <linux/v4l2-mediabus.h>
/* Input flags need to be set from the caller */
#define GB_CAMERA_IN_FLAG_TEST (1 << 0)
/* Output flags returned */
#define GB_CAMERA_OUT_FLAG_ADJUSTED (1 << 0)
/**
* struct gb_camera_stream - Represents greybus camera stream.
* @width: Stream width in pixels.
* @height: Stream height in pixels.
* @pixel_code: Media bus pixel code.
* @vc: MIPI CSI virtual channel.
* @dt: MIPI CSI data types. Most formats use a single data type, in which case
* the second element will be ignored.
* @max_size: Maximum size of a frame in bytes. The camera module guarantees
* that all data between the Frame Start and Frame End packet for
* the associated virtual channel and data type(s) will not exceed
* this size.
*/
struct gb_camera_stream {
unsigned int width;
unsigned int height;
enum v4l2_mbus_pixelcode pixel_code;
unsigned int vc;
unsigned int dt[2];
unsigned int max_size;
};
/**
* struct gb_camera_csi_params - CSI configuration parameters
* @num_lanes: number of CSI data lanes
* @clk_freq: CSI clock frequency in Hz
*/
struct gb_camera_csi_params {
unsigned int num_lanes;
unsigned int clk_freq;
};
/**
* struct gb_camera_ops - Greybus camera operations, used by the Greybus camera
* driver to expose operations to the host camera driver.
* @capabilities: Retrieve camera capabilities and store them in the 'buf'
* buffer. The maximum buffer size is specified by the caller
* in the 'len' parameter, and the effective capabilities size
* is returned from the function. If the buffer is too small to
* hold the capabilities an error is returned and the buffer is
* left untouched.
*
* @configure_streams: Negotiate configuration and prepare the module for video
* capture. The caller specifies the number of streams it
* requests in the 'nstreams' argument and the associated
* streams configurations in the 'streams' argument. The
* GB_CAMERA_IN_FLAG_TEST 'flag' can be set to test a
* configuration without applying it, otherwise the
* configuration is applied by the module. The module can
* decide to modify the requested configuration, including
* using a different number of streams. In that case the
* modified configuration won't be applied, the
* GB_CAMERA_OUT_FLAG_ADJUSTED 'flag' will be set upon
* return, and the modified configuration and number of
streams stored in 'streams' and 'nstreams'. The module
* returns its CSI-2 bus parameters in the 'csi_params'
* structure in all cases.
*
* @capture: Submit a capture request. The supplied 'request_id' must be unique
* and higher than the IDs of all the previously submitted requests.
* The 'streams' argument specifies which streams are affected by the
* request in the form of a bitmask, with bits corresponding to the
* configured streams indexes. If the request contains settings, the
* 'settings' argument points to the settings buffer and its size is
* specified by the 'settings_size' argument. Otherwise the 'settings'
* argument should be set to NULL and 'settings_size' to 0.
*
* @flush: Flush the capture requests queue. Return the ID of the last request
that will be processed by the device before it stops transmitting video
* frames. All queued capture requests with IDs higher than the returned
* ID will be dropped without being processed.
*/
struct gb_camera_ops {
ssize_t (*capabilities)(void *priv, char *buf, size_t len);
int (*configure_streams)(void *priv, unsigned int *nstreams,
unsigned int *flags, struct gb_camera_stream *streams,
struct gb_camera_csi_params *csi_params);
int (*capture)(void *priv, u32 request_id,
unsigned int streams, unsigned int num_frames,
size_t settings_size, const void *settings);
int (*flush)(void *priv, u32 *request_id);
};
/**
* struct gb_camera_module - Represents greybus camera module.
* @priv: Module private data, passed to all camera operations.
* @ops: Greybus camera operation callbacks.
* @interface_id: Interface id of the module.
* @refcount: Reference counting object.
* @release: Module release function.
* @list: List entry in the camera modules list.
*/
struct gb_camera_module {
void *priv;
const struct gb_camera_ops *ops;
unsigned int interface_id;
struct kref refcount;
void (*release)(struct kref *kref);
struct list_head list; /* Global list */
};
#define gb_camera_call(f, op, args...) \
(!(f) ? -ENODEV : (((f)->ops->op) ? \
(f)->ops->op((f)->priv, ##args) : -ENOIOCTLCMD))
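/*
* Illustrative use of gb_camera_call() by a host camera driver; the
* 'module' pointer is the one handed over at gb_camera_register() time
* and the stream geometry is hypothetical:
*
*	struct gb_camera_csi_params csi;
*	struct gb_camera_stream stream = { .width = 1920, .height = 1080 };
*	unsigned int nstreams = 1, flags = 0;
*	int ret;
*
*	ret = gb_camera_call(module, configure_streams, &nstreams, &flags,
*			     &stream, &csi);
*
* The macro returns -ENODEV if 'module' is NULL and -ENOIOCTLCMD if the
* operation is not implemented.
*/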
int gb_camera_register(struct gb_camera_module *module);
int gb_camera_unregister(struct gb_camera_module *module);
#endif /* __GB_CAMERA_H */


@ -0,0 +1,360 @@
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "greybus.h"
#include "gbphy.h"
#define GB_GBPHY_AUTOSUSPEND_MS 3000
struct gbphy_host {
struct gb_bundle *bundle;
struct list_head devices;
};
static DEFINE_IDA(gbphy_id);
static ssize_t protocol_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
return sprintf(buf, "0x%02x\n", gbphy_dev->cport_desc->protocol_id);
}
static DEVICE_ATTR_RO(protocol_id);
static struct attribute *gbphy_dev_attrs[] = {
&dev_attr_protocol_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(gbphy_dev);
static void gbphy_dev_release(struct device *dev)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
ida_simple_remove(&gbphy_id, gbphy_dev->id);
kfree(gbphy_dev);
}
#ifdef CONFIG_PM
static int gb_gbphy_idle(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
return 0;
}
#endif
static const struct dev_pm_ops gb_gbphy_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
pm_generic_runtime_resume,
gb_gbphy_idle)
};
static struct device_type greybus_gbphy_dev_type = {
.name = "gbphy_device",
.release = gbphy_dev_release,
.pm = &gb_gbphy_pm_ops,
};
static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
struct gb_bundle *bundle = gbphy_dev->bundle;
struct gb_interface *intf = bundle->intf;
struct gb_module *module = intf->module;
struct gb_host_device *hd = intf->hd;
if (add_uevent_var(env, "BUS=%u", hd->bus_id))
return -ENOMEM;
if (add_uevent_var(env, "MODULE=%u", module->module_id))
return -ENOMEM;
if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
return -ENOMEM;
if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
intf->vendor_id, intf->product_id))
return -ENOMEM;
if (add_uevent_var(env, "BUNDLE=%u", gbphy_dev->bundle->id))
return -ENOMEM;
if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
return -ENOMEM;
if (add_uevent_var(env, "GBPHY=%u", gbphy_dev->id))
return -ENOMEM;
if (add_uevent_var(env, "PROTOCOL_ID=%02x", cport_desc->protocol_id))
return -ENOMEM;
return 0;
}
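/*
* For illustration, the resulting uevent environment for a gbphy device
* on bus 1, module/interface 2, bundle 1 might read (all values
* hypothetical):
*
*	BUS=1
*	MODULE=2
*	INTERFACE=2
*	GREYBUS_ID=00000126/00001000
*	BUNDLE=1
*	BUNDLE_CLASS=0a
*	GBPHY=1
*	PROTOCOL_ID=02
*/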
static const struct gbphy_device_id *
gbphy_dev_match_id(struct gbphy_device *gbphy_dev, struct gbphy_driver *gbphy_drv)
{
const struct gbphy_device_id *id = gbphy_drv->id_table;
if (!id)
return NULL;
for (; id->protocol_id; id++)
if (id->protocol_id == gbphy_dev->cport_desc->protocol_id)
return id;
return NULL;
}
static int gbphy_dev_match(struct device *dev, struct device_driver *drv)
{
struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
const struct gbphy_device_id *id;
id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
if (id)
return 1;
return 0;
}
static int gbphy_dev_probe(struct device *dev)
{
struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
const struct gbphy_device_id *id;
int ret;
id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
if (!id)
return -ENODEV;
/* for old kernels we need get_sync to resume parent devices */
ret = gb_pm_runtime_get_sync(gbphy_dev->bundle);
if (ret < 0)
return ret;
pm_runtime_set_autosuspend_delay(dev, GB_GBPHY_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(dev);
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
/*
* Drivers should call put on the gbphy dev before returning
* from probe if they support runtime pm.
*/
ret = gbphy_drv->probe(gbphy_dev, id);
if (ret) {
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
pm_runtime_dont_use_autosuspend(dev);
}
gb_pm_runtime_put_autosuspend(gbphy_dev->bundle);
return ret;
}
static int gbphy_dev_remove(struct device *dev)
{
struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
gbphy_drv->remove(gbphy_dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
pm_runtime_dont_use_autosuspend(dev);
return 0;
}
static struct bus_type gbphy_bus_type = {
.name = "gbphy",
.match = gbphy_dev_match,
.probe = gbphy_dev_probe,
.remove = gbphy_dev_remove,
.uevent = gbphy_dev_uevent,
};
int gb_gbphy_register_driver(struct gbphy_driver *driver,
struct module *owner, const char *mod_name)
{
int retval;
if (greybus_disabled())
return -ENODEV;
driver->driver.bus = &gbphy_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
driver->driver.mod_name = mod_name;
retval = driver_register(&driver->driver);
if (retval)
return retval;
pr_info("registered new driver %s\n", driver->name);
return 0;
}
EXPORT_SYMBOL_GPL(gb_gbphy_register_driver);
void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
{
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
struct greybus_descriptor_cport *cport_desc)
{
struct gbphy_device *gbphy_dev;
int retval;
int id;
id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
if (!gbphy_dev) {
ida_simple_remove(&gbphy_id, id);
return ERR_PTR(-ENOMEM);
}
gbphy_dev->id = id;
gbphy_dev->bundle = bundle;
gbphy_dev->cport_desc = cport_desc;
gbphy_dev->dev.parent = &bundle->dev;
gbphy_dev->dev.bus = &gbphy_bus_type;
gbphy_dev->dev.type = &greybus_gbphy_dev_type;
gbphy_dev->dev.groups = gbphy_dev_groups;
gbphy_dev->dev.dma_mask = bundle->dev.dma_mask;
dev_set_name(&gbphy_dev->dev, "gbphy%d", id);
retval = device_register(&gbphy_dev->dev);
if (retval) {
put_device(&gbphy_dev->dev);
return ERR_PTR(retval);
}
return gbphy_dev;
}
static void gb_gbphy_disconnect(struct gb_bundle *bundle)
{
struct gbphy_host *gbphy_host = greybus_get_drvdata(bundle);
struct gbphy_device *gbphy_dev, *temp;
int ret;
ret = gb_pm_runtime_get_sync(bundle);
if (ret < 0)
gb_pm_runtime_get_noresume(bundle);
list_for_each_entry_safe(gbphy_dev, temp, &gbphy_host->devices, list) {
list_del(&gbphy_dev->list);
device_unregister(&gbphy_dev->dev);
}
kfree(gbphy_host);
}
static int gb_gbphy_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct gbphy_host *gbphy_host;
struct gbphy_device *gbphy_dev;
int i;
if (bundle->num_cports == 0)
return -ENODEV;
gbphy_host = kzalloc(sizeof(*gbphy_host), GFP_KERNEL);
if (!gbphy_host)
return -ENOMEM;
gbphy_host->bundle = bundle;
INIT_LIST_HEAD(&gbphy_host->devices);
greybus_set_drvdata(bundle, gbphy_host);
/*
* Create a set of child devices, one per cport, and bind the
* bridged phy drivers to them.
*/
for (i = 0; i < bundle->num_cports; ++i) {
gbphy_dev = gb_gbphy_create_dev(bundle, &bundle->cport_desc[i]);
if (IS_ERR(gbphy_dev)) {
gb_gbphy_disconnect(bundle);
return PTR_ERR(gbphy_dev);
}
list_add(&gbphy_dev->list, &gbphy_host->devices);
}
gb_pm_runtime_put_autosuspend(bundle);
return 0;
}
static const struct greybus_bundle_id gb_gbphy_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },
{ },
};
MODULE_DEVICE_TABLE(greybus, gb_gbphy_id_table);
static struct greybus_driver gb_gbphy_driver = {
.name = "gbphy",
.probe = gb_gbphy_probe,
.disconnect = gb_gbphy_disconnect,
.id_table = gb_gbphy_id_table,
};
static int __init gbphy_init(void)
{
int retval;
retval = bus_register(&gbphy_bus_type);
if (retval) {
pr_err("gbphy bus register failed (%d)\n", retval);
return retval;
}
retval = greybus_register(&gb_gbphy_driver);
if (retval) {
pr_err("error registering greybus driver\n");
goto error_gbphy;
}
return 0;
error_gbphy:
bus_unregister(&gbphy_bus_type);
ida_destroy(&gbphy_id);
return retval;
}
module_init(gbphy_init);
static void __exit gbphy_exit(void)
{
greybus_deregister(&gb_gbphy_driver);
bus_unregister(&gbphy_bus_type);
ida_destroy(&gbphy_id);
}
module_exit(gbphy_exit);
MODULE_LICENSE("GPL v2");


@ -0,0 +1,110 @@
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef __GBPHY_H
#define __GBPHY_H
struct gbphy_device {
u32 id;
struct greybus_descriptor_cport *cport_desc;
struct gb_bundle *bundle;
struct list_head list;
struct device dev;
};
#define to_gbphy_dev(d) container_of(d, struct gbphy_device, dev)
static inline void *gb_gbphy_get_data(struct gbphy_device *gdev)
{
return dev_get_drvdata(&gdev->dev);
}
static inline void gb_gbphy_set_data(struct gbphy_device *gdev, void *data)
{
dev_set_drvdata(&gdev->dev, data);
}
struct gbphy_device_id {
__u8 protocol_id;
};
#define GBPHY_PROTOCOL(p) \
.protocol_id = (p),
struct gbphy_driver {
const char *name;
int (*probe)(struct gbphy_device *,
const struct gbphy_device_id *id);
void (*remove)(struct gbphy_device *);
const struct gbphy_device_id *id_table;
struct device_driver driver;
};
#define to_gbphy_driver(d) container_of(d, struct gbphy_driver, driver)
int gb_gbphy_register_driver(struct gbphy_driver *driver,
struct module *owner, const char *mod_name);
void gb_gbphy_deregister_driver(struct gbphy_driver *driver);
#define gb_gbphy_register(driver) \
gb_gbphy_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
#define gb_gbphy_deregister(driver) \
gb_gbphy_deregister_driver(driver)
/**
* module_gbphy_driver() - Helper macro for registering a gbphy driver
* @__gbphy_driver: gbphy_driver structure
*
* Helper macro for gbphy drivers to set up proper module init / exit
* functions. Replaces module_init() and module_exit() and keeps people from
* printing pointless things to the kernel log when their driver is loaded.
*/
#define module_gbphy_driver(__gbphy_driver) \
module_driver(__gbphy_driver, gb_gbphy_register, gb_gbphy_deregister)
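/*
* Minimal illustrative skeleton of a gbphy driver using the helper;
* every name here is a placeholder, with GREYBUS_PROTOCOL_GPIO standing
* in for whichever protocol id the driver handles:
*
*	static int example_probe(struct gbphy_device *gbphy_dev,
*				 const struct gbphy_device_id *id)
*	{
*		return 0;
*	}
*
*	static void example_remove(struct gbphy_device *gbphy_dev)
*	{
*	}
*
*	static const struct gbphy_device_id example_id_table[] = {
*		{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
*		{ },
*	};
*
*	static struct gbphy_driver example_driver = {
*		.name		= "example",
*		.probe		= example_probe,
*		.remove		= example_remove,
*		.id_table	= example_id_table,
*	};
*	module_gbphy_driver(example_driver);
*/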
#ifdef CONFIG_PM
static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev)
{
struct device *dev = &gbphy_dev->dev;
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
pm_runtime_put_noidle(dev);
return ret;
}
return 0;
}
static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev)
{
struct device *dev = &gbphy_dev->dev;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev)
{
pm_runtime_get_noresume(&gbphy_dev->dev);
}
static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev)
{
pm_runtime_put_noidle(&gbphy_dev->dev);
}
#else
static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev) { return 0; }
static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev) {}
static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev) {}
static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev) {}
#endif
#endif /* __GBPHY_H */


@ -0,0 +1,767 @@
/*
* GPIO Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>
#include "greybus.h"
#include "gbphy.h"
struct gb_gpio_line {
/* The following has to be an array of line_max entries */
/* --> make them just a flags field */
u8 active: 1,
direction: 1, /* 0 = output, 1 = input */
value: 1; /* 0 = low, 1 = high */
u16 debounce_usec;
u8 irq_type;
bool irq_type_pending;
bool masked;
bool masked_pending;
};
struct gb_gpio_controller {
struct gbphy_device *gbphy_dev;
struct gb_connection *connection;
u8 line_max; /* max line number */
struct gb_gpio_line *lines;
struct gpio_chip chip;
struct irq_chip irqc;
struct irq_chip *irqchip;
struct irq_domain *irqdomain;
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
struct mutex irq_lock;
};
#define gpio_chip_to_gb_gpio_controller(chip) \
container_of(chip, struct gb_gpio_controller, chip)
#define irq_data_to_gpio_chip(d) (d->domain->host_data)
static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
struct gb_gpio_line_count_response response;
int ret;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
NULL, 0, &response, sizeof(response));
if (!ret)
ggc->line_max = response.count;
return ret;
}
static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
struct gb_gpio_activate_request request;
struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
request.which = which;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
&request, sizeof(request), NULL, 0);
if (ret) {
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
ggc->lines[which].active = true;
return 0;
}
static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
u8 which)
{
struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
struct device *dev = &gbphy_dev->dev;
struct gb_gpio_deactivate_request request;
int ret;
request.which = which;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
&request, sizeof(request), NULL, 0);
if (ret) {
dev_err(dev, "failed to deactivate gpio %u\n", which);
goto out_pm_put;
}
ggc->lines[which].active = false;
out_pm_put:
gbphy_runtime_put_autosuspend(gbphy_dev);
}
static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
u8 which)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_get_direction_request request;
struct gb_gpio_get_direction_response response;
int ret;
u8 direction;
request.which = which;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
&request, sizeof(request),
&response, sizeof(response));
if (ret)
return ret;
direction = response.direction;
if (direction && direction != 1) {
dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
which, direction);
}
ggc->lines[which].direction = direction ? 1 : 0;
return 0;
}
static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
u8 which)
{
struct gb_gpio_direction_in_request request;
int ret;
request.which = which;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
&request, sizeof(request), NULL, 0);
if (!ret)
ggc->lines[which].direction = 1;
return ret;
}
static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
u8 which, bool value_high)
{
struct gb_gpio_direction_out_request request;
int ret;
request.which = which;
request.value = value_high ? 1 : 0;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
&request, sizeof(request), NULL, 0);
if (!ret)
ggc->lines[which].direction = 0;
return ret;
}
static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
u8 which)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_get_value_request request;
struct gb_gpio_get_value_response response;
int ret;
u8 value;
request.which = which;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
&request, sizeof(request),
&response, sizeof(response));
if (ret) {
dev_err(dev, "failed to get value of gpio %u\n", which);
return ret;
}
value = response.value;
if (value && value != 1) {
dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
which, value);
}
ggc->lines[which].value = value ? 1 : 0;
return 0;
}
static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
u8 which, bool value_high)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_set_value_request request;
int ret;
if (ggc->lines[which].direction == 1) {
dev_warn(dev, "refusing to set value of input gpio %u\n",
which);
return;
}
request.which = which;
request.value = value_high ? 1 : 0;
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
&request, sizeof(request), NULL, 0);
if (ret) {
dev_err(dev, "failed to set value of gpio %u\n", which);
return;
}
ggc->lines[which].value = request.value;
}
static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
u8 which, u16 debounce_usec)
{
struct gb_gpio_set_debounce_request request;
int ret;
request.which = which;
request.usec = cpu_to_le16(debounce_usec);
ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
&request, sizeof(request), NULL, 0);
if (!ret)
ggc->lines[which].debounce_usec = debounce_usec;
return ret;
}
static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_irq_mask_request request;
int ret;
request.which = hwirq;
ret = gb_operation_sync(ggc->connection,
GB_GPIO_TYPE_IRQ_MASK,
&request, sizeof(request), NULL, 0);
if (ret)
dev_err(dev, "failed to mask irq: %d\n", ret);
}
static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_irq_unmask_request request;
int ret;
request.which = hwirq;
ret = gb_operation_sync(ggc->connection,
GB_GPIO_TYPE_IRQ_UNMASK,
&request, sizeof(request), NULL, 0);
if (ret)
dev_err(dev, "failed to unmask irq: %d\n", ret);
}
static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
u8 hwirq, u8 type)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_irq_type_request request;
int ret;
request.which = hwirq;
request.type = type;
ret = gb_operation_sync(ggc->connection,
GB_GPIO_TYPE_IRQ_TYPE,
&request, sizeof(request), NULL, 0);
if (ret)
dev_err(dev, "failed to set irq type: %d\n", ret);
}
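/*
* The irqchip callbacks below are run by the irq core with the
* descriptor lock held and interrupts disabled, so they must not issue
* (sleeping) greybus operations themselves. They only record the
* requested state; the cached changes are flushed to the module from
* gb_gpio_irq_bus_sync_unlock(), which the irq core calls from
* sleepable context around every update.
*/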
static void gb_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_to_gpio_chip(d);
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
struct gb_gpio_line *line = &ggc->lines[d->hwirq];
line->masked = true;
line->masked_pending = true;
}
static void gb_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_to_gpio_chip(d);
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
struct gb_gpio_line *line = &ggc->lines[d->hwirq];
line->masked = false;
line->masked_pending = true;
}
static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *chip = irq_data_to_gpio_chip(d);
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
struct gb_gpio_line *line = &ggc->lines[d->hwirq];
struct device *dev = &ggc->gbphy_dev->dev;
u8 irq_type;
switch (type) {
case IRQ_TYPE_NONE:
irq_type = GB_GPIO_IRQ_TYPE_NONE;
break;
case IRQ_TYPE_EDGE_RISING:
irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
break;
case IRQ_TYPE_LEVEL_LOW:
irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
break;
case IRQ_TYPE_LEVEL_HIGH:
irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
break;
default:
dev_err(dev, "unsupported irq type: %u\n", type);
return -EINVAL;
}
line->irq_type = irq_type;
line->irq_type_pending = true;
return 0;
}
static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_to_gpio_chip(d);
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
mutex_lock(&ggc->irq_lock);
}
static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_to_gpio_chip(d);
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
struct gb_gpio_line *line = &ggc->lines[d->hwirq];
if (line->irq_type_pending) {
_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
line->irq_type_pending = false;
}
if (line->masked_pending) {
if (line->masked)
_gb_gpio_irq_mask(ggc, d->hwirq);
else
_gb_gpio_irq_unmask(ggc, d->hwirq);
line->masked_pending = false;
}
mutex_unlock(&ggc->irq_lock);
}
static int gb_gpio_request_handler(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_message *request;
struct gb_gpio_irq_event_request *event;
u8 type = op->type;
int irq;
struct irq_desc *desc;
if (type != GB_GPIO_TYPE_IRQ_EVENT) {
dev_err(dev, "unsupported unsolicited request: %u\n", type);
return -EINVAL;
}
request = op->request;
if (request->payload_size < sizeof(*event)) {
dev_err(dev, "short event received (%zu < %zu)\n",
request->payload_size, sizeof(*event));
return -EINVAL;
}
event = request->payload;
if (event->which > ggc->line_max) {
dev_err(dev, "invalid hw irq: %d\n", event->which);
return -EINVAL;
}
irq = irq_find_mapping(ggc->irqdomain, event->which);
if (!irq) {
dev_err(dev, "failed to find IRQ\n");
return -EINVAL;
}
desc = irq_to_desc(irq);
if (!desc) {
dev_err(dev, "failed to look up irq\n");
return -EINVAL;
}
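/*
* This handler runs in process context (a greybus work queue), while
* generic_handle_irq_desc() expects to be invoked like a primary
* interrupt handler, i.e. with local interrupts disabled.
*/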
local_irq_disable();
generic_handle_irq_desc(desc);
local_irq_enable();
return 0;
}
static int gb_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
return gb_gpio_activate_operation(ggc, (u8)offset);
}
static void gb_gpio_free(struct gpio_chip *chip, unsigned offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
gb_gpio_deactivate_operation(ggc, (u8)offset);
}
static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
u8 which;
int ret;
which = (u8)offset;
ret = gb_gpio_get_direction_operation(ggc, which);
if (ret)
return ret;
return ggc->lines[which].direction ? 1 : 0;
}
static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
return gb_gpio_direction_in_operation(ggc, (u8)offset);
}
static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}
static int gb_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
u8 which;
int ret;
which = (u8)offset;
ret = gb_gpio_get_value_operation(ggc, which);
if (ret)
return ret;
return ggc->lines[which].value;
}
static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}
static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
unsigned debounce)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
u16 usec;
if (debounce > U16_MAX)
return -EINVAL;
usec = (u16)debounce;
return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
}
static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
int ret;
/* Now find out how many lines there are */
ret = gb_gpio_line_count_operation(ggc);
if (ret)
return ret;
ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
GFP_KERNEL);
if (!ggc->lines)
return -ENOMEM;
return ret;
}
/**
* gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
* @domain: the irqdomain used by this irqchip
* @irq: the global irq number used by this GB gpio irqchip irq
* @hwirq: the local IRQ/GPIO line offset on this GB gpio
*
* This function will set up the mapping for a certain IRQ line on a
* GB gpio by assigning the GB gpio as chip data, and using the irqchip
* stored inside the GB gpio.
*/
static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
struct gpio_chip *chip = domain->host_data;
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
irq_set_chip_data(irq, ggc);
irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
irq_set_noprobe(irq);
/*
* No set-up of the hardware will happen if IRQ_TYPE_NONE
* is passed as default type.
*/
if (ggc->irq_default_type != IRQ_TYPE_NONE)
irq_set_irq_type(irq, ggc->irq_default_type);
return 0;
}
static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
static const struct irq_domain_ops gb_gpio_domain_ops = {
.map = gb_gpio_irq_map,
.unmap = gb_gpio_irq_unmap,
};
/**
* gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
* @ggc: the gb_gpio_controller to remove the irqchip from
*
* This is called only from gb_gpio_remove()
*/
static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
{
unsigned int offset;
/* Remove all IRQ mappings and delete the domain */
if (ggc->irqdomain) {
for (offset = 0; offset < (ggc->line_max + 1); offset++)
irq_dispose_mapping(irq_find_mapping(ggc->irqdomain, offset));
irq_domain_remove(ggc->irqdomain);
}
ggc->irqchip = NULL;
}
/**
* gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
* @chip: the gpio chip to add the irqchip to
* @irqchip: the irqchip to add to the adapter
* @first_irq: if not dynamically assigned, the base (first) IRQ to
* allocate gpio irqs from
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
*
* This function closely associates a certain irqchip with a certain
* gpio chip, providing an irq domain to translate the local IRQs to
* global irqs, and making sure that the gpio chip
* is passed as chip data to all related functions. Driver callbacks
* need to use container_of() to get their local state containers back
* from the gpio chip passed as chip data. An irqdomain will be stored
* in the gpio chip that shall be used by the driver to handle IRQ number
* translation. The gpio chip will need to be initialized and registered
* before calling this function.
*/
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
struct gb_gpio_controller *ggc;
unsigned int offset;
unsigned irq_base;
if (!chip || !irqchip)
return -EINVAL;
ggc = gpio_chip_to_gb_gpio_controller(chip);
ggc->irqchip = irqchip;
ggc->irq_handler = handler;
ggc->irq_default_type = type;
ggc->irqdomain = irq_domain_add_simple(NULL,
ggc->line_max + 1, first_irq,
&gb_gpio_domain_ops, chip);
if (!ggc->irqdomain) {
ggc->irqchip = NULL;
return -EINVAL;
}
/*
* Prepare the mapping since the irqchip shall be orthogonal to
* any gpio calls. If the first_irq was zero, this is
* necessary to allocate descriptors for all IRQs.
*/
for (offset = 0; offset < (ggc->line_max + 1); offset++) {
irq_base = irq_create_mapping(ggc->irqdomain, offset);
if (offset == 0)
ggc->irq_base = irq_base;
}
return 0;
}
static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
return irq_find_mapping(ggc->irqdomain, offset);
}
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
struct gb_gpio_controller *ggc;
struct gpio_chip *gpio;
struct irq_chip *irqc;
int ret;
ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
if (!ggc)
return -ENOMEM;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
gb_gpio_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto exit_ggc_free;
}
ggc->connection = connection;
gb_connection_set_data(connection, ggc);
ggc->gbphy_dev = gbphy_dev;
gb_gbphy_set_data(gbphy_dev, ggc);
ret = gb_connection_enable_tx(connection);
if (ret)
goto exit_connection_destroy;
ret = gb_gpio_controller_setup(ggc);
if (ret)
goto exit_connection_disable;
irqc = &ggc->irqc;
irqc->irq_mask = gb_gpio_irq_mask;
irqc->irq_unmask = gb_gpio_irq_unmask;
irqc->irq_set_type = gb_gpio_irq_set_type;
irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
irqc->name = "greybus_gpio";
mutex_init(&ggc->irq_lock);
gpio = &ggc->chip;
gpio->label = "greybus_gpio";
gpio->parent = &gbphy_dev->dev;
gpio->owner = THIS_MODULE;
gpio->request = gb_gpio_request;
gpio->free = gb_gpio_free;
gpio->get_direction = gb_gpio_get_direction;
gpio->direction_input = gb_gpio_direction_input;
gpio->direction_output = gb_gpio_direction_output;
gpio->get = gb_gpio_get;
gpio->set = gb_gpio_set;
gpio->set_debounce = gb_gpio_set_debounce;
gpio->to_irq = gb_gpio_to_irq;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
gpio->can_sleep = true;
ret = gb_connection_enable(connection);
if (ret)
goto exit_line_free;
ret = gb_gpio_irqchip_add(gpio, irqc, 0,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&connection->bundle->dev,
"failed to add irq chip: %d\n", ret);
goto exit_line_free;
}
ret = gpiochip_add(gpio);
if (ret) {
dev_err(&connection->bundle->dev,
"failed to add gpio chip: %d\n", ret);
goto exit_gpio_irqchip_remove;
}
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
exit_gpio_irqchip_remove:
gb_gpio_irqchip_remove(ggc);
exit_line_free:
kfree(ggc->lines);
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
exit_ggc_free:
kfree(ggc);
return ret;
}
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
struct gb_connection *connection = ggc->connection;
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
gb_connection_disable_rx(connection);
gpiochip_remove(&ggc->chip);
gb_gpio_irqchip_remove(ggc);
gb_connection_disable(connection);
gb_connection_destroy(connection);
kfree(ggc->lines);
kfree(ggc);
}
static const struct gbphy_device_id gb_gpio_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
static struct gbphy_driver gpio_driver = {
.name = "gpio",
.probe = gb_gpio_probe,
.remove = gb_gpio_remove,
.id_table = gb_gpio_id_table,
};
module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,154 @@
/*
* Greybus driver and device API
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __LINUX_GREYBUS_H
#define __LINUX_GREYBUS_H
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include "greybus_id.h"
#include "greybus_manifest.h"
#include "greybus_protocols.h"
#include "manifest.h"
#include "hd.h"
#include "svc.h"
#include "control.h"
#include "module.h"
#include "interface.h"
#include "bundle.h"
#include "connection.h"
#include "operation.h"
#include "timesync.h"
/* Matches up with the Greybus Protocol specification document */
#define GREYBUS_VERSION_MAJOR 0x00
#define GREYBUS_VERSION_MINOR 0x01
#define GREYBUS_ID_MATCH_DEVICE \
(GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
#define GREYBUS_DEVICE(v, p) \
.match_flags = GREYBUS_ID_MATCH_DEVICE, \
.vendor = (v), \
.product = (p),
#define GREYBUS_DEVICE_CLASS(c) \
.match_flags = GREYBUS_ID_MATCH_CLASS, \
.class = (c),
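/*
 * Example (illustrative sketch only; the 0x1234/0x5678 vendor/product
 * values are made-up placeholders): a bundle id table matching one
 * specific vendor/product pair plus any HID-class bundle.
 *
 *	static const struct greybus_bundle_id example_id_table[] = {
 *		{ GREYBUS_DEVICE(0x1234, 0x5678) },
 *		{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_HID) },
 *		{ }
 *	};
 */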
/* Maximum number of CPorts */
#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
#define CPORT_ID_BAD U16_MAX
struct greybus_driver {
const char *name;
int (*probe)(struct gb_bundle *bundle,
const struct greybus_bundle_id *id);
void (*disconnect)(struct gb_bundle *bundle);
const struct greybus_bundle_id *id_table;
struct device_driver driver;
};
#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
{
dev_set_drvdata(&bundle->dev, data);
}
static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
{
return dev_get_drvdata(&bundle->dev);
}
/* Don't call these directly, use the module_greybus_driver() macro instead */
int greybus_register_driver(struct greybus_driver *driver,
struct module *module, const char *mod_name);
void greybus_deregister_driver(struct greybus_driver *driver);
/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
#define greybus_register(driver) \
greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
#define greybus_deregister(driver) \
greybus_deregister_driver(driver)
/**
* module_greybus_driver() - Helper macro for registering a Greybus driver
* @__greybus_driver: greybus_driver structure
*
* Helper macro for Greybus drivers to set up proper module init / exit
* functions. Replaces module_init() and module_exit() and keeps people from
* printing pointless things to the kernel log when their driver is loaded.
*/
#define module_greybus_driver(__greybus_driver) \
module_driver(__greybus_driver, greybus_register, greybus_deregister)
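/*
 * Example (sketch; the "example_*" names are placeholders for a real
 * driver's callbacks and id table):
 *
 *	static struct greybus_driver example_driver = {
 *		.name		= "example",
 *		.probe		= example_probe,
 *		.disconnect	= example_disconnect,
 *		.id_table	= example_id_table,
 *	};
 *	module_greybus_driver(example_driver);
 *
 * This expands to module init/exit functions that simply call
 * greybus_register()/greybus_deregister().
 */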
int greybus_disabled(void);
void gb_debugfs_init(void);
void gb_debugfs_cleanup(void);
struct dentry *gb_debugfs_get(void);
extern struct bus_type greybus_bus_type;
extern struct device_type greybus_hd_type;
extern struct device_type greybus_module_type;
extern struct device_type greybus_interface_type;
extern struct device_type greybus_control_type;
extern struct device_type greybus_bundle_type;
extern struct device_type greybus_svc_type;
static inline int is_gb_host_device(const struct device *dev)
{
return dev->type == &greybus_hd_type;
}
static inline int is_gb_module(const struct device *dev)
{
return dev->type == &greybus_module_type;
}
static inline int is_gb_interface(const struct device *dev)
{
return dev->type == &greybus_interface_type;
}
static inline int is_gb_control(const struct device *dev)
{
return dev->type == &greybus_control_type;
}
static inline int is_gb_bundle(const struct device *dev)
{
return dev->type == &greybus_bundle_type;
}
static inline int is_gb_svc(const struct device *dev)
{
return dev->type == &greybus_svc_type;
}
static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
{
return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
}
#endif /* __KERNEL__ */
#endif /* __LINUX_GREYBUS_H */

View File

@ -0,0 +1,120 @@
/*
* Greybus Component Authentication User Header
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GREYBUS_AUTHENTICATION_USER_H
#define __GREYBUS_AUTHENTICATION_USER_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define CAP_CERTIFICATE_MAX_SIZE 1600
#define CAP_SIGNATURE_MAX_SIZE 320
/* Certificate class types */
#define CAP_CERT_IMS_EAPC 0x00000001
#define CAP_CERT_IMS_EASC 0x00000002
#define CAP_CERT_IMS_EARC 0x00000003
#define CAP_CERT_IMS_IAPC 0x00000004
#define CAP_CERT_IMS_IASC 0x00000005
#define CAP_CERT_IMS_IARC 0x00000006
/* IMS Certificate response result codes */
#define CAP_IMS_RESULT_CERT_FOUND 0x00
#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
/* Authentication types */
#define CAP_AUTH_IMS_PRI 0x00000001
#define CAP_AUTH_IMS_SEC 0x00000002
#define CAP_AUTH_IMS_RSA 0x00000003
/* Authenticate response result codes */
#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
/* IOCTL support */
struct cap_ioc_get_endpoint_uid {
__u8 uid[8];
} __attribute__ ((__packed__));
struct cap_ioc_get_ims_certificate {
__u32 certificate_class;
__u32 certificate_id;
__u8 result_code;
__u32 cert_size;
__u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
} __attribute__ ((__packed__));
struct cap_ioc_authenticate {
__u32 auth_type;
__u8 uid[8];
__u8 challenge[32];
__u8 result_code;
__u8 response[64];
__u32 signature_size;
__u8 signature[CAP_SIGNATURE_MAX_SIZE];
} __attribute__ ((__packed__));
#define CAP_IOCTL_BASE 'C'
#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
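/*
 * Example userspace usage (sketch only; the device node name is an
 * assumption and depends on how the authentication class driver
 * registers its character device):
 *
 *	struct cap_ioc_get_endpoint_uid ep_uid;
 *	int fd = open("/dev/gb-authenticate-0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &ep_uid) == 0)
 *		handle_uid(ep_uid.uid);
 *
 * where handle_uid() is whatever the caller does with the 8-byte
 * endpoint UID.
 */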
#endif /* __GREYBUS_AUTHENTICATION_USER_H */

View File

@ -0,0 +1,120 @@
/*
* Greybus Firmware Management User Header
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GREYBUS_FIRMWARE_USER_H
#define __GREYBUS_FIRMWARE_USER_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define GB_FIRMWARE_U_TAG_MAX_SIZE 10
#define GB_FW_U_LOAD_METHOD_UNIPRO 0x01
#define GB_FW_U_LOAD_METHOD_INTERNAL 0x02
#define GB_FW_U_LOAD_STATUS_FAILED 0x00
#define GB_FW_U_LOAD_STATUS_UNVALIDATED 0x01
#define GB_FW_U_LOAD_STATUS_VALIDATED 0x02
#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED 0x03
#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS 0x01
#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND 0x02
#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH 0x03
#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE 0x04
#define GB_FW_U_BACKEND_FW_STATUS_INT 0x05
#define GB_FW_U_BACKEND_FW_STATUS_RETRY 0x06
#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS 0x01
#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY 0x04
#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT 0x05
/* IOCTL support */
struct fw_mgmt_ioc_get_intf_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
} __attribute__ ((__packed__));
struct fw_mgmt_ioc_get_backend_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
__u8 status;
} __attribute__ ((__packed__));
struct fw_mgmt_ioc_intf_load_and_validate {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u8 load_method;
__u8 status;
__u16 major;
__u16 minor;
} __attribute__ ((__packed__));
struct fw_mgmt_ioc_backend_fw_update {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u8 status;
} __attribute__ ((__packed__));
#define FW_MGMT_IOCTL_BASE 'F'
#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
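/*
 * Example userspace usage (sketch only; the firmware tag value and the
 * way the file descriptor to the firmware-management character device
 * is obtained are placeholders):
 *
 *	struct fw_mgmt_ioc_get_intf_version ver = {
 *		.firmware_tag = "s3fw",
 *	};
 *
 *	if (ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &ver) == 0)
 *		printf("interface firmware %u.%u\n", ver.major, ver.minor);
 */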
#endif /* __GREYBUS_FIRMWARE_USER_H */

View File

@ -0,0 +1,26 @@
/* FIXME
* move this to include/linux/mod_devicetable.h when merging
*/
#ifndef __LINUX_GREYBUS_ID_H
#define __LINUX_GREYBUS_ID_H
#include <linux/types.h>
#include <linux/mod_devicetable.h>
struct greybus_bundle_id {
__u16 match_flags;
__u32 vendor;
__u32 product;
__u8 class;
kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
};
/* Used to match the greybus_bundle_id */
#define GREYBUS_ID_MATCH_VENDOR BIT(0)
#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
#define GREYBUS_ID_MATCH_CLASS BIT(2)
#endif /* __LINUX_GREYBUS_ID_H */

View File

@ -0,0 +1,177 @@
/*
* Greybus manifest definition
*
* See "Greybus Application Protocol" document (version 0.1) for
* details on these values and structures.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 and BSD licenses.
*/
#ifndef __GREYBUS_MANIFEST_H
#define __GREYBUS_MANIFEST_H
enum greybus_descriptor_type {
GREYBUS_TYPE_INVALID = 0x00,
GREYBUS_TYPE_INTERFACE = 0x01,
GREYBUS_TYPE_STRING = 0x02,
GREYBUS_TYPE_BUNDLE = 0x03,
GREYBUS_TYPE_CPORT = 0x04,
};
enum greybus_protocol {
GREYBUS_PROTOCOL_CONTROL = 0x00,
/* 0x01 is unused */
GREYBUS_PROTOCOL_GPIO = 0x02,
GREYBUS_PROTOCOL_I2C = 0x03,
GREYBUS_PROTOCOL_UART = 0x04,
GREYBUS_PROTOCOL_HID = 0x05,
GREYBUS_PROTOCOL_USB = 0x06,
GREYBUS_PROTOCOL_SDIO = 0x07,
GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
GREYBUS_PROTOCOL_PWM = 0x09,
/* 0x0a is unused */
GREYBUS_PROTOCOL_SPI = 0x0b,
GREYBUS_PROTOCOL_DISPLAY = 0x0c,
GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
GREYBUS_PROTOCOL_SENSOR = 0x0e,
GREYBUS_PROTOCOL_LIGHTS = 0x0f,
GREYBUS_PROTOCOL_VIBRATOR = 0x10,
GREYBUS_PROTOCOL_LOOPBACK = 0x11,
GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
GREYBUS_PROTOCOL_SVC = 0x14,
GREYBUS_PROTOCOL_BOOTROM = 0x15,
GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
GREYBUS_PROTOCOL_LOG = 0x1a,
/* ... */
GREYBUS_PROTOCOL_RAW = 0xfe,
GREYBUS_PROTOCOL_VENDOR = 0xff,
};
enum greybus_class_type {
GREYBUS_CLASS_CONTROL = 0x00,
/* 0x01 is unused */
/* 0x02 is unused */
/* 0x03 is unused */
/* 0x04 is unused */
GREYBUS_CLASS_HID = 0x05,
/* 0x06 is unused */
/* 0x07 is unused */
GREYBUS_CLASS_POWER_SUPPLY = 0x08,
/* 0x09 is unused */
GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
/* 0x0b is unused */
GREYBUS_CLASS_DISPLAY = 0x0c,
GREYBUS_CLASS_CAMERA = 0x0d,
GREYBUS_CLASS_SENSOR = 0x0e,
GREYBUS_CLASS_LIGHTS = 0x0f,
GREYBUS_CLASS_VIBRATOR = 0x10,
GREYBUS_CLASS_LOOPBACK = 0x11,
GREYBUS_CLASS_AUDIO = 0x12,
/* 0x13 is unused */
/* 0x14 is unused */
GREYBUS_CLASS_BOOTROM = 0x15,
GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
GREYBUS_CLASS_LOG = 0x17,
/* ... */
GREYBUS_CLASS_RAW = 0xfe,
GREYBUS_CLASS_VENDOR = 0xff,
};
enum {
GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
};
/*
* The string in a string descriptor is not NUL-terminated. The
* size of the descriptor will be rounded up to a multiple of 4
* bytes, by padding the string with 0x00 bytes if necessary.
*/
struct greybus_descriptor_string {
__u8 length;
__u8 id;
__u8 string[0];
} __packed;
/*
* An interface descriptor describes information about an interface as a whole,
* *not* the functions within it.
*/
struct greybus_descriptor_interface {
__u8 vendor_stringid;
__u8 product_stringid;
__u8 features;
__u8 pad;
} __packed;
/*
 * A bundle descriptor defines an identification number and a class for
 * each bundle.
 *
 * @id: Uniquely identifies a bundle within an interface; its sole purpose is
 * to allow CPort descriptors to specify which bundle they are associated
 * with. The first bundle will have id 0, the second id 1, and so on.
 *
 * The largest CPort id associated with a bundle (defined by a
 * CPort descriptor in the manifest) is used to determine how to
 * encode the device id and module number in UniPro packets
 * that use the bundle.
 *
 * @class: Used by the kernel to know the functionality provided by the
 * bundle; it is matched against the functionality advertised by drivers
 * when probing a greybus driver. It should contain one of the values
 * defined in 'enum greybus_class_type'.
 */
struct greybus_descriptor_bundle {
__u8 id; /* interface-relative id (0..) */
__u8 class;
__u8 pad[2];
} __packed;
/*
* A CPort descriptor indicates the id of the bundle within the
* module it's associated with, along with the CPort id used to
* address the CPort. The protocol id defines the format of messages
* exchanged using the CPort.
*/
struct greybus_descriptor_cport {
__le16 id;
__u8 bundle;
__u8 protocol_id; /* enum greybus_protocol */
} __packed;
struct greybus_descriptor_header {
__le16 size;
__u8 type; /* enum greybus_descriptor_type */
__u8 pad;
} __packed;
struct greybus_descriptor {
struct greybus_descriptor_header header;
union {
struct greybus_descriptor_string string;
struct greybus_descriptor_interface interface;
struct greybus_descriptor_bundle bundle;
struct greybus_descriptor_cport cport;
};
} __packed;
struct greybus_manifest_header {
__le16 size;
__u8 version_major;
__u8 version_minor;
} __packed;
struct greybus_manifest {
struct greybus_manifest_header header;
struct greybus_descriptor descriptors[0];
} __packed;
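/*
 * Manifest walking sketch (illustrative, error handling elided; the
 * process() helper is hypothetical): each descriptor records its own
 * size in its header, so a parsed manifest can be traversed by
 * advancing a cursor by header.size until the total size from the
 * manifest header is consumed.
 *
 *	struct greybus_descriptor *desc = manifest->descriptors;
 *	u16 left = le16_to_cpu(manifest->header.size) -
 *		   sizeof(manifest->header);
 *
 *	while (left >= sizeof(desc->header)) {
 *		u16 size = le16_to_cpu(desc->header.size);
 *
 *		process(desc);
 *		desc = (struct greybus_descriptor *)((char *)desc + size);
 *		left -= size;
 *	}
 *
 * with process() dispatching on desc->header.type.
 */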
#endif /* __GREYBUS_MANIFEST_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,531 @@
/*
* Greybus driver and device API
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM greybus
#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GREYBUS_H
#include <linux/tracepoint.h>
struct gb_message;
struct gb_operation;
struct gb_connection;
struct gb_bundle;
struct gb_host_device;
DECLARE_EVENT_CLASS(gb_message,
TP_PROTO(struct gb_message *message),
TP_ARGS(message),
TP_STRUCT__entry(
__field(u16, size)
__field(u16, operation_id)
__field(u8, type)
__field(u8, result)
),
TP_fast_assign(
__entry->size = le16_to_cpu(message->header->size);
__entry->operation_id =
le16_to_cpu(message->header->operation_id);
__entry->type = message->header->type;
__entry->result = message->header->result;
),
TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
__entry->size, __entry->operation_id,
__entry->type, __entry->result)
);
#define DEFINE_MESSAGE_EVENT(name) \
DEFINE_EVENT(gb_message, name, \
TP_PROTO(struct gb_message *message), \
TP_ARGS(message))
/*
* Occurs immediately before calling a host device's message_send()
* method.
*/
DEFINE_MESSAGE_EVENT(gb_message_send);
/*
* Occurs after an incoming request message has been received
*/
DEFINE_MESSAGE_EVENT(gb_message_recv_request);
/*
* Occurs after an incoming response message has been received,
* after its matching request has been found.
*/
DEFINE_MESSAGE_EVENT(gb_message_recv_response);
/*
* Occurs after an operation has been canceled, possibly before the
* cancellation is complete.
*/
DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);
/*
* Occurs when an incoming request is cancelled; if the response has
* been queued for sending, this occurs after it is sent.
*/
DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);
/*
* Occurs in the host driver message_send() function just prior to
* handing off the data to be processed by hardware.
*/
DEFINE_MESSAGE_EVENT(gb_message_submit);
#undef DEFINE_MESSAGE_EVENT
DECLARE_EVENT_CLASS(gb_operation,
TP_PROTO(struct gb_operation *operation),
TP_ARGS(operation),
TP_STRUCT__entry(
__field(u16, cport_id) /* CPort of HD side of connection */
__field(u16, id) /* Operation ID */
__field(u8, type)
__field(unsigned long, flags)
__field(int, active)
__field(int, waiters)
__field(int, errno)
),
TP_fast_assign(
__entry->cport_id = operation->connection->hd_cport_id;
__entry->id = operation->id;
__entry->type = operation->type;
__entry->flags = operation->flags;
__entry->active = operation->active;
__entry->waiters = atomic_read(&operation->waiters);
__entry->errno = operation->errno;
),
TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
__entry->id, __entry->cport_id, __entry->type, __entry->flags,
__entry->active, __entry->waiters, __entry->errno)
);
#define DEFINE_OPERATION_EVENT(name) \
DEFINE_EVENT(gb_operation, name, \
TP_PROTO(struct gb_operation *operation), \
TP_ARGS(operation))
/*
* Occurs after a new operation for an outgoing request has been
* successfully created.
*/
DEFINE_OPERATION_EVENT(gb_operation_create);
/*
* Occurs after a new core operation has been created.
*/
DEFINE_OPERATION_EVENT(gb_operation_create_core);
/*
* Occurs after a new operation for an incoming request has been
* successfully created and initialized.
*/
DEFINE_OPERATION_EVENT(gb_operation_create_incoming);
/*
* Occurs when the last reference to an operation has been dropped,
* prior to freeing resources.
*/
DEFINE_OPERATION_EVENT(gb_operation_destroy);
/*
* Occurs when an operation has been marked active, after updating
* its active count.
*/
DEFINE_OPERATION_EVENT(gb_operation_get_active);
/*
* Occurs when an operation is marked inactive, before updating
* its active count.
*/
DEFINE_OPERATION_EVENT(gb_operation_put_active);
#undef DEFINE_OPERATION_EVENT
DECLARE_EVENT_CLASS(gb_connection,
TP_PROTO(struct gb_connection *connection),
TP_ARGS(connection),
TP_STRUCT__entry(
__field(int, hd_bus_id)
__field(u8, bundle_id)
/* name contains "hd_cport_id/intf_id:cport_id" */
__dynamic_array(char, name, sizeof(connection->name))
__field(enum gb_connection_state, state)
__field(unsigned long, flags)
),
TP_fast_assign(
__entry->hd_bus_id = connection->hd->bus_id;
__entry->bundle_id = connection->bundle ?
connection->bundle->id : BUNDLE_ID_NONE;
memcpy(__get_str(name), connection->name,
sizeof(connection->name));
__entry->state = connection->state;
__entry->flags = connection->flags;
),
TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
__entry->hd_bus_id, __entry->bundle_id, __get_str(name),
(unsigned int)__entry->state, __entry->flags)
);
#define DEFINE_CONNECTION_EVENT(name) \
DEFINE_EVENT(gb_connection, name, \
TP_PROTO(struct gb_connection *connection), \
TP_ARGS(connection))
/*
* Occurs after a new connection is successfully created.
*/
DEFINE_CONNECTION_EVENT(gb_connection_create);
/*
* Occurs when the last reference to a connection has been dropped,
* before its resources are freed.
*/
DEFINE_CONNECTION_EVENT(gb_connection_release);
/*
* Occurs when a new reference to a connection is added, currently
* only when a message over the connection is received.
*/
DEFINE_CONNECTION_EVENT(gb_connection_get);
/*
* Occurs when a reference to a connection is dropped, after a
* received message is handled, or when the connection is
* destroyed.
*/
DEFINE_CONNECTION_EVENT(gb_connection_put);
/*
* Occurs when a request to enable a connection is made, either for
* transmit only, or for both transmit and receive.
*/
DEFINE_CONNECTION_EVENT(gb_connection_enable);
/*
* Occurs when a request to disable a connection is made, either for
* receive only, or for both transmit and receive. Also occurs when
* a request to forcefully disable a connection is made.
*/
DEFINE_CONNECTION_EVENT(gb_connection_disable);
#undef DEFINE_CONNECTION_EVENT
DECLARE_EVENT_CLASS(gb_bundle,
TP_PROTO(struct gb_bundle *bundle),
TP_ARGS(bundle),
TP_STRUCT__entry(
__field(u8, intf_id)
__field(u8, id)
__field(u8, class)
__field(size_t, num_cports)
),
TP_fast_assign(
__entry->intf_id = bundle->intf->interface_id;
__entry->id = bundle->id;
__entry->class = bundle->class;
__entry->num_cports = bundle->num_cports;
),
TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
__entry->intf_id, __entry->id, __entry->class,
__entry->num_cports)
);
#define DEFINE_BUNDLE_EVENT(name) \
DEFINE_EVENT(gb_bundle, name, \
TP_PROTO(struct gb_bundle *bundle), \
TP_ARGS(bundle))
/*
* Occurs after a new bundle is successfully created.
*/
DEFINE_BUNDLE_EVENT(gb_bundle_create);
/*
* Occurs when the last reference to a bundle has been dropped,
* before its resources are freed.
*/
DEFINE_BUNDLE_EVENT(gb_bundle_release);
/*
* Occurs when a bundle is added to an interface when the interface
* is enabled.
*/
DEFINE_BUNDLE_EVENT(gb_bundle_add);
/*
* Occurs when a registered bundle gets destroyed, normally at the
* time an interface is disabled.
*/
DEFINE_BUNDLE_EVENT(gb_bundle_destroy);
#undef DEFINE_BUNDLE_EVENT
DECLARE_EVENT_CLASS(gb_interface,
TP_PROTO(struct gb_interface *intf),
TP_ARGS(intf),
TP_STRUCT__entry(
__field(u8, module_id)
__field(u8, id) /* Interface id */
__field(u8, device_id)
__field(int, disconnected) /* bool */
__field(int, ejected) /* bool */
__field(int, active) /* bool */
__field(int, enabled) /* bool */
__field(int, mode_switch) /* bool */
),
TP_fast_assign(
__entry->module_id = intf->module->module_id;
__entry->id = intf->interface_id;
__entry->device_id = intf->device_id;
__entry->disconnected = intf->disconnected;
__entry->ejected = intf->ejected;
__entry->active = intf->active;
__entry->enabled = intf->enabled;
__entry->mode_switch = intf->mode_switch;
),
TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
__entry->id, __entry->device_id, __entry->module_id,
__entry->disconnected, __entry->ejected, __entry->active,
__entry->enabled, __entry->mode_switch)
);
#define DEFINE_INTERFACE_EVENT(name) \
DEFINE_EVENT(gb_interface, name, \
TP_PROTO(struct gb_interface *intf), \
TP_ARGS(intf))
/*
* Occurs after a new interface is successfully created.
*/
DEFINE_INTERFACE_EVENT(gb_interface_create);
/*
* Occurs after the last reference to an interface has been dropped.
*/
DEFINE_INTERFACE_EVENT(gb_interface_release);
/*
* Occurs after an interface has been registered.
*/
DEFINE_INTERFACE_EVENT(gb_interface_add);
/*
* Occurs when a registered interface gets deregistered.
*/
DEFINE_INTERFACE_EVENT(gb_interface_del);
/*
* Occurs when a registered interface has been successfully
* activated.
*/
DEFINE_INTERFACE_EVENT(gb_interface_activate);
/*
* Occurs when an activated interface is being deactivated.
*/
DEFINE_INTERFACE_EVENT(gb_interface_deactivate);
/*
* Occurs when an interface has been successfully enabled.
*/
DEFINE_INTERFACE_EVENT(gb_interface_enable);
/*
* Occurs when an enabled interface is being disabled.
*/
DEFINE_INTERFACE_EVENT(gb_interface_disable);
#undef DEFINE_INTERFACE_EVENT
DECLARE_EVENT_CLASS(gb_module,
TP_PROTO(struct gb_module *module),
TP_ARGS(module),
TP_STRUCT__entry(
__field(int, hd_bus_id)
__field(u8, module_id)
__field(size_t, num_interfaces)
__field(int, disconnected) /* bool */
),
TP_fast_assign(
__entry->hd_bus_id = module->hd->bus_id;
__entry->module_id = module->module_id;
__entry->num_interfaces = module->num_interfaces;
__entry->disconnected = module->disconnected;
),
TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
__entry->hd_bus_id, __entry->module_id,
__entry->num_interfaces, __entry->disconnected)
);
#define DEFINE_MODULE_EVENT(name) \
DEFINE_EVENT(gb_module, name, \
TP_PROTO(struct gb_module *module), \
TP_ARGS(module))
/*
* Occurs after a new module is successfully created, before
* creating any of its interfaces.
*/
DEFINE_MODULE_EVENT(gb_module_create);
/*
* Occurs after the last reference to a module has been dropped.
*/
DEFINE_MODULE_EVENT(gb_module_release);
/*
* Occurs after a module is successfully created, before registering
* any of its interfaces.
*/
DEFINE_MODULE_EVENT(gb_module_add);
/*
* Occurs when a module is deleted, before deregistering its
* interfaces.
*/
DEFINE_MODULE_EVENT(gb_module_del);
#undef DEFINE_MODULE_EVENT
DECLARE_EVENT_CLASS(gb_host_device,
TP_PROTO(struct gb_host_device *hd),
TP_ARGS(hd),
TP_STRUCT__entry(
__field(int, bus_id)
__field(size_t, num_cports)
__field(size_t, buffer_size_max)
),
TP_fast_assign(
__entry->bus_id = hd->bus_id;
__entry->num_cports = hd->num_cports;
__entry->buffer_size_max = hd->buffer_size_max;
),
TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
__entry->bus_id, __entry->num_cports,
__entry->buffer_size_max)
);
#define DEFINE_HD_EVENT(name) \
DEFINE_EVENT(gb_host_device, name, \
TP_PROTO(struct gb_host_device *hd), \
TP_ARGS(hd))
/*
* Occurs after a new host device is successfully created, before
* its SVC has been set up.
*/
DEFINE_HD_EVENT(gb_hd_create);
/*
* Occurs after the last reference to a host device has been
* dropped.
*/
DEFINE_HD_EVENT(gb_hd_release);
/*
* Occurs after a new host device has been added, after the
* connection to its SVC has been enabled.
*/
DEFINE_HD_EVENT(gb_hd_add);
/*
* Occurs when a host device is being disconnected from the AP USB
* host controller.
*/
DEFINE_HD_EVENT(gb_hd_del);
/*
* Occurs when a host device has passed received data to the Greybus
* core, after it has been determined it is destined for a valid
* CPort.
*/
DEFINE_HD_EVENT(gb_hd_in);
#undef DEFINE_HD_EVENT
/*
* Occurs on a TimeSync synchronization event or a TimeSync ping event.
*/
TRACE_EVENT(gb_timesync_irq,
TP_PROTO(u8 ping, u8 strobe, u8 count, u64 frame_time),
TP_ARGS(ping, strobe, count, frame_time),
TP_STRUCT__entry(
__field(u8, ping)
__field(u8, strobe)
__field(u8, count)
__field(u64, frame_time)
),
TP_fast_assign(
__entry->ping = ping;
__entry->strobe = strobe;
__entry->count = count;
__entry->frame_time = frame_time;
),
TP_printk("%s %d/%d frame-time %llu\n",
__entry->ping ? "ping" : "strobe", __entry->strobe,
__entry->count, __entry->frame_time)
);
#endif /* _TRACE_GREYBUS_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
/*
* TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
*/
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE greybus_trace
#include <trace/define_trace.h>
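/*
 * Usage note (sketch; standard tracefs paths assumed): with tracing
 * enabled in the kernel, these events can be turned on at runtime, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/greybus/gb_message_send/enable
 *	cat /sys/kernel/debug/tracing/trace
 */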

View File

@ -0,0 +1,257 @@
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "greybus.h"
#include "greybus_trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);
static struct ida gb_hd_bus_id_map;
int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
bool async)
{
if (!hd || !hd->driver || !hd->driver->output)
return -EINVAL;
return hd->driver->output(hd, req, size, cmd, async);
}
EXPORT_SYMBOL_GPL(gb_hd_output);
static ssize_t bus_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_host_device *hd = to_gb_host_device(dev);
return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);
static struct attribute *bus_attrs[] = {
&dev_attr_bus_id.attr,
NULL
};
ATTRIBUTE_GROUPS(bus);
int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
{
struct ida *id_map = &hd->cport_id_map;
int ret;
ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
if (ret < 0) {
dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);
void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
struct ida *id_map = &hd->cport_id_map;
ida_simple_remove(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
unsigned long flags)
{
struct ida *id_map = &hd->cport_id_map;
int ida_start, ida_end;
if (hd->driver->cport_allocate)
return hd->driver->cport_allocate(hd, cport_id, flags);
if (cport_id < 0) {
ida_start = 0;
ida_end = hd->num_cports;
} else if (cport_id < hd->num_cports) {
ida_start = cport_id;
ida_end = cport_id + 1;
} else {
dev_err(&hd->dev, "cport %d not available\n", cport_id);
return -EINVAL;
}
return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}
/* Locking: Caller guarantees serialisation */
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
{
if (hd->driver->cport_release) {
hd->driver->cport_release(hd, cport_id);
return;
}
ida_simple_remove(&hd->cport_id_map, cport_id);
}
static void gb_hd_release(struct device *dev)
{
struct gb_host_device *hd = to_gb_host_device(dev);
trace_gb_hd_release(hd);
if (hd->svc)
gb_svc_put(hd->svc);
ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
ida_destroy(&hd->cport_id_map);
kfree(hd);
}
struct device_type greybus_hd_type = {
.name = "greybus_host_device",
.release = gb_hd_release,
};
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
struct device *parent,
size_t buffer_size_max,
size_t num_cports)
{
struct gb_host_device *hd;
int ret;
/*
* Validate that the driver implements all of the callbacks
* so that we don't have to check every time we invoke them.
*/
if ((!driver->message_send) || (!driver->message_cancel)) {
dev_err(parent, "mandatory hd-callbacks missing\n");
return ERR_PTR(-EINVAL);
}
if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
dev_err(parent, "greybus host-device buffers too small\n");
return ERR_PTR(-EINVAL);
}
if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
return ERR_PTR(-EINVAL);
}
/*
* Make sure to never allocate messages larger than what the Greybus
* protocol supports.
*/
if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
dev_warn(parent, "limiting buffer size to %u\n",
GB_OPERATION_MESSAGE_SIZE_MAX);
buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
}
hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
if (!hd)
return ERR_PTR(-ENOMEM);
ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
if (ret < 0) {
kfree(hd);
return ERR_PTR(ret);
}
hd->bus_id = ret;
hd->driver = driver;
INIT_LIST_HEAD(&hd->modules);
INIT_LIST_HEAD(&hd->connections);
ida_init(&hd->cport_id_map);
hd->buffer_size_max = buffer_size_max;
hd->num_cports = num_cports;
hd->dev.parent = parent;
hd->dev.bus = &greybus_bus_type;
hd->dev.type = &greybus_hd_type;
hd->dev.groups = bus_groups;
hd->dev.dma_mask = hd->dev.parent->dma_mask;
device_initialize(&hd->dev);
dev_set_name(&hd->dev, "greybus%d", hd->bus_id);
trace_gb_hd_create(hd);
hd->svc = gb_svc_create(hd);
if (!hd->svc) {
dev_err(&hd->dev, "failed to create svc\n");
put_device(&hd->dev);
return ERR_PTR(-ENOMEM);
}
return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);
int gb_hd_add(struct gb_host_device *hd)
{
int ret;
ret = device_add(&hd->dev);
if (ret)
return ret;
ret = gb_svc_add(hd->svc);
if (ret) {
device_del(&hd->dev);
return ret;
}
trace_gb_hd_add(hd);
return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);
void gb_hd_del(struct gb_host_device *hd)
{
trace_gb_hd_del(hd);
/*
* Tear down the svc and flush any on-going hotplug processing before
* removing the remaining interfaces.
*/
gb_svc_del(hd->svc);
device_del(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_del);
void gb_hd_shutdown(struct gb_host_device *hd)
{
gb_svc_del(hd->svc);
}
EXPORT_SYMBOL_GPL(gb_hd_shutdown);
void gb_hd_put(struct gb_host_device *hd)
{
put_device(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_put);
int __init gb_hd_init(void)
{
ida_init(&gb_hd_bus_id_map);
return 0;
}
void gb_hd_exit(void)
{
ida_destroy(&gb_hd_bus_id_map);
}

View File

@ -0,0 +1,90 @@
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __HD_H
#define __HD_H
struct gb_host_device;
struct gb_message;
struct gb_hd_driver {
size_t hd_priv_size;
int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
unsigned long flags);
void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
unsigned long flags);
int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
u8 phase, unsigned int timeout);
int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
size_t peer_space, unsigned int timeout);
int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
struct gb_message *message, gfp_t gfp_mask);
void (*message_cancel)(struct gb_message *message);
int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
bool async);
int (*timesync_enable)(struct gb_host_device *hd, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk);
int (*timesync_disable)(struct gb_host_device *hd);
int (*timesync_authoritative)(struct gb_host_device *hd,
u64 *frame_time);
int (*timesync_get_last_event)(struct gb_host_device *hd,
u64 *frame_time);
};
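/*
 * Minimal host-driver sketch (illustrative; the "example_*" names are
 * placeholders): gb_hd_create() only insists on message_send() and
 * message_cancel(); every other callback is optional.
 *
 *	static struct gb_hd_driver example_hd_driver = {
 *		.hd_priv_size	= sizeof(struct example_hd_priv),
 *		.message_send	= example_message_send,
 *		.message_cancel	= example_message_cancel,
 *	};
 */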
struct gb_host_device {
struct device dev;
int bus_id;
const struct gb_hd_driver *driver;
struct list_head modules;
struct list_head connections;
struct ida cport_id_map;
/* Number of CPorts supported by the UniPro IP */
size_t num_cports;
/* Host device buffer constraints */
size_t buffer_size_max;
struct gb_svc *svc;
/* Private data for the host driver */
unsigned long hd_priv[0] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
unsigned long flags);
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
struct device *parent,
size_t buffer_size_max,
size_t num_cports);
int gb_hd_add(struct gb_host_device *hd);
void gb_hd_del(struct gb_host_device *hd);
void gb_hd_shutdown(struct gb_host_device *hd);
void gb_hd_put(struct gb_host_device *hd);
int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
bool async);
int gb_hd_init(void);
void gb_hd_exit(void);
#endif /* __HD_H */

View File

@ -0,0 +1,536 @@
/*
* HID class driver for the Greybus.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/bitops.h>
#include <linux/hid.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "greybus.h"
/* Greybus HID device's structure */
struct gb_hid {
struct gb_bundle *bundle;
struct gb_connection *connection;
struct hid_device *hid;
struct gb_hid_desc_response hdesc;
unsigned long flags;
#define GB_HID_STARTED 0x01
#define GB_HID_READ_PENDING 0x04
unsigned int bufsize;
char *inbuf;
};
static DEFINE_MUTEX(gb_hid_open_mutex);
/* Routines to get controller's information over greybus */
/* Operations performed on greybus */
static int gb_hid_get_desc(struct gb_hid *ghid)
{
return gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_DESC, NULL,
0, &ghid->hdesc, sizeof(ghid->hdesc));
}
static int gb_hid_get_report_desc(struct gb_hid *ghid, char *rdesc)
{
int ret;
ret = gb_pm_runtime_get_sync(ghid->bundle);
if (ret)
return ret;
ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT_DESC,
NULL, 0, rdesc,
le16_to_cpu(ghid->hdesc.wReportDescLength));
gb_pm_runtime_put_autosuspend(ghid->bundle);
return ret;
}
static int gb_hid_set_power(struct gb_hid *ghid, int type)
{
int ret;
ret = gb_pm_runtime_get_sync(ghid->bundle);
if (ret)
return ret;
ret = gb_operation_sync(ghid->connection, type, NULL, 0, NULL, 0);
gb_pm_runtime_put_autosuspend(ghid->bundle);
return ret;
}
static int gb_hid_get_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
unsigned char *buf, int len)
{
struct gb_hid_get_report_request request;
int ret;
ret = gb_pm_runtime_get_sync(ghid->bundle);
if (ret)
return ret;
request.report_type = report_type;
request.report_id = report_id;
ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT,
&request, sizeof(request), buf, len);
gb_pm_runtime_put_autosuspend(ghid->bundle);
return ret;
}
static int gb_hid_set_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
unsigned char *buf, int len)
{
struct gb_hid_set_report_request *request;
struct gb_operation *operation;
int ret, size = sizeof(*request) + len - 1;
ret = gb_pm_runtime_get_sync(ghid->bundle);
if (ret)
return ret;
operation = gb_operation_create(ghid->connection,
GB_HID_TYPE_SET_REPORT, size, 0,
GFP_KERNEL);
if (!operation) {
gb_pm_runtime_put_autosuspend(ghid->bundle);
return -ENOMEM;
}
request = operation->request->payload;
request->report_type = report_type;
request->report_id = report_id;
memcpy(request->report, buf, len);
ret = gb_operation_request_send_sync(operation);
if (ret) {
dev_err(&operation->connection->bundle->dev,
"failed to set report: %d\n", ret);
} else {
ret = len;
}
gb_operation_put(operation);
gb_pm_runtime_put_autosuspend(ghid->bundle);
return ret;
}
static int gb_hid_request_handler(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct gb_hid *ghid = gb_connection_get_data(connection);
struct gb_hid_input_report_request *request = op->request->payload;
if (op->type != GB_HID_TYPE_IRQ_EVENT) {
dev_err(&connection->bundle->dev,
"unsupported unsolicited request\n");
return -EINVAL;
}
if (test_bit(GB_HID_STARTED, &ghid->flags))
hid_input_report(ghid->hid, HID_INPUT_REPORT,
request->report, op->request->payload_size, 1);
return 0;
}
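/*
 * Report length in bytes: the report's bit count rounded up to whole
 * bytes, plus one byte for the report id when reports of this type are
 * numbered on this device.
 */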
static int gb_hid_report_len(struct hid_report *report)
{
return ((report->size - 1) >> 3) + 1 +
report->device->report_enum[report->type].numbered;
}
static void gb_hid_find_max_report(struct hid_device *hid, unsigned int type,
unsigned int *max)
{
struct hid_report *report;
unsigned int size;
list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
size = gb_hid_report_len(report);
if (*max < size)
*max = size;
}
}
static void gb_hid_free_buffers(struct gb_hid *ghid)
{
kfree(ghid->inbuf);
ghid->inbuf = NULL;
ghid->bufsize = 0;
}
static int gb_hid_alloc_buffers(struct gb_hid *ghid, size_t bufsize)
{
ghid->inbuf = kzalloc(bufsize, GFP_KERNEL);
if (!ghid->inbuf)
return -ENOMEM;
ghid->bufsize = bufsize;
return 0;
}
/* Routines dealing with reports */
static void gb_hid_init_report(struct gb_hid *ghid, struct hid_report *report)
{
unsigned int size;
size = gb_hid_report_len(report);
if (gb_hid_get_report(ghid, report->type, report->id, ghid->inbuf,
size))
return;
/*
* hid->driver_lock is held as we are in probe function,
* we just need to setup the input fields, so using
* hid_report_raw_event is safe.
*/
hid_report_raw_event(ghid->hid, report->type, ghid->inbuf, size, 1);
}
static void gb_hid_init_reports(struct gb_hid *ghid)
{
struct hid_device *hid = ghid->hid;
struct hid_report *report;
list_for_each_entry(report,
&hid->report_enum[HID_INPUT_REPORT].report_list, list)
gb_hid_init_report(ghid, report);
list_for_each_entry(report,
&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
gb_hid_init_report(ghid, report);
}
static int __gb_hid_get_raw_report(struct hid_device *hid,
unsigned char report_number, __u8 *buf, size_t count,
unsigned char report_type)
{
struct gb_hid *ghid = hid->driver_data;
int ret;
if (report_type == HID_OUTPUT_REPORT)
return -EINVAL;
ret = gb_hid_get_report(ghid, report_type, report_number, buf, count);
if (!ret)
ret = count;
return ret;
}
static int __gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
size_t len, unsigned char report_type)
{
struct gb_hid *ghid = hid->driver_data;
int report_id = buf[0];
int ret;
if (report_type == HID_INPUT_REPORT)
return -EINVAL;
if (report_id) {
buf++;
len--;
}
ret = gb_hid_set_report(ghid, report_type, report_id, buf, len);
if (report_id && ret >= 0)
ret++; /* add report_id to the number of transferred bytes */
return ret;
}
static int gb_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
__u8 *buf, size_t len, unsigned char rtype,
int reqtype)
{
switch (reqtype) {
case HID_REQ_GET_REPORT:
return __gb_hid_get_raw_report(hid, reportnum, buf, len, rtype);
case HID_REQ_SET_REPORT:
if (buf[0] != reportnum)
return -EINVAL;
return __gb_hid_output_raw_report(hid, buf, len, rtype);
default:
return -EIO;
}
}
/* HID Callbacks */
static int gb_hid_parse(struct hid_device *hid)
{
struct gb_hid *ghid = hid->driver_data;
unsigned int rsize;
char *rdesc;
int ret;
rsize = le16_to_cpu(ghid->hdesc.wReportDescLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
return -EINVAL;
}
rdesc = kzalloc(rsize, GFP_KERNEL);
if (!rdesc) {
dbg_hid("couldn't allocate rdesc memory\n");
return -ENOMEM;
}
ret = gb_hid_get_report_desc(ghid, rdesc);
if (ret) {
hid_err(hid, "reading report descriptor failed\n");
goto free_rdesc;
}
ret = hid_parse_report(hid, rdesc, rsize);
if (ret)
dbg_hid("parsing report descriptor failed\n");
free_rdesc:
kfree(rdesc);
return ret;
}
static int gb_hid_start(struct hid_device *hid)
{
struct gb_hid *ghid = hid->driver_data;
unsigned int bufsize = HID_MIN_BUFFER_SIZE;
int ret;
gb_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
gb_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
gb_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
if (bufsize > HID_MAX_BUFFER_SIZE)
bufsize = HID_MAX_BUFFER_SIZE;
ret = gb_hid_alloc_buffers(ghid, bufsize);
if (ret)
return ret;
if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
gb_hid_init_reports(ghid);
return 0;
}
static void gb_hid_stop(struct hid_device *hid)
{
struct gb_hid *ghid = hid->driver_data;
gb_hid_free_buffers(ghid);
}
static int gb_hid_open(struct hid_device *hid)
{
struct gb_hid *ghid = hid->driver_data;
int ret = 0;
mutex_lock(&gb_hid_open_mutex);
if (!hid->open++) {
ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
if (ret < 0)
hid->open--;
else
set_bit(GB_HID_STARTED, &ghid->flags);
}
mutex_unlock(&gb_hid_open_mutex);
return ret;
}
static void gb_hid_close(struct hid_device *hid)
{
struct gb_hid *ghid = hid->driver_data;
int ret;
/*
* Protect hid->open to make sure we don't restart data acquisition
* due to a resumption we no longer care about.
*/
mutex_lock(&gb_hid_open_mutex);
if (!--hid->open) {
clear_bit(GB_HID_STARTED, &ghid->flags);
/* Save some power */
ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
if (ret)
dev_err(&ghid->connection->bundle->dev,
"failed to power off (%d)\n", ret);
}
mutex_unlock(&gb_hid_open_mutex);
}
static int gb_hid_power(struct hid_device *hid, int lvl)
{
struct gb_hid *ghid = hid->driver_data;
switch (lvl) {
case PM_HINT_FULLON:
return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
case PM_HINT_NORMAL:
return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
}
return 0;
}
/* HID structure to pass callbacks */
static struct hid_ll_driver gb_hid_ll_driver = {
.parse = gb_hid_parse,
.start = gb_hid_start,
.stop = gb_hid_stop,
.open = gb_hid_open,
.close = gb_hid_close,
.power = gb_hid_power,
.raw_request = gb_hid_raw_request,
};
static int gb_hid_init(struct gb_hid *ghid)
{
struct hid_device *hid = ghid->hid;
int ret;
ret = gb_hid_get_desc(ghid);
if (ret)
return ret;
hid->version = le16_to_cpu(ghid->hdesc.bcdHID);
hid->vendor = le16_to_cpu(ghid->hdesc.wVendorID);
hid->product = le16_to_cpu(ghid->hdesc.wProductID);
hid->country = ghid->hdesc.bCountryCode;
hid->driver_data = ghid;
hid->ll_driver = &gb_hid_ll_driver;
hid->dev.parent = &ghid->connection->bundle->dev;
// hid->bus = BUS_GREYBUS; /* Need a bustype for GREYBUS in <linux/input.h> */
/* Set HID device's name */
snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
dev_name(&ghid->connection->bundle->dev),
hid->vendor, hid->product);
return 0;
}
static int gb_hid_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct greybus_descriptor_cport *cport_desc;
struct gb_connection *connection;
struct hid_device *hid;
struct gb_hid *ghid;
int ret;
if (bundle->num_cports != 1)
return -ENODEV;
cport_desc = &bundle->cport_desc[0];
if (cport_desc->protocol_id != GREYBUS_PROTOCOL_HID)
return -ENODEV;
ghid = kzalloc(sizeof(*ghid), GFP_KERNEL);
if (!ghid)
return -ENOMEM;
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
gb_hid_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto err_free_ghid;
}
gb_connection_set_data(connection, ghid);
ghid->connection = connection;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
ret = PTR_ERR(hid);
goto err_connection_destroy;
}
ghid->hid = hid;
ghid->bundle = bundle;
greybus_set_drvdata(bundle, ghid);
ret = gb_connection_enable(connection);
if (ret)
goto err_destroy_hid;
ret = gb_hid_init(ghid);
if (ret)
goto err_connection_disable;
ret = hid_add_device(hid);
if (ret) {
hid_err(hid, "can't add hid device: %d\n", ret);
goto err_connection_disable;
}
gb_pm_runtime_put_autosuspend(bundle);
return 0;
err_connection_disable:
gb_connection_disable(connection);
err_destroy_hid:
hid_destroy_device(hid);
err_connection_destroy:
gb_connection_destroy(connection);
err_free_ghid:
kfree(ghid);
return ret;
}
static void gb_hid_disconnect(struct gb_bundle *bundle)
{
struct gb_hid *ghid = greybus_get_drvdata(bundle);
if (gb_pm_runtime_get_sync(bundle))
gb_pm_runtime_get_noresume(bundle);
hid_destroy_device(ghid->hid);
gb_connection_disable(ghid->connection);
gb_connection_destroy(ghid->connection);
kfree(ghid);
}
static const struct greybus_bundle_id gb_hid_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_HID) },
{ }
};
MODULE_DEVICE_TABLE(greybus, gb_hid_id_table);
static struct greybus_driver gb_hid_driver = {
.name = "hid",
.probe = gb_hid_probe,
.disconnect = gb_hid_disconnect,
.id_table = gb_hid_id_table,
};
module_greybus_driver(gb_hid_driver);
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,343 @@
/*
* I2C bridge driver for the Greybus "generic" I2C module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include "greybus.h"
#include "gbphy.h"
struct gb_i2c_device {
struct gb_connection *connection;
struct gbphy_device *gbphy_dev;
u32 functionality;
struct i2c_adapter adapter;
};
/*
* Map Greybus i2c functionality bits into Linux ones
*/
static u32 gb_i2c_functionality_map(u32 gb_i2c_functionality)
{
return gb_i2c_functionality; /* All bits the same for now */
}
static int gb_i2c_functionality_operation(struct gb_i2c_device *gb_i2c_dev)
{
struct gb_i2c_functionality_response response;
u32 functionality;
int ret;
ret = gb_operation_sync(gb_i2c_dev->connection,
GB_I2C_TYPE_FUNCTIONALITY,
NULL, 0, &response, sizeof(response));
if (ret)
return ret;
functionality = le32_to_cpu(response.functionality);
gb_i2c_dev->functionality = gb_i2c_functionality_map(functionality);
return 0;
}
/*
* Map Linux i2c_msg flags into Greybus i2c transfer op flags.
*/
static u16 gb_i2c_transfer_op_flags_map(u16 flags)
{
return flags; /* All flags the same for now */
}
static void
gb_i2c_fill_transfer_op(struct gb_i2c_transfer_op *op, struct i2c_msg *msg)
{
u16 flags = gb_i2c_transfer_op_flags_map(msg->flags);
op->addr = cpu_to_le16(msg->addr);
op->flags = cpu_to_le16(flags);
op->size = cpu_to_le16(msg->len);
}
static struct gb_operation *
gb_i2c_operation_create(struct gb_connection *connection,
struct i2c_msg *msgs, u32 msg_count)
{
struct gb_i2c_device *gb_i2c_dev = gb_connection_get_data(connection);
struct gb_i2c_transfer_request *request;
struct gb_operation *operation;
struct gb_i2c_transfer_op *op;
struct i2c_msg *msg;
u32 data_out_size = 0;
u32 data_in_size = 0;
size_t request_size;
void *data;
u16 op_count;
u32 i;
if (msg_count > (u32)U16_MAX) {
dev_err(&gb_i2c_dev->gbphy_dev->dev, "msg_count (%u) too big\n",
msg_count);
return NULL;
}
op_count = (u16)msg_count;
/*
* In addition to space for all message descriptors we need
* to have enough to hold all outbound message data.
*/
msg = msgs;
for (i = 0; i < msg_count; i++, msg++)
if (msg->flags & I2C_M_RD)
data_in_size += (u32)msg->len;
else
data_out_size += (u32)msg->len;
request_size = sizeof(*request);
request_size += msg_count * sizeof(*op);
request_size += data_out_size;
/* Response consists only of incoming data */
operation = gb_operation_create(connection, GB_I2C_TYPE_TRANSFER,
request_size, data_in_size, GFP_KERNEL);
if (!operation)
return NULL;
request = operation->request->payload;
request->op_count = cpu_to_le16(op_count);
/* Fill in the ops array */
op = &request->ops[0];
msg = msgs;
for (i = 0; i < msg_count; i++)
gb_i2c_fill_transfer_op(op++, msg++);
if (!data_out_size)
return operation;
/* Copy over the outgoing data; it starts after the last op */
data = op;
msg = msgs;
for (i = 0; i < msg_count; i++) {
if (!(msg->flags & I2C_M_RD)) {
memcpy(data, msg->buf, msg->len);
data += msg->len;
}
msg++;
}
return operation;
}
static void gb_i2c_decode_response(struct i2c_msg *msgs, u32 msg_count,
struct gb_i2c_transfer_response *response)
{
struct i2c_msg *msg = msgs;
u8 *data;
u32 i;
if (!response)
return;
data = response->data;
for (i = 0; i < msg_count; i++) {
if (msg->flags & I2C_M_RD) {
memcpy(msg->buf, data, msg->len);
data += msg->len;
}
msg++;
}
}
/*
* Some i2c transfer errors (-EAGAIN, -ENODEV) are expected in
* normal operation and are not logged as failures.
*/
static bool gb_i2c_expected_transfer_error(int errno)
{
return errno == -EAGAIN || errno == -ENODEV;
}
static int gb_i2c_transfer_operation(struct gb_i2c_device *gb_i2c_dev,
struct i2c_msg *msgs, u32 msg_count)
{
struct gb_connection *connection = gb_i2c_dev->connection;
struct device *dev = &gb_i2c_dev->gbphy_dev->dev;
struct gb_operation *operation;
int ret;
operation = gb_i2c_operation_create(connection, msgs, msg_count);
if (!operation)
return -ENOMEM;
ret = gbphy_runtime_get_sync(gb_i2c_dev->gbphy_dev);
if (ret)
goto exit_operation_put;
ret = gb_operation_request_send_sync(operation);
if (!ret) {
struct gb_i2c_transfer_response *response;
response = operation->response->payload;
gb_i2c_decode_response(msgs, msg_count, response);
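/* Per i2c convention, success returns the number of messages */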
ret = msg_count;
} else if (!gb_i2c_expected_transfer_error(ret)) {
dev_err(dev, "transfer operation failed (%d)\n", ret);
}
gbphy_runtime_put_autosuspend(gb_i2c_dev->gbphy_dev);
exit_operation_put:
gb_operation_put(operation);
return ret;
}
static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
int msg_count)
{
struct gb_i2c_device *gb_i2c_dev;
gb_i2c_dev = i2c_get_adapdata(adap);
return gb_i2c_transfer_operation(gb_i2c_dev, msgs, msg_count);
}
#if 0
/* Later */
static int gb_i2c_smbus_xfer(struct i2c_adapter *adap,
u16 addr, unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
struct gb_i2c_device *gb_i2c_dev;
gb_i2c_dev = i2c_get_adapdata(adap);
return 0;
}
#endif
static u32 gb_i2c_functionality(struct i2c_adapter *adap)
{
struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap);
return gb_i2c_dev->functionality;
}
static const struct i2c_algorithm gb_i2c_algorithm = {
.master_xfer = gb_i2c_master_xfer,
/* .smbus_xfer = gb_i2c_smbus_xfer, */
.functionality = gb_i2c_functionality,
};
/*
* Do initial setup of the i2c device. This includes verifying we
* can support it (based on the protocol version it advertises).
* If that's OK, we get and cache its functionality bits.
*
* Note: gb_i2c_dev->connection is assumed to be valid.
*/
static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
{
/* Assume the functionality never changes, just get it once */
return gb_i2c_functionality_operation(gb_i2c_dev);
}
static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
struct gb_i2c_device *gb_i2c_dev;
struct i2c_adapter *adapter;
int ret;
gb_i2c_dev = kzalloc(sizeof(*gb_i2c_dev), GFP_KERNEL);
if (!gb_i2c_dev)
return -ENOMEM;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
NULL);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto exit_i2cdev_free;
}
gb_i2c_dev->connection = connection;
gb_connection_set_data(connection, gb_i2c_dev);
gb_i2c_dev->gbphy_dev = gbphy_dev;
gb_gbphy_set_data(gbphy_dev, gb_i2c_dev);
ret = gb_connection_enable(connection);
if (ret)
goto exit_connection_destroy;
ret = gb_i2c_device_setup(gb_i2c_dev);
if (ret)
goto exit_connection_disable;
/* Looks good; set up our i2c adapter */
adapter = &gb_i2c_dev->adapter;
adapter->owner = THIS_MODULE;
adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adapter->algo = &gb_i2c_algorithm;
/* adapter->algo_data = what? */
adapter->dev.parent = &gbphy_dev->dev;
snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter");
i2c_set_adapdata(adapter, gb_i2c_dev);
ret = i2c_add_adapter(adapter);
if (ret)
goto exit_connection_disable;
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
exit_i2cdev_free:
kfree(gb_i2c_dev);
return ret;
}
static void gb_i2c_remove(struct gbphy_device *gbphy_dev)
{
struct gb_i2c_device *gb_i2c_dev = gb_gbphy_get_data(gbphy_dev);
struct gb_connection *connection = gb_i2c_dev->connection;
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
i2c_del_adapter(&gb_i2c_dev->adapter);
gb_connection_disable(connection);
gb_connection_destroy(connection);
kfree(gb_i2c_dev);
}
static const struct gbphy_device_id gb_i2c_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_I2C) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_i2c_id_table);
static struct gbphy_driver i2c_driver = {
.name = "i2c",
.probe = gb_i2c_probe,
.remove = gb_i2c_remove,
.id_table = gb_i2c_id_table,
};
module_gbphy_driver(i2c_driver);
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large

View File

@ -0,0 +1,88 @@
/*
* Greybus Interface Block code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __INTERFACE_H
#define __INTERFACE_H
enum gb_interface_type {
GB_INTERFACE_TYPE_INVALID = 0,
GB_INTERFACE_TYPE_UNKNOWN,
GB_INTERFACE_TYPE_DUMMY,
GB_INTERFACE_TYPE_UNIPRO,
GB_INTERFACE_TYPE_GREYBUS,
};
#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
struct gb_interface {
struct device dev;
struct gb_control *control;
struct list_head bundles;
struct list_head module_node;
struct list_head manifest_descs;
u8 interface_id; /* Physical location within the Endo */
u8 device_id;
u8 features; /* Feature flags set in the manifest */
enum gb_interface_type type;
u32 ddbl1_manufacturer_id;
u32 ddbl1_product_id;
u32 vendor_id;
u32 product_id;
u64 serial_number;
struct gb_host_device *hd;
struct gb_module *module;
unsigned long quirks;
struct mutex mutex;
bool disconnected;
bool ejected;
bool removed;
bool active;
bool enabled;
bool mode_switch;
bool dme_read;
struct work_struct mode_switch_work;
struct completion mode_switch_completion;
};
#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
struct gb_interface *gb_interface_create(struct gb_module *module,
u8 interface_id);
int gb_interface_activate(struct gb_interface *intf);
void gb_interface_deactivate(struct gb_interface *intf);
int gb_interface_enable(struct gb_interface *intf);
void gb_interface_disable(struct gb_interface *intf);
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_interface_timesync_authoritative(struct gb_interface *intf,
u64 *frame_time);
int gb_interface_timesync_disable(struct gb_interface *intf);
int gb_interface_add(struct gb_interface *intf);
void gb_interface_del(struct gb_interface *intf);
void gb_interface_put(struct gb_interface *intf);
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
u32 mailbox);
int gb_interface_request_mode_switch(struct gb_interface *intf);
#endif /* __INTERFACE_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,132 @@
/*
* Greybus driver for the log protocol
*
* Copyright 2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include "greybus.h"
struct gb_log {
struct gb_connection *connection;
};
static int gb_log_request_handler(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct device *dev = &connection->bundle->dev;
struct gb_log_send_log_request *receive;
u16 len;
if (op->type != GB_LOG_TYPE_SEND_LOG) {
dev_err(dev, "unknown request type 0x%02x\n", op->type);
return -EINVAL;
}
/* Verify size of payload */
if (op->request->payload_size < sizeof(*receive)) {
dev_err(dev, "log request too small (%zu < %zu)\n",
op->request->payload_size, sizeof(*receive));
return -EINVAL;
}
receive = op->request->payload;
len = le16_to_cpu(receive->len);
if (len != (int)(op->request->payload_size - sizeof(*receive))) {
dev_err(dev, "log request wrong size %d vs %d\n", len,
(int)(op->request->payload_size - sizeof(*receive)));
return -EINVAL;
}
if (len == 0) {
dev_err(dev, "log request of 0 bytes?\n");
return -EINVAL;
}
if (len > GB_LOG_MAX_LEN) {
dev_err(dev, "log request too big: %d\n", len);
return -EINVAL;
}
/* Ensure the buffer is 0 terminated */
receive->msg[len - 1] = '\0';
/*
 * Print with dev_dbg() so that it can be easily turned off using
 * dynamic debugging (and prevent any DoS).
 */
dev_dbg(dev, "%s", receive->msg);
return 0;
}
static int gb_log_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct greybus_descriptor_cport *cport_desc;
struct gb_connection *connection;
struct gb_log *log;
int retval;
if (bundle->num_cports != 1)
return -ENODEV;
cport_desc = &bundle->cport_desc[0];
if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOG)
return -ENODEV;
log = kzalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return -ENOMEM;
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
gb_log_request_handler);
if (IS_ERR(connection)) {
retval = PTR_ERR(connection);
goto error_free;
}
log->connection = connection;
greybus_set_drvdata(bundle, log);
retval = gb_connection_enable(connection);
if (retval)
goto error_connection_destroy;
return 0;
error_connection_destroy:
gb_connection_destroy(connection);
error_free:
kfree(log);
return retval;
}
static void gb_log_disconnect(struct gb_bundle *bundle)
{
struct gb_log *log = greybus_get_drvdata(bundle);
struct gb_connection *connection = log->connection;
gb_connection_disable(connection);
gb_connection_destroy(connection);
kfree(log);
}
static const struct greybus_bundle_id gb_log_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOG) },
{ }
};
MODULE_DEVICE_TABLE(greybus, gb_log_id_table);
static struct greybus_driver gb_log_driver = {
.name = "log",
.probe = gb_log_probe,
.disconnect = gb_log_disconnect,
.id_table = gb_log_id_table,
};
module_greybus_driver(gb_log_driver);
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large

View File

@ -0,0 +1,535 @@
/*
* Greybus manifest parsing
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
static const char *get_descriptor_type_string(u8 type)
{
switch (type) {
case GREYBUS_TYPE_INVALID:
return "invalid";
case GREYBUS_TYPE_STRING:
return "string";
case GREYBUS_TYPE_INTERFACE:
return "interface";
case GREYBUS_TYPE_CPORT:
return "cport";
case GREYBUS_TYPE_BUNDLE:
return "bundle";
default:
WARN_ON(1);
return "unknown";
}
}
/*
* We scan the manifest once to identify where all the descriptors
* are. The result is a list of these manifest_desc structures. We
* then pick through them for what we're looking for (starting with
* the interface descriptor). As each is processed we remove it from
* the list. When we're done the list should (probably) be empty.
*/
struct manifest_desc {
struct list_head links;
size_t size;
void *data;
enum greybus_descriptor_type type;
};
static void release_manifest_descriptor(struct manifest_desc *descriptor)
{
list_del(&descriptor->links);
kfree(descriptor);
}
static void release_manifest_descriptors(struct gb_interface *intf)
{
struct manifest_desc *descriptor;
struct manifest_desc *next;
list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
release_manifest_descriptor(descriptor);
}
static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
{
struct manifest_desc *desc, *tmp;
struct greybus_descriptor_cport *desc_cport;
list_for_each_entry_safe(desc, tmp, head, links) {
desc_cport = desc->data;
if (desc->type != GREYBUS_TYPE_CPORT)
continue;
if (desc_cport->bundle == bundle_id)
release_manifest_descriptor(desc);
}
}
static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
{
struct manifest_desc *descriptor;
struct manifest_desc *next;
list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
if (descriptor->type == GREYBUS_TYPE_BUNDLE)
return descriptor;
return NULL;
}
/*
* Validate the given descriptor. Its reported size must fit within
* the number of bytes remaining, and it must have a recognized
* type. Check that the reported size is at least as big as what
* we expect to see. (It could be bigger, perhaps for a new version
* of the format.)
*
* Returns the (non-zero) number of bytes consumed by the descriptor,
* or a negative errno.
*/
static int identify_descriptor(struct gb_interface *intf,
struct greybus_descriptor *desc, size_t size)
{
struct greybus_descriptor_header *desc_header = &desc->header;
struct manifest_desc *descriptor;
size_t desc_size;
size_t expected_size;
if (size < sizeof(*desc_header)) {
dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
size, sizeof(*desc_header));
return -EINVAL; /* Must at least have header */
}
desc_size = le16_to_cpu(desc_header->size);
if (desc_size > size) {
dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
desc_size, size);
return -EINVAL;
}
/* Descriptor needs to at least have a header */
expected_size = sizeof(*desc_header);
switch (desc_header->type) {
case GREYBUS_TYPE_STRING:
expected_size += sizeof(struct greybus_descriptor_string);
expected_size += desc->string.length;
/* String descriptors are padded to 4 byte boundaries */
expected_size = ALIGN(expected_size, 4);
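/*
 * Example (assuming the 4-byte descriptor header and 2-byte
 * string descriptor of greybus_manifest.h): a 5-byte string
 * gives 4 + 2 + 5 = 11 bytes, padded up to 12.
 */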
break;
case GREYBUS_TYPE_INTERFACE:
expected_size += sizeof(struct greybus_descriptor_interface);
break;
case GREYBUS_TYPE_BUNDLE:
expected_size += sizeof(struct greybus_descriptor_bundle);
break;
case GREYBUS_TYPE_CPORT:
expected_size += sizeof(struct greybus_descriptor_cport);
break;
case GREYBUS_TYPE_INVALID:
default:
dev_err(&intf->dev, "invalid descriptor type (%u)\n",
desc_header->type);
return -EINVAL;
}
if (desc_size < expected_size) {
dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
get_descriptor_type_string(desc_header->type),
desc_size, expected_size);
return -EINVAL;
}
/* Descriptor bigger than what we expect */
if (desc_size > expected_size) {
dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
get_descriptor_type_string(desc_header->type),
expected_size, desc_size);
}
descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
if (!descriptor)
return -ENOMEM;
descriptor->size = desc_size;
descriptor->data = (char *)desc + sizeof(*desc_header);
descriptor->type = desc_header->type;
list_add_tail(&descriptor->links, &intf->manifest_descs);
/* desc_size is positive and is known to fit in a signed int */
return desc_size;
}
/*
* Find the string descriptor having the given id, validate it, and
* allocate a duplicate copy of it. The duplicate has an extra byte
* which guarantees the returned string is NUL-terminated.
*
* String index 0 is valid (it represents "no string"), and for
* that a null pointer is returned.
*
* Otherwise returns a pointer to a newly-allocated copy of the
* descriptor string, or an error-coded pointer on failure.
*/
static char *gb_string_get(struct gb_interface *intf, u8 string_id)
{
struct greybus_descriptor_string *desc_string;
struct manifest_desc *descriptor;
bool found = false;
char *string;
/* A zero string id means no string (but no error) */
if (!string_id)
return NULL;
list_for_each_entry(descriptor, &intf->manifest_descs, links) {
if (descriptor->type != GREYBUS_TYPE_STRING)
continue;
desc_string = descriptor->data;
if (desc_string->id == string_id) {
found = true;
break;
}
}
if (!found)
return ERR_PTR(-ENOENT);
/* Allocate an extra byte so we can guarantee it's NUL-terminated */
string = kmemdup(&desc_string->string, desc_string->length + 1,
GFP_KERNEL);
if (!string)
return ERR_PTR(-ENOMEM);
string[desc_string->length] = '\0';
/* Ok we've used this string, so we're done with it */
release_manifest_descriptor(descriptor);
return string;
}
/*
* Find cport descriptors in the manifest associated with the given
* bundle, and set up data structures for the functions that use
* them. Returns the number of cports set up for the bundle, or 0
* if there is an error.
*/
static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
{
struct gb_interface *intf = bundle->intf;
struct greybus_descriptor_cport *desc_cport;
struct manifest_desc *desc, *next, *tmp;
LIST_HEAD(list);
u8 bundle_id = bundle->id;
u16 cport_id;
u32 count = 0;
int i;
/* Set up all cport descriptors associated with this bundle */
list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
if (desc->type != GREYBUS_TYPE_CPORT)
continue;
desc_cport = desc->data;
if (desc_cport->bundle != bundle_id)
continue;
cport_id = le16_to_cpu(desc_cport->id);
if (cport_id > CPORT_ID_MAX)
goto exit;
/* Nothing else should have its cport_id as control cport id */
if (cport_id == GB_CONTROL_CPORT_ID) {
dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
cport_id);
goto exit;
}
/*
* Found one, move it to our temporary list after checking for
* duplicates.
*/
list_for_each_entry(tmp, &list, links) {
desc_cport = tmp->data;
if (cport_id == le16_to_cpu(desc_cport->id)) {
dev_err(&bundle->dev,
"duplicate CPort %u found\n",
cport_id);
goto exit;
}
}
list_move_tail(&desc->links, &list);
count++;
}
if (!count)
return 0;
bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
GFP_KERNEL);
if (!bundle->cport_desc)
goto exit;
bundle->num_cports = count;
i = 0;
list_for_each_entry_safe(desc, next, &list, links) {
desc_cport = desc->data;
memcpy(&bundle->cport_desc[i++], desc_cport,
sizeof(*desc_cport));
/* Release the cport descriptor */
release_manifest_descriptor(desc);
}
return count;
exit:
release_cport_descriptors(&list, bundle_id);
/*
* Free all cports for this bundle to avoid 'excess descriptors'
* warnings.
*/
release_cport_descriptors(&intf->manifest_descs, bundle_id);
return 0; /* Error; count should also be 0 */
}
/*
* Find bundle descriptors in the manifest and set up their data
* structures. Returns the number of bundles set up for the
* given interface.
*/
static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
{
struct manifest_desc *desc;
struct gb_bundle *bundle;
struct gb_bundle *bundle_next;
u32 count = 0;
u8 bundle_id;
u8 class;
while ((desc = get_next_bundle_desc(intf))) {
struct greybus_descriptor_bundle *desc_bundle;
/* Found one. Set up its bundle structure */
desc_bundle = desc->data;
bundle_id = desc_bundle->id;
class = desc_bundle->class;
/* Done with this bundle descriptor */
release_manifest_descriptor(desc);
/* Ignore any legacy control bundles */
if (bundle_id == GB_CONTROL_BUNDLE_ID) {
dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
__func__);
release_cport_descriptors(&intf->manifest_descs,
bundle_id);
continue;
}
/* Nothing else should have its class set to control class */
if (class == GREYBUS_CLASS_CONTROL) {
dev_err(&intf->dev,
"bundle %u cannot use control class\n",
bundle_id);
goto cleanup;
}
bundle = gb_bundle_create(intf, bundle_id, class);
if (!bundle)
goto cleanup;
/*
* Now go set up this bundle's functions and cports.
*
* A 'bundle' represents a device in greybus. It may require
* multiple cports for its functioning. If we fail to set up any
* cport of a bundle, we had better reject the complete bundle, as
* the device may not be able to function properly then.
*
* But failing to set up a cport of bundle X doesn't mean that
* the device corresponding to bundle Y will not work properly.
* Bundles should be treated as separate, independent devices.
*
* While parsing the manifest for an interface, treat bundles as
* separate entities and don't reject the entire interface and its
* bundles on failure to initialize a cport. But make sure the
* bundle which needs the cport gets destroyed properly.
*/
if (!gb_manifest_parse_cports(bundle)) {
gb_bundle_destroy(bundle);
continue;
}
count++;
}
return count;
cleanup:
/* An error occurred; undo any changes we've made */
list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
gb_bundle_destroy(bundle);
count--;
}
return 0; /* Error; count should also be 0 */
}
static bool gb_manifest_parse_interface(struct gb_interface *intf,
struct manifest_desc *interface_desc)
{
struct greybus_descriptor_interface *desc_intf = interface_desc->data;
struct gb_control *control = intf->control;
char *str;
/* Handle the strings first--they can fail */
str = gb_string_get(intf, desc_intf->vendor_stringid);
if (IS_ERR(str))
return false;
control->vendor_string = str;
str = gb_string_get(intf, desc_intf->product_stringid);
if (IS_ERR(str))
goto out_free_vendor_string;
control->product_string = str;
/* Assign feature flags communicated via manifest */
intf->features = desc_intf->features;
/* Release the interface descriptor, now that we're done with it */
release_manifest_descriptor(interface_desc);
/* An interface must have at least one bundle descriptor */
if (!gb_manifest_parse_bundles(intf)) {
dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
goto out_err;
}
return true;
out_err:
kfree(control->product_string);
control->product_string = NULL;
out_free_vendor_string:
kfree(control->vendor_string);
control->vendor_string = NULL;
return false;
}
/*
* Parse a buffer containing an interface manifest.
*
* If we find anything wrong with the content/format of the buffer
* we reject it.
*
* The first requirement is that the manifest's version is
* one we can parse.
*
* We make an initial pass through the buffer and identify all of
* the descriptors it contains, keeping track of each one's type
* and of the location and size of its data in the buffer.
*
* Next we scan the descriptors, looking for an interface descriptor;
* there must be exactly one of those. When found, we record the
* information it contains, and then remove that descriptor (and any
* string descriptors it refers to) from further consideration.
*
* After that we look for the interface's bundles--there must be at
* least one of those.
*
* Returns true if parsing was successful, false otherwise.
*/
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
{
struct greybus_manifest *manifest;
struct greybus_manifest_header *header;
struct greybus_descriptor *desc;
struct manifest_desc *descriptor;
struct manifest_desc *interface_desc = NULL;
u16 manifest_size;
u32 found = 0;
bool result;
/* Manifest descriptor list should be empty here */
if (WARN_ON(!list_empty(&intf->manifest_descs)))
return false;
/* we have to have at _least_ the manifest header */
if (size < sizeof(*header)) {
dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
size, sizeof(*header));
return false;
}
/* Make sure the size is right */
manifest = data;
header = &manifest->header;
manifest_size = le16_to_cpu(header->size);
if (manifest_size != size) {
dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
size, manifest_size);
return false;
}
/* Validate major/minor number */
if (header->version_major > GREYBUS_VERSION_MAJOR) {
dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
header->version_major, header->version_minor,
GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
return false;
}
/* OK, find all the descriptors */
desc = manifest->descriptors;
size -= sizeof(*header);
while (size) {
int desc_size;
desc_size = identify_descriptor(intf, desc, size);
if (desc_size < 0) {
result = false;
goto out;
}
desc = (struct greybus_descriptor *)((char *)desc + desc_size);
size -= desc_size;
}
/* There must be a single interface descriptor */
list_for_each_entry(descriptor, &intf->manifest_descs, links) {
if (descriptor->type == GREYBUS_TYPE_INTERFACE)
if (!found++)
interface_desc = descriptor;
}
if (found != 1) {
dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
found);
result = false;
goto out;
}
/* Parse the manifest, starting with the interface descriptor */
result = gb_manifest_parse_interface(intf, interface_desc);
/*
* We really should have no remaining descriptors, but we
* don't know what newer format manifests might leave.
*/
if (result && !list_empty(&intf->manifest_descs))
dev_info(&intf->dev, "excess descriptors in interface manifest\n");
out:
release_manifest_descriptors(intf);
return result;
}

View File

@ -0,0 +1,16 @@
/*
* Greybus manifest parsing
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __MANIFEST_H
#define __MANIFEST_H
struct gb_interface;
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
#endif /* __MANIFEST_H */

View File

@ -0,0 +1,238 @@
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_trace.h"
static ssize_t eject_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct gb_module *module = to_gb_module(dev);
struct gb_interface *intf;
size_t i;
long val;
int ret;
ret = kstrtol(buf, 0, &val);
if (ret)
return ret;
if (!val)
return len;
for (i = 0; i < module->num_interfaces; ++i) {
intf = module->interfaces[i];
mutex_lock(&intf->mutex);
/* Set flag to prevent concurrent activation. */
intf->ejected = true;
gb_interface_disable(intf);
gb_interface_deactivate(intf);
mutex_unlock(&intf->mutex);
}
/* Tell the SVC to eject the primary interface. */
ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
if (ret)
return ret;
return len;
}
static DEVICE_ATTR_WO(eject);
static ssize_t module_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_module *module = to_gb_module(dev);
return sprintf(buf, "%u\n", module->module_id);
}
static DEVICE_ATTR_RO(module_id);
static ssize_t num_interfaces_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_module *module = to_gb_module(dev);
return sprintf(buf, "%zu\n", module->num_interfaces);
}
static DEVICE_ATTR_RO(num_interfaces);
static struct attribute *module_attrs[] = {
&dev_attr_eject.attr,
&dev_attr_module_id.attr,
&dev_attr_num_interfaces.attr,
NULL,
};
ATTRIBUTE_GROUPS(module);
static void gb_module_release(struct device *dev)
{
struct gb_module *module = to_gb_module(dev);
trace_gb_module_release(module);
kfree(module);
}
struct device_type greybus_module_type = {
.name = "greybus_module",
.release = gb_module_release,
};
struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
size_t num_interfaces)
{
struct gb_interface *intf;
struct gb_module *module;
int i;
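/*
 * Allocate the module together with its trailing interfaces[]
 * pointer array (a zero-length array member of struct gb_module).
 */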
module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf),
GFP_KERNEL);
if (!module)
return NULL;
module->hd = hd;
module->module_id = module_id;
module->num_interfaces = num_interfaces;
module->dev.parent = &hd->dev;
module->dev.bus = &greybus_bus_type;
module->dev.type = &greybus_module_type;
module->dev.groups = module_groups;
module->dev.dma_mask = hd->dev.dma_mask;
device_initialize(&module->dev);
dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
trace_gb_module_create(module);
for (i = 0; i < num_interfaces; ++i) {
intf = gb_interface_create(module, module_id + i);
if (!intf) {
dev_err(&module->dev, "failed to create interface %u\n",
module_id + i);
goto err_put_interfaces;
}
module->interfaces[i] = intf;
}
return module;
err_put_interfaces:
for (--i; i >= 0; --i)
gb_interface_put(module->interfaces[i]);
put_device(&module->dev);
return NULL;
}
/*
* Register and enable an interface after first attempting to activate it.
*/
static void gb_module_register_interface(struct gb_interface *intf)
{
struct gb_module *module = intf->module;
u8 intf_id = intf->interface_id;
int ret;
mutex_lock(&intf->mutex);
ret = gb_interface_activate(intf);
if (ret) {
if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
dev_err(&module->dev,
"failed to activate interface %u: %d\n",
intf_id, ret);
}
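/*
 * Register the interface anyway so that it remains visible to
 * user space (e.g. a dummy interface) despite failed activation.
 */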
gb_interface_add(intf);
goto err_unlock;
}
ret = gb_interface_add(intf);
if (ret)
goto err_interface_deactivate;
ret = gb_interface_enable(intf);
if (ret) {
dev_err(&module->dev, "failed to enable interface %u: %d\n",
intf_id, ret);
goto err_interface_deactivate;
}
mutex_unlock(&intf->mutex);
return;
err_interface_deactivate:
gb_interface_deactivate(intf);
err_unlock:
mutex_unlock(&intf->mutex);
}
static void gb_module_deregister_interface(struct gb_interface *intf)
{
/* Mark as disconnected to prevent I/O during disable. */
if (intf->module->disconnected)
intf->disconnected = true;
mutex_lock(&intf->mutex);
intf->removed = true;
gb_interface_disable(intf);
gb_interface_deactivate(intf);
mutex_unlock(&intf->mutex);
gb_interface_del(intf);
}
/* Register a module and its interfaces. */
int gb_module_add(struct gb_module *module)
{
size_t i;
int ret;
ret = device_add(&module->dev);
if (ret) {
dev_err(&module->dev, "failed to register module: %d\n", ret);
return ret;
}
trace_gb_module_add(module);
for (i = 0; i < module->num_interfaces; ++i)
gb_module_register_interface(module->interfaces[i]);
return 0;
}
/* Deregister a module and its interfaces. */
void gb_module_del(struct gb_module *module)
{
size_t i;
for (i = 0; i < module->num_interfaces; ++i)
gb_module_deregister_interface(module->interfaces[i]);
trace_gb_module_del(module);
device_del(&module->dev);
}
void gb_module_put(struct gb_module *module)
{
size_t i;
for (i = 0; i < module->num_interfaces; ++i)
gb_interface_put(module->interfaces[i]);
put_device(&module->dev);
}

View File

@ -0,0 +1,34 @@
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __MODULE_H
#define __MODULE_H
struct gb_module {
struct device dev;
struct gb_host_device *hd;
struct list_head hd_node;
u8 module_id;
size_t num_interfaces;
bool disconnected;
struct gb_interface *interfaces[0];
};
#define to_gb_module(d) container_of(d, struct gb_module, dev)
struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
size_t num_interfaces);
int gb_module_add(struct gb_module *module);
void gb_module_del(struct gb_module *module);
void gb_module_put(struct gb_module *module);
#endif /* __MODULE_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,210 @@
/*
* Greybus operations
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __OPERATION_H
#define __OPERATION_H
#include <linux/completion.h>
struct gb_operation;
/* The default amount of time a request is given to complete */
#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
/*
* The top bit of the type in an operation message header indicates
* whether the message is a request (bit clear) or response (bit set)
*/
#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
enum gb_operation_result {
GB_OP_SUCCESS = 0x00,
GB_OP_INTERRUPTED = 0x01,
GB_OP_TIMEOUT = 0x02,
GB_OP_NO_MEMORY = 0x03,
GB_OP_PROTOCOL_BAD = 0x04,
GB_OP_OVERFLOW = 0x05,
GB_OP_INVALID = 0x06,
GB_OP_RETRY = 0x07,
GB_OP_NONEXISTENT = 0x08,
GB_OP_UNKNOWN_ERROR = 0xfe,
GB_OP_MALFUNCTION = 0xff,
};
#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
/*
* Protocol code should only examine the payload and payload_size fields, and
* host-controller drivers may use the hcpriv field. All other fields are
* intended to be private to the operations core code.
*/
struct gb_message {
struct gb_operation *operation;
struct gb_operation_msg_hdr *header;
void *payload;
size_t payload_size;
void *buffer;
void *hcpriv;
};
#define GB_OPERATION_FLAG_INCOMING BIT(0)
#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
#define GB_OPERATION_FLAG_CORE BIT(3)
#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
GB_OPERATION_FLAG_UNIDIRECTIONAL)
/*
* A Greybus operation is a remote procedure call performed over a
* connection between two UniPro interfaces.
*
* Every operation consists of a request message sent to the other
* end of the connection coupled with a reply message returned to
* the sender. Every operation has a type, whose interpretation is
* dependent on the protocol associated with the connection.
*
* Only four things in an operation structure are intended to be
* directly usable by protocol handlers: the operation's connection
* pointer; the operation type; the request message payload (and
* size); and the response message payload (and size). Note that a
* message with a 0-byte payload has a null message payload pointer.
*
* In addition, every operation has a result, which is an errno
* value. Protocol handlers access the operation result using
* gb_operation_result().
*/
typedef void (*gb_operation_callback)(struct gb_operation *);
struct gb_operation {
struct gb_connection *connection;
struct gb_message *request;
struct gb_message *response;
unsigned long flags;
u8 type;
u16 id;
int errno; /* Operation result */
struct work_struct work;
gb_operation_callback callback;
struct completion completion;
struct kref kref;
atomic_t waiters;
int active;
struct list_head links; /* connection->operations */
};
static inline bool
gb_operation_is_incoming(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_INCOMING;
}
static inline bool
gb_operation_is_unidirectional(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
}
static inline bool
gb_operation_short_response_allowed(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
}
static inline bool gb_operation_is_core(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_CORE;
}
void gb_connection_recv(struct gb_connection *connection,
void *data, size_t size);
int gb_operation_result(struct gb_operation *operation);
size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
u8 type, size_t request_size,
size_t response_size, unsigned long flags,
gfp_t gfp);
static inline struct gb_operation *
gb_operation_create(struct gb_connection *connection,
u8 type, size_t request_size,
size_t response_size, gfp_t gfp)
{
return gb_operation_create_flags(connection, type, request_size,
response_size, 0, gfp);
}
struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
u8 type, size_t request_size,
size_t response_size, unsigned long flags,
gfp_t gfp);
void gb_operation_get(struct gb_operation *operation);
void gb_operation_put(struct gb_operation *operation);
bool gb_operation_response_alloc(struct gb_operation *operation,
size_t response_size, gfp_t gfp);
int gb_operation_request_send(struct gb_operation *operation,
gb_operation_callback callback,
gfp_t gfp);
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
unsigned int timeout);
static inline int
gb_operation_request_send_sync(struct gb_operation *operation)
{
return gb_operation_request_send_sync_timeout(operation,
GB_OPERATION_TIMEOUT_DEFAULT);
}
void gb_operation_cancel(struct gb_operation *operation, int errno);
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
void greybus_message_sent(struct gb_host_device *hd,
struct gb_message *message, int status);
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
void *request, int request_size,
void *response, int response_size,
unsigned int timeout);
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
int type, void *request, int request_size,
unsigned int timeout);
static inline int gb_operation_sync(struct gb_connection *connection, int type,
void *request, int request_size,
void *response, int response_size)
{
return gb_operation_sync_timeout(connection, type,
request, request_size, response, response_size,
GB_OPERATION_TIMEOUT_DEFAULT);
}
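/*
 * Typical synchronous usage (sketch only; the request/response
 * structures and operation type are protocol-specific and made up
 * here for illustration):
 *
 *	struct gb_example_request request = { .id = cpu_to_le16(id) };
 *	struct gb_example_response response;
 *	int ret;
 *
 *	ret = gb_operation_sync(connection, GB_EXAMPLE_TYPE_GET,
 *				&request, sizeof(request),
 *				&response, sizeof(response));
 *	if (ret)
 *		return ret;
 */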
static inline int gb_operation_unidirectional(struct gb_connection *connection,
int type, void *request, int request_size)
{
return gb_operation_unidirectional_timeout(connection, type,
request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
}
int gb_operation_init(void);
void gb_operation_exit(void);
#endif /* !__OPERATION_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,338 @@
/*
* PWM Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pwm.h>
#include "greybus.h"
#include "gbphy.h"
struct gb_pwm_chip {
struct gb_connection *connection;
u8 pwm_max; /* max pwm number */
struct pwm_chip chip;
struct pwm_chip *pwm;
};
#define pwm_chip_to_gb_pwm_chip(chip) \
container_of(chip, struct gb_pwm_chip, chip)
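/*
 * The PWM core passes around the embedded pwm_chip; recover the
 * enclosing gb_pwm_chip via container_of().
 */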
static int gb_pwm_count_operation(struct gb_pwm_chip *pwmc)
{
struct gb_pwm_count_response response;
int ret;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_PWM_COUNT,
NULL, 0, &response, sizeof(response));
if (ret)
return ret;
pwmc->pwm_max = response.count;
return 0;
}
static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
u8 which)
{
struct gb_pwm_activate_request request;
struct gbphy_device *gbphy_dev;
int ret;
if (which > pwmc->pwm_max)
return -EINVAL;
request.which = which;
gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ACTIVATE,
&request, sizeof(request), NULL, 0);
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
u8 which)
{
struct gb_pwm_deactivate_request request;
struct gbphy_device *gbphy_dev;
int ret;
if (which > pwmc->pwm_max)
return -EINVAL;
request.which = which;
gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DEACTIVATE,
&request, sizeof(request), NULL, 0);
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
u8 which, u32 duty, u32 period)
{
struct gb_pwm_config_request request;
struct gbphy_device *gbphy_dev;
int ret;
if (which > pwmc->pwm_max)
return -EINVAL;
request.which = which;
request.duty = cpu_to_le32(duty);
request.period = cpu_to_le32(period);
gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_CONFIG,
&request, sizeof(request), NULL, 0);
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
u8 which, u8 polarity)
{
struct gb_pwm_polarity_request request;
struct gbphy_device *gbphy_dev;
int ret;
if (which > pwmc->pwm_max)
return -EINVAL;
request.which = which;
request.polarity = polarity;
gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_POLARITY,
&request, sizeof(request), NULL, 0);
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
u8 which)
{
struct gb_pwm_enable_request request;
struct gbphy_device *gbphy_dev;
int ret;
if (which > pwmc->pwm_max)
return -EINVAL;
request.which = which;
gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ENABLE,
&request, sizeof(request), NULL, 0);
if (ret)
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
u8 which)
{
struct gb_pwm_disable_request request;
struct gbphy_device *gbphy_dev;
int ret;
if (which > pwmc->pwm_max)
return -EINVAL;
request.which = which;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
&request, sizeof(request), NULL, 0);
gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
}
static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
return gb_pwm_activate_operation(pwmc, pwm->hwpwm);
};
static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
if (pwm_is_enabled(pwm))
dev_warn(chip->dev, "freeing PWM device without disabling\n");
gb_pwm_deactivate_operation(pwmc, pwm->hwpwm);
}
static int gb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
return gb_pwm_config_operation(pwmc, pwm->hwpwm, duty_ns, period_ns);
};
static int gb_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
return gb_pwm_set_polarity_operation(pwmc, pwm->hwpwm, polarity);
};
static int gb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
return gb_pwm_enable_operation(pwmc, pwm->hwpwm);
};
static void gb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
gb_pwm_disable_operation(pwmc, pwm->hwpwm);
};
static const struct pwm_ops gb_pwm_ops = {
.request = gb_pwm_request,
.free = gb_pwm_free,
.config = gb_pwm_config,
.set_polarity = gb_pwm_set_polarity,
.enable = gb_pwm_enable,
.disable = gb_pwm_disable,
.owner = THIS_MODULE,
};
static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
struct gb_pwm_chip *pwmc;
struct pwm_chip *pwm;
int ret;
pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
if (!pwmc)
return -ENOMEM;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
NULL);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto exit_pwmc_free;
}
pwmc->connection = connection;
gb_connection_set_data(connection, pwmc);
gb_gbphy_set_data(gbphy_dev, pwmc);
ret = gb_connection_enable(connection);
if (ret)
goto exit_connection_destroy;
/* Query number of pwms present */
ret = gb_pwm_count_operation(pwmc);
if (ret)
goto exit_connection_disable;
pwm = &pwmc->chip;
pwm->dev = &gbphy_dev->dev;
pwm->ops = &gb_pwm_ops;
pwm->base = -1; /* Allocate base dynamically */
pwm->npwm = pwmc->pwm_max + 1;
pwm->can_sleep = true; /* FIXME */
ret = pwmchip_add(pwm);
if (ret) {
dev_err(&gbphy_dev->dev,
"failed to register PWM: %d\n", ret);
goto exit_connection_disable;
}
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
exit_pwmc_free:
kfree(pwmc);
return ret;
}
static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
{
struct gb_pwm_chip *pwmc = gb_gbphy_get_data(gbphy_dev);
struct gb_connection *connection = pwmc->connection;
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
pwmchip_remove(&pwmc->chip);
gb_connection_disable(connection);
gb_connection_destroy(connection);
kfree(pwmc);
}
static const struct gbphy_device_id gb_pwm_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_PWM) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_pwm_id_table);
static struct gbphy_driver pwm_driver = {
.name = "pwm",
.probe = gb_pwm_probe,
.remove = gb_pwm_remove,
.id_table = gb_pwm_id_table,
};
module_gbphy_driver(pwm_driver);
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,381 @@
/*
* Greybus driver for the Raw protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/uaccess.h>
#include "greybus.h"
struct gb_raw {
struct gb_connection *connection;
struct list_head list;
int list_data;
struct mutex list_lock;
dev_t dev;
struct cdev cdev;
struct device *device;
};
struct raw_data {
struct list_head entry;
u32 len;
u8 data[0];
};
static struct class *raw_class;
static int raw_major;
static const struct file_operations raw_fops;
static DEFINE_IDA(minors);
/* Number of minor devices this driver supports */
#define NUM_MINORS 256
/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
/*
* Maximum size of the data in the receive buffer we allow before we start to
* drop messages on the floor
*/
#define MAX_DATA_SIZE (MAX_PACKET_SIZE * 8)
/*
* Add the raw data message to the list of received messages.
*/
static int receive_data(struct gb_raw *raw, u32 len, u8 *data)
{
struct raw_data *raw_data;
struct device *dev = &raw->connection->bundle->dev;
int retval = 0;
if (len > MAX_PACKET_SIZE) {
dev_err(dev, "Too big of a data packet, rejected\n");
return -EINVAL;
}
mutex_lock(&raw->list_lock);
if ((raw->list_data + len) > MAX_DATA_SIZE) {
dev_err(dev, "Too much data in receive buffer, now dropping packets\n");
retval = -EINVAL;
goto exit;
}
raw_data = kmalloc(sizeof(*raw_data) + len, GFP_KERNEL);
if (!raw_data) {
retval = -ENOMEM;
goto exit;
}
raw->list_data += len;
raw_data->len = len;
memcpy(&raw_data->data[0], data, len);
list_add_tail(&raw_data->entry, &raw->list);
exit:
mutex_unlock(&raw->list_lock);
return retval;
}
static int gb_raw_request_handler(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct device *dev = &connection->bundle->dev;
struct gb_raw *raw = greybus_get_drvdata(connection->bundle);
struct gb_raw_send_request *receive;
u32 len;
if (op->type != GB_RAW_TYPE_SEND) {
dev_err(dev, "unknown request type 0x%02x\n", op->type);
return -EINVAL;
}
/* Verify size of payload */
if (op->request->payload_size < sizeof(*receive)) {
dev_err(dev, "raw receive request too small (%zu < %zu)\n",
op->request->payload_size, sizeof(*receive));
return -EINVAL;
}
receive = op->request->payload;
len = le32_to_cpu(receive->len);
if (len != (int)(op->request->payload_size - sizeof(__le32))) {
dev_err(dev, "raw receive request wrong size %d vs %d\n", len,
(int)(op->request->payload_size - sizeof(__le32)));
return -EINVAL;
}
if (len == 0) {
dev_err(dev, "raw receive request of 0 bytes?\n");
return -EINVAL;
}
return receive_data(raw, len, receive->data);
}
static int gb_raw_send(struct gb_raw *raw, u32 len, const char __user *data)
{
struct gb_connection *connection = raw->connection;
struct gb_raw_send_request *request;
int retval;
request = kmalloc(len + sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
if (copy_from_user(&request->data[0], data, len)) {
kfree(request);
return -EFAULT;
}
request->len = cpu_to_le32(len);
retval = gb_operation_sync(connection, GB_RAW_TYPE_SEND,
request, len + sizeof(*request),
NULL, 0);
kfree(request);
return retval;
}
static int gb_raw_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct greybus_descriptor_cport *cport_desc;
struct gb_connection *connection;
struct gb_raw *raw;
int retval;
int minor;
if (bundle->num_cports != 1)
return -ENODEV;
cport_desc = &bundle->cport_desc[0];
if (cport_desc->protocol_id != GREYBUS_PROTOCOL_RAW)
return -ENODEV;
raw = kzalloc(sizeof(*raw), GFP_KERNEL);
if (!raw)
return -ENOMEM;
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
gb_raw_request_handler);
if (IS_ERR(connection)) {
retval = PTR_ERR(connection);
goto error_free;
}
INIT_LIST_HEAD(&raw->list);
mutex_init(&raw->list_lock);
raw->connection = connection;
greybus_set_drvdata(bundle, raw);
minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
if (minor < 0) {
retval = minor;
goto error_connection_destroy;
}
raw->dev = MKDEV(raw_major, minor);
cdev_init(&raw->cdev, &raw_fops);
retval = gb_connection_enable(connection);
if (retval)
goto error_remove_ida;
retval = cdev_add(&raw->cdev, raw->dev, 1);
if (retval)
goto error_connection_disable;
raw->device = device_create(raw_class, &connection->bundle->dev,
raw->dev, raw, "gb!raw%d", minor);
if (IS_ERR(raw->device)) {
retval = PTR_ERR(raw->device);
goto error_del_cdev;
}
return 0;
error_del_cdev:
cdev_del(&raw->cdev);
error_connection_disable:
gb_connection_disable(connection);
error_remove_ida:
ida_simple_remove(&minors, minor);
error_connection_destroy:
gb_connection_destroy(connection);
error_free:
kfree(raw);
return retval;
}
static void gb_raw_disconnect(struct gb_bundle *bundle)
{
struct gb_raw *raw = greybus_get_drvdata(bundle);
struct gb_connection *connection = raw->connection;
struct raw_data *raw_data;
struct raw_data *temp;
// FIXME - handle removing a connection when the char device node is open.
device_destroy(raw_class, raw->dev);
cdev_del(&raw->cdev);
gb_connection_disable(connection);
ida_simple_remove(&minors, MINOR(raw->dev));
gb_connection_destroy(connection);
mutex_lock(&raw->list_lock);
list_for_each_entry_safe(raw_data, temp, &raw->list, entry) {
list_del(&raw_data->entry);
kfree(raw_data);
}
mutex_unlock(&raw->list_lock);
kfree(raw);
}
/*
* Character device node interfaces.
*
* Note, we are using read/write to only allow a single read/write per message.
* This means for read(), you have to provide a big enough buffer for the full
* message to be copied into. If the buffer isn't big enough, the read() will
* fail with -ENOSPC.
*/
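/*
 * Minimal userspace sketch (device path assumed; udev typically
 * maps the "gb!raw%d" name to /dev/gb/raw0):
 *
 *	int fd = open("/dev/gb/raw0", O_RDWR);
 *	write(fd, buf, len);		// one message per write()
 *	n = read(fd, buf, sizeof(buf));	// one full message per read()
 */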
static int raw_open(struct inode *inode, struct file *file)
{
struct cdev *cdev = inode->i_cdev;
struct gb_raw *raw = container_of(cdev, struct gb_raw, cdev);
file->private_data = raw;
return 0;
}
static ssize_t raw_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct gb_raw *raw = file->private_data;
int retval;
if (!count)
return 0;
if (count > MAX_PACKET_SIZE)
return -E2BIG;
retval = gb_raw_send(raw, count, buf);
if (retval)
return retval;
return count;
}
static ssize_t raw_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct gb_raw *raw = file->private_data;
int retval = 0;
struct raw_data *raw_data;
mutex_lock(&raw->list_lock);
if (list_empty(&raw->list))
goto exit;
raw_data = list_first_entry(&raw->list, struct raw_data, entry);
if (raw_data->len > count) {
retval = -ENOSPC;
goto exit;
}
if (copy_to_user(buf, &raw_data->data[0], raw_data->len)) {
retval = -EFAULT;
goto exit;
}
list_del(&raw_data->entry);
raw->list_data -= raw_data->len;
retval = raw_data->len;
kfree(raw_data);
exit:
mutex_unlock(&raw->list_lock);
return retval;
}
static const struct file_operations raw_fops = {
.owner = THIS_MODULE,
.write = raw_write,
.read = raw_read,
.open = raw_open,
.llseek = noop_llseek,
};
static const struct greybus_bundle_id gb_raw_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_RAW) },
{ }
};
MODULE_DEVICE_TABLE(greybus, gb_raw_id_table);
static struct greybus_driver gb_raw_driver = {
.name = "raw",
.probe = gb_raw_probe,
.disconnect = gb_raw_disconnect,
.id_table = gb_raw_id_table,
};
static int raw_init(void)
{
dev_t dev;
int retval;
raw_class = class_create(THIS_MODULE, "gb_raw");
if (IS_ERR(raw_class)) {
retval = PTR_ERR(raw_class);
goto error_class;
}
retval = alloc_chrdev_region(&dev, 0, NUM_MINORS, "gb_raw");
if (retval < 0)
goto error_chrdev;
raw_major = MAJOR(dev);
retval = greybus_register(&gb_raw_driver);
if (retval)
goto error_gb;
return 0;
error_gb:
unregister_chrdev_region(dev, NUM_MINORS);
error_chrdev:
class_destroy(raw_class);
error_class:
return retval;
}
module_init(raw_init);
static void __exit raw_exit(void)
{
greybus_deregister(&gb_raw_driver);
unregister_chrdev_region(MKDEV(raw_major, 0), NUM_MINORS);
class_destroy(raw_class);
ida_destroy(&minors);
}
module_exit(raw_exit);
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,884 @@
/*
* SD/MMC Greybus driver.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include "greybus.h"
#include "gbphy.h"
struct gb_sdio_host {
struct gb_connection *connection;
struct gbphy_device *gbphy_dev;
struct mmc_host *mmc;
struct mmc_request *mrq;
struct mutex lock; /* lock for this host */
size_t data_max;
spinlock_t xfer; /* lock to cancel ongoing transfer */
bool xfer_stop;
struct workqueue_struct *mrq_workqueue;
struct work_struct mrqwork;
u8 queued_events;
bool removed;
bool card_present;
bool read_only;
};
#define GB_SDIO_RSP_R1_R5_R6_R7 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4 (GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
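/*
 * The GB_SDIO_RSP_* groups above mirror the kernel's MMC_RSP_*
 * categories: R1/R5/R6/R7 carry CRC and opcode, R3/R4 only a
 * present bit, R2 the 136-bit response, and R1B adds busy
 * signalling.
 */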
/* The kernel's vdd mask starts at 0x80; translate it to the Greybus one, which starts at 0x01 */
#define GB_SDIO_VDD_SHIFT 8
#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM 0
#endif
static inline bool single_op(struct mmc_command *cmd)
{
uint32_t opcode = cmd->opcode;
return opcode == MMC_WRITE_BLOCK ||
opcode == MMC_READ_SINGLE_BLOCK;
}
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
u32 caps = 0;
u32 caps2 = 0;
caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);
caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);
host->mmc->caps = caps;
host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;
if (caps & MMC_CAP_NONREMOVABLE)
host->card_present = true;
}
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
);
}
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
struct gb_sdio_get_caps_response response;
struct mmc_host *mmc = host->mmc;
u16 data_max;
u32 blksz;
u32 ocr;
u32 r;
int ret;
ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
NULL, 0, &response, sizeof(response));
if (ret < 0)
return ret;
r = le32_to_cpu(response.caps);
_gb_sdio_set_host_caps(host, r);
/* get the max block size that could fit our payload */
data_max = gb_operation_get_payload_size_max(host->connection);
data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
data_max - sizeof(struct gb_sdio_transfer_response));
blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
blksz = max_t(u32, 512, blksz);
mmc->max_blk_size = rounddown_pow_of_two(blksz);
mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
host->data_max = data_max;
/* get ocr supported values */
ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
mmc->ocr_avail = ocr;
mmc->ocr_avail_sdio = mmc->ocr_avail;
mmc->ocr_avail_sd = mmc->ocr_avail;
mmc->ocr_avail_mmc = mmc->ocr_avail;
/* get frequency range values */
mmc->f_min = le32_to_cpu(response.f_min);
mmc->f_max = le32_to_cpu(response.f_max);
return 0;
}
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
if (event & GB_SDIO_CARD_INSERTED)
host->queued_events &= ~GB_SDIO_CARD_REMOVED;
else if (event & GB_SDIO_CARD_REMOVED)
host->queued_events &= ~GB_SDIO_CARD_INSERTED;
host->queued_events |= event;
}
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
u8 state_changed = 0;
if (event & GB_SDIO_CARD_INSERTED) {
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
return 0;
if (host->card_present)
return 0;
host->card_present = true;
state_changed = 1;
}
if (event & GB_SDIO_CARD_REMOVED) {
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
return 0;
if (!(host->card_present))
return 0;
host->card_present = false;
state_changed = 1;
}
if (event & GB_SDIO_WP) {
host->read_only = true;
}
if (state_changed) {
dev_info(mmc_dev(host->mmc), "card %s\n",
(host->card_present ? "inserted" : "removed"));
mmc_detect_change(host->mmc, 0);
}
return 0;
}
static int gb_sdio_request_handler(struct gb_operation *op)
{
struct gb_sdio_host *host = gb_connection_get_data(op->connection);
struct gb_message *request;
struct gb_sdio_event_request *payload;
u8 type = op->type;
int ret = 0;
u8 event;
if (type != GB_SDIO_TYPE_EVENT) {
dev_err(mmc_dev(host->mmc),
"unsupported unsolicited event: %u\n", type);
return -EINVAL;
}
request = op->request;
if (request->payload_size < sizeof(*payload)) {
dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
request->payload_size, sizeof(*payload));
return -EINVAL;
}
payload = request->payload;
event = payload->event;
if (host->removed)
_gb_queue_event(host, event);
else
ret = _gb_sdio_process_events(host, event);
return ret;
}
static int gb_sdio_set_ios(struct gb_sdio_host *host,
struct gb_sdio_set_ios_request *request)
{
int ret;
ret = gbphy_runtime_get_sync(host->gbphy_dev);
if (ret)
return ret;
ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
sizeof(*request), NULL, 0);
gbphy_runtime_put_autosuspend(host->gbphy_dev);
return ret;
}
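/*
* Send one chunk of at most host->data_max bytes: copy the payload out
* of the scatterlist at offset @skip, issue the transfer operation and
* verify that the module acknowledged the same block count and size.
*/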
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
size_t len, u16 nblocks, off_t skip)
{
struct gb_sdio_transfer_request *request;
struct gb_sdio_transfer_response *response;
struct gb_operation *operation;
struct scatterlist *sg = data->sg;
unsigned int sg_len = data->sg_len;
size_t copied;
u16 send_blksz;
u16 send_blocks;
int ret;
WARN_ON(len > host->data_max);
operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
len + sizeof(*request),
sizeof(*response), GFP_KERNEL);
if (!operation)
return -ENOMEM;
request = operation->request->payload;
request->data_flags = (data->flags >> 8);
request->data_blocks = cpu_to_le16(nblocks);
request->data_blksz = cpu_to_le16(data->blksz);
copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
if (copied != len) {
ret = -EINVAL;
goto err_put_operation;
}
ret = gb_operation_request_send_sync(operation);
if (ret < 0)
goto err_put_operation;
response = operation->response->payload;
send_blocks = le16_to_cpu(response->data_blocks);
send_blksz = le16_to_cpu(response->data_blksz);
if (len != send_blksz * send_blocks) {
dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
len, send_blksz * send_blocks);
ret = -EINVAL;
}
err_put_operation:
gb_operation_put(operation);
return ret;
}
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
size_t len, u16 nblocks, off_t skip)
{
struct gb_sdio_transfer_request *request;
struct gb_sdio_transfer_response *response;
struct gb_operation *operation;
struct scatterlist *sg = data->sg;
unsigned int sg_len = data->sg_len;
size_t copied;
u16 recv_blksz;
u16 recv_blocks;
int ret;
WARN_ON(len > host->data_max);
operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
sizeof(*request),
len + sizeof(*response), GFP_KERNEL);
if (!operation)
return -ENOMEM;
request = operation->request->payload;
request->data_flags = (data->flags >> 8);
request->data_blocks = cpu_to_le16(nblocks);
request->data_blksz = cpu_to_le16(data->blksz);
ret = gb_operation_request_send_sync(operation);
if (ret < 0)
goto err_put_operation;
response = operation->response->payload;
recv_blocks = le16_to_cpu(response->data_blocks);
recv_blksz = le16_to_cpu(response->data_blksz);
if (len != recv_blksz * recv_blocks) {
dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
recv_blksz * recv_blocks, len);
ret = -EINVAL;
goto err_put_operation;
}
copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
skip);
if (copied != len)
ret = -EINVAL;
err_put_operation:
gb_operation_put(operation);
return ret;
}
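/*
* Split an MMC data request into transfer operations that fit in the
* connection payload, advancing through the scatterlist until all
* blocks are done or a stop transmission is requested.
*/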
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
size_t left, len;
off_t skip = 0;
int ret = 0;
u16 nblocks;
if (single_op(data->mrq->cmd) && data->blocks > 1) {
ret = -ETIMEDOUT;
goto out;
}
left = data->blksz * data->blocks;
while (left) {
/* check if a stop transmission is pending */
spin_lock(&host->xfer);
if (host->xfer_stop) {
host->xfer_stop = false;
spin_unlock(&host->xfer);
ret = -EINTR;
goto out;
}
spin_unlock(&host->xfer);
len = min(left, host->data_max);
nblocks = len / data->blksz;
len = nblocks * data->blksz;
if (data->flags & MMC_DATA_READ) {
ret = _gb_sdio_recv(host, data, len, nblocks, skip);
if (ret < 0)
goto out;
} else {
ret = _gb_sdio_send(host, data, len, nblocks, skip);
if (ret < 0)
goto out;
}
data->bytes_xfered += len;
left -= len;
skip += len;
}
out:
data->error = ret;
return ret;
}
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
struct gb_sdio_command_request request = {0};
struct gb_sdio_command_response response;
struct mmc_data *data = host->mrq->data;
u8 cmd_flags;
u8 cmd_type;
int i;
int ret;
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
cmd_flags = GB_SDIO_RSP_NONE;
break;
case MMC_RSP_R1:
cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
break;
case MMC_RSP_R1B:
cmd_flags = GB_SDIO_RSP_R1B;
break;
case MMC_RSP_R2:
cmd_flags = GB_SDIO_RSP_R2;
break;
case MMC_RSP_R3:
cmd_flags = GB_SDIO_RSP_R3_R4;
break;
default:
dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
mmc_resp_type(cmd));
ret = -EINVAL;
goto out;
}
switch (mmc_cmd_type(cmd)) {
case MMC_CMD_BC:
cmd_type = GB_SDIO_CMD_BC;
break;
case MMC_CMD_BCR:
cmd_type = GB_SDIO_CMD_BCR;
break;
case MMC_CMD_AC:
cmd_type = GB_SDIO_CMD_AC;
break;
case MMC_CMD_ADTC:
cmd_type = GB_SDIO_CMD_ADTC;
break;
default:
dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
mmc_cmd_type(cmd));
ret = -EINVAL;
goto out;
}
request.cmd = cmd->opcode;
request.cmd_flags = cmd_flags;
request.cmd_type = cmd_type;
request.cmd_arg = cpu_to_le32(cmd->arg);
/* some controllers need to know the data details at command time */
if (data) {
request.data_blocks = cpu_to_le16(data->blocks);
request.data_blksz = cpu_to_le16(data->blksz);
}
ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
&request, sizeof(request), &response,
sizeof(response));
if (ret < 0)
goto out;
/* no response expected */
if (cmd_flags & GB_SDIO_RSP_NONE)
goto out;
/* long response expected */
if (cmd_flags & GB_SDIO_RSP_R2)
for (i = 0; i < 4; i++)
cmd->resp[i] = le32_to_cpu(response.resp[i]);
else
cmd->resp[0] = le32_to_cpu(response.resp[0]);
out:
cmd->error = ret;
return ret;
}
static void gb_sdio_mrq_work(struct work_struct *work)
{
struct gb_sdio_host *host;
struct mmc_request *mrq;
int ret;
host = container_of(work, struct gb_sdio_host, mrqwork);
ret = gbphy_runtime_get_sync(host->gbphy_dev);
if (ret)
return;
mutex_lock(&host->lock);
mrq = host->mrq;
if (!mrq) {
mutex_unlock(&host->lock);
gbphy_runtime_put_autosuspend(host->gbphy_dev);
dev_err(mmc_dev(host->mmc), "mmc request is NULL\n");
return;
}
if (host->removed) {
mrq->cmd->error = -ESHUTDOWN;
goto done;
}
if (mrq->sbc) {
ret = gb_sdio_command(host, mrq->sbc);
if (ret < 0)
goto done;
}
ret = gb_sdio_command(host, mrq->cmd);
if (ret < 0)
goto done;
if (mrq->data) {
ret = gb_sdio_transfer(host, mrq->data);
if (ret < 0)
goto done;
}
if (mrq->stop) {
ret = gb_sdio_command(host, mrq->stop);
if (ret < 0)
goto done;
}
done:
host->mrq = NULL;
mutex_unlock(&host->lock);
mmc_request_done(host->mmc, mrq);
gbphy_runtime_put_autosuspend(host->gbphy_dev);
}
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct gb_sdio_host *host = mmc_priv(mmc);
struct mmc_command *cmd = mrq->cmd;
/* Check if this is a request to cancel an ongoing transfer */
if (cmd->opcode == MMC_STOP_TRANSMISSION) {
spin_lock(&host->xfer);
host->xfer_stop = true;
spin_unlock(&host->xfer);
}
mutex_lock(&host->lock);
WARN_ON(host->mrq);
host->mrq = mrq;
if (host->removed) {
mrq->cmd->error = -ESHUTDOWN;
goto out;
}
if (!host->card_present) {
mrq->cmd->error = -ENOMEDIUM;
goto out;
}
queue_work(host->mrq_workqueue, &host->mrqwork);
mutex_unlock(&host->lock);
return;
out:
host->mrq = NULL;
mutex_unlock(&host->lock);
mmc_request_done(mmc, mrq);
}
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct gb_sdio_host *host = mmc_priv(mmc);
struct gb_sdio_set_ios_request request;
int ret;
u8 power_mode;
u8 bus_width;
u8 timing;
u8 signal_voltage;
u8 drv_type;
u32 vdd = 0;
mutex_lock(&host->lock);
request.clock = cpu_to_le32(ios->clock);
if (ios->vdd)
vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
request.vdd = cpu_to_le32(vdd);
request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
GB_SDIO_BUSMODE_OPENDRAIN :
GB_SDIO_BUSMODE_PUSHPULL);
switch (ios->power_mode) {
case MMC_POWER_OFF:
default:
power_mode = GB_SDIO_POWER_OFF;
break;
case MMC_POWER_UP:
power_mode = GB_SDIO_POWER_UP;
break;
case MMC_POWER_ON:
power_mode = GB_SDIO_POWER_ON;
break;
case MMC_POWER_UNDEFINED:
power_mode = GB_SDIO_POWER_UNDEFINED;
break;
}
request.power_mode = power_mode;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
bus_width = GB_SDIO_BUS_WIDTH_1;
break;
case MMC_BUS_WIDTH_4:
default:
bus_width = GB_SDIO_BUS_WIDTH_4;
break;
case MMC_BUS_WIDTH_8:
bus_width = GB_SDIO_BUS_WIDTH_8;
break;
}
request.bus_width = bus_width;
switch (ios->timing) {
case MMC_TIMING_LEGACY:
default:
timing = GB_SDIO_TIMING_LEGACY;
break;
case MMC_TIMING_MMC_HS:
timing = GB_SDIO_TIMING_MMC_HS;
break;
case MMC_TIMING_SD_HS:
timing = GB_SDIO_TIMING_SD_HS;
break;
case MMC_TIMING_UHS_SDR12:
timing = GB_SDIO_TIMING_UHS_SDR12;
break;
case MMC_TIMING_UHS_SDR25:
timing = GB_SDIO_TIMING_UHS_SDR25;
break;
case MMC_TIMING_UHS_SDR50:
timing = GB_SDIO_TIMING_UHS_SDR50;
break;
case MMC_TIMING_UHS_SDR104:
timing = GB_SDIO_TIMING_UHS_SDR104;
break;
case MMC_TIMING_UHS_DDR50:
timing = GB_SDIO_TIMING_UHS_DDR50;
break;
case MMC_TIMING_MMC_DDR52:
timing = GB_SDIO_TIMING_MMC_DDR52;
break;
case MMC_TIMING_MMC_HS200:
timing = GB_SDIO_TIMING_MMC_HS200;
break;
case MMC_TIMING_MMC_HS400:
timing = GB_SDIO_TIMING_MMC_HS400;
break;
}
request.timing = timing;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
break;
case MMC_SIGNAL_VOLTAGE_180:
default:
signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
break;
case MMC_SIGNAL_VOLTAGE_120:
signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
break;
}
request.signal_voltage = signal_voltage;
switch (ios->drv_type) {
case MMC_SET_DRIVER_TYPE_A:
drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
break;
case MMC_SET_DRIVER_TYPE_C:
drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
break;
case MMC_SET_DRIVER_TYPE_D:
drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
break;
case MMC_SET_DRIVER_TYPE_B:
default:
drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
break;
}
request.drv_type = drv_type;
ret = gb_sdio_set_ios(host, &request);
if (ret < 0)
goto out;
memcpy(&mmc->ios, ios, sizeof(mmc->ios));
out:
mutex_unlock(&host->lock);
}
static int gb_mmc_get_ro(struct mmc_host *mmc)
{
struct gb_sdio_host *host = mmc_priv(mmc);
mutex_lock(&host->lock);
if (host->removed) {
mutex_unlock(&host->lock);
return -ESHUTDOWN;
}
mutex_unlock(&host->lock);
return host->read_only;
}
static int gb_mmc_get_cd(struct mmc_host *mmc)
{
struct gb_sdio_host *host = mmc_priv(mmc);
mutex_lock(&host->lock);
if (host->removed) {
mutex_unlock(&host->lock);
return -ESHUTDOWN;
}
mutex_unlock(&host->lock);
return host->card_present;
}
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
return 0;
}
static const struct mmc_host_ops gb_sdio_ops = {
.request = gb_mmc_request,
.set_ios = gb_mmc_set_ios,
.get_ro = gb_mmc_get_ro,
.get_cd = gb_mmc_get_cd,
.start_signal_voltage_switch = gb_mmc_switch_voltage,
};
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
struct mmc_host *mmc;
struct gb_sdio_host *host;
int ret = 0;
mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
if (!mmc)
return -ENOMEM;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
gb_sdio_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto exit_mmc_free;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->removed = true;
host->connection = connection;
gb_connection_set_data(connection, host);
host->gbphy_dev = gbphy_dev;
gb_gbphy_set_data(gbphy_dev, host);
ret = gb_connection_enable_tx(connection);
if (ret)
goto exit_connection_destroy;
ret = gb_sdio_get_caps(host);
if (ret < 0)
goto exit_connection_disable;
mmc->ops = &gb_sdio_ops;
mmc->max_segs = host->mmc->max_blk_count;
/* for now, map the maximum request size 1:1 to the segment size */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
mutex_init(&host->lock);
spin_lock_init(&host->xfer);
host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
dev_name(&gbphy_dev->dev));
if (!host->mrq_workqueue) {
ret = -ENOMEM;
goto exit_connection_disable;
}
INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);
ret = gb_connection_enable(connection);
if (ret)
goto exit_wq_destroy;
ret = mmc_add_host(mmc);
if (ret < 0)
goto exit_wq_destroy;
host->removed = false;
ret = _gb_sdio_process_events(host, host->queued_events);
host->queued_events = 0;
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
exit_wq_destroy:
destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
exit_mmc_free:
mmc_free_host(mmc);
return ret;
}
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
struct gb_connection *connection = host->connection;
struct mmc_host *mmc;
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
mutex_lock(&host->lock);
host->removed = true;
mmc = host->mmc;
gb_connection_set_data(connection, NULL);
mutex_unlock(&host->lock);
flush_workqueue(host->mrq_workqueue);
destroy_workqueue(host->mrq_workqueue);
gb_connection_disable_rx(connection);
mmc_remove_host(mmc);
gb_connection_disable(connection);
gb_connection_destroy(connection);
mmc_free_host(mmc);
}
static const struct gbphy_device_id gb_sdio_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);
static struct gbphy_driver sdio_driver = {
.name = "sdio",
.probe = gb_sdio_probe,
.remove = gb_sdio_remove,
.id_table = gb_sdio_id_table,
};
module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");


@ -0,0 +1,79 @@
/*
* SPI bridge PHY driver.
*
* Copyright 2014-2016 Google Inc.
* Copyright 2014-2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/module.h>
#include "greybus.h"
#include "gbphy.h"
#include "spilib.h"
static struct spilib_ops *spilib_ops;
static int gb_spi_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
int ret;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
NULL);
if (IS_ERR(connection))
return PTR_ERR(connection);
ret = gb_connection_enable(connection);
if (ret)
goto exit_connection_destroy;
ret = gb_spilib_master_init(connection, &gbphy_dev->dev, spilib_ops);
if (ret)
goto exit_connection_disable;
gb_gbphy_set_data(gbphy_dev, connection);
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
return ret;
}
static void gb_spi_remove(struct gbphy_device *gbphy_dev)
{
struct gb_connection *connection = gb_gbphy_get_data(gbphy_dev);
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
gb_spilib_master_exit(connection);
gb_connection_disable(connection);
gb_connection_destroy(connection);
}
static const struct gbphy_device_id gb_spi_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SPI) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_spi_id_table);
static struct gbphy_driver spi_driver = {
.name = "spi",
.probe = gb_spi_probe,
.remove = gb_spi_remove,
.id_table = gb_spi_id_table,
};
module_gbphy_driver(spi_driver);
MODULE_LICENSE("GPL v2");


@ -0,0 +1,565 @@
/*
* Greybus SPI library
*
* Copyright 2014-2016 Google Inc.
* Copyright 2014-2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include "greybus.h"
#include "spilib.h"
struct gb_spilib {
struct gb_connection *connection;
struct device *parent;
struct spi_transfer *first_xfer;
struct spi_transfer *last_xfer;
struct spilib_ops *ops;
u32 rx_xfer_offset;
u32 tx_xfer_offset;
u32 last_xfer_size;
unsigned int op_timeout;
u16 mode;
u16 flags;
u32 bits_per_word_mask;
u8 num_chipselect;
u32 min_speed_hz;
u32 max_speed_hz;
};
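/*
* The per-message transfer state machine is tracked directly in
* msg->state using these sentinel pointer values.
*/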
#define GB_SPI_STATE_MSG_DONE ((void *)0)
#define GB_SPI_STATE_MSG_IDLE ((void *)1)
#define GB_SPI_STATE_MSG_RUNNING ((void *)2)
#define GB_SPI_STATE_OP_READY ((void *)3)
#define GB_SPI_STATE_OP_DONE ((void *)4)
#define GB_SPI_STATE_MSG_ERROR ((void *)-1)
#define XFER_TIMEOUT_TOLERANCE 200
static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
{
return gb_connection_get_data(spi->connection);
}
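/*
* Check whether the tx data accumulated so far plus one more
* per-transfer header still fits in the request payload; returns 1 if
* it fits, 0 otherwise.
*/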
static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
{
size_t headers_size;
data_max -= sizeof(struct gb_spi_transfer_request);
headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
return tx_size + headers_size > data_max ? 0 : 1;
}
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
size_t data_max)
{
size_t rx_xfer_size;
data_max -= sizeof(struct gb_spi_transfer_response);
if (rx_size + len > data_max)
rx_xfer_size = data_max - rx_size;
else
rx_xfer_size = len;
/* if this is a write_read, for symmetry read the same as write */
if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
rx_xfer_size = *tx_xfer_size;
if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
*tx_xfer_size = rx_xfer_size;
return rx_xfer_size;
}
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
size_t data_max)
{
size_t headers_size;
data_max -= sizeof(struct gb_spi_transfer_request);
headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
if (tx_size + headers_size + len > data_max)
return data_max - (tx_size + sizeof(struct gb_spi_transfer));
return len;
}
static void clean_xfer_state(struct gb_spilib *spi)
{
spi->first_xfer = NULL;
spi->last_xfer = NULL;
spi->rx_xfer_offset = 0;
spi->tx_xfer_offset = 0;
spi->last_xfer_size = 0;
spi->op_timeout = 0;
}
static bool is_last_xfer_done(struct gb_spilib *spi)
{
struct spi_transfer *last_xfer = spi->last_xfer;
if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
(spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
return true;
return false;
}
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
struct spi_transfer *last_xfer = spi->last_xfer;
if (msg->state != GB_SPI_STATE_OP_DONE)
return 0;
/*
* if we transferred all content of the last transfer, reset values and
* check if this was the last transfer in the message
*/
if (is_last_xfer_done(spi)) {
spi->tx_xfer_offset = 0;
spi->rx_xfer_offset = 0;
spi->op_timeout = 0;
if (last_xfer == list_last_entry(&msg->transfers,
struct spi_transfer,
transfer_list))
msg->state = GB_SPI_STATE_MSG_DONE;
else
spi->first_xfer = list_next_entry(last_xfer,
transfer_list);
return 0;
}
spi->first_xfer = last_xfer;
if (last_xfer->tx_buf)
spi->tx_xfer_offset += spi->last_xfer_size;
if (last_xfer->rx_buf)
spi->rx_xfer_offset += spi->last_xfer_size;
return 0;
}
static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
struct spi_message *msg)
{
if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list))
return NULL;
return list_next_entry(xfer, transfer_list);
}
/* Routines to transfer data */
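/*
* Build a single transfer operation that packs as many (possibly
* partial) transfers of the message as fit in one request payload.
*/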
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
struct gb_connection *connection, struct spi_message *msg)
{
struct gb_spi_transfer_request *request;
struct spi_device *dev = msg->spi;
struct spi_transfer *xfer;
struct gb_spi_transfer *gb_xfer;
struct gb_operation *operation;
u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
u32 total_len = 0;
unsigned int xfer_timeout;
size_t data_max;
void *tx_data;
data_max = gb_operation_get_payload_size_max(connection);
xfer = spi->first_xfer;
/* Find number of transfers queued and tx/rx length in the message */
while (msg->state != GB_SPI_STATE_OP_READY) {
msg->state = GB_SPI_STATE_MSG_RUNNING;
spi->last_xfer = xfer;
if (!xfer->tx_buf && !xfer->rx_buf) {
dev_err(spi->parent,
"bufferless transfer, length %u\n", xfer->len);
msg->state = GB_SPI_STATE_MSG_ERROR;
return NULL;
}
tx_xfer_size = 0;
rx_xfer_size = 0;
if (xfer->tx_buf) {
len = xfer->len - spi->tx_xfer_offset;
if (!tx_header_fit_operation(tx_size, count, data_max))
break;
tx_xfer_size = calc_tx_xfer_size(tx_size, count,
len, data_max);
spi->last_xfer_size = tx_xfer_size;
}
if (xfer->rx_buf) {
len = xfer->len - spi->rx_xfer_offset;
rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
len, data_max);
spi->last_xfer_size = rx_xfer_size;
}
tx_size += tx_xfer_size;
rx_size += rx_xfer_size;
total_len += spi->last_xfer_size;
count++;
xfer = get_next_xfer(xfer, msg);
if (!xfer || total_len >= data_max)
msg->state = GB_SPI_STATE_OP_READY;
}
/*
* In addition to space for all message descriptors we need
* to have enough to hold all tx data.
*/
request_size = sizeof(*request);
request_size += count * sizeof(*gb_xfer);
request_size += tx_size;
/* Response consists only of incoming data */
operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
request_size, rx_size, GFP_KERNEL);
if (!operation)
return NULL;
request = operation->request->payload;
request->count = cpu_to_le16(count);
request->mode = dev->mode;
request->chip_select = dev->chip_select;
gb_xfer = &request->transfers[0];
tx_data = gb_xfer + count; /* place tx data after last gb_xfer */
/* Fill in the transfers array */
xfer = spi->first_xfer;
while (msg->state != GB_SPI_STATE_OP_DONE) {
if (xfer == spi->last_xfer)
xfer_len = spi->last_xfer_size;
else
xfer_len = xfer->len;
/* make sure we do not time out on a slow transfer */
xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;
if (xfer_timeout > spi->op_timeout)
spi->op_timeout = xfer_timeout;
gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
gb_xfer->len = cpu_to_le32(xfer_len);
gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
gb_xfer->cs_change = xfer->cs_change;
gb_xfer->bits_per_word = xfer->bits_per_word;
/* Copy tx data */
if (xfer->tx_buf) {
gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
xfer_len);
tx_data += xfer_len;
}
if (xfer->rx_buf)
gb_xfer->xfer_flags |= GB_SPI_XFER_READ;
if (xfer == spi->last_xfer) {
if (!is_last_xfer_done(spi))
gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
msg->state = GB_SPI_STATE_OP_DONE;
continue;
}
gb_xfer++;
xfer = get_next_xfer(xfer, msg);
}
msg->actual_length += total_len;
return operation;
}
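/*
* Copy the received data back into the rx buffers of the transfers
* covered by this operation, taking any partial-transfer offset into
* account.
*/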
static void gb_spi_decode_response(struct gb_spilib *spi,
struct spi_message *msg,
struct gb_spi_transfer_response *response)
{
struct spi_transfer *xfer = spi->first_xfer;
void *rx_data = response->data;
u32 xfer_len;
while (xfer) {
/* Copy rx data */
if (xfer->rx_buf) {
if (xfer == spi->first_xfer)
xfer_len = xfer->len - spi->rx_xfer_offset;
else if (xfer == spi->last_xfer)
xfer_len = spi->last_xfer_size;
else
xfer_len = xfer->len;
memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
xfer_len);
rx_data += xfer_len;
}
if (xfer == spi->last_xfer)
break;
xfer = list_next_entry(xfer, transfer_list);
}
}
static int gb_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct gb_spilib *spi = spi_master_get_devdata(master);
struct gb_connection *connection = spi->connection;
struct gb_spi_transfer_response *response;
struct gb_operation *operation;
int ret = 0;
spi->first_xfer = list_first_entry_or_null(&msg->transfers,
struct spi_transfer,
transfer_list);
if (!spi->first_xfer) {
ret = -ENOMEM;
goto out;
}
msg->state = GB_SPI_STATE_MSG_IDLE;
while (msg->state != GB_SPI_STATE_MSG_DONE &&
msg->state != GB_SPI_STATE_MSG_ERROR) {
operation = gb_spi_operation_create(spi, connection, msg);
if (!operation) {
msg->state = GB_SPI_STATE_MSG_ERROR;
ret = -EINVAL;
continue;
}
ret = gb_operation_request_send_sync_timeout(operation,
spi->op_timeout);
if (!ret) {
response = operation->response->payload;
if (response)
gb_spi_decode_response(spi, msg, response);
} else {
dev_err(spi->parent,
"transfer operation failed: %d\n", ret);
msg->state = GB_SPI_STATE_MSG_ERROR;
}
gb_operation_put(operation);
setup_next_xfer(spi, msg);
}
out:
msg->status = ret;
clean_xfer_state(spi);
spi_finalize_current_message(master);
return ret;
}
static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
{
struct gb_spilib *spi = spi_master_get_devdata(master);
return spi->ops->prepare_transfer_hardware(spi->parent);
}
static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
{
struct gb_spilib *spi = spi_master_get_devdata(master);
spi->ops->unprepare_transfer_hardware(spi->parent);
return 0;
}
static int gb_spi_setup(struct spi_device *spi)
{
/* Nothing to do for now */
return 0;
}
static void gb_spi_cleanup(struct spi_device *spi)
{
/* Nothing to do for now */
}
/* Routines to get controller information */
/*
* Map Greybus spi mode bits/flags/bpw into Linux ones.
* All bits are the same for now, so these macros return the same values.
*/
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags
static int gb_spi_get_master_config(struct gb_spilib *spi)
{
struct gb_spi_master_config_response response;
u16 mode, flags;
int ret;
ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
NULL, 0, &response, sizeof(response));
if (ret < 0)
return ret;
mode = le16_to_cpu(response.mode);
spi->mode = gb_spi_mode_map(mode);
flags = le16_to_cpu(response.flags);
spi->flags = gb_spi_flags_map(flags);
spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
spi->num_chipselect = response.num_chipselect;
spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
return 0;
}
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
struct spi_master *master = get_master_from_spi(spi);
struct gb_spi_device_config_request request;
struct gb_spi_device_config_response response;
struct spi_board_info spi_board = { {0} };
struct spi_device *spidev;
int ret;
u8 dev_type;
request.chip_select = cs;
ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
&request, sizeof(request),
&response, sizeof(response));
if (ret < 0)
return ret;
dev_type = response.device_type;
if (dev_type == GB_SPI_SPI_DEV)
strlcpy(spi_board.modalias, "spidev",
sizeof(spi_board.modalias));
else if (dev_type == GB_SPI_SPI_NOR)
strlcpy(spi_board.modalias, "spi-nor",
sizeof(spi_board.modalias));
else if (dev_type == GB_SPI_SPI_MODALIAS)
memcpy(spi_board.modalias, response.name,
sizeof(spi_board.modalias));
else
return -EINVAL;
spi_board.mode = le16_to_cpu(response.mode);
spi_board.bus_num = master->bus_num;
spi_board.chip_select = cs;
spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
spidev = spi_new_device(master, &spi_board);
if (!spidev)
return -EINVAL;
return 0;
}
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
struct spilib_ops *ops)
{
struct gb_spilib *spi;
struct spi_master *master;
int ret;
u8 i;
/* Allocate master with space for data */
master = spi_alloc_master(dev, sizeof(*spi));
if (!master) {
dev_err(dev, "cannot alloc SPI master\n");
return -ENOMEM;
}
spi = spi_master_get_devdata(master);
spi->connection = connection;
gb_connection_set_data(connection, master);
spi->parent = dev;
spi->ops = ops;
/* get master configuration */
ret = gb_spi_get_master_config(spi);
if (ret)
goto exit_spi_put;
master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
master->num_chipselect = spi->num_chipselect;
master->mode_bits = spi->mode;
master->flags = spi->flags;
master->bits_per_word_mask = spi->bits_per_word_mask;
/* Attach methods */
master->cleanup = gb_spi_cleanup;
master->setup = gb_spi_setup;
master->transfer_one_message = gb_spi_transfer_one_message;
if (ops && ops->prepare_transfer_hardware) {
master->prepare_transfer_hardware =
gb_spi_prepare_transfer_hardware;
}
if (ops && ops->unprepare_transfer_hardware) {
master->unprepare_transfer_hardware =
gb_spi_unprepare_transfer_hardware;
}
master->auto_runtime_pm = true;
ret = spi_register_master(master);
if (ret < 0)
goto exit_spi_put;
/* now, fetch the configuration of each device */
for (i = 0; i < spi->num_chipselect; i++) {
ret = gb_spi_setup_device(spi, i);
if (ret < 0) {
dev_err(dev, "failed to allocate spi device %d: %d\n",
i, ret);
goto exit_spi_unregister;
}
}
return 0;
exit_spi_unregister:
spi_unregister_master(master);
exit_spi_put:
spi_master_put(master);
return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);
void gb_spilib_master_exit(struct gb_connection *connection)
{
struct spi_master *master = gb_connection_get_data(connection);
spi_unregister_master(master);
spi_master_put(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
MODULE_LICENSE("GPL v2");


@ -0,0 +1,24 @@
/*
* Greybus SPI library header
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __SPILIB_H
#define __SPILIB_H
struct device;
struct gb_connection;
struct spilib_ops {
int (*prepare_transfer_hardware)(struct device *dev);
void (*unprepare_transfer_hardware)(struct device *dev);
};
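/*
* A minimal sketch of how a host driver could hook its own power
* management around transfers (my_prepare/my_unprepare are hypothetical
* helpers, not part of this API):
*
* static struct spilib_ops my_spilib_ops = {
* .prepare_transfer_hardware = my_prepare,
* .unprepare_transfer_hardware = my_unprepare,
* };
*
* gb_spilib_master_init(connection, dev, &my_spilib_ops);
*/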
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, struct spilib_ops *ops);
void gb_spilib_master_exit(struct gb_connection *connection);
#endif /* __SPILIB_H */

File diff suppressed because it is too large


@ -0,0 +1,109 @@
/*
* Greybus SVC code
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __SVC_H
#define __SVC_H
#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
enum gb_svc_state {
GB_SVC_STATE_RESET,
GB_SVC_STATE_PROTOCOL_VERSION,
GB_SVC_STATE_SVC_HELLO,
};
enum gb_svc_watchdog_bite {
GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
};
struct gb_svc_watchdog;
struct svc_debugfs_pwrmon_rail {
u8 id;
struct gb_svc *svc;
};
struct gb_svc {
struct device dev;
struct gb_host_device *hd;
struct gb_connection *connection;
enum gb_svc_state state;
struct ida device_id_map;
struct workqueue_struct *wq;
u16 endo_id;
u8 ap_intf_id;
u8 protocol_major;
u8 protocol_minor;
struct gb_svc_watchdog *watchdog;
enum gb_svc_watchdog_bite action;
struct dentry *debugfs_dentry;
struct svc_debugfs_pwrmon_rail *pwrmon_rails;
};
#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
struct gb_svc *gb_svc_create(struct gb_host_device *hd);
int gb_svc_add(struct gb_svc *svc);
void gb_svc_del(struct gb_svc *svc);
void gb_svc_put(struct gb_svc *svc);
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
u8 measurement_type, u32 *value);
int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
u8 intf2_id, u8 dev2_id);
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
u8 intf2_id, u16 cport2_id, u8 cport_flags);
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
u8 intf2_id, u16 cport2_id);
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
u32 *value);
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
u32 value);
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
u8 tx_amplitude, u8 tx_hs_equalizer,
u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
u8 flags, u32 quirks,
struct gb_svc_l2_timer_cfg *local,
struct gb_svc_l2_timer_cfg *remote);
int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
int gb_svc_ping(struct gb_svc *svc);
int gb_svc_watchdog_create(struct gb_svc *svc);
void gb_svc_watchdog_destroy(struct gb_svc *svc);
bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
u32 strobe_delay, u32 refclk);
int gb_svc_timesync_disable(struct gb_svc *svc);
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time);
int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time);
int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask);
int gb_svc_timesync_wake_pins_release(struct gb_svc *svc);
int gb_svc_protocol_init(void);
void gb_svc_protocol_exit(void);
#endif /* __SVC_H */


@ -0,0 +1,198 @@
/*
* SVC Greybus "watchdog" driver.
*
* Copyright 2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include "greybus.h"
#define SVC_WATCHDOG_PERIOD (2 * HZ)
struct gb_svc_watchdog {
struct delayed_work work;
struct gb_svc *svc;
bool enabled;
struct notifier_block pm_notifier;
};
static struct delayed_work reset_work;
static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
struct gb_svc_watchdog *watchdog =
container_of(notifier, struct gb_svc_watchdog, pm_notifier);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
gb_svc_watchdog_disable(watchdog->svc);
break;
case PM_POST_SUSPEND:
gb_svc_watchdog_enable(watchdog->svc);
break;
default:
break;
}
return NOTIFY_DONE;
}
static void greybus_reset(struct work_struct *work)
{
static char start_path[256] = "/system/bin/start";
static char *envp[] = {
"HOME=/",
"PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
NULL,
};
static char *argv[] = {
start_path,
"unipro_reset",
NULL,
};
printk(KERN_ERR "svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
argv[0], argv[1]);
call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
}
static void do_work(struct work_struct *work)
{
struct gb_svc_watchdog *watchdog;
struct gb_svc *svc;
int retval;
watchdog = container_of(work, struct gb_svc_watchdog, work.work);
svc = watchdog->svc;
dev_dbg(&svc->dev, "%s: ping.\n", __func__);
retval = gb_svc_ping(svc);
if (retval) {
/*
* Something went really wrong, let's warn userspace and then
* pull the plug and reset the whole greybus network.
* We need to do this outside of this workqueue as we will be
* tearing down the svc device itself. So queue up
* yet-another-callback to do that.
*/
dev_err(&svc->dev,
"SVC ping has returned %d, something is wrong!!!\n",
retval);
if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
panic("SVC is not responding\n");
} else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");
INIT_DELAYED_WORK(&reset_work, greybus_reset);
schedule_delayed_work(&reset_work, HZ / 2);
/*
* Disable ourselves, we don't want to trip again unless
* userspace wants us to.
*/
watchdog->enabled = false;
}
}
/* resubmit our work to happen again, if we are still "alive" */
if (watchdog->enabled)
schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
}
int gb_svc_watchdog_create(struct gb_svc *svc)
{
struct gb_svc_watchdog *watchdog;
int retval;
if (svc->watchdog)
return 0;
watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
if (!watchdog)
return -ENOMEM;
watchdog->enabled = false;
watchdog->svc = svc;
INIT_DELAYED_WORK(&watchdog->work, do_work);
svc->watchdog = watchdog;
watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
retval = register_pm_notifier(&watchdog->pm_notifier);
if (retval) {
dev_err(&svc->dev, "error registering pm notifier(%d)\n",
retval);
goto svc_watchdog_create_err;
}
retval = gb_svc_watchdog_enable(svc);
if (retval) {
dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
unregister_pm_notifier(&watchdog->pm_notifier);
goto svc_watchdog_create_err;
}
return retval;
svc_watchdog_create_err:
svc->watchdog = NULL;
kfree(watchdog);
return retval;
}
void gb_svc_watchdog_destroy(struct gb_svc *svc)
{
struct gb_svc_watchdog *watchdog = svc->watchdog;
if (!watchdog)
return;
unregister_pm_notifier(&watchdog->pm_notifier);
gb_svc_watchdog_disable(svc);
svc->watchdog = NULL;
kfree(watchdog);
}
bool gb_svc_watchdog_enabled(struct gb_svc *svc)
{
if (!svc || !svc->watchdog)
return false;
return svc->watchdog->enabled;
}
int gb_svc_watchdog_enable(struct gb_svc *svc)
{
struct gb_svc_watchdog *watchdog;
if (!svc->watchdog)
return -ENODEV;
watchdog = svc->watchdog;
if (watchdog->enabled)
return 0;
watchdog->enabled = true;
schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
return 0;
}
int gb_svc_watchdog_disable(struct gb_svc *svc)
{
struct gb_svc_watchdog *watchdog;
if (!svc->watchdog)
return -ENODEV;
watchdog = svc->watchdog;
if (!watchdog->enabled)
return 0;
watchdog->enabled = false;
cancel_delayed_work_sync(&watchdog->work);
return 0;
}

File diff suppressed because it is too large


@ -0,0 +1,45 @@
/*
* TimeSync API driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __TIMESYNC_H
#define __TIMESYNC_H
struct gb_svc;
struct gb_interface;
struct gb_timesync_svc;
/* Platform */
u64 gb_timesync_platform_get_counter(void);
u32 gb_timesync_platform_get_clock_rate(void);
int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata);
void gb_timesync_platform_unlock_bus(void);
int gb_timesync_platform_init(void);
void gb_timesync_platform_exit(void);
/* Core API */
int gb_timesync_interface_add(struct gb_interface *interface);
void gb_timesync_interface_remove(struct gb_interface *interface);
int gb_timesync_svc_add(struct gb_svc *svc);
void gb_timesync_svc_remove(struct gb_svc *svc);
u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface);
u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc);
int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
struct timespec *ts);
int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
u64 frame_time, struct timespec *ts);
int gb_timesync_schedule_synchronous(struct gb_interface *intf);
void gb_timesync_schedule_asynchronous(struct gb_interface *intf);
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc);
int gb_timesync_init(void);
void gb_timesync_exit(void);
#endif /* __TIMESYNC_H */


@ -0,0 +1,77 @@
/*
* TimeSync API driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*
* This code reads directly from an ARMv7 memory-mapped timer that lives in
* MMIO space. Since this counter lives in MMIO space, it is shared between
* cores, which means we don't have to worry about issues like the TSC on
* x86, where each time-stamp counter (TSC) is local to a particular core.
*
* Register-level access code is based on
* drivers/clocksource/arm_arch_timer.c
*/
#include <linux/cpufreq.h>
#include <linux/of_platform.h>
#include "greybus.h"
#include "arche_platform.h"
static u32 gb_timesync_clock_frequency;
int (*arche_platform_change_state_cb)(enum arche_platform_state state,
struct gb_timesync_svc *pdata);
EXPORT_SYMBOL_GPL(arche_platform_change_state_cb);
u64 gb_timesync_platform_get_counter(void)
{
return (u64)get_cycles();
}
u32 gb_timesync_platform_get_clock_rate(void)
{
if (unlikely(!gb_timesync_clock_frequency))
return cpufreq_get(0);
return gb_timesync_clock_frequency;
}
int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
{
return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
pdata);
}
void gb_timesync_platform_unlock_bus(void)
{
arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
}
static const struct of_device_id arch_timer_of_match[] = {
{ .compatible = "google,greybus-frame-time-counter", },
{},
};
int __init gb_timesync_platform_init(void)
{
struct device_node *np;
np = of_find_matching_node(NULL, arch_timer_of_match);
if (!np) {
/* Tolerate a missing node to allow BBB etc. to continue */
pr_warn("Unable to find a compatible ARMv7 timer\n");
return 0;
}
if (of_property_read_u32(np, "clock-frequency",
&gb_timesync_clock_frequency)) {
pr_err("Unable to find timer clock-frequency\n");
return -ENODEV;
}
return 0;
}
void gb_timesync_platform_exit(void) {}


@ -0,0 +1 @@
loopback_test


@ -0,0 +1,10 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= loopback_test.c
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := gb_loopback_test
include $(BUILD_EXECUTABLE)


@ -0,0 +1,31 @@
ifeq ($(strip $(V)), 1)
Q =
else
Q = @
endif
CFLAGS += -std=gnu99 -Wall -Wextra -g \
-D_GNU_SOURCE \
-Wno-unused-parameter \
-Wmaybe-uninitialized \
-Wredundant-decls \
-Wcast-align \
-Wsign-compare \
-Wno-missing-field-initializers
CC := $(CROSS_COMPILE)gcc
TOOLS = loopback_test
all: $(TOOLS)
%.o: %.c ../greybus_protocols.h
@echo ' TARGET_CC $@'
$(Q)$(CC) $(CFLAGS) -c $< -o $@
loopback_%: loopback_%.o
@echo ' TARGET_LD $@'
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@
clean::
rm -f *.o $(TOOLS)


@ -0,0 +1,198 @@
1 - LOOPBACK DRIVER
The driver implements the main logic of the loopback test and provides
sysfs files to configure the test and retrieve the results.
A test can be run without the test application, provided the user
understands the sysfs interface of the loopback driver.
The loopback kernel driver needs to be loaded and at least one module
with the loopback feature enabled must be present for the sysfs files to be
created and for the loopback test application to be able to run.
To load the module:
# modprobe gb-loopback
When the module is probed, new files become available in the sysfs
directory of each detected loopback device
(typically under "/sys/bus/greybus/devices").
Here is a short summary of the sysfs interface files that should be visible:
* Loopback Configuration Files:
async - Use asynchronous operations.
iteration_max - Number of test iterations to perform.
size - Payload size of the transfer.
timeout - The number of microseconds to give an individual
asynchronous request before timing out.
us_wait - Time in microseconds to wait between two messages.
type - By writing the test type to this file, the test starts.
Valid tests are:
0 - stop the test
2 - ping
3 - transfer
4 - sink
* Loopback feedback files:
error - Number of errors that have occurred.
iteration_count - Number of iterations performed.
requests_completed - Number of requests successfully completed.
requests_timedout - Number of requests that have timed out.
timeout_max - Max allowed timeout.
timeout_min - Min allowed timeout.
* Loopback result files:
apbridge_unipro_latency_avg
apbridge_unipro_latency_max
apbridge_unipro_latency_min
gpbridge_firmware_latency_avg
gpbridge_firmware_latency_max
gpbridge_firmware_latency_min
requests_per_second_avg
requests_per_second_max
requests_per_second_min
latency_avg
latency_max
latency_min
throughput_avg
throughput_max
throughput_min
2 - LOOPBACK TEST APPLICATION
The loopback test application manages and formats the results provided by
the loopback kernel module. The purpose of this application
is to:
- Start and manage multiple loopback device tests concurrently.
- Calculate the aggregate results for multiple devices.
- Gather and format test results (csv or human readable).
The best way to get up-to-date usage information for the application is
usually to pass the "-h" parameter.
Here is the summary of the available options:
Mandatory arguments
-t must be one of the test names - sink, transfer or ping
-i iteration count - the number of iterations to run the test over
Optional arguments
-S sysfs location - location of the greybus 'endo' entries; default /sys/bus/greybus/devices/
-D debugfs location - location of the loopback debugfs entries; default /sys/kernel/debug/gb_loopback/
-s size of data packet to send during test - defaults to zero
-m mask - a bit mask of connections to include, e.g. -m 8 = 4th connection, -m 9 = 1st and 4th connection, etc.
default is zero which means broadcast to all connections
-v verbose output
-d debug output
-r raw data output - when specified, the full list of latency values is included in the output CSV
-p porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file
-a aggregate - show the aggregate results of all enabled devices
-l list found loopback devices and exit.
-x Async - Enable async transfers.
-o Timeout - Timeout in microseconds for async operations.
3 - REAL WORLD EXAMPLE USAGES
3.1 - Using the driver sysfs files to run a test on a single device:
* Run 1000 transfers of a 100-byte packet. Each transfer is started only
after the previous one has finished successfully:
echo 0 > /sys/bus/greybus/devices/1-2.17/type
echo 0 > /sys/bus/greybus/devices/1-2.17/async
echo 2000 > /sys/bus/greybus/devices/1-2.17/us_wait
echo 100 > /sys/bus/greybus/devices/1-2.17/size
echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
echo 0 > /sys/bus/greybus/devices/1-2.17/mask
echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
echo 3 > /sys/bus/greybus/devices/1-2.17/type
* Run 1000 transfers of a 100-byte packet. Transfers are started without
waiting for the previous one to finish:
echo 0 > /sys/bus/greybus/devices/1-2.17/type
echo 3 > /sys/bus/greybus/devices/1-2.17/async
echo 0 > /sys/bus/greybus/devices/1-2.17/us_wait
echo 100 > /sys/bus/greybus/devices/1-2.17/size
echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
echo 0 > /sys/bus/greybus/devices/1-2.17/mask
echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
echo 3 > /sys/bus/greybus/devices/1-2.17/type
* Read the results from sysfs:
cat /sys/bus/greybus/devices/1-2.17/requests_per_second_min
cat /sys/bus/greybus/devices/1-2.17/requests_per_second_max
cat /sys/bus/greybus/devices/1-2.17/requests_per_second_avg
cat /sys/bus/greybus/devices/1-2.17/latency_min
cat /sys/bus/greybus/devices/1-2.17/latency_max
cat /sys/bus/greybus/devices/1-2.17/latency_avg
cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_min
cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_max
cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_avg
cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_min
cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_max
cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_avg
cat /sys/bus/greybus/devices/1-2.17/error
cat /sys/bus/greybus/devices/1-2.17/requests_completed
cat /sys/bus/greybus/devices/1-2.17/requests_timedout
3.2 - Using the test application:
* Run a transfer test of 10 iterations with a size of 100 bytes on all available devices:
#/loopback_test -t transfer -i 10 -s 100
1970-1-1 0:10:7,transfer,1-4.17,100,10,0,443,509,471.700012,66,1963,2256,2124.600098,293,102776,118088,109318.898438,15312,1620,1998,1894.099976,378,56,57,56.799999,1
1970-1-1 0:10:7,transfer,1-5.17,100,10,0,399,542,463.399994,143,1845,2505,2175.800049,660,92568,125744,107393.296875,33176,1469,2305,1806.500000,836,56,57,56.799999,1
* Show the aggregate results of both devices ("-a"):
#/loopback_test -t transfer -i 10 -s 100 -a
1970-1-1 0:10:35,transfer,1-4.17,100,10,0,448,580,494.100006,132,1722,2230,2039.400024,508,103936,134560,114515.703125,30624,1513,1980,1806.900024,467,56,57,57.299999,1
1970-1-1 0:10:35,transfer,1-5.17,100,10,0,383,558,478.600006,175,1791,2606,2115.199951,815,88856,129456,110919.703125,40600,1457,2246,1773.599976,789,56,57,57.099998,1
1970-1-1 0:10:35,transfer,aggregate,100,10,0,383,580,486.000000,197,1722,2606,2077.000000,884,88856,134560,112717.000000,45704,1457,2246,1789.000000,789,56,57,57.000000,1
* Example usage of the mask option to select which devices will
run the test (1st, 2nd, or both devices):
# /loopback_test -t transfer -i 10 -s 100 -m 1
1970-1-1 0:11:56,transfer,1-4.17,100,10,0,514,558,544.900024,44,1791,1943,1836.599976,152,119248,129456,126301.296875,10208,1600,1001609,101613.601562,1000009,56,57,56.900002,1
# /loopback_test -t transfer -i 10 -s 100 -m 2
1970-1-1 0:12:0,transfer,1-5.17,100,10,0,468,554,539.000000,86,1804,2134,1859.500000,330,108576,128528,124932.500000,19952,1606,1626,1619.300049,20,56,57,57.400002,1
# /loopback_test -t transfer -i 10 -s 100 -m 3
1970-1-1 0:12:3,transfer,1-4.17,100,10,0,432,510,469.399994,78,1959,2313,2135.800049,354,100224,118320,108785.296875,18096,1610,2024,1893.500000,414,56,57,57.200001,1
1970-1-1 0:12:3,transfer,1-5.17,100,10,0,404,542,468.799988,138,1843,2472,2152.500000,629,93728,125744,108646.101562,32016,1504,2247,1853.099976,743,56,57,57.099998,1
* Show output in human-readable format ("-p"):
# /loopback_test -t transfer -i 10 -s 100 -m 3 -p
1970-1-1 0:12:37
test: transfer
path: 1-4.17
size: 100
iterations: 10
errors: 0
async: Disabled
requests per-sec: min=390, max=547, average=469.299988, jitter=157
ap-throughput B/s: min=90480 max=126904 average=108762.101562 jitter=36424
ap-latency usec: min=1826 max=2560 average=2146.000000 jitter=734
apbridge-latency usec: min=1620 max=1982 average=1882.099976 jitter=362
gpbridge-latency usec: min=56 max=57 average=57.099998 jitter=1
1970-1-1 0:12:37
test: transfer
path: 1-5.17
size: 100
iterations: 10
errors: 0
async: Disabled
requests per-sec: min=397, max=538, average=461.700012, jitter=141
ap-throughput B/s: min=92104 max=124816 average=106998.898438 jitter=32712
ap-latency usec: min=1856 max=2514 average=2185.699951 jitter=658
apbridge-latency usec: min=1460 max=2296 average=1828.599976 jitter=836
gpbridge-latency usec: min=56 max=57 average=57.099998 jitter=1


@ -0,0 +1,168 @@
#!/usr/bin/env python
# Copyright (c) 2015 Google, Inc.
# Copyright (c) 2015 Linaro, Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import csv
import datetime
import sys
import time
tests = {'ping': '2', 'transfer': '3', 'sink': '4'}
verbose = 1
def abort():
sys.exit(1)
def usage():
print('Usage: {0} TEST SIZE ITERATIONS PATH\n\n'
' Run TEST for a number of ITERATIONS with operation data SIZE bytes\n'
' TEST may be \'ping\' \'transfer\' or \'sink\'\n'
' SIZE indicates the size of transfer <= greybus max payload bytes\n'
' ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n'
' Note if ITERATIONS is set to zero then this utility will\n'
' initiate an infinite (non terminating) test and exit\n'
' without logging any metrics data\n'
' PATH indicates the sysfs path for the loopback greybus entries e.g.\n'
' /sys/bus/greybus/devices/endo0:1:1:1:1/\n'
'Examples:\n'
' looptest transfer 128 10000\n'
' looptest ping 0 128\n'
' looptest sink 2030 32768\n'
.format(sys.argv[0]), file=sys.stderr)
abort()
def read_sysfs_int(path):
try:
f = open(path, "r");
val = f.read();
f.close()
return int(val)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
print("Invalid path %s" % path)
def write_sysfs_val(path, val):
try:
f = open(path, "r+")
f.write(val)
f.close()
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
print("Invalid path %s" % path)
def log_csv(test_name, size, iteration_max, sys_pfx):
# file name will be test_name_size_iteration_max.csv
# every time the same test with the same parameters is run we will then
# append to the same CSV with datestamp - representing each test dataset
fname = test_name + '_' + size + '_' + str(iteration_max) + '.csv'
try:
# gather data set
date = str(datetime.datetime.now())
error = read_sysfs_int(sys_pfx + 'error')
request_min = read_sysfs_int(sys_pfx + 'requests_per_second_min')
request_max = read_sysfs_int(sys_pfx + 'requests_per_second_max')
request_avg = read_sysfs_int(sys_pfx + 'requests_per_second_avg')
latency_min = read_sysfs_int(sys_pfx + 'latency_min')
latency_max = read_sysfs_int(sys_pfx + 'latency_max')
latency_avg = read_sysfs_int(sys_pfx + 'latency_avg')
throughput_min = read_sysfs_int(sys_pfx + 'throughput_min')
throughput_max = read_sysfs_int(sys_pfx + 'throughput_max')
throughput_avg = read_sysfs_int(sys_pfx + 'throughput_avg')
# derive jitter
request_jitter = request_max - request_min
latency_jitter = latency_max - latency_min
throughput_jitter = throughput_max - throughput_min
# append data set to file
with open(fname, 'a') as csvf:
row = csv.writer(csvf, delimiter=",", quotechar="'",
quoting=csv.QUOTE_MINIMAL)
row.writerow([date, test_name, size, iteration_max, error,
request_min, request_max, request_avg, request_jitter,
latency_min, latency_max, latency_avg, latency_jitter,
throughput_min, throughput_max, throughput_avg, throughput_jitter])
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def loopback_run(test_name, size, iteration_max, sys_pfx):
test_id = tests[test_name]
try:
# Terminate any currently running test
write_sysfs_val(sys_pfx + 'type', '0')
# Set parameter for no wait between messages
write_sysfs_val(sys_pfx + 'us_wait', '0')
# Set operation size
write_sysfs_val(sys_pfx + 'size', size)
# Set iterations
write_sysfs_val(sys_pfx + 'iteration_max', str(iteration_max))
# Initiate by setting loopback operation type
write_sysfs_val(sys_pfx + 'type', test_id)
time.sleep(1)
if iteration_max == 0:
print ("Infinite test initiated CSV won't be logged\n")
return
previous = 0
err = 0
while True:
# get current count bail out if it hasn't changed
iteration_count = read_sysfs_int(sys_pfx + 'iteration_count')
if previous == iteration_count:
err = 1
break
elif iteration_count == iteration_max:
break
previous = iteration_count
if verbose:
print('%02d%% complete %d of %d ' %
(100 * iteration_count / iteration_max,
iteration_count, iteration_max))
time.sleep(1)
if err:
print('\nError executing test\n')
else:
log_csv(test_name, size, iteration_max, sys_pfx)
except ValueError as ve:
print("Error: %s " % format(e.strerror), file=sys.stderr)
abort()
def main():
if len(sys.argv) < 5:
usage()
if sys.argv[1] in tests:
loopback_run(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4])
else:
usage()
if __name__ == '__main__':
main()

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,247 @@
/*
* USB host driver for the Greybus "generic" USB module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "greybus.h"
#include "gbphy.h"
/* Greybus USB request types */
#define GB_USB_TYPE_HCD_START 0x02
#define GB_USB_TYPE_HCD_STOP 0x03
#define GB_USB_TYPE_HUB_CONTROL 0x04
struct gb_usb_hub_control_request {
__le16 typeReq;
__le16 wValue;
__le16 wIndex;
__le16 wLength;
};
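/*
* The fields above mirror the arguments of the HCD hub_control() callback
* (typeReq packs the control request's bRequestType and bRequest bytes);
* everything is converted to little-endian before going over the wire.
*/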
struct gb_usb_hub_control_response {
u8 buf[0];
};
struct gb_usb_device {
struct gb_connection *connection;
struct gbphy_device *gbphy_dev;
};
static inline struct gb_usb_device *to_gb_usb_device(struct usb_hcd *hcd)
{
return (struct gb_usb_device *)hcd->hcd_priv;
}
static inline struct usb_hcd *gb_usb_device_to_hcd(struct gb_usb_device *dev)
{
return container_of((void *)dev, struct usb_hcd, hcd_priv);
}
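/*
* Note: hcd_priv is a variable-length area at the end of struct usb_hcd,
* so the gb_usb_device private data lives directly behind the hcd itself.
* The two helpers above just map between the private data and its
* containing usb_hcd, in both directions.
*/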
static void hcd_stop(struct usb_hcd *hcd)
{
struct gb_usb_device *dev = to_gb_usb_device(hcd);
int ret;
ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_STOP,
NULL, 0, NULL, 0);
if (ret)
dev_err(&dev->gbphy_dev->dev, "HCD stop failed '%d'\n", ret);
}
static int hcd_start(struct usb_hcd *hcd)
{
struct usb_bus *bus = hcd_to_bus(hcd);
struct gb_usb_device *dev = to_gb_usb_device(hcd);
int ret;
ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_START,
NULL, 0, NULL, 0);
if (ret) {
dev_err(&dev->gbphy_dev->dev, "HCD start failed '%d'\n", ret);
return ret;
}
hcd->state = HC_STATE_RUNNING;
if (bus->root_hub)
usb_hcd_resume_root_hub(hcd);
return 0;
}
static int urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
return -ENXIO;
}
static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
return -ENXIO;
}
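/*
* URB submission is left unimplemented: the bridged-PHY USB protocol
* still depends on USB core changes that are not upstream (see the FIXME
* in gb_usb_probe() below), so both URB handlers report -ENXIO.
*/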
static int get_frame_number(struct usb_hcd *hcd)
{
return 0;
}
static int hub_status_data(struct usb_hcd *hcd, char *buf)
{
return 0;
}
static int hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength)
{
struct gb_usb_device *dev = to_gb_usb_device(hcd);
struct gb_operation *operation;
struct gb_usb_hub_control_request *request;
struct gb_usb_hub_control_response *response;
size_t response_size;
int ret;
/* FIXME: handle unspecified lengths */
response_size = sizeof(*response) + wLength;
operation = gb_operation_create(dev->connection,
GB_USB_TYPE_HUB_CONTROL,
sizeof(*request),
response_size,
GFP_KERNEL);
if (!operation)
return -ENOMEM;
request = operation->request->payload;
request->typeReq = cpu_to_le16(typeReq);
request->wValue = cpu_to_le16(wValue);
request->wIndex = cpu_to_le16(wIndex);
request->wLength = cpu_to_le16(wLength);
ret = gb_operation_request_send_sync(operation);
if (ret)
goto out;
if (wLength) {
/* Greybus core has verified response size */
response = operation->response->payload;
memcpy(buf, response->buf, wLength);
}
out:
gb_operation_put(operation);
return ret;
}
static struct hc_driver usb_gb_hc_driver = {
.description = "greybus-hcd",
.product_desc = "Greybus USB Host Controller",
.hcd_priv_size = sizeof(struct gb_usb_device),
.flags = HCD_USB2,
.start = hcd_start,
.stop = hcd_stop,
.urb_enqueue = urb_enqueue,
.urb_dequeue = urb_dequeue,
.get_frame_number = get_frame_number,
.hub_status_data = hub_status_data,
.hub_control = hub_control,
};
static int gb_usb_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
struct device *dev = &gbphy_dev->dev;
struct gb_usb_device *gb_usb_dev;
struct usb_hcd *hcd;
int retval;
hcd = usb_create_hcd(&usb_gb_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
NULL);
if (IS_ERR(connection)) {
retval = PTR_ERR(connection);
goto exit_usb_put;
}
gb_usb_dev = to_gb_usb_device(hcd);
gb_usb_dev->connection = connection;
gb_connection_set_data(connection, gb_usb_dev);
gb_usb_dev->gbphy_dev = gbphy_dev;
gb_gbphy_set_data(gbphy_dev, gb_usb_dev);
hcd->has_tt = 1;
retval = gb_connection_enable(connection);
if (retval)
goto exit_connection_destroy;
/*
* FIXME: The USB bridged-PHY protocol driver depends on changes to
* USB core which are not yet upstream.
*
* Disable for now.
*/
if (1) {
dev_warn(dev, "USB protocol disabled\n");
retval = -EPROTONOSUPPORT;
goto exit_connection_disable;
}
retval = usb_add_hcd(hcd, 0, 0);
if (retval)
goto exit_connection_disable;
return 0;
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
exit_usb_put:
usb_put_hcd(hcd);
return retval;
}
static void gb_usb_remove(struct gbphy_device *gbphy_dev)
{
struct gb_usb_device *gb_usb_dev = gb_gbphy_get_data(gbphy_dev);
struct gb_connection *connection = gb_usb_dev->connection;
struct usb_hcd *hcd = gb_usb_device_to_hcd(gb_usb_dev);
usb_remove_hcd(hcd);
gb_connection_disable(connection);
gb_connection_destroy(connection);
usb_put_hcd(hcd);
}
static const struct gbphy_device_id gb_usb_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_USB) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_usb_id_table);
static struct gbphy_driver usb_driver = {
.name = "usb",
.probe = gb_usb_probe,
.remove = gb_usb_remove,
.id_table = gb_usb_id_table,
};
module_gbphy_driver(usb_driver);
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,249 @@
/*
* Greybus Vibrator protocol driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include "greybus.h"
struct gb_vibrator_device {
struct gb_connection *connection;
struct device *dev;
int minor; /* vibrator minor number */
struct delayed_work delayed_work;
};
/* Greybus Vibrator operation types */
#define GB_VIBRATOR_TYPE_ON 0x02
#define GB_VIBRATOR_TYPE_OFF 0x03
static int turn_off(struct gb_vibrator_device *vib)
{
struct gb_bundle *bundle = vib->connection->bundle;
int ret;
ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_OFF,
NULL, 0, NULL, 0);
gb_pm_runtime_put_autosuspend(bundle);
return ret;
}
static int turn_on(struct gb_vibrator_device *vib, u16 timeout_ms)
{
struct gb_bundle *bundle = vib->connection->bundle;
int ret;
ret = gb_pm_runtime_get_sync(bundle);
if (ret)
return ret;
/* Vibrator was switched ON earlier */
if (cancel_delayed_work_sync(&vib->delayed_work))
turn_off(vib);
ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_ON,
NULL, 0, NULL, 0);
if (ret) {
gb_pm_runtime_put_autosuspend(bundle);
return ret;
}
schedule_delayed_work(&vib->delayed_work, msecs_to_jiffies(timeout_ms));
return 0;
}
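/*
* turn_on() arms this delayed work with the requested timeout so that the
* worker below switches the vibrator off again once timeout_ms expires.
*/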
static void gb_vibrator_worker(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct gb_vibrator_device *vib =
container_of(delayed_work, struct gb_vibrator_device, delayed_work);
turn_off(vib);
}
static ssize_t timeout_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct gb_vibrator_device *vib = dev_get_drvdata(dev);
unsigned long val;
int retval;
retval = kstrtoul(buf, 10, &val);
if (retval < 0) {
dev_err(dev, "could not parse timeout value %d\n", retval);
return retval;
}
if (val)
retval = turn_on(vib, (u16)val);
else
retval = turn_off(vib);
if (retval)
return retval;
return count;
}
static DEVICE_ATTR_WO(timeout);
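/*
* Usage sketch (the "vibrator0" index below is illustrative; it depends
* on enumeration order): writing a decimal millisecond value turns the
* vibrator on for that long, writing 0 turns it off, e.g.:
*
*	echo 500 > /sys/class/vibrator/vibrator0/timeout
*/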
static struct attribute *vibrator_attrs[] = {
&dev_attr_timeout.attr,
NULL,
};
ATTRIBUTE_GROUPS(vibrator);
static struct class vibrator_class = {
.name = "vibrator",
.owner = THIS_MODULE,
.dev_groups = vibrator_groups,
};
static DEFINE_IDA(minors);
static int gb_vibrator_probe(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
struct greybus_descriptor_cport *cport_desc;
struct gb_connection *connection;
struct gb_vibrator_device *vib;
struct device *dev;
int retval;
if (bundle->num_cports != 1)
return -ENODEV;
cport_desc = &bundle->cport_desc[0];
if (cport_desc->protocol_id != GREYBUS_PROTOCOL_VIBRATOR)
return -ENODEV;
vib = kzalloc(sizeof(*vib), GFP_KERNEL);
if (!vib)
return -ENOMEM;
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
NULL);
if (IS_ERR(connection)) {
retval = PTR_ERR(connection);
goto err_free_vib;
}
gb_connection_set_data(connection, vib);
vib->connection = connection;
greybus_set_drvdata(bundle, vib);
retval = gb_connection_enable(connection);
if (retval)
goto err_connection_destroy;
/*
* For now we create a device in sysfs for the vibrator; odds are there
* is a "real" device somewhere in the kernel for this, but I can't find
* it at the moment...
*/
vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
if (vib->minor < 0) {
retval = vib->minor;
goto err_connection_disable;
}
dev = device_create(&vibrator_class, &bundle->dev,
MKDEV(0, 0), vib, "vibrator%d", vib->minor);
if (IS_ERR(dev)) {
retval = -EINVAL;
goto err_ida_remove;
}
vib->dev = dev;
INIT_DELAYED_WORK(&vib->delayed_work, gb_vibrator_worker);
gb_pm_runtime_put_autosuspend(bundle);
return 0;
err_ida_remove:
ida_simple_remove(&minors, vib->minor);
err_connection_disable:
gb_connection_disable(connection);
err_connection_destroy:
gb_connection_destroy(connection);
err_free_vib:
kfree(vib);
return retval;
}
static void gb_vibrator_disconnect(struct gb_bundle *bundle)
{
struct gb_vibrator_device *vib = greybus_get_drvdata(bundle);
int ret;
ret = gb_pm_runtime_get_sync(bundle);
if (ret)
gb_pm_runtime_get_noresume(bundle);
if (cancel_delayed_work_sync(&vib->delayed_work))
turn_off(vib);
device_unregister(vib->dev);
ida_simple_remove(&minors, vib->minor);
gb_connection_disable(vib->connection);
gb_connection_destroy(vib->connection);
kfree(vib);
}
static const struct greybus_bundle_id gb_vibrator_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
{ }
};
MODULE_DEVICE_TABLE(greybus, gb_vibrator_id_table);
static struct greybus_driver gb_vibrator_driver = {
.name = "vibrator",
.probe = gb_vibrator_probe,
.disconnect = gb_vibrator_disconnect,
.id_table = gb_vibrator_id_table,
};
static __init int gb_vibrator_init(void)
{
int retval;
retval = class_register(&vibrator_class);
if (retval)
return retval;
retval = greybus_register(&gb_vibrator_driver);
if (retval)
goto err_class_unregister;
return 0;
err_class_unregister:
class_unregister(&vibrator_class);
return retval;
}
module_init(gb_vibrator_init);
static __exit void gb_vibrator_exit(void)
{
greybus_deregister(&gb_vibrator_driver);
class_unregister(&vibrator_class);
ida_destroy(&minors);
}
module_exit(gb_vibrator_exit);
MODULE_LICENSE("GPL v2");