dect / linux-2.6

staging: ti dspbridge: add resource manager

Add TI's DSP Bridge resource manager driver sources

Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Signed-off-by: Kanigeri, Hari <h-kanigeri2@ti.com>
Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
Signed-off-by: Guzman Lugo, Fernando <fernando.lugo@ti.com>
Signed-off-by: Hebbar, Shivananda <x0hebbar@ti.com>
Signed-off-by: Ramos Falcon, Ernesto <ernesto@ti.com>
Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Anna, Suman <s-anna@ti.com>
Signed-off-by: Gupta, Ramesh <grgupta@ti.com>
Signed-off-by: Gomez Castellanos, Ivan <ivan.gomez@ti.com>
Signed-off-by: Andy Shevchenko <ext-andriy.shevchenko@nokia.com>
Signed-off-by: Armando Uribe De Leon <x0095078@ti.com>
Signed-off-by: Deepak Chitriki <deepak.chitriki@ti.com>
Signed-off-by: Menon, Nishanth <nm@ti.com>
Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Author: Omar Ramirez Luna, 2010-06-23 16:01:58 +03:00
Committer: Greg Kroah-Hartman
parent c4ca3d5a4b
commit 7d55524d30
13 changed files with 13250 additions and 0 deletions

File diff suppressed because it is too large

@@ -0,0 +1,754 @@
/*
* disp.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Node Dispatcher interface. Communicates with Resource Manager Server
* (RMS) on DSP. Access to RMS is synchronized in NODE.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/chnldefs.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/nodedefs.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/rms_sh.h>
/* ----------------------------------- This */
#include <dspbridge/disp.h>
/* Size of a reply from RMS */
#define REPLYSIZE (3 * sizeof(rms_word))
/* Reserved channel offsets for communication with RMS */
#define CHNLTORMSOFFSET 0
#define CHNLFROMRMSOFFSET 1
#define CHNLIOREQS 1
#define SWAP_WORD(x) (((u32)(x) >> 16) | ((u32)(x) << 16))
/*
* ======== disp_object ========
*/
struct disp_object {
struct dev_object *hdev_obj; /* Device for this processor */
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
struct chnl_mgr *hchnl_mgr; /* Channel manager */
struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */
struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */
u8 *pbuf; /* Buffer for commands, replies */
u32 ul_bufsize; /* pbuf size in bytes */
u32 ul_bufsize_rms; /* pbuf size in RMS words */
u32 char_size; /* Size of DSP character */
u32 word_size; /* Size of DSP word */
u32 data_mau_size; /* Size of DSP Data MAU */
};
static u32 refs;
static void delete_disp(struct disp_object *disp_obj);
static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
struct node_strmdef strm_def, u32 max,
u32 chars_in_rms_word);
static int send_message(struct disp_object *disp_obj, u32 dwTimeout,
u32 ul_bytes, OUT u32 *pdw_arg);
/*
* ======== disp_create ========
* Create a NODE Dispatcher object.
*/
int disp_create(OUT struct disp_object **phDispObject,
struct dev_object *hdev_obj,
IN CONST struct disp_attr *pDispAttrs)
{
struct disp_object *disp_obj;
struct bridge_drv_interface *intf_fxns;
u32 ul_chnl_id;
struct chnl_attr chnl_attr_obj;
int status = 0;
u8 dev_type;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(phDispObject != NULL);
DBC_REQUIRE(pDispAttrs != NULL);
DBC_REQUIRE(hdev_obj != NULL);
*phDispObject = NULL;
/* Allocate Node Dispatcher object */
disp_obj = kzalloc(sizeof(struct disp_object), GFP_KERNEL);
if (disp_obj == NULL)
status = -ENOMEM;
else
disp_obj->hdev_obj = hdev_obj;
/* Get Channel manager and Bridge function interface */
if (DSP_SUCCEEDED(status)) {
status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr));
if (DSP_SUCCEEDED(status)) {
(void)dev_get_intf_fxns(hdev_obj, &intf_fxns);
disp_obj->intf_fxns = intf_fxns;
}
}
/* check device type and decide if streams or messaging is used for
 * RMS/EDS */
if (DSP_FAILED(status))
goto func_cont;
status = dev_get_dev_type(hdev_obj, &dev_type);
if (DSP_FAILED(status))
goto func_cont;
if (dev_type != DSP_UNIT) {
status = -EPERM;
goto func_cont;
}
disp_obj->char_size = DSPWORDSIZE;
disp_obj->word_size = DSPWORDSIZE;
disp_obj->data_mau_size = DSPWORDSIZE;
/* Open channels for communicating with the RMS */
chnl_attr_obj.uio_reqs = CHNLIOREQS;
chnl_attr_obj.event_obj = NULL;
ul_chnl_id = pDispAttrs->ul_chnl_offset + CHNLTORMSOFFSET;
status = (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_to_dsp),
disp_obj->hchnl_mgr,
CHNL_MODETODSP, ul_chnl_id,
&chnl_attr_obj);
if (DSP_SUCCEEDED(status)) {
ul_chnl_id = pDispAttrs->ul_chnl_offset + CHNLFROMRMSOFFSET;
status =
(*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_from_dsp),
disp_obj->hchnl_mgr,
CHNL_MODEFROMDSP, ul_chnl_id,
&chnl_attr_obj);
}
if (DSP_SUCCEEDED(status)) {
/* Allocate buffer for commands, replies */
disp_obj->ul_bufsize = pDispAttrs->ul_chnl_buf_size;
disp_obj->ul_bufsize_rms = RMS_COMMANDBUFSIZE;
disp_obj->pbuf = kzalloc(disp_obj->ul_bufsize, GFP_KERNEL);
if (disp_obj->pbuf == NULL)
status = -ENOMEM;
}
func_cont:
if (DSP_SUCCEEDED(status))
*phDispObject = disp_obj;
else
delete_disp(disp_obj);
DBC_ENSURE(((DSP_FAILED(status)) && ((*phDispObject == NULL))) ||
((DSP_SUCCEEDED(status)) && *phDispObject));
return status;
}
/*
* ======== disp_delete ========
* Delete the NODE Dispatcher.
*/
void disp_delete(struct disp_object *disp_obj)
{
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(disp_obj);
delete_disp(disp_obj);
}
/*
* ======== disp_exit ========
* Discontinue usage of DISP module.
*/
void disp_exit(void)
{
DBC_REQUIRE(refs > 0);
refs--;
DBC_ENSURE(refs >= 0);
}
/*
* ======== disp_init ========
* Initialize the DISP module.
*/
bool disp_init(void)
{
bool ret = true;
DBC_REQUIRE(refs >= 0);
if (ret)
refs++;
DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
return ret;
}
/*
* ======== disp_node_change_priority ========
* Change the priority of a node currently running on the target.
*/
int disp_node_change_priority(struct disp_object *disp_obj,
struct node_object *hnode,
u32 ulRMSFxn, nodeenv node_env, s32 prio)
{
u32 dw_arg;
struct rms_command *rms_cmd;
int status = 0;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(disp_obj);
DBC_REQUIRE(hnode != NULL);
/* Send message to RMS to change priority */
rms_cmd = (struct rms_command *)(disp_obj->pbuf);
rms_cmd->fxn = (rms_word) (ulRMSFxn);
rms_cmd->arg1 = (rms_word) node_env;
rms_cmd->arg2 = prio;
status = send_message(disp_obj, node_get_timeout(hnode),
sizeof(struct rms_command), &dw_arg);
return status;
}
/*
* ======== disp_node_create ========
* Create a node on the DSP by remotely calling the node's create function.
*/
int disp_node_create(struct disp_object *disp_obj,
struct node_object *hnode, u32 ulRMSFxn,
u32 ul_create_fxn,
IN CONST struct node_createargs *pargs,
OUT nodeenv *pNodeEnv)
{
struct node_msgargs node_msg_args;
struct node_taskargs task_arg_obj;
struct rms_command *rms_cmd;
struct rms_msg_args *pmsg_args;
struct rms_more_task_args *more_task_args;
enum node_type node_type;
u32 dw_length;
rms_word *pdw_buf = NULL;
u32 ul_bytes;
u32 i;
u32 total;
u32 chars_in_rms_word;
s32 task_args_offset;
s32 sio_in_def_offset;
s32 sio_out_def_offset;
s32 sio_defs_offset;
s32 args_offset = -1;
s32 offset;
struct node_strmdef strm_def;
u32 max;
int status = 0;
struct dsp_nodeinfo node_info;
u8 dev_type;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(disp_obj);
DBC_REQUIRE(hnode != NULL);
DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
DBC_REQUIRE(pNodeEnv != NULL);
status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
if (DSP_FAILED(status))
goto func_end;
if (dev_type != DSP_UNIT) {
dev_dbg(bridge, "%s: unknown device type = 0x%x\n",
__func__, dev_type);
goto func_end;
}
DBC_REQUIRE(pargs != NULL);
node_type = node_get_type(hnode);
node_msg_args = pargs->asa.node_msg_args;
max = disp_obj->ul_bufsize_rms; /*Max # of RMS words that can be sent */
DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
/* Number of RMS words needed to hold arg data */
dw_length =
(node_msg_args.arg_length + chars_in_rms_word -
1) / chars_in_rms_word;
/* Make sure msg args and command fit in buffer */
total = sizeof(struct rms_command) / sizeof(rms_word) +
sizeof(struct rms_msg_args)
/ sizeof(rms_word) - 1 + dw_length;
if (total >= max) {
status = -EPERM;
dev_dbg(bridge, "%s: Message args too large for buffer! size "
"= %d, max = %d\n", __func__, total, max);
}
/*
* Fill in buffer to send to RMS.
* The buffer will have the following format:
*
* RMS command:
* Address of RMS_CreateNode()
* Address of node's create function
* dummy argument
* node type
*
* Message Args:
* max number of messages
* segid for message buffer allocation
* notification type to use when message is received
* length of message arg data
* message args data
*
* Task Args (if task or socket node):
* priority
* stack size
* system stack size
* stack segment
* misc
* number of input streams
* pSTRMInDef[] - offsets of STRM definitions for input streams
* number of output streams
* pSTRMOutDef[] - offsets of STRM definitions for output
* streams
* STRMInDef[] - array of STRM definitions for input streams
* STRMOutDef[] - array of STRM definitions for output streams
*
* Socket Args (if DAIS socket node):
*
*/
if (DSP_SUCCEEDED(status)) {
total = 0; /* Total number of words in buffer so far */
pdw_buf = (rms_word *) disp_obj->pbuf;
rms_cmd = (struct rms_command *)pdw_buf;
rms_cmd->fxn = (rms_word) (ulRMSFxn);
rms_cmd->arg1 = (rms_word) (ul_create_fxn);
if (node_get_load_type(hnode) == NLDR_DYNAMICLOAD) {
/* Flush ICACHE on Load */
rms_cmd->arg2 = 1; /* dummy argument */
} else {
/* Do not flush ICACHE */
rms_cmd->arg2 = 0; /* dummy argument */
}
rms_cmd->data = node_get_type(hnode);
/*
* args_offset is the offset of the data field in struct
* rms_command structure. We need this to calculate stream
* definition offsets.
*/
args_offset = 3;
total += sizeof(struct rms_command) / sizeof(rms_word);
/* Message args */
pmsg_args = (struct rms_msg_args *)(pdw_buf + total);
pmsg_args->max_msgs = node_msg_args.max_msgs;
pmsg_args->segid = node_msg_args.seg_id;
pmsg_args->notify_type = node_msg_args.notify_type;
pmsg_args->arg_length = node_msg_args.arg_length;
total += sizeof(struct rms_msg_args) / sizeof(rms_word) - 1;
memcpy(pdw_buf + total, node_msg_args.pdata,
node_msg_args.arg_length);
total += dw_length;
}
if (DSP_FAILED(status))
goto func_end;
/* If node is a task node, copy task create arguments into buffer */
if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
task_arg_obj = pargs->asa.task_arg_obj;
task_args_offset = total;
total += sizeof(struct rms_more_task_args) / sizeof(rms_word) +
1 + task_arg_obj.num_inputs + task_arg_obj.num_outputs;
/* Copy task arguments */
if (total < max) {
total = task_args_offset;
more_task_args = (struct rms_more_task_args *)(pdw_buf +
total);
/*
* Get some important info about the node. Note that we
* don't just reach into the hnode struct because
* that would break the node object's abstraction.
*/
get_node_info(hnode, &node_info);
more_task_args->priority = node_info.execution_priority;
more_task_args->stack_size = task_arg_obj.stack_size;
more_task_args->sysstack_size =
task_arg_obj.sys_stack_size;
more_task_args->stack_seg = task_arg_obj.stack_seg;
more_task_args->heap_addr = task_arg_obj.udsp_heap_addr;
more_task_args->heap_size = task_arg_obj.heap_size;
more_task_args->misc = task_arg_obj.ul_dais_arg;
more_task_args->num_input_streams =
task_arg_obj.num_inputs;
total +=
sizeof(struct rms_more_task_args) /
sizeof(rms_word);
dev_dbg(bridge, "%s: udsp_heap_addr %x, heap_size %x\n",
__func__, task_arg_obj.udsp_heap_addr,
task_arg_obj.heap_size);
/* Keep track of pSIOInDef[] and pSIOOutDef[]
* positions in the buffer, since this needs to be
* filled in later. */
sio_in_def_offset = total;
total += task_arg_obj.num_inputs;
pdw_buf[total++] = task_arg_obj.num_outputs;
sio_out_def_offset = total;
total += task_arg_obj.num_outputs;
sio_defs_offset = total;
/* Fill SIO defs and offsets */
offset = sio_defs_offset;
for (i = 0; i < task_arg_obj.num_inputs; i++) {
if (DSP_FAILED(status))
break;
pdw_buf[sio_in_def_offset + i] =
(offset - args_offset)
* (sizeof(rms_word) / DSPWORDSIZE);
strm_def = task_arg_obj.strm_in_def[i];
status =
fill_stream_def(pdw_buf, &total, offset,
strm_def, max,
chars_in_rms_word);
offset = total;
}
for (i = 0; (i < task_arg_obj.num_outputs) &&
(DSP_SUCCEEDED(status)); i++) {
pdw_buf[sio_out_def_offset + i] =
(offset - args_offset)
* (sizeof(rms_word) / DSPWORDSIZE);
strm_def = task_arg_obj.strm_out_def[i];
status =
fill_stream_def(pdw_buf, &total, offset,
strm_def, max,
chars_in_rms_word);
offset = total;
}
} else {
/* Args won't fit */
status = -EPERM;
}
}
if (DSP_SUCCEEDED(status)) {
ul_bytes = total * sizeof(rms_word);
DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
status = send_message(disp_obj, node_get_timeout(hnode),
ul_bytes, pNodeEnv);
if (DSP_SUCCEEDED(status)) {
/*
* Message successfully received from RMS.
* Return the status of the Node's create function
* on the DSP-side
*/
status = (((rms_word *) (disp_obj->pbuf))[0]);
if (DSP_FAILED(status))
dev_dbg(bridge, "%s: DSP-side failed: 0x%x\n",
__func__, status);
}
}
func_end:
return status;
}
/*
* ======== disp_node_delete ========
* purpose:
* Delete a node on the DSP by remotely calling the node's delete function.
*
*/
int disp_node_delete(struct disp_object *disp_obj,
struct node_object *hnode, u32 ulRMSFxn,
u32 ul_delete_fxn, nodeenv node_env)
{
u32 dw_arg;
struct rms_command *rms_cmd;
int status = 0;
u8 dev_type;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(disp_obj);
DBC_REQUIRE(hnode != NULL);
status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
if (DSP_SUCCEEDED(status)) {
if (dev_type == DSP_UNIT) {
/*
* Fill in buffer to send to RMS
*/
rms_cmd = (struct rms_command *)disp_obj->pbuf;
rms_cmd->fxn = (rms_word) (ulRMSFxn);
rms_cmd->arg1 = (rms_word) node_env;
rms_cmd->arg2 = (rms_word) (ul_delete_fxn);
rms_cmd->data = node_get_type(hnode);
status = send_message(disp_obj, node_get_timeout(hnode),
sizeof(struct rms_command),
&dw_arg);
if (DSP_SUCCEEDED(status)) {
/*
* Message successfully received from RMS.
* Return the status of the Node's delete
* function on the DSP-side
*/
status = (((rms_word *) (disp_obj->pbuf))[0]);
if (DSP_FAILED(status))
dev_dbg(bridge, "%s: DSP-side failed: "
"0x%x\n", __func__, status);
}
}
}
return status;
}
/*
* ======== disp_node_run ========
* purpose:
* Start execution of a node's execute phase, or resume execution of a node
* that has been suspended (via DISP_NodePause()) on the DSP.
*/
int disp_node_run(struct disp_object *disp_obj,
struct node_object *hnode, u32 ulRMSFxn,
u32 ul_execute_fxn, nodeenv node_env)
{
u32 dw_arg;
struct rms_command *rms_cmd;
int status = 0;
u8 dev_type;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(disp_obj);
DBC_REQUIRE(hnode != NULL);
status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
if (DSP_SUCCEEDED(status)) {
if (dev_type == DSP_UNIT) {
/*
* Fill in buffer to send to RMS.
*/
rms_cmd = (struct rms_command *)disp_obj->pbuf;
rms_cmd->fxn = (rms_word) (ulRMSFxn);
rms_cmd->arg1 = (rms_word) node_env;
rms_cmd->arg2 = (rms_word) (ul_execute_fxn);
rms_cmd->data = node_get_type(hnode);
status = send_message(disp_obj, node_get_timeout(hnode),
sizeof(struct rms_command),
&dw_arg);
if (DSP_SUCCEEDED(status)) {
/*
* Message successfully received from RMS.
* Return the status of the Node's execute
* function on the DSP-side
*/
status = (((rms_word *) (disp_obj->pbuf))[0]);
if (DSP_FAILED(status))
dev_dbg(bridge, "%s: DSP-side failed: "
"0x%x\n", __func__, status);
}
}
}
return status;
}
/*
* ======== delete_disp ========
* purpose:
* Frees the resources allocated for the dispatcher.
*/
static void delete_disp(struct disp_object *disp_obj)
{
int status = 0;
struct bridge_drv_interface *intf_fxns;
if (disp_obj) {
intf_fxns = disp_obj->intf_fxns;
/* Free Node Dispatcher resources */
if (disp_obj->chnl_from_dsp) {
/* Channel close can fail only if the channel handle
* is invalid. */
status = (*intf_fxns->pfn_chnl_close)
(disp_obj->chnl_from_dsp);
if (DSP_FAILED(status)) {
dev_dbg(bridge, "%s: Failed to close channel "
"from RMS: 0x%x\n", __func__, status);
}
}
if (disp_obj->chnl_to_dsp) {
status =
(*intf_fxns->pfn_chnl_close) (disp_obj->
chnl_to_dsp);
if (DSP_FAILED(status)) {
dev_dbg(bridge, "%s: Failed to close channel to"
" RMS: 0x%x\n", __func__, status);
}
}
kfree(disp_obj->pbuf);
kfree(disp_obj);
}
}
/*
* ======== fill_stream_def ========
* purpose:
* Fills stream definitions.
*/
static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
struct node_strmdef strm_def, u32 max,
u32 chars_in_rms_word)
{
struct rms_strm_def *strm_def_obj;
u32 total = *ptotal;
u32 name_len;
u32 dw_length;
int status = 0;
if (total + sizeof(struct rms_strm_def) / sizeof(rms_word) >= max) {
status = -EPERM;
} else {
strm_def_obj = (struct rms_strm_def *)(pdw_buf + total);
strm_def_obj->bufsize = strm_def.buf_size;
strm_def_obj->nbufs = strm_def.num_bufs;
strm_def_obj->segid = strm_def.seg_id;
strm_def_obj->align = strm_def.buf_alignment;
strm_def_obj->timeout = strm_def.utimeout;
}
if (DSP_SUCCEEDED(status)) {
/*
* Since we haven't added the device name yet, subtract
* 1 from total.
*/
total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
DBC_REQUIRE(strm_def.sz_device);
dw_length = strlen(strm_def.sz_device) + 1;
/* Number of RMS_WORDS needed to hold device name */
name_len =
(dw_length + chars_in_rms_word - 1) / chars_in_rms_word;
if (total + name_len >= max) {
status = -EPERM;
} else {
/*
* Zero out last word, since the device name may not
* extend to completely fill this word.
*/
pdw_buf[total + name_len - 1] = 0;
/** TODO USE SERVICES * */
memcpy(pdw_buf + total, strm_def.sz_device, dw_length);
total += name_len;
*ptotal = total;
}
}
return status;
}
/*
* ======== send_message ======
* Send command message to RMS, get reply from RMS.
*/
static int send_message(struct disp_object *disp_obj, u32 dwTimeout,
u32 ul_bytes, u32 *pdw_arg)
{
struct bridge_drv_interface *intf_fxns;
struct chnl_object *chnl_obj;
u32 dw_arg = 0;
u8 *pbuf;
struct chnl_ioc chnl_ioc_obj;
int status = 0;
DBC_REQUIRE(pdw_arg != NULL);
*pdw_arg = (u32) NULL;
intf_fxns = disp_obj->intf_fxns;
chnl_obj = disp_obj->chnl_to_dsp;
pbuf = disp_obj->pbuf;
/* Send the command */
status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 0,
0L, dw_arg);
if (DSP_FAILED(status))
goto func_end;
status =
(*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, dwTimeout, &chnl_ioc_obj);
if (DSP_SUCCEEDED(status)) {
if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj))
status = -ETIME;
else
status = -EPERM;
}
}
/* Get the reply */
if (DSP_FAILED(status))
goto func_end;
chnl_obj = disp_obj->chnl_from_dsp;
ul_bytes = REPLYSIZE;
status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes,
0, 0L, dw_arg);
if (DSP_FAILED(status))
goto func_end;
status =
(*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, dwTimeout, &chnl_ioc_obj);
if (DSP_SUCCEEDED(status)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
status = -ETIME;
} else if (chnl_ioc_obj.byte_size < ul_bytes) {
/* Did not get all of the reply from the RMS */
status = -EPERM;
} else {
if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
DBC_ASSERT(chnl_ioc_obj.pbuf == pbuf);
status = (*((rms_word *) chnl_ioc_obj.pbuf));
*pdw_arg =
(((rms_word *) (chnl_ioc_obj.pbuf))[1]);
} else {
status = -EPERM;
}
}
}
func_end:
return status;
}
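
For orientation: applications never call the dispatcher directly; the node manager (node.c) drives it. The sketch below shows the expected call sequence against the interface defined above. It is illustrative only and not part of this commit: the example_ wrapper, the attribute values and the fxn addresses are placeholders, and disp_init() is assumed to have already been called from the module init path.

static int example_dispatch_node(struct dev_object *hdev_obj,
				 struct node_object *hnode,
				 u32 rms_fxn, u32 create_fxn,
				 u32 execute_fxn, u32 delete_fxn,
				 struct node_createargs *args)
{
	struct disp_attr attrs;
	struct disp_object *disp;
	nodeenv env;
	int status;

	attrs.ul_chnl_offset = 0;	/* placeholder RMS channel offset */
	attrs.ul_chnl_buf_size = RMS_COMMANDBUFSIZE * sizeof(rms_word);

	status = disp_create(&disp, hdev_obj, &attrs);
	if (DSP_FAILED(status))
		return status;

	/* Remote-call the node's create, execute and delete phases via RMS */
	status = disp_node_create(disp, hnode, rms_fxn, create_fxn, args, &env);
	if (DSP_SUCCEEDED(status))
		status = disp_node_run(disp, hnode, rms_fxn, execute_fxn, env);
	if (DSP_SUCCEEDED(status))
		status = disp_node_delete(disp, hnode, rms_fxn, delete_fxn, env);

	disp_delete(disp);
	return status;
}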

File diff suppressed because it is too large

@@ -0,0 +1,644 @@
/*
* drv_interface.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP/BIOS Bridge driver interface.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#ifdef MODULE
#include <linux/module.h>
#endif
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/services.h>
#include <dspbridge/clk.h>
#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dspapi-ioctl.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dspdrv.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/pwr.h>
/* ----------------------------------- This */
#include <drv_interface.h>
#include <dspbridge/cfg.h>
#include <dspbridge/resourcecleanup.h>
#include <dspbridge/chnl.h>
#include <dspbridge/proc.h>
#include <dspbridge/dev.h>
#include <dspbridge/drvdefs.h>
#include <dspbridge/drv.h>
#ifdef CONFIG_BRIDGE_DVFS
#include <mach-omap2/omap3-opp.h>
#endif
#define BRIDGE_NAME "C6410"
/* ----------------------------------- Globals */
#define DRIVER_NAME "DspBridge"
#define DSPBRIDGE_VERSION "0.3"
s32 dsp_debug;
struct platform_device *omap_dspbridge_dev;
struct device *bridge;
/* This is a test variable used by Bridge to test different sleep states */
s32 dsp_test_sleepstate;
static struct cdev bridge_cdev;
static struct class *bridge_class;
static u32 driver_context;
static s32 driver_major;
static char *base_img;
char *iva_img;
static s32 shm_size = 0x500000; /* 5 MB */
static int tc_wordswapon; /* Default value is always false */
#ifdef CONFIG_BRIDGE_RECOVERY
#define REC_TIMEOUT 5000 /*recovery timeout in msecs */
static atomic_t bridge_cref; /* number of bridge open handles */
static struct workqueue_struct *bridge_rec_queue;
static struct work_struct bridge_recovery_work;
static DECLARE_COMPLETION(bridge_comp);
static DECLARE_COMPLETION(bridge_open_comp);
static bool recover;
#endif
#ifdef CONFIG_PM
struct omap34_xx_bridge_suspend_data {
int suspended;
wait_queue_head_t suspend_wq;
};
static struct omap34_xx_bridge_suspend_data bridge_suspend_data;
static int omap34_xxbridge_suspend_lockout(struct omap34_xx_bridge_suspend_data
*s, struct file *f)
{
if ((s)->suspended) {
if ((f)->f_flags & O_NONBLOCK)
return -EPERM;
wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0);
}
return 0;
}
#endif
module_param(dsp_debug, int, 0);
MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false");
module_param(dsp_test_sleepstate, int, 0);
MODULE_PARM_DESC(dsp_test_sleepstate, "DSP Sleep state = 0");
module_param(base_img, charp, 0);
MODULE_PARM_DESC(base_img, "DSP base image, default = NULL");
module_param(shm_size, int, 0);
MODULE_PARM_DESC(shm_size, "shm size, default = 5 MB, minimum = 64 KB");
module_param(tc_wordswapon, int, 0);
MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. default = 0");
MODULE_AUTHOR("Texas Instruments");
MODULE_LICENSE("GPL");
MODULE_VERSION(DSPBRIDGE_VERSION);
static char *driver_name = DRIVER_NAME;
static const struct file_operations bridge_fops = {
.open = bridge_open,
.release = bridge_release,
.unlocked_ioctl = bridge_ioctl,
.mmap = bridge_mmap,
};
#ifdef CONFIG_PM
static u32 time_out = 1000;
#ifdef CONFIG_BRIDGE_DVFS
s32 dsp_max_opps = VDD1_OPP5;
#endif
/* Maximum Opps that can be requested by IVA */
/*vdd1 rate table */
#ifdef CONFIG_BRIDGE_DVFS
const struct omap_opp vdd1_rate_table_bridge[] = {
{0, 0, 0},
/*OPP1 */
{S125M, VDD1_OPP1, 0},
/*OPP2 */
{S250M, VDD1_OPP2, 0},
/*OPP3 */
{S500M, VDD1_OPP3, 0},
/*OPP4 */
{S550M, VDD1_OPP4, 0},
/*OPP5 */
{S600M, VDD1_OPP5, 0},
};
#endif
#endif
struct dspbridge_platform_data *omap_dspbridge_pdata;
u32 vdd1_dsp_freq[6][4] = {
{0, 0, 0, 0},
/*OPP1 */
{0, 90000, 0, 86000},
/*OPP2 */
{0, 180000, 80000, 170000},
/*OPP3 */
{0, 360000, 160000, 340000},
/*OPP4 */
{0, 396000, 325000, 376000},
/*OPP5 */
{0, 430000, 355000, 430000},
};
#ifdef CONFIG_BRIDGE_RECOVERY
static void bridge_recover(struct work_struct *work)
{
struct dev_object *dev;
struct cfg_devnode *dev_node;
if (atomic_read(&bridge_cref)) {
INIT_COMPLETION(bridge_comp);
while (!wait_for_completion_timeout(&bridge_comp,
msecs_to_jiffies(REC_TIMEOUT)))
pr_info("%s:%d handle(s) still opened\n",
__func__, atomic_read(&bridge_cref));
}
dev = dev_get_first();
dev_get_dev_node(dev, &dev_node);
if (!dev_node || DSP_FAILED(proc_auto_start(dev_node, dev)))
pr_err("DSP could not be restarted\n");
recover = false;
complete_all(&bridge_open_comp);
}
void bridge_recover_schedule(void)
{
INIT_COMPLETION(bridge_open_comp);
recover = true;
queue_work(bridge_rec_queue, &bridge_recovery_work);
}
#endif
#ifdef CONFIG_BRIDGE_DVFS
static int dspbridge_scale_notification(struct notifier_block *op,
unsigned long val, void *ptr)
{
struct dspbridge_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
return 0;
}
static struct notifier_block iva_clk_notifier = {
.notifier_call = dspbridge_scale_notification,
NULL,
};
#endif
/**
* omap3_bridge_startup() - perform low level initializations
* @pdev: pointer to platform device
*
* Initializes recovery, PM and DVFS required data, before calling
* clk and memory init routines.
*/
static int omap3_bridge_startup(struct platform_device *pdev)
{
struct dspbridge_platform_data *pdata = pdev->dev.platform_data;
struct drv_data *drv_datap = NULL;
u32 phys_membase, phys_memsize;
int err;
#ifdef CONFIG_BRIDGE_DVFS
int i;	/* index into vdd1_rate_table_bridge below */
#endif
#ifdef CONFIG_BRIDGE_RECOVERY
bridge_rec_queue = create_workqueue("bridge_rec_queue");
INIT_WORK(&bridge_recovery_work, bridge_recover);
INIT_COMPLETION(bridge_comp);
#endif
#ifdef CONFIG_PM
/* Initialize the wait queue */
bridge_suspend_data.suspended = 0;
init_waitqueue_head(&bridge_suspend_data.suspend_wq);
#ifdef CONFIG_BRIDGE_DVFS
for (i = 0; i < 6; i++)
pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate;
err = cpufreq_register_notifier(&iva_clk_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
if (err)
pr_err("%s: clk_notifier_register failed for iva2_ck\n",
__func__);
#endif
#endif
dsp_clk_init();
services_init();
drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL);
if (!drv_datap) {
err = -ENOMEM;
goto err1;
}
drv_datap->shm_size = shm_size;
drv_datap->tc_wordswapon = tc_wordswapon;
if (base_img) {
drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL);
if (!drv_datap->base_img) {
err = -ENOMEM;
goto err2;
}
strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1);
}
dev_set_drvdata(bridge, drv_datap);
if (shm_size < 0x10000) { /* 64 KB */
err = -EINVAL;
pr_err("%s: shm size must be at least 64 KB\n", __func__);
goto err3;
}
dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size);
phys_membase = pdata->phys_mempool_base;
phys_memsize = pdata->phys_mempool_size;
if (phys_membase > 0 && phys_memsize > 0)
mem_ext_phys_pool_init(phys_membase, phys_memsize);
if (tc_wordswapon)
dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__);
driver_context = dsp_init(&err);
if (err) {
pr_err("DSP Bridge driver initialization failed\n");
goto err4;
}
return 0;
err4:
mem_ext_phys_pool_release();
err3:
kfree(drv_datap->base_img);
err2:
kfree(drv_datap);
err1:
#ifdef CONFIG_BRIDGE_DVFS
cpufreq_unregister_notifier(&iva_clk_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
#endif
dsp_clk_exit();
services_exit();
return err;
}
static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
{
int err;
dev_t dev = 0;
#ifdef CONFIG_BRIDGE_DVFS
int i = 0;
#endif
omap_dspbridge_dev = pdev;
/* Global bridge device */
bridge = &omap_dspbridge_dev->dev;
/* Bridge low level initializations */
err = omap3_bridge_startup(pdev);
if (err)
goto err1;
/* use 2.6 device model */
err = alloc_chrdev_region(&dev, 0, 1, driver_name);
if (err) {
pr_err("%s: Can't get major %d\n", __func__, driver_major);
goto err1;
}
cdev_init(&bridge_cdev, &bridge_fops);
bridge_cdev.owner = THIS_MODULE;
err = cdev_add(&bridge_cdev, dev, 1);
if (err) {
pr_err("%s: Failed to add bridge device\n", __func__);
goto err2;
}
/* udev support */
bridge_class = class_create(THIS_MODULE, "ti_bridge");
if (IS_ERR(bridge_class)) {
pr_err("%s: Error creating bridge class\n", __func__);
goto err3;
}
driver_major = MAJOR(dev);
device_create(bridge_class, NULL, MKDEV(driver_major, 0),
NULL, "DspBridge");
pr_info("DSP Bridge driver loaded\n");
return 0;
err3:
cdev_del(&bridge_cdev);
err2:
unregister_chrdev_region(dev, 1);
err1:
return err;
}
static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
{
dev_t devno;
bool ret;
int status = 0;
void *hdrv_obj = NULL;
status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
if (DSP_FAILED(status))
goto func_cont;
#ifdef CONFIG_BRIDGE_DVFS
if (cpufreq_unregister_notifier(&iva_clk_notifier,
CPUFREQ_TRANSITION_NOTIFIER))
pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
__func__);
#endif /* #ifdef CONFIG_BRIDGE_DVFS */
if (driver_context) {
/* Put the DSP in reset state */
ret = dsp_deinit(driver_context);
driver_context = 0;
DBC_ASSERT(ret == true);
}
func_cont:
mem_ext_phys_pool_release();
dsp_clk_exit();
services_exit();
devno = MKDEV(driver_major, 0);
cdev_del(&bridge_cdev);
unregister_chrdev_region(devno, 1);
if (bridge_class) {
/* remove the device from sysfs */
device_destroy(bridge_class, MKDEV(driver_major, 0));
class_destroy(bridge_class);
}
return 0;
}
#ifdef CONFIG_PM
static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
{
u32 status;
u32 command = PWR_EMERGENCYDEEPSLEEP;
status = pwr_sleep_dsp(command, time_out);
if (DSP_FAILED(status))
return -1;
bridge_suspend_data.suspended = 1;
return 0;
}
static int BRIDGE_RESUME(struct platform_device *pdev)
{
u32 status;
status = pwr_wake_dsp(time_out);
if (DSP_FAILED(status))
return -1;
bridge_suspend_data.suspended = 0;
wake_up(&bridge_suspend_data.suspend_wq);
return 0;
}
#else
#define BRIDGE_SUSPEND NULL
#define BRIDGE_RESUME NULL
#endif
static struct platform_driver bridge_driver = {
.driver = {
.name = BRIDGE_NAME,
},
.probe = omap34_xx_bridge_probe,
.remove = __devexit_p(omap34_xx_bridge_remove),
.suspend = BRIDGE_SUSPEND,
.resume = BRIDGE_RESUME,
};
static int __init bridge_init(void)
{
return platform_driver_register(&bridge_driver);
}
static void __exit bridge_exit(void)
{
platform_driver_unregister(&bridge_driver);
}
/*
* This function is called when an application opens handle to the
* bridge driver.
*/
static int bridge_open(struct inode *ip, struct file *filp)
{
int status = 0;
struct process_context *pr_ctxt = NULL;
/*
* Allocate a new process context and insert it into global
* process context list.
*/
#ifdef CONFIG_BRIDGE_RECOVERY
if (recover) {
if (filp->f_flags & O_NONBLOCK ||
wait_for_completion_interruptible(&bridge_open_comp))
return -EBUSY;
}
#endif
pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
if (pr_ctxt) {
pr_ctxt->res_state = PROC_RES_ALLOCATED;
spin_lock_init(&pr_ctxt->dmm_map_lock);
INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
spin_lock_init(&pr_ctxt->dmm_rsv_lock);
INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
mutex_init(&pr_ctxt->node_mutex);
mutex_init(&pr_ctxt->strm_mutex);
} else {
status = -ENOMEM;
}
filp->private_data = pr_ctxt;
#ifdef CONFIG_BRIDGE_RECOVERY
if (!status)
atomic_inc(&bridge_cref);
#endif
return status;
}
/*
* This function is called when an application closes handle to the bridge
* driver.
*/
static int bridge_release(struct inode *ip, struct file *filp)
{
int status = 0;
struct process_context *pr_ctxt;
if (!filp->private_data) {
status = -EIO;
goto err;
}
pr_ctxt = filp->private_data;
flush_signals(current);
drv_remove_all_resources(pr_ctxt);
proc_detach(pr_ctxt);
kfree(pr_ctxt);
filp->private_data = NULL;
err:
#ifdef CONFIG_BRIDGE_RECOVERY
if (!atomic_dec_return(&bridge_cref))
complete(&bridge_comp);
#endif
return status;
}
/* This function provides IO interface to the bridge driver. */
static long bridge_ioctl(struct file *filp, unsigned int code,
unsigned long args)
{
int status;
u32 retval = 0;
union Trapped_Args buf_in;
DBC_REQUIRE(filp != NULL);
#ifdef CONFIG_BRIDGE_RECOVERY
if (recover) {
status = -EIO;
goto err;
}
#endif
#ifdef CONFIG_PM
status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
if (status != 0)
return status;
#endif
if (!filp->private_data) {
status = -EIO;
goto err;
}
status = copy_from_user(&buf_in, (union Trapped_Args *)args,
sizeof(union Trapped_Args));
if (!status) {
status = api_call_dev_ioctl(code, &buf_in, &retval,
filp->private_data);
if (DSP_SUCCEEDED(status)) {
status = retval;
} else {
dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
"status 0x%x\n", __func__, code, status);
status = -1;
}
}
err:
return status;
}
/* This function maps kernel space memory to user space memory. */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
u32 offset = vma->vm_pgoff << PAGE_SHIFT;
u32 status;
DBC_ASSERT(vma->vm_start < vma->vm_end);
vma->vm_flags |= VM_RESERVED | VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
"%lx flags %lx\n", __func__, filp, offset,
vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
if (status != 0)
status = -EAGAIN;
return status;
}
/* To remove all process resources before removing the process from the
* process context list */
int drv_remove_all_resources(void *hPCtxt)
{
int status = 0;
struct process_context *ctxt = (struct process_context *)hPCtxt;
drv_remove_all_strm_res_elements(ctxt);
drv_remove_all_node_res_elements(ctxt);
drv_remove_all_dmm_res_elements(ctxt);
ctxt->res_state = PROC_RES_FREED;
return status;
}
/* Bridge driver initialization and de-initialization functions */
module_init(bridge_init);
module_exit(bridge_exit);
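
The character device registered above is what the user-space bridge library ultimately talks to. Below is a minimal user-space sketch, illustrative only and not part of this commit: it assumes udev has created /dev/DspBridge from the device_create() call in the probe path, and it stops at open()/mmap(); real clients issue the ioctl commands defined in dspapi-ioctl.h through bridge_ioctl().

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Node name taken from device_create(..., "DspBridge") above */
	int fd = open("/dev/DspBridge", O_RDWR);
	if (fd < 0) {
		perror("open /dev/DspBridge");
		return 1;
	}
	/*
	 * bridge_mmap() remaps the pages uncached; the offset must be a
	 * physical address the bridge exposes, so 0 is only a placeholder
	 * and this call is expected to fail on real hardware.
	 */
	void *shm = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (shm != MAP_FAILED)
		munmap(shm, 4096);
	close(fd);
	return 0;
}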


@@ -0,0 +1,27 @@
/*
* drv_interface.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _DRV_INTERFACE_H_
#define _DRV_INTERFACE_H_
/* Prototypes for all functions in this bridge */
static int __init bridge_init(void); /* Initialize bridge */
static void __exit bridge_exit(void); /* Opposite of initialize */
static int bridge_open(struct inode *, struct file *); /* Open */
static int bridge_release(struct inode *, struct file *); /* Release */
static long bridge_ioctl(struct file *, unsigned int, unsigned long);
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
#endif /* ifndef _DRV_INTERFACE_H_ */


@@ -0,0 +1,142 @@
/*
* dspdrv.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Interface to allocate and free bridge resources.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h>
/* ----------------------------------- This */
#include <dspbridge/dspdrv.h>
/*
* ======== dsp_init ========
* Allocates bridge resources. Loads a base image onto DSP, if specified.
*/
u32 dsp_init(OUT u32 *init_status)
{
char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510";
int status = -EPERM;
struct drv_object *drv_obj = NULL;
u32 device_node;
u32 device_node_string;
if (!api_init())
goto func_cont;
status = drv_create(&drv_obj);
if (DSP_FAILED(status)) {
api_exit();
goto func_cont;
}
/* End drv_create */
/* Request Resources */
status = drv_request_resources((u32) &dev_node, &device_node_string);
if (DSP_SUCCEEDED(status)) {
/* Attempt to Start the Device */
status = dev_start_device((struct cfg_devnode *)
device_node_string);
if (DSP_FAILED(status))
(void)drv_release_resources
((u32) device_node_string, drv_obj);
} else {
dev_dbg(bridge, "%s: drv_request_resources Failed\n", __func__);
status = -EPERM;
}
/* Unwind whatever was loaded */
if (DSP_FAILED(status)) {
/* Irrespective of the status of dev_remove_device we continue
 * unloading. Get the Driver Object, iterate through and remove.
 * Reset the status to E_FAIL to avoid going through
 * api_init_complete2. */
for (device_node = drv_get_first_dev_extension();
device_node != 0;
device_node = drv_get_next_dev_extension(device_node)) {
(void)dev_remove_device((struct cfg_devnode *)
device_node);
(void)drv_release_resources((u32) device_node, drv_obj);
}
/* Remove the Driver Object */
(void)drv_destroy(drv_obj);
drv_obj = NULL;
api_exit();
dev_dbg(bridge, "%s: Logical device failed init\n", __func__);
} /* Unwinding the loaded drivers */
func_cont:
/* Attempt to Start the Board */
if (DSP_SUCCEEDED(status)) {
/* BRD_AutoStart could fail if the dsp executable is not the
 * correct one. We should not propagate that error
 * into the device loader. */
(void)api_init_complete2();
} else {
dev_dbg(bridge, "%s: Failed\n", __func__);
} /* End api_init_complete2 */
DBC_ENSURE((DSP_SUCCEEDED(status) && drv_obj != NULL) ||
(DSP_FAILED(status) && drv_obj == NULL));
*init_status = status;
/* Return the Driver Object */
return (u32) drv_obj;
}
/*
* ======== dsp_deinit ========
* Frees the resources allocated for bridge.
*/
bool dsp_deinit(u32 deviceContext)
{
bool ret = true;
u32 device_node;
struct mgr_object *mgr_obj = NULL;
while ((device_node = drv_get_first_dev_extension()) != 0) {
(void)dev_remove_device((struct cfg_devnode *)device_node);
(void)drv_release_resources((u32) device_node,
(struct drv_object *)deviceContext);
}
(void)drv_destroy((struct drv_object *)deviceContext);
/* Get the Manager Object from Registry
* MGR Destroy will unload the DCD dll */
if (DSP_SUCCEEDED(cfg_get_object((u32 *) &mgr_obj, REG_MGR_OBJECT)))
(void)mgr_destroy(mgr_obj);
api_exit();
return ret;
}


@@ -0,0 +1,374 @@
/*
* mgr.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implementation of Manager interface to the device object at the
* driver level. This queries the NDB database and retrieves the
* data about Node and Processor.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/sync.h>
/* ----------------------------------- Others */
#include <dspbridge/dbdcd.h>
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
/* ----------------------------------- This */
#include <dspbridge/mgr.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define ZLDLLNAME ""
struct mgr_object {
struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
};
/* ----------------------------------- Globals */
static u32 refs;
/*
* ========= mgr_create =========
* Purpose:
* MGR Object gets created only once during driver Loading.
*/
int mgr_create(OUT struct mgr_object **phMgrObject,
struct cfg_devnode *dev_node_obj)
{
int status = 0;
struct mgr_object *pmgr_obj = NULL;
DBC_REQUIRE(phMgrObject != NULL);
DBC_REQUIRE(refs > 0);
pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
if (pmgr_obj) {
status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr);
if (DSP_SUCCEEDED(status)) {
/* If succeeded store the handle in the MGR Object */
status = cfg_set_object((u32) pmgr_obj, REG_MGR_OBJECT);
if (DSP_SUCCEEDED(status)) {
*phMgrObject = pmgr_obj;
} else {
dcd_destroy_manager(pmgr_obj->hdcd_mgr);
kfree(pmgr_obj);
}
} else {
/* failed to Create DCD Manager */
kfree(pmgr_obj);
}
} else {
status = -ENOMEM;
}
DBC_ENSURE(DSP_FAILED(status) || pmgr_obj);
return status;
}
/*
* ========= mgr_destroy =========
* This function is invoked during bridge driver unloading. Frees the MGR object.
*/
int mgr_destroy(struct mgr_object *hmgr_obj)
{
int status = 0;
struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(hmgr_obj);
/* Free resources */
if (hmgr_obj->hdcd_mgr)
dcd_destroy_manager(hmgr_obj->hdcd_mgr);
kfree(pmgr_obj);
/* Update the Registry with NULL for MGR Object */
(void)cfg_set_object(0, REG_MGR_OBJECT);
return status;
}
/*
* ======== mgr_enum_node_info ========
* Enumerate and get configuration information about nodes configured
* in the node database.
*/
int mgr_enum_node_info(u32 node_id, OUT struct dsp_ndbprops *pndb_props,
u32 undb_props_size, OUT u32 *pu_num_nodes)
{
int status = 0;
struct dsp_uuid node_uuid, temp_uuid;
u32 temp_index = 0;
u32 node_index = 0;
struct dcd_genericobj gen_obj;
struct mgr_object *pmgr_obj = NULL;
DBC_REQUIRE(pndb_props != NULL);
DBC_REQUIRE(pu_num_nodes != NULL);
DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
DBC_REQUIRE(refs > 0);
*pu_num_nodes = 0;
/* Get The Manager Object from the Registry */
status = cfg_get_object((u32 *) &pmgr_obj, REG_MGR_OBJECT);
if (DSP_FAILED(status))
goto func_cont;
DBC_ASSERT(pmgr_obj);
/* Loop until enumeration fails or there are no more items;
 * we exit the loop as soon as status is non-zero. */
while (status == 0) {
status = dcd_enumerate_object(temp_index++, DSP_DCDNODETYPE,
&temp_uuid);
if (status == 0) {
node_index++;
if (node_id == (node_index - 1))
node_uuid = temp_uuid;
}
}
if (DSP_SUCCEEDED(status)) {
if (node_id > (node_index - 1)) {
status = -EINVAL;
} else {
status = dcd_get_object_def(pmgr_obj->hdcd_mgr,
(struct dsp_uuid *)
&node_uuid, DSP_DCDNODETYPE,
&gen_obj);
if (DSP_SUCCEEDED(status)) {
/* Get the Obj def */
*pndb_props =
gen_obj.obj_data.node_obj.ndb_props;
*pu_num_nodes = node_index;
}
}
}
func_cont:
DBC_ENSURE((DSP_SUCCEEDED(status) && *pu_num_nodes > 0) ||
(DSP_FAILED(status) && *pu_num_nodes == 0));
return status;
}
/*
* ======== mgr_enum_processor_info ========
* Enumerate and get configuration information about available
* DSP processors.
*/
int mgr_enum_processor_info(u32 processor_id,
OUT struct dsp_processorinfo *
processor_info, u32 processor_info_size,
OUT u8 *pu_num_procs)
{
int status = 0;
int status1 = 0;
int status2 = 0;
struct dsp_uuid temp_uuid;
u32 temp_index = 0;
u32 proc_index = 0;
struct dcd_genericobj gen_obj;
struct mgr_object *pmgr_obj = NULL;
struct mgr_processorextinfo *ext_info;
struct dev_object *hdev_obj;
struct drv_object *hdrv_obj;
u8 dev_type;
struct cfg_devnode *dev_node;
bool proc_detect = false;
DBC_REQUIRE(processor_info != NULL);
DBC_REQUIRE(pu_num_procs != NULL);
DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
DBC_REQUIRE(refs > 0);
*pu_num_procs = 0;
status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
if (DSP_SUCCEEDED(status)) {
status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
if (DSP_SUCCEEDED(status)) {
status = dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
status = dev_get_dev_node(hdev_obj, &dev_node);
if (dev_type != DSP_UNIT)
status = -EPERM;
if (DSP_SUCCEEDED(status))
processor_info->processor_type = DSPTYPE64;
}
}
if (DSP_FAILED(status))
goto func_end;
/* Get The Manager Object from the Registry */
if (DSP_FAILED(cfg_get_object((u32 *) &pmgr_obj, REG_MGR_OBJECT))) {
dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
goto func_end;
}
DBC_ASSERT(pmgr_obj);
/* Loop until there are no more items in the enumeration;
 * we exit the loop as soon as status1 is non-zero. */
while (status1 == 0) {
status1 = dcd_enumerate_object(temp_index++,
DSP_DCDPROCESSORTYPE,
&temp_uuid);
if (status1 != 0)
break;
proc_index++;
/* Get the Object properties to find the Device/Processor
* Type */
if (proc_detect != false)
continue;
status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr,
(struct dsp_uuid *)&temp_uuid,
DSP_DCDPROCESSORTYPE, &gen_obj);
if (DSP_SUCCEEDED(status2)) {
/* Get the Obj def */
if (processor_info_size <
sizeof(struct mgr_processorextinfo)) {
*processor_info = gen_obj.obj_data.proc_info;
} else {
/* extended info */
ext_info = (struct mgr_processorextinfo *)
processor_info;
*ext_info = gen_obj.obj_data.ext_proc_obj;
}
dev_dbg(bridge, "%s: Got proctype from DCD %x\n",
__func__, processor_info->processor_type);
/* See if we got the needed processor */
if (dev_type == DSP_UNIT) {
if (processor_info->processor_type ==
DSPPROCTYPE_C64)
proc_detect = true;
} else if (dev_type == IVA_UNIT) {
if (processor_info->processor_type ==
IVAPROCTYPE_ARM7)
proc_detect = true;
}
/* User applications only check for the chip type, hence
 * this clumsy overwrite */
processor_info->processor_type = DSPTYPE64;
} else {
dev_dbg(bridge, "%s: Failed to get DCD processor info "
"%x\n", __func__, status2);
status = -EPERM;
}
}
*pu_num_procs = proc_index;
if (proc_detect == false) {
dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use "
"CFG registry\n", __func__);
processor_info->processor_type = DSPTYPE64;
}
func_end:
return status;
}
/*
* ======== mgr_exit ========
* Decrement reference count, and free resources when reference count is
* 0.
*/
void mgr_exit(void)
{
DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0)
dcd_exit();
DBC_ENSURE(refs >= 0);
}
/*
* ======== mgr_get_dcd_handle ========
* Retrieves the MGR handle. Accessor Function.
*/
int mgr_get_dcd_handle(struct mgr_object *hMGRHandle,
OUT u32 *phDCDHandle)
{
int status = -EPERM;
struct mgr_object *pmgr_obj = (struct mgr_object *)hMGRHandle;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(phDCDHandle != NULL);
*phDCDHandle = (u32) NULL;
if (pmgr_obj) {
*phDCDHandle = (u32) pmgr_obj->hdcd_mgr;
status = 0;
}
DBC_ENSURE((DSP_SUCCEEDED(status) && *phDCDHandle != (u32) NULL) ||
(DSP_FAILED(status) && *phDCDHandle == (u32) NULL));
return status;
}
/*
* ======== mgr_init ========
* Initialize MGR's private state, keeping a reference count on each call.
*/
bool mgr_init(void)
{
bool ret = true;
bool init_dcd = false;
DBC_REQUIRE(refs >= 0);
if (refs == 0) {
init_dcd = dcd_init(); /* DCD Module */
if (!init_dcd)
ret = false;
}
if (ret)
refs++;
DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
return ret;
}
/*
* ======== mgr_wait_for_bridge_events ========
* Block on any Bridge event(s)
*/
int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
u32 count, OUT u32 *pu_index,
u32 utimeout)
{
int status;
struct sync_object *sync_events[MAX_EVENTS];
u32 i;
DBC_REQUIRE(count < MAX_EVENTS);
for (i = 0; i < count; i++)
sync_events[i] = anotifications[i]->handle;
status = sync_wait_on_multiple_events(sync_events, count, utimeout,
pu_index);
return status;
}
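
As an illustration of the enumeration interface above (not part of this commit), a caller such as the DSP API layer could walk the node database as follows; the example_ name and the 256 upper bound are made up, and mgr_init() is assumed to have been called first.

static void example_list_registered_nodes(void)
{
	struct dsp_ndbprops props;
	u32 num_nodes = 0;
	u32 i;

	/* mgr_enum_node_info() returns non-zero once node_id runs past the
	 * last entry registered in the DCD node database. */
	for (i = 0; i < 256; i++) {
		if (mgr_enum_node_info(i, &props, sizeof(props), &num_nodes))
			break;
	}
	pr_info("%u node(s) registered in the NDB\n", num_nodes);
}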

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,182 @@
/*
* pwr.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* PWR API for controlling DSP power states.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- This */
#include <dspbridge/pwr.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/devdefs.h>
#include <dspbridge/drv.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspioctl.h>
/*
* ======== pwr_sleep_dsp ========
* Send command to DSP to enter sleep state.
*/
int pwr_sleep_dsp(IN CONST u32 sleepCode, IN CONST u32 timeout)
{
struct bridge_drv_interface *intf_fxns;
struct bridge_dev_context *dw_context;
int status = -EPERM;
struct dev_object *hdev_obj = NULL;
u32 ioctlcode = 0;
u32 arg = timeout;
for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
hdev_obj != NULL;
hdev_obj =
(struct dev_object *)drv_get_next_dev_object((u32) hdev_obj)) {
if (DSP_FAILED(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context **)
&dw_context))) {
continue;
}
if (DSP_FAILED(dev_get_intf_fxns(hdev_obj,
(struct bridge_drv_interface **)
&intf_fxns))) {
continue;
}
if (sleepCode == PWR_DEEPSLEEP)
ioctlcode = BRDIOCTL_DEEPSLEEP;
else if (sleepCode == PWR_EMERGENCYDEEPSLEEP)
ioctlcode = BRDIOCTL_EMERGENCYSLEEP;
else
status = -EINVAL;
if (status != -EINVAL) {
status = (*intf_fxns->pfn_dev_cntrl) (dw_context,
ioctlcode,
(void *)&arg);
}
}
return status;
}
/*
* ======== pwr_wake_dsp ========
* Send command to DSP to wake it from sleep.
*/
int pwr_wake_dsp(IN CONST u32 timeout)
{
struct bridge_drv_interface *intf_fxns;
struct bridge_dev_context *dw_context;
int status = -EPERM;
struct dev_object *hdev_obj = NULL;
u32 arg = timeout;
for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
hdev_obj != NULL;
hdev_obj = (struct dev_object *)drv_get_next_dev_object
((u32) hdev_obj)) {
if (DSP_SUCCEEDED(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context
**)&dw_context))) {
if (DSP_SUCCEEDED
(dev_get_intf_fxns
(hdev_obj,
(struct bridge_drv_interface **)&intf_fxns))) {
status =
(*intf_fxns->pfn_dev_cntrl) (dw_context,
BRDIOCTL_WAKEUP,
(void *)&arg);
}
}
}
return status;
}
/*
* ======== pwr_pm_pre_scale========
* Sends pre-notification message to DSP.
*/
int pwr_pm_pre_scale(IN u16 voltage_domain, u32 level)
{
struct bridge_drv_interface *intf_fxns;
struct bridge_dev_context *dw_context;
int status = -EPERM;
struct dev_object *hdev_obj = NULL;
u32 arg[2];
arg[0] = voltage_domain;
arg[1] = level;
for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
hdev_obj != NULL;
hdev_obj = (struct dev_object *)drv_get_next_dev_object
((u32) hdev_obj)) {
if (DSP_SUCCEEDED(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context
**)&dw_context))) {
if (DSP_SUCCEEDED
(dev_get_intf_fxns
(hdev_obj,
(struct bridge_drv_interface **)&intf_fxns))) {
status =
(*intf_fxns->pfn_dev_cntrl) (dw_context,
BRDIOCTL_PRESCALE_NOTIFY,
(void *)&arg);
}
}
}
return status;
}
/*
* ======== pwr_pm_post_scale========
* Sends post-notification message to DSP.
*/
int pwr_pm_post_scale(IN u16 voltage_domain, u32 level)
{
struct bridge_drv_interface *intf_fxns;
struct bridge_dev_context *dw_context;
int status = -EPERM;
struct dev_object *hdev_obj = NULL;
u32 arg[2];
arg[0] = voltage_domain;
arg[1] = level;
for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
hdev_obj != NULL;
hdev_obj = (struct dev_object *)drv_get_next_dev_object
((u32) hdev_obj)) {
if (DSP_SUCCEEDED(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context
**)&dw_context))) {
if (DSP_SUCCEEDED
(dev_get_intf_fxns
(hdev_obj,
(struct bridge_drv_interface **)&intf_fxns))) {
status =
(*intf_fxns->pfn_dev_cntrl) (dw_context,
BRDIOCTL_POSTSCALE_NOTIFY,
(void *)&arg);
}
}
}
return status;
}
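
The pre/post scale hooks above are meant to bracket a VDD1 operating-point change; the cpufreq notifier in drv_interface.c only exercises the post hook. An illustrative pairing, not part of this commit (the function name is made up; PRCM_VDD1 is the voltage domain already used by that notifier):

static void example_notify_dsp_of_vdd1_scale(u32 new_opp_level)
{
	/* Tell the DSP a voltage/frequency change is about to happen... */
	pwr_pm_pre_scale(PRCM_VDD1, new_opp_level);

	/* ...the actual clock and voltage transition would occur here... */

	/* ...then confirm the new operating point to the DSP. */
	pwr_pm_post_scale(PRCM_VDD1, new_opp_level);
}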


@@ -0,0 +1,535 @@
/*
* rmm.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
* This memory manager provides general heap management and arbitrary
* alignment for any number of memory segments.
*
* Notes:
*
* Memory blocks are allocated from the end of the first free memory
* block large enough to satisfy the request. Alignment requirements
* are satisfied by "sliding" the block forward until its base satisfies
* the alignment specification; if this is not possible then the next
* free block large enough to hold the request is tried.
*
* Since alignment can cause the creation of a new free block - the
* unused memory formed between the start of the original free block
* and the start of the allocated block - the memory manager must free
* this memory to prevent a memory leak.
*
* Overlay memory is managed by reserving through rmm_alloc, and freeing
* it through rmm_free. The memory manager prevents DSP code/data that is
* overlayed from being overwritten as long as the memory it runs at has
* been allocated, and not yet freed.
*/
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>
/* ----------------------------------- This */
#include <dspbridge/rmm.h>
/*
* ======== rmm_header ========
* This header is used to maintain a list of free memory blocks.
*/
struct rmm_header {
struct rmm_header *next; /* form a free memory link list */
u32 size; /* size of the free memory */
u32 addr; /* DSP address of memory block */
};
/*
* ======== rmm_ovly_sect ========
* Keeps track of memory occupied by overlay section.
*/
struct rmm_ovly_sect {
struct list_head list_elem;
u32 addr; /* Start of memory section */
u32 size; /* Length (target MAUs) of section */
s32 page; /* Memory page */
};
/*
* ======== rmm_target_obj ========
*/
struct rmm_target_obj {
struct rmm_segment *seg_tab;
struct rmm_header **free_list;
u32 num_segs;
struct lst_list *ovly_list; /* List of overlay memory in use */
};
static u32 refs; /* module reference count */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
u32 align, u32 *dspAddr);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
u32 size);
/*
* ======== rmm_alloc ========
*/
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
u32 align, u32 *dspAddr, bool reserve)
{
struct rmm_ovly_sect *sect;
struct rmm_ovly_sect *prev_sect = NULL;
struct rmm_ovly_sect *new_sect;
u32 addr;
int status = 0;
DBC_REQUIRE(target);
DBC_REQUIRE(dspAddr != NULL);
DBC_REQUIRE(size > 0);
DBC_REQUIRE(reserve || (target->num_segs > 0));
DBC_REQUIRE(refs > 0);
if (!reserve) {
if (!alloc_block(target, segid, size, align, dspAddr)) {
status = -ENOMEM;
} else {
/* Increment the number of allocated blocks in this
* segment */
target->seg_tab[segid].number++;
}
goto func_end;
}
/* An overlay section - see if the block is already in use. If not,
 * insert it into the list in ascending address order. */
addr = *dspAddr;
sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
/* Find place to insert new list element. List is sorted from
* smallest to largest address. */
while (sect != NULL) {
if (addr <= sect->addr) {
/* Check for overlap with sect */
if ((addr + size > sect->addr) || (prev_sect &&
(prev_sect->addr +
prev_sect->size >
addr))) {
status = -ENXIO;
}
break;
}
prev_sect = sect;
sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
(struct list_head *)
sect);
}
if (DSP_SUCCEEDED(status)) {
/* No overlap - allocate list element for new section. */
new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
if (new_sect == NULL) {
status = -ENOMEM;
} else {
lst_init_elem((struct list_head *)new_sect);
new_sect->addr = addr;
new_sect->size = size;
new_sect->page = segid;
if (sect == NULL) {
/* Put new section at the end of the list */
lst_put_tail(target->ovly_list,
(struct list_head *)new_sect);
} else {
/* Put new section just before sect */
lst_insert_before(target->ovly_list,
(struct list_head *)new_sect,
(struct list_head *)sect);
}
}
}
func_end:
return status;
}
/*
* ======== rmm_create ========
*/
int rmm_create(struct rmm_target_obj **target_obj,
struct rmm_segment seg_tab[], u32 num_segs)
{
struct rmm_header *hptr;
struct rmm_segment *sptr, *tmp;
struct rmm_target_obj *target;
s32 i;
int status = 0;
DBC_REQUIRE(target_obj != NULL);
DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
/* Allocate DBL target object */
target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
if (target == NULL)
status = -ENOMEM;
if (DSP_FAILED(status))
goto func_cont;
target->num_segs = num_segs;
if (!(num_segs > 0))
goto func_cont;
/* Allocate the memory for freelist from host's memory */
target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
GFP_KERNEL);
if (target->free_list == NULL) {
status = -ENOMEM;
} else {
/* Allocate headers for each element on the free list */
for (i = 0; i < (s32) num_segs; i++) {
target->free_list[i] =
kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
if (target->free_list[i] == NULL) {
status = -ENOMEM;
break;
}
}
/* Allocate memory for initial segment table */
target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
GFP_KERNEL);
if (target->seg_tab == NULL) {
status = -ENOMEM;
} else {
/* Initialize segment table and free list */
sptr = target->seg_tab;
for (i = 0, tmp = seg_tab; num_segs > 0;
num_segs--, i++) {
*sptr = *tmp;
hptr = target->free_list[i];
hptr->addr = tmp->base;
hptr->size = tmp->length;
hptr->next = NULL;
tmp++;
sptr++;
}
}
}
func_cont:
/* Initialize overlay memory list */
if (DSP_SUCCEEDED(status)) {
target->ovly_list = kzalloc(sizeof(struct lst_list),
GFP_KERNEL);
if (target->ovly_list == NULL)
status = -ENOMEM;
else
INIT_LIST_HEAD(&target->ovly_list->head);
}
if (DSP_SUCCEEDED(status)) {
*target_obj = target;
} else {
*target_obj = NULL;
if (target)
rmm_delete(target);
}
DBC_ENSURE((DSP_SUCCEEDED(status) && *target_obj)
|| (DSP_FAILED(status) && *target_obj == NULL));
return status;
}
/*
* ======== rmm_delete ========
*/
void rmm_delete(struct rmm_target_obj *target)
{
struct rmm_ovly_sect *ovly_section;
struct rmm_header *hptr;
struct rmm_header *next;
u32 i;
DBC_REQUIRE(target);
kfree(target->seg_tab);
if (target->ovly_list) {
while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
(target->ovly_list))) {
kfree(ovly_section);
}
DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
kfree(target->ovly_list);
}
if (target->free_list != NULL) {
/* Free elements on freelist */
for (i = 0; i < target->num_segs; i++) {
hptr = next = target->free_list[i];
while (next) {
hptr = next;
next = hptr->next;
kfree(hptr);
}
}
kfree(target->free_list);
}
kfree(target);
}
/*
* ======== rmm_exit ========
*/
void rmm_exit(void)
{
DBC_REQUIRE(refs > 0);
refs--;
DBC_ENSURE(refs >= 0);
}
/*
* ======== rmm_free ========
*/
bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 addr, u32 size,
bool reserved)
{
struct rmm_ovly_sect *sect;
bool ret = true;
DBC_REQUIRE(target);
DBC_REQUIRE(reserved || segid < target->num_segs);
DBC_REQUIRE(reserved || (addr >= target->seg_tab[segid].base &&
(addr + size) <= (target->seg_tab[segid].base +
target->seg_tab[segid].
length)));
/*
* Free or unreserve memory.
*/
if (!reserved) {
ret = free_block(target, segid, addr, size);
if (ret)
target->seg_tab[segid].number--;
} else {
/* Unreserve memory */
sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
while (sect != NULL) {
if (addr == sect->addr) {
DBC_ASSERT(size == sect->size);
/* Remove from list */
lst_remove_elem(target->ovly_list,
(struct list_head *)sect);
kfree(sect);
break;
}
sect =
(struct rmm_ovly_sect *)lst_next(target->ovly_list,
(struct list_head
*)sect);
}
if (sect == NULL)
ret = false;
}
return ret;
}
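/*
 * Usage sketch for the two paths handled by rmm_alloc()/rmm_free()
 * above (illustrative only; "target" is assumed to come from a
 * successful rmm_create() call and segment 0 to exist):
 *
 *	u32 dsp_addr;
 *
 *	heap allocation: 128 MAUs from segment 0, 32-MAU aligned
 *	if (!rmm_alloc(target, 0, 128, 32, &dsp_addr, false))
 *		... use dsp_addr, then release it ...
 *	rmm_free(target, 0, dsp_addr, 128, false);
 *
 *	overlay reservation at a fixed DSP address
 *	dsp_addr = 0x20000;
 *	if (!rmm_alloc(target, 0, 256, 0, &dsp_addr, true))
 *		... the overlay section is now protected ...
 *	rmm_free(target, 0, 0x20000, 256, true);
 */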
/*
* ======== rmm_init ========
*/
bool rmm_init(void)
{
DBC_REQUIRE(refs >= 0);
refs++;
return true;
}
/*
* ======== rmm_stat ========
*/
bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
struct dsp_memstat *pMemStatBuf)
{
struct rmm_header *head;
bool ret = false;
u32 max_free_size = 0;
u32 total_free_size = 0;
u32 free_blocks = 0;
DBC_REQUIRE(pMemStatBuf != NULL);
DBC_ASSERT(target != NULL);
if ((u32) segid < target->num_segs) {
head = target->free_list[segid];
/* Collect data from free_list */
while (head != NULL) {
max_free_size = max(max_free_size, head->size);
total_free_size += head->size;
free_blocks++;
head = head->next;
}
/* ul_size */
pMemStatBuf->ul_size = target->seg_tab[segid].length;
/* ul_num_free_blocks */
pMemStatBuf->ul_num_free_blocks = free_blocks;
/* ul_total_free_size */
pMemStatBuf->ul_total_free_size = total_free_size;
/* ul_len_max_free_block */
pMemStatBuf->ul_len_max_free_block = max_free_size;
/* ul_num_alloc_blocks */
pMemStatBuf->ul_num_alloc_blocks =
target->seg_tab[segid].number;
ret = true;
}
return ret;
}
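/*
 * Query sketch for rmm_stat() (illustrative only; "target" is assumed
 * to be an existing target object and 0 a valid segment id):
 *
 *	struct dsp_memstat stat;
 *
 *	if (rmm_stat(target, 0, &stat))
 *		... stat.ul_total_free_size, stat.ul_len_max_free_block,
 *		    and stat.ul_num_alloc_blocks describe segment 0 ...
 */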
/*
 * ======== alloc_block ========
* This allocation function allocates memory from the lowest addresses
* first.
*/
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
u32 align, u32 *dspAddr)
{
struct rmm_header *head;
struct rmm_header *prevhead = NULL;
struct rmm_header *next;
u32 tmpalign;
u32 alignbytes;
u32 hsize;
u32 allocsize;
u32 addr;
alignbytes = (align == 0) ? 1 : align;
prevhead = NULL;
head = target->free_list[segid];
do {
hsize = head->size;
next = head->next;
addr = head->addr; /* alloc from the bottom */
/* align allocation */
tmpalign = (u32) addr % alignbytes;
if (tmpalign != 0)
tmpalign = alignbytes - tmpalign;
allocsize = size + tmpalign;
if (hsize >= allocsize) { /* big enough */
if (hsize == allocsize && prevhead != NULL) {
prevhead->next = next;
kfree(head);
} else {
head->size = hsize - allocsize;
head->addr += allocsize;
}
/* free up any hole created by alignment */
if (tmpalign)
free_block(target, segid, addr, tmpalign);
*dspAddr = addr + tmpalign;
return true;
}
prevhead = head;
head = next;
} while (head != NULL);
return false;
}
/*
* ======== free_block ========
 * TODO: free_block() allocates memory, which could result in failure.
* Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
* free_block() could use an rmm_header from the pool, freeing as blocks
* are coalesced.
*/
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
u32 size)
{
struct rmm_header *head;
struct rmm_header *thead;
struct rmm_header *rhead;
bool ret = true;
/* Create a memory header to hold the newly freed block. */
rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
if (rhead == NULL) {
ret = false;
} else {
/* search down the free list to find the right place for addr */
head = target->free_list[segid];
if (addr >= head->addr) {
while (head->next != NULL && addr > head->next->addr)
head = head->next;
thead = head->next;
head->next = rhead;
rhead->next = thead;
rhead->addr = addr;
rhead->size = size;
} else {
*rhead = *head;
head->next = rhead;
head->addr = addr;
head->size = size;
thead = rhead->next;
}
/* join with upper block, if possible */
if (thead != NULL && (rhead->addr + rhead->size) ==
thead->addr) {
head->next = rhead->next;
thead->size = size + thead->size;
thead->addr = addr;
kfree(rhead);
rhead = thead;
}
/* join with the lower block, if possible */
if ((head->addr + head->size) == rhead->addr) {
head->next = rhead->next;
head->size = head->size + rhead->size;
kfree(rhead);
}
}
return ret;
}
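/*
 * One possible shape of the pre-allocation suggested in the free_block()
 * comment above (a sketch only, not part of this driver): rmm_alloc()
 * would refill a small pool of headers with GFP_KERNEL allocations, and
 * free_block() would take a header from that pool instead of calling
 * kzalloc() on the free path:
 *
 *	static struct rmm_header *hdr_pool;	singly linked via ->next
 *
 *	static struct rmm_header *pool_get(void)
 *	{
 *		struct rmm_header *h = hdr_pool;
 *
 *		if (h)
 *			hdr_pool = h->next;
 *		return h;
 *	}
 *
 * free_block() would then fail only if the pool could not be topped up
 * beforehand.
 */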


@ -0,0 +1,861 @@
/*
* strm.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP/BIOS Bridge Stream Manager.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/nodepriv.h>
/* ----------------------------------- Others */
#include <dspbridge/cmm.h>
/* ----------------------------------- This */
#include <dspbridge/strm.h>
#include <dspbridge/cfg.h>
#include <dspbridge/resourcecleanup.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define DEFAULTTIMEOUT 10000
#define DEFAULTNUMBUFS 2
/*
* ======== strm_mgr ========
* The strm_mgr contains device information needed to open the underlying
* channels of a stream.
*/
struct strm_mgr {
struct dev_object *dev_obj; /* Device for this processor */
struct chnl_mgr *hchnl_mgr; /* Channel manager */
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
};
/*
* ======== strm_object ========
* This object is allocated in strm_open().
*/
struct strm_object {
struct strm_mgr *strm_mgr_obj;
struct chnl_object *chnl_obj;
u32 dir; /* DSP_TONODE or DSP_FROMNODE */
u32 utimeout;
u32 num_bufs; /* Max # of bufs allowed in stream */
u32 un_bufs_in_strm; /* Current # of bufs in stream */
u32 ul_n_bytes; /* bytes transferred since idled */
/* STREAM_IDLE, STREAM_READY, ... */
enum dsp_streamstate strm_state;
void *user_event; /* Saved for strm_get_info() */
enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */
u32 udma_chnl_id; /* DMA chnl id */
u32 udma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */
u32 segment_id; /* >0 is SM segment.=0 is local heap */
u32 buf_alignment; /* Alignment for stream bufs */
/* Stream's SM address translator */
struct cmm_xlatorobject *xlator;
};
/* ----------------------------------- Globals */
static u32 refs; /* module reference count */
/* ----------------------------------- Function Prototypes */
static int delete_strm(struct strm_object *hStrm);
static void delete_strm_mgr(struct strm_mgr *strm_mgr_obj);
/*
* ======== strm_allocate_buffer ========
* Purpose:
* Allocates buffers for a stream.
*/
int strm_allocate_buffer(struct strm_object *hStrm, u32 usize,
OUT u8 **ap_buffer, u32 num_bufs,
struct process_context *pr_ctxt)
{
int status = 0;
u32 alloc_cnt = 0;
u32 i;
void *hstrm_res;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(ap_buffer != NULL);
if (hStrm) {
/*
* Allocate from segment specified at time of stream open.
*/
if (usize == 0)
status = -EINVAL;
} else {
status = -EFAULT;
}
if (DSP_FAILED(status))
goto func_end;
for (i = 0; i < num_bufs; i++) {
DBC_ASSERT(hStrm->xlator != NULL);
(void)cmm_xlator_alloc_buf(hStrm->xlator, &ap_buffer[i], usize);
if (ap_buffer[i] == NULL) {
status = -ENOMEM;
alloc_cnt = i;
break;
}
}
if (DSP_FAILED(status))
strm_free_buffer(hStrm, ap_buffer, alloc_cnt, pr_ctxt);
if (DSP_FAILED(status))
goto func_end;
if (drv_get_strm_res_element(hStrm, &hstrm_res, pr_ctxt) !=
-ENOENT)
drv_proc_update_strm_res(num_bufs, hstrm_res);
func_end:
return status;
}
/*
* ======== strm_close ========
* Purpose:
* Close a stream opened with strm_open().
*/
int strm_close(struct strm_object *hStrm,
struct process_context *pr_ctxt)
{
struct bridge_drv_interface *intf_fxns;
struct chnl_info chnl_info_obj;
int status = 0;
void *hstrm_res;
DBC_REQUIRE(refs > 0);
if (!hStrm) {
status = -EFAULT;
} else {
/* Have all buffers been reclaimed? If not, return
* -EPIPE */
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
status =
(*intf_fxns->pfn_chnl_get_info) (hStrm->chnl_obj,
&chnl_info_obj);
DBC_ASSERT(DSP_SUCCEEDED(status));
if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
status = -EPIPE;
else
status = delete_strm(hStrm);
}
if (DSP_FAILED(status))
goto func_end;
if (drv_get_strm_res_element(hStrm, &hstrm_res, pr_ctxt) !=
-ENOENT)
drv_proc_remove_strm_res_element(hstrm_res, pr_ctxt);
func_end:
DBC_ENSURE(status == 0 || status == -EFAULT ||
status == -EPIPE || status == -EPERM);
dev_dbg(bridge, "%s: hStrm: %p, status 0x%x\n", __func__,
hStrm, status);
return status;
}
/*
* ======== strm_create ========
* Purpose:
* Create a STRM manager object.
*/
int strm_create(OUT struct strm_mgr **phStrmMgr,
struct dev_object *dev_obj)
{
struct strm_mgr *strm_mgr_obj;
int status = 0;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(phStrmMgr != NULL);
DBC_REQUIRE(dev_obj != NULL);
*phStrmMgr = NULL;
/* Allocate STRM manager object */
strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
if (strm_mgr_obj == NULL)
status = -ENOMEM;
else
strm_mgr_obj->dev_obj = dev_obj;
/* Get Channel manager and Bridge function interface */
if (DSP_SUCCEEDED(status)) {
status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr));
if (DSP_SUCCEEDED(status)) {
(void)dev_get_intf_fxns(dev_obj,
&(strm_mgr_obj->intf_fxns));
DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
}
}
if (DSP_SUCCEEDED(status))
*phStrmMgr = strm_mgr_obj;
else
delete_strm_mgr(strm_mgr_obj);
DBC_ENSURE((DSP_SUCCEEDED(status) && *phStrmMgr) ||
(DSP_FAILED(status) && *phStrmMgr == NULL));
return status;
}
/*
* ======== strm_delete ========
* Purpose:
* Delete the STRM Manager Object.
*/
void strm_delete(struct strm_mgr *strm_mgr_obj)
{
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(strm_mgr_obj);
delete_strm_mgr(strm_mgr_obj);
}
/*
* ======== strm_exit ========
* Purpose:
* Discontinue usage of STRM module.
*/
void strm_exit(void)
{
DBC_REQUIRE(refs > 0);
refs--;
DBC_ENSURE(refs >= 0);
}
/*
* ======== strm_free_buffer ========
* Purpose:
* Frees the buffers allocated for a stream.
*/
int strm_free_buffer(struct strm_object *hStrm, u8 ** ap_buffer,
u32 num_bufs, struct process_context *pr_ctxt)
{
int status = 0;
u32 i = 0;
void *hstrm_res = NULL;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(ap_buffer != NULL);
if (!hStrm)
status = -EFAULT;
if (DSP_SUCCEEDED(status)) {
for (i = 0; i < num_bufs; i++) {
DBC_ASSERT(hStrm->xlator != NULL);
status =
cmm_xlator_free_buf(hStrm->xlator, ap_buffer[i]);
if (DSP_FAILED(status))
break;
ap_buffer[i] = NULL;
}
}
if (drv_get_strm_res_element(hStrm, &hstrm_res, pr_ctxt) !=
-ENOENT)
drv_proc_update_strm_res(num_bufs - i, hstrm_res);
return status;
}
/*
* ======== strm_get_info ========
* Purpose:
* Retrieves information about a stream.
*/
int strm_get_info(struct strm_object *hStrm,
OUT struct stream_info *stream_info,
u32 stream_info_size)
{
struct bridge_drv_interface *intf_fxns;
struct chnl_info chnl_info_obj;
int status = 0;
void *virt_base = NULL; /* NULL if no SM used */
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(stream_info != NULL);
DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
if (!hStrm) {
status = -EFAULT;
} else {
if (stream_info_size < sizeof(struct stream_info)) {
/* size of users info */
status = -EINVAL;
}
}
if (DSP_FAILED(status))
goto func_end;
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
status =
(*intf_fxns->pfn_chnl_get_info) (hStrm->chnl_obj, &chnl_info_obj);
if (DSP_FAILED(status))
goto func_end;
if (hStrm->xlator) {
/* We have a translator */
DBC_ASSERT(hStrm->segment_id > 0);
cmm_xlator_info(hStrm->xlator, (u8 **) &virt_base, 0,
hStrm->segment_id, false);
}
stream_info->segment_id = hStrm->segment_id;
stream_info->strm_mode = hStrm->strm_mode;
stream_info->virt_base = virt_base;
stream_info->user_strm->number_bufs_allowed = hStrm->num_bufs;
stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs +
chnl_info_obj.cio_reqs;
/* # of bytes transferred since last call to DSPStream_Idle() */
stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx;
stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
/* Determine stream state based on channel state and info */
if (chnl_info_obj.dw_state & CHNL_STATEEOS) {
stream_info->user_strm->ss_stream_state = STREAM_DONE;
} else {
if (chnl_info_obj.cio_cs > 0)
stream_info->user_strm->ss_stream_state = STREAM_READY;
else if (chnl_info_obj.cio_reqs > 0)
stream_info->user_strm->ss_stream_state =
STREAM_PENDING;
else
stream_info->user_strm->ss_stream_state = STREAM_IDLE;
}
func_end:
return status;
}
/*
* ======== strm_idle ========
* Purpose:
* Idles a particular stream.
*/
int strm_idle(struct strm_object *hStrm, bool fFlush)
{
struct bridge_drv_interface *intf_fxns;
int status = 0;
DBC_REQUIRE(refs > 0);
if (!hStrm) {
status = -EFAULT;
} else {
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
status = (*intf_fxns->pfn_chnl_idle) (hStrm->chnl_obj,
hStrm->utimeout, fFlush);
}
dev_dbg(bridge, "%s: hStrm: %p fFlush: 0x%x status: 0x%x\n",
__func__, hStrm, fFlush, status);
return status;
}
/*
* ======== strm_init ========
* Purpose:
* Initialize the STRM module.
*/
bool strm_init(void)
{
bool ret = true;
DBC_REQUIRE(refs >= 0);
if (ret)
refs++;
DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
return ret;
}
/*
* ======== strm_issue ========
* Purpose:
* Issues a buffer on a stream
*/
int strm_issue(struct strm_object *hStrm, IN u8 *pbuf, u32 ul_bytes,
u32 ul_buf_size, u32 dw_arg)
{
struct bridge_drv_interface *intf_fxns;
int status = 0;
void *tmp_buf = NULL;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(pbuf != NULL);
if (!hStrm) {
status = -EFAULT;
} else {
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
if (hStrm->segment_id != 0) {
tmp_buf = cmm_xlator_translate(hStrm->xlator,
(void *)pbuf,
CMM_VA2DSPPA);
if (tmp_buf == NULL)
status = -ESRCH;
}
if (DSP_SUCCEEDED(status)) {
status = (*intf_fxns->pfn_chnl_add_io_req)
(hStrm->chnl_obj, pbuf, ul_bytes, ul_buf_size,
(u32) tmp_buf, dw_arg);
}
if (status == -EIO)
status = -ENOSR;
}
dev_dbg(bridge, "%s: hStrm: %p pbuf: %p ul_bytes: 0x%x dw_arg: 0x%x "
"status: 0x%x\n", __func__, hStrm, pbuf,
ul_bytes, dw_arg, status);
return status;
}
/*
* ======== strm_open ========
* Purpose:
* Open a stream for sending/receiving data buffers to/from a task or
* XDAIS socket node on the DSP.
*/
int strm_open(struct node_object *hnode, u32 dir, u32 index,
IN struct strm_attr *pattr,
OUT struct strm_object **phStrm,
struct process_context *pr_ctxt)
{
struct strm_mgr *strm_mgr_obj;
struct bridge_drv_interface *intf_fxns;
u32 ul_chnl_id;
struct strm_object *strm_obj = NULL;
s8 chnl_mode;
struct chnl_attr chnl_attr_obj;
int status = 0;
struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
void *hstrm_res;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(phStrm != NULL);
DBC_REQUIRE(pattr != NULL);
*phStrm = NULL;
if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
status = -EPERM;
} else {
/* Get the channel id from the node (set in node_connect()) */
status = node_get_channel_id(hnode, dir, index, &ul_chnl_id);
}
if (DSP_SUCCEEDED(status))
status = node_get_strm_mgr(hnode, &strm_mgr_obj);
if (DSP_SUCCEEDED(status)) {
strm_obj = kzalloc(sizeof(struct strm_object), GFP_KERNEL);
if (strm_obj == NULL) {
status = -ENOMEM;
} else {
strm_obj->strm_mgr_obj = strm_mgr_obj;
strm_obj->dir = dir;
strm_obj->strm_state = STREAM_IDLE;
strm_obj->user_event = pattr->user_event;
if (pattr->stream_attr_in != NULL) {
strm_obj->utimeout =
pattr->stream_attr_in->utimeout;
strm_obj->num_bufs =
pattr->stream_attr_in->num_bufs;
strm_obj->strm_mode =
pattr->stream_attr_in->strm_mode;
strm_obj->segment_id =
pattr->stream_attr_in->segment_id;
strm_obj->buf_alignment =
pattr->stream_attr_in->buf_alignment;
strm_obj->udma_chnl_id =
pattr->stream_attr_in->udma_chnl_id;
strm_obj->udma_priority =
pattr->stream_attr_in->udma_priority;
chnl_attr_obj.uio_reqs =
pattr->stream_attr_in->num_bufs;
} else {
strm_obj->utimeout = DEFAULTTIMEOUT;
strm_obj->num_bufs = DEFAULTNUMBUFS;
strm_obj->strm_mode = STRMMODE_PROCCOPY;
strm_obj->segment_id = 0; /* local mem */
strm_obj->buf_alignment = 0;
strm_obj->udma_chnl_id = 0;
strm_obj->udma_priority = 0;
chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS;
}
chnl_attr_obj.reserved1 = NULL;
/* DMA chnl flush timeout */
chnl_attr_obj.reserved2 = strm_obj->utimeout;
chnl_attr_obj.event_obj = NULL;
if (pattr->user_event != NULL)
chnl_attr_obj.event_obj = pattr->user_event;
}
}
if (DSP_FAILED(status))
goto func_cont;
if ((pattr->virt_base == NULL) || !(pattr->ul_virt_size > 0))
goto func_cont;
/* No System DMA */
DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
/* Get the shared mem mgr for this streams dev object */
status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
if (DSP_SUCCEEDED(status)) {
/* Allocate an SM addr translator for this strm. */
status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
if (DSP_SUCCEEDED(status)) {
DBC_ASSERT(strm_obj->segment_id > 0);
/* Set translators Virt Addr attributes */
status = cmm_xlator_info(strm_obj->xlator,
(u8 **) &pattr->virt_base,
pattr->ul_virt_size,
strm_obj->segment_id, true);
}
}
func_cont:
if (DSP_SUCCEEDED(status)) {
/* Open channel */
chnl_mode = (dir == DSP_TONODE) ?
CHNL_MODETODSP : CHNL_MODEFROMDSP;
intf_fxns = strm_mgr_obj->intf_fxns;
status = (*intf_fxns->pfn_chnl_open) (&(strm_obj->chnl_obj),
strm_mgr_obj->hchnl_mgr,
chnl_mode, ul_chnl_id,
&chnl_attr_obj);
if (DSP_FAILED(status)) {
/*
* over-ride non-returnable status codes so we return
* something documented
*/
if (status != -ENOMEM && status !=
-EINVAL && status != -EPERM) {
/*
* We got a status that's not return-able.
* Assert that we got something we were
* expecting (-EFAULT isn't acceptable,
* strm_mgr_obj->hchnl_mgr better be valid or we
* assert here), and then return -EPERM.
*/
DBC_ASSERT(status == -ENOSR ||
status == -ECHRNG ||
status == -EALREADY ||
status == -EIO);
status = -EPERM;
}
}
}
if (DSP_SUCCEEDED(status)) {
*phStrm = strm_obj;
drv_proc_insert_strm_res_element(*phStrm, &hstrm_res, pr_ctxt);
} else {
(void)delete_strm(strm_obj);
}
/* ensure we return a documented error code */
DBC_ENSURE((DSP_SUCCEEDED(status) && *phStrm) ||
(*phStrm == NULL && (status == -EFAULT ||
status == -EPERM
|| status == -EINVAL)));
dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
"phStrm: %p status: 0x%x\n", __func__,
hnode, dir, index, pattr, phStrm, status);
return status;
}
/*
* ======== strm_reclaim ========
* Purpose:
 * Reclaims a buffer from a stream.
*/
int strm_reclaim(struct strm_object *hStrm, OUT u8 ** buf_ptr,
u32 *pulBytes, u32 *pulBufSize, u32 *pdw_arg)
{
struct bridge_drv_interface *intf_fxns;
struct chnl_ioc chnl_ioc_obj;
int status = 0;
void *tmp_buf = NULL;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(buf_ptr != NULL);
DBC_REQUIRE(pulBytes != NULL);
DBC_REQUIRE(pdw_arg != NULL);
if (!hStrm) {
status = -EFAULT;
goto func_end;
}
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
status =
(*intf_fxns->pfn_chnl_get_ioc) (hStrm->chnl_obj, hStrm->utimeout,
&chnl_ioc_obj);
if (DSP_SUCCEEDED(status)) {
*pulBytes = chnl_ioc_obj.byte_size;
if (pulBufSize)
*pulBufSize = chnl_ioc_obj.buf_size;
*pdw_arg = chnl_ioc_obj.dw_arg;
if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
status = -ETIME;
} else {
/* Allow reclaims after idle to succeed */
if (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
status = -EPERM;
}
}
/* Translate zerocopy buffer if channel not canceled. */
if (DSP_SUCCEEDED(status)
&& (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
&& (hStrm->strm_mode == STRMMODE_ZEROCOPY)) {
/*
* This is a zero-copy channel so chnl_ioc_obj.pbuf
* contains the DSP address of SM. We need to
* translate it to a virtual address for the user
* thread to access.
* Note: Could add CMM_DSPPA2VA to CMM in the future.
*/
tmp_buf = cmm_xlator_translate(hStrm->xlator,
chnl_ioc_obj.pbuf,
CMM_DSPPA2PA);
if (tmp_buf != NULL) {
/* now convert this GPP Pa to Va */
tmp_buf = cmm_xlator_translate(hStrm->xlator,
tmp_buf,
CMM_PA2VA);
}
if (tmp_buf == NULL)
status = -ESRCH;
chnl_ioc_obj.pbuf = tmp_buf;
}
*buf_ptr = chnl_ioc_obj.pbuf;
}
func_end:
/* ensure we return a documented return code */
DBC_ENSURE(DSP_SUCCEEDED(status) || status == -EFAULT ||
status == -ETIME || status == -ESRCH ||
status == -EPERM);
dev_dbg(bridge, "%s: hStrm: %p buf_ptr: %p pulBytes: %p pdw_arg: %p "
"status 0x%x\n", __func__, hStrm,
buf_ptr, pulBytes, pdw_arg, status);
return status;
}
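/*
 * Typical proc-copy transfer cycle built from strm_issue() and
 * strm_reclaim() above (a sketch only; "strm" is assumed to be a stream
 * opened with strm_open() and "buf" a buffer obtained from
 * strm_allocate_buffer()):
 *
 *	u8 *buf_back;
 *	u32 bytes, buf_size, arg;
 *
 *	if (!strm_issue(strm, buf, 1024, 1024, 0) &&
 *	    !strm_reclaim(strm, &buf_back, &bytes, &buf_size, &arg))
 *		... "bytes" valid bytes are available at buf_back ...
 */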
/*
* ======== strm_register_notify ========
* Purpose:
* Register to be notified on specific events for this stream.
*/
int strm_register_notify(struct strm_object *hStrm, u32 event_mask,
u32 notify_type, struct dsp_notification
* hnotification)
{
struct bridge_drv_interface *intf_fxns;
int status = 0;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(hnotification != NULL);
if (!hStrm) {
status = -EFAULT;
} else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
DSP_STREAMDONE)) != 0) {
status = -EINVAL;
} else {
if (notify_type != DSP_SIGNALEVENT)
status = -ENOSYS;
}
if (DSP_SUCCEEDED(status)) {
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
status =
(*intf_fxns->pfn_chnl_register_notify) (hStrm->chnl_obj,
event_mask,
notify_type,
hnotification);
}
/* ensure we return a documented return code */
DBC_ENSURE(DSP_SUCCEEDED(status) || status == -EFAULT ||
status == -ETIME || status == -ESRCH ||
status == -ENOSYS || status == -EPERM);
return status;
}
/*
* ======== strm_select ========
* Purpose:
* Selects a ready stream.
*/
int strm_select(IN struct strm_object **strm_tab, u32 nStrms,
OUT u32 *pmask, u32 utimeout)
{
u32 index;
struct chnl_info chnl_info_obj;
struct bridge_drv_interface *intf_fxns;
struct sync_object **sync_events = NULL;
u32 i;
int status = 0;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(strm_tab != NULL);
DBC_REQUIRE(pmask != NULL);
DBC_REQUIRE(nStrms > 0);
*pmask = 0;
for (i = 0; i < nStrms; i++) {
if (!strm_tab[i]) {
status = -EFAULT;
break;
}
}
if (DSP_FAILED(status))
goto func_end;
/* Determine which channels have IO ready */
for (i = 0; i < nStrms; i++) {
intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns;
status = (*intf_fxns->pfn_chnl_get_info) (strm_tab[i]->chnl_obj,
&chnl_info_obj);
if (DSP_FAILED(status)) {
break;
} else {
if (chnl_info_obj.cio_cs > 0)
*pmask |= (1 << i);
}
}
if (DSP_SUCCEEDED(status) && utimeout > 0 && *pmask == 0) {
/* Non-zero timeout */
sync_events = kmalloc(nStrms * sizeof(struct sync_object *),
GFP_KERNEL);
if (sync_events == NULL) {
status = -ENOMEM;
} else {
for (i = 0; i < nStrms; i++) {
intf_fxns =
strm_tab[i]->strm_mgr_obj->intf_fxns;
status = (*intf_fxns->pfn_chnl_get_info)
(strm_tab[i]->chnl_obj, &chnl_info_obj);
if (DSP_FAILED(status))
break;
else
sync_events[i] =
chnl_info_obj.sync_event;
}
}
if (DSP_SUCCEEDED(status)) {
status =
sync_wait_on_multiple_events(sync_events, nStrms,
utimeout, &index);
if (DSP_SUCCEEDED(status)) {
/* Since we waited on the event, we have to
* reset it */
sync_set_event(sync_events[index]);
*pmask = 1 << index;
}
}
}
func_end:
kfree(sync_events);
DBC_ENSURE((DSP_SUCCEEDED(status) && (*pmask != 0 || utimeout == 0)) ||
(DSP_FAILED(status) && *pmask == 0));
return status;
}
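/*
 * Sketch of waiting on several streams at once with strm_select()
 * (illustrative only; "streams" is assumed to be an array of handles
 * returned by strm_open() and 5000 an arbitrary timeout in ms):
 *
 *	u32 mask, i;
 *
 *	if (!strm_select(streams, num_streams, &mask, 5000)) {
 *		for (i = 0; i < num_streams; i++)
 *			if (mask & (1 << i))
 *				... a buffer is ready on streams[i] ...
 *	}
 */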
/*
* ======== delete_strm ========
* Purpose:
* Frees the resources allocated for a stream.
*/
static int delete_strm(struct strm_object *hStrm)
{
struct bridge_drv_interface *intf_fxns;
int status = 0;
if (hStrm) {
if (hStrm->chnl_obj) {
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
/* Channel close can fail only if the channel handle
* is invalid. */
status = (*intf_fxns->pfn_chnl_close) (hStrm->chnl_obj);
/* Free all SM address translator resources */
if (DSP_SUCCEEDED(status)) {
if (hStrm->xlator) {
/* force free */
(void)cmm_xlator_delete(hStrm->xlator,
true);
}
}
}
kfree(hStrm);
} else {
status = -EFAULT;
}
return status;
}
/*
* ======== delete_strm_mgr ========
* Purpose:
* Frees stream manager.
*/
static void delete_strm_mgr(struct strm_mgr *strm_mgr_obj)
{
if (strm_mgr_obj)
kfree(strm_mgr_obj);
}