浏览代码

Add dm365 porting.

weety 9 年之前
父节点
当前提交
b71cb4c09d
共有 45 个文件被更改,包括 13431 次插入0 次删除
  1. 14 0
      bsp/dm365/SConscript
  2. 33 0
      bsp/dm365/SConstruct
  3. 11 0
      bsp/dm365/applications/SConscript
  4. 229 0
      bsp/dm365/applications/application.c
  5. 105 0
      bsp/dm365/applications/board.c
  6. 22 0
      bsp/dm365/applications/board.h
  7. 985 0
      bsp/dm365/applications/ftpd.c
  8. 161 0
      bsp/dm365/applications/startup.c
  9. 85 0
      bsp/dm365/dm365_ram.ld
  10. 31 0
      bsp/dm365/drivers/SConscript
  11. 1771 0
      bsp/dm365/drivers/davinci_emac.c
  12. 467 0
      bsp/dm365/drivers/davinci_emac.h
  13. 268 0
      bsp/dm365/drivers/davinci_serial.c
  14. 152 0
      bsp/dm365/drivers/gpio.c
  15. 61 0
      bsp/dm365/drivers/gpio.h
  16. 626 0
      bsp/dm365/drivers/i2c-davinci.c
  17. 188 0
      bsp/dm365/drivers/mii.h
  18. 1456 0
      bsp/dm365/drivers/mmcsd.c
  19. 122 0
      bsp/dm365/drivers/mmcsd.h
  20. 939 0
      bsp/dm365/drivers/spi-davinci.c
  21. 51 0
      bsp/dm365/drivers/spi-davinci.h
  22. 22 0
      bsp/dm365/platform/SConscript
  23. 344 0
      bsp/dm365/platform/dm365.c
  24. 61 0
      bsp/dm365/platform/dm365_timer.h
  25. 228 0
      bsp/dm365/platform/dm36x.h
  26. 1587 0
      bsp/dm365/platform/dma.c
  27. 308 0
      bsp/dm365/platform/edma.h
  28. 199 0
      bsp/dm365/platform/findbit.S
  29. 298 0
      bsp/dm365/platform/interrupt.c
  30. 181 0
      bsp/dm365/platform/irqs.h
  31. 48 0
      bsp/dm365/platform/psc.c
  32. 86 0
      bsp/dm365/platform/psc.h
  33. 66 0
      bsp/dm365/platform/reset.c
  34. 367 0
      bsp/dm365/platform/start_gcc.S
  35. 29 0
      bsp/dm365/platform/system_clock.c
  36. 189 0
      bsp/dm365/platform/trap.c
  37. 271 0
      bsp/dm365/rtconfig.h
  38. 80 0
      bsp/dm365/rtconfig.py
  39. 二进制
      components/drivers/drivers.rar
  40. 109 0
      libcpu/arm/dm36x/context_gcc.S
  41. 117 0
      libcpu/arm/dm36x/context_rvds.S
  42. 246 0
      libcpu/arm/dm36x/cpuport.c
  43. 575 0
      libcpu/arm/dm36x/mmu.c
  44. 164 0
      libcpu/arm/dm36x/mmu.h
  45. 79 0
      libcpu/arm/dm36x/stack.c

+ 14 - 0
bsp/dm365/SConscript

@@ -0,0 +1,14 @@
# for module compiling
# Top-level SConscript for bsp/dm365: scans every sub-directory for its
# own SConscript and aggregates the build objects each one returns.
import os
Import('RTT_ROOT')

# NOTE(review): Dir('#') is the SCons project root; presumably this file
# is invoked with the bsp directory as the root -- confirm against other bsps.
cwd = str(Dir('#'))
objs = []
list = os.listdir(cwd)

for d in list:
    path = os.path.join(cwd, d)
    # only descend into directories that carry their own SConscript
    if os.path.isfile(os.path.join(path, 'SConscript')):
        objs = objs + SConscript(os.path.join(d, 'SConscript'))

Return('objs')

+ 33 - 0
bsp/dm365/SConstruct

@@ -0,0 +1,33 @@
# SConstruct for the DM365 BSP: locates RTT_ROOT, builds the SCons
# environment from the toolchain settings in rtconfig.py, and produces
# the rtthread-dm365 image.
import os
import sys
import rtconfig

# RTT_ROOT may be set in the environment; otherwise assume this bsp
# lives two levels below the RT-Thread source tree (rt-thread/bsp/dm365).
if os.getenv('RTT_ROOT'):
    RTT_ROOT = os.getenv('RTT_ROOT')
else:
    RTT_ROOT = os.path.normpath(os.getcwd() + '/../..')

sys.path = sys.path + [os.path.join(RTT_ROOT, 'tools')]
from building import *

TARGET = 'rtthread-dm365.' + rtconfig.TARGET_EXT

# cross-toolchain taken from rtconfig.py (AS/CC/AR/LINK and their flags)
env = Environment(tools = ['mingw'],
	AS = rtconfig.AS, ASFLAGS = rtconfig.AFLAGS,
	CC = rtconfig.CC, CCFLAGS = rtconfig.CFLAGS,
	AR = rtconfig.AR, ARFLAGS = '-rc',
	LINK = rtconfig.LINK, LINKFLAGS = rtconfig.LFLAGS)
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)

Export('RTT_ROOT')
Export('rtconfig')

# prepare building environment
objs = PrepareBuilding(env, RTT_ROOT)

# libc testsuite 
objs = objs + SConscript(RTT_ROOT + '/examples/libc/SConscript', variant_dir='build/examples/libc', duplicate=0)

# make a building
DoBuilding(TARGET, objs)

+ 11 - 0
bsp/dm365/applications/SConscript

@@ -0,0 +1,11 @@
# SConscript for bsp/dm365/applications: builds every C file in this
# directory into the 'Applications' group.
import rtconfig
Import('RTT_ROOT')
from building import *

cwd = GetCurrentDir()
src = Glob('*.c')

# include path: this directory plus the project root
CPPPATH = [cwd, str(Dir('#'))]
group = DefineGroup('Applications', src, depend = [''], CPPPATH = CPPPATH)

Return('group')

+ 229 - 0
bsp/dm365/applications/application.c

@@ -0,0 +1,229 @@
+/*
+ * File      : application.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rt-thread.org/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author		Notes
+ * 2007-11-20     Yi.Qiu		add rtgui application
+ * 2008-6-28      Bernard		no rtgui init
+ */
+
+/**
+ * @addtogroup dm365
+ */
+/*@{*/
+
+#include <rtthread.h>
+//#include <rtdevice.h>
+
+#ifdef RT_USING_DFS
+/* dfs init */
+#include <dfs_init.h>
+/* dfs filesystem:ELM FatFs filesystem init */
+#include <dfs_elm.h>
+/* dfs Filesystem APIs */
+#include <dfs_fs.h>
+#ifdef RT_USING_DFS_UFFS
+/* dfs filesystem:UFFS filesystem init */
+#include <dfs_uffs.h>
+#endif
+#endif
+
+#if defined(RT_USING_DFS_DEVFS)
+#include <devfs.h>
+#endif
+
+#ifdef RT_USING_SDIO
+#include <drivers/mmcsd_core.h>
+
+#endif
+
+#ifdef RT_USING_LWIP
+#include <netif/ethernetif.h>
+#endif
+
+#ifdef RT_USING_SPI
+#include <spi-davinci.h>
+#endif
+
+#ifdef RT_USING_LED
+#include "led.h"
+#endif
+
+#define RT_INIT_THREAD_STACK_SIZE (2*1024)
+
+#ifdef RT_USING_DFS_ROMFS
+#include <dfs_romfs.h>
+#endif
+
/**
 * System initialization thread.
 *
 * Runs once after the scheduler starts: brings up the platform, then the
 * filesystem layers (DFS/ELM-FatFs/ROMFS/DevFS/UFFS), the bus drivers
 * (I2C, SPI, SDIO) and finally the network stack (lwIP + EMAC),
 * each section compiled in only when its RT_USING_* option is set.
 *
 * @param parameter  unused thread argument.
 */
void rt_init_thread_entry(void* parameter)
{
	platform_init();
	
/* Filesystem Initialization */
#ifdef RT_USING_DFS
	{
		/* init the device filesystem */
		dfs_init();

#if defined(RT_USING_DFS_ELMFAT)
		/* init the elm chan FatFs filesystam*/
		elm_init();
#endif

#if defined(RT_USING_DFS_ROMFS)
		dfs_romfs_init();
		if (dfs_mount(RT_NULL, "/rom", "rom", 0, &romfs_root) == 0)
		{
			rt_kprintf("ROM File System initialized!\n");
		}
		else
			rt_kprintf("ROM File System initialzation failed!\n");
#endif

#if defined(RT_USING_DFS_DEVFS)
		devfs_init();
		if (dfs_mount(RT_NULL, "/dev", "devfs", 0, 0) == 0)
			rt_kprintf("Device File System initialized!\n");
		else
			rt_kprintf("Device File System initialzation failed!\n");

		#ifdef RT_USING_NEWLIB
		/* init libc */
		libc_system_init(RT_CONSOLE_DEVICE_NAME);
		#endif
#endif

#if defined(RT_USING_DFS_UFFS)
	{
		/* init the uffs filesystem */
		dfs_uffs_init();

		/* mount flash device as flash directory */
		if(dfs_mount("nand0", "/nand0", "uffs", 0, 0) == 0)
			rt_kprintf("UFFS File System initialized!\n");
		else
			rt_kprintf("UFFS File System initialzation failed!\n");
	}
#endif

/* NOTE(review): the I2C/SPI/SDIO sections below are nested INSIDE the
 * RT_USING_DFS block (the closing brace/#endif come after them), so
 * these buses are never initialized when DFS is disabled -- confirm
 * whether that nesting is intentional. */
#ifdef RT_USING_I2C
	{
		rt_i2c_core_init();
		davinci_i2c_init("I2C1");
		/* PCF8563 RTC at I2C address 0x51 */
		pcf8563_init("I2C1", 0x51);
	}
#endif

#ifdef RT_USING_SPI
	{
		rt_hw_spi_init();
	}
#endif

#ifdef RT_USING_SDIO
	rt_mmcsd_core_init();
	rt_mmcsd_blk_init();
	#if 1
	//rt_hw_mmcsd_init();
	spi_sd_init("spi10");
	/* give the card two ticks-per-second worth of settle time */
	rt_thread_delay(RT_TICK_PER_SECOND*2);
	/* mount sd card fat partition 1 as root directory */
		if (dfs_mount("sd0", "/", "elm", 0, 0) == 0)
		{
			rt_kprintf("File System initialized!\n");
		}
		else
			rt_kprintf("File System initialzation failed!%d\n", rt_get_errno());
	#endif
#endif
	}
#endif

#ifdef RT_USING_LWIP
	{
		/* register ethernetif device */
		eth_system_device_init();
		rt_hw_davinci_emac_init();
		/* init lwip system */
		lwip_system_init();
	}
#endif

}
+
+
/**
 * LED blink thread.
 *
 * Paces an on/off cycle with two equal delays.  Note the actual GPIO
 * toggling is not implemented here -- only the timing skeleton exists.
 *
 * @param parameter  unused thread argument.
 */
void rt_led_thread_entry(void* parameter)
{
	for (;;)
	{
		/* "on" phase */
		rt_thread_delay(100);

		/* "off" phase */
		rt_thread_delay(100);
	}
}
+
/**
 * Create and start the application threads.
 *
 * Spawns the one-shot "init" thread (and, when RT_USING_LED is set, the
 * "led" blink thread).  Thread priorities are scaled depending on
 * whether the kernel is configured with 32 or more priority levels.
 *
 * @return 0 always.
 */
int rt_application_init()
{
	rt_thread_t init_thread;
#ifdef RT_USING_LED
	rt_thread_t led_thread;
#endif

#if (RT_THREAD_PRIORITY_MAX == 32)
	/* 32-level kernel: init at priority 8, led at 20 */
	init_thread = rt_thread_create("init",
								rt_init_thread_entry, RT_NULL,
								RT_INIT_THREAD_STACK_SIZE, 8, 20);
#ifdef RT_USING_LED
	led_thread = rt_thread_create("led",
								rt_led_thread_entry, RT_NULL,
								512, 20, 20);
#endif
#else
	/* wider priority range: same relative placement, scaled by 10 */
	init_thread = rt_thread_create("init",
								rt_init_thread_entry, RT_NULL,
								RT_INIT_THREAD_STACK_SIZE, 80, 20);
#ifdef RT_USING_LED
	led_thread = rt_thread_create("led",
								rt_led_thread_entry, RT_NULL,
								512, 200, 20);
#endif
#endif

	/* rt_thread_create returns RT_NULL on failure; only start on success */
	if (init_thread != RT_NULL)
		rt_thread_startup(init_thread);
#ifdef RT_USING_LED
	if(led_thread != RT_NULL)
		rt_thread_startup(led_thread);
#endif

	return 0;
}
+
/* NFSv3 Initialization */
#if defined(RT_USING_DFS) && defined(RT_USING_LWIP) && defined(RT_USING_DFS_NFS)
#include <dfs_nfs.h>
/**
 * Initialize the NFS client and mount the configured export at /nfs.
 * Exported to the finsh shell so it can be triggered manually once the
 * network is up (RT_NFS_HOST_EXPORT comes from rtconfig.h).
 */
void nfs_start(void)
{
	nfs_init();

	if (dfs_mount(RT_NULL, "/nfs", "nfs", 0, RT_NFS_HOST_EXPORT) == 0)
	{
		rt_kprintf("NFSv3 File System initialized!\n");
	}
	else
		rt_kprintf("NFSv3 File System initialzation failed!\n");
}

#include "finsh.h"
FINSH_FUNCTION_EXPORT(nfs_start, start net filesystem);
#endif
+
+/*@}*/

+ 105 - 0
bsp/dm365/applications/board.c

@@ -0,0 +1,105 @@
+/*
+ * File      : board.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006 - 2009 RT-Thread Develop Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rt-thread.org/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2010-11-13     weety      first version
+ */
+
+#include <rtthread.h>
+#include <rthw.h>
+#include <mmu.h>
+#include "board.h"
+
+/**
+ * @addtogroup dm365
+ */
+/*@{*/
+
+extern void rt_hw_clock_init(void);
+extern void rt_hw_uart_init(void);
+
/* MMU translation table for the DM365: each entry maps
 * { virtual start, virtual end, physical start, attributes, ... }.
 * SDRAM is visible twice: cached at 0x80000000 and uncached at
 * 0xA0000000 (both aliases of physical 0x80000000). */
static struct mem_desc dm365_mem_desc[] = {
	{ 0x80000000, 0x88000000-1, 0x80000000, SECT_RW_CB, 0, SECT_MAPPED },       /* 128M cached SDRAM memory */
	{ 0xA0000000, 0xA8000000-1, 0x80000000, SECT_RW_NCNB, 0, SECT_MAPPED },     /* 128M No cached SDRAM memory */
	/* NOTE(review): the high-vector page is mapped onto SDRAM base
	 * 0x80000000 -- presumably start_gcc.S copies the vectors there;
	 * confirm against the startup code. */
	{ 0xFFFF0000, 0xFFFF1000-1, 0x80000000, SECT_TO_PAGE, PAGE_RO_CB, PAGE_MAPPED }, /* isr vector table */
	{ 0x01C00000, 0x02000000-1, 0x01C00000, SECT_RW_NCNB, 0, SECT_MAPPED },       /* CFG BUS peripherals */
	{ 0x02000000, 0x0A000000-1, 0x02000000, SECT_RW_NCNB, 0, SECT_MAPPED },       /* AEMIF */
};
+
+
/**
 * This function will handle rtos timer
 *
 * Tick ISR: advances the RT-Thread system tick on every timer interrupt.
 *
 * @param vector interrupt vector number (unused)
 * @param param  handler argument registered with rt_hw_interrupt_install (unused)
 */
void rt_timer_handler(int vector, void *param)
{
	rt_tick_increase();
}
+
/**
 * This function will init timer0 for system ticks
 *
 * NOTE(review): despite the name/comment, the code actually programs
 * TIMER1 (DAVINCI_TIMER1_BASE) and hooks IRQ_DM365_TINT2 -- the TIMER0
 * references are left commented out.  The register write order (disable,
 * set mode, release reset, program period, enable) is significant.
 */
 void rt_hw_timer_init()
 {
	/* timer0, input clocks 24MHz */
	volatile timer_regs_t *regs =
		(volatile timer_regs_t*)DAVINCI_TIMER1_BASE;//DAVINCI_TIMER0_BASE;

	/*disable timer*/
	regs->tcr &= ~(0x3UL << 6);

	//TIMMODE 32BIT UNCHAINED MODE
	regs->tgcr |=(0x1UL << 2);

	/*not in reset timer */
	regs->tgcr |= (0x1UL << 0);

	//regs->tgcr &= ~(0x1UL << 1);

	/* set Period Registers: 24 MHz input divided down to the RT tick rate */
	regs->prd12 = 24000000/RT_TICK_PER_SECOND;
	regs->tim12 = 0;

	/* Set enable mode */
	regs->tcr |= (0x2UL << 6); //period mode
	

	/* install interrupt handler */
	rt_hw_interrupt_install(IRQ_DM365_TINT2, rt_timer_handler,
							RT_NULL, "timer1_12");//IRQ_DM365_TINT0_TINT12
	rt_hw_interrupt_umask(IRQ_DM365_TINT2);//IRQ_DM365_TINT2

 }
+
/**
 * This function will init dm365 board
 *
 * Bring-up order matters: power the timer modules via the PSC first,
 * then clocks, UART console, MMU, and finally the system tick timer.
 */
void rt_hw_board_init()
{
	/* power up TIMER0/TIMER1 through the Power and Sleep Controller
	 * (state 3 = enable) before touching their registers */
	psc_change_state(DAVINCI_DM365_LPSC_TIMER0, 3);
	psc_change_state(DAVINCI_DM365_LPSC_TIMER1, 3);
	/* initialize the system clock */
	//rt_hw_clock_init();
	davinci_clk_init();

	/* initialize uart */
	rt_hw_uart_init();
#ifdef RT_USING_CONSOLE
	rt_console_set_device(RT_CONSOLE_DEVICE_NAME);
#endif

	/* initialize mmu */
	rt_hw_mmu_init(dm365_mem_desc, sizeof(dm365_mem_desc)/sizeof(dm365_mem_desc[0]));

	/* initialize timer0 */
	rt_hw_timer_init();

}
+
+/*@}*/

+ 22 - 0
bsp/dm365/applications/board.h

@@ -0,0 +1,22 @@
/*
 * File      : board.h
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2006, RT-Thread Develop Team
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rt-thread.org/license/LICENSE
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-10-08     Bernard      add board.h to this bsp
 */

#ifndef __BOARD_H__
#define __BOARD_H__

#include <dm36x.h>

/* Board-level bring-up: PSC power, clocks, UART console, MMU and tick timer. */
void rt_hw_board_init(void);

#endif

+ 985 - 0
bsp/dm365/applications/ftpd.c

@@ -0,0 +1,985 @@
+#include <string.h>
+#include <stdlib.h>
+
+#include <rtthread.h>
+#include <dfs_posix.h>
+#include <lwip/sockets.h>
+#include <time.h>
+
+#define FTP_PORT			21
+#define FTP_SRV_ROOT		"/"
+#define FTP_MAX_CONNECTION	2
+#define FTP_USER			"rtt"
+#define FTP_PASSWORD		"demo"
+#define FTP_WELCOME_MSG		"220 welcome on RT-Thread FTP server.\r\n"
+#define FTP_BUFFER_SIZE		8192
+
+#define INET_ADDRSTRLEN 16
+
+
/* Per-client FTP session state, kept on a singly linked global list. */
struct ftp_session
{
	rt_bool_t is_anonymous;             /* logged in as "anonymous"? */

	int sockfd;                         /* control connection socket */
	struct sockaddr_in remote;          /* client address */
	struct sockaddr_in server;          /* our address on this connection */

	char serveraddr[INET_ADDRSTRLEN];   /* dotted-quad of 'server', for PASV reply */

	/* pasv data */
	int  pasv_listen_sockfd;            /* listening socket awaiting the data connect */
	char pasv_active;                   /* non-zero while a data channel is set up */
	int  pasv_sockfd;                   /* accepted/connected data socket */

	unsigned short pasv_port;           /* data port advertised/used */
	size_t offset;                      /* REST restart offset for the next RETR */

	/* current directory */
	char currentdir[256];

	struct ftp_session* next;           /* next session on the global list */
};
/* Head of the live-session list; only touched from the ftpd thread. */
static struct ftp_session* session_list = NULL;

int ftp_process_request(struct ftp_session* session, char * buf);
int ftp_get_filesize(char *filename);
+
+struct ftp_session* ftp_new_session()
+{
+	struct ftp_session* session;
+
+	session = (struct ftp_session*)rt_malloc(sizeof(struct ftp_session));
+	rt_memset((void *)session, 0, sizeof(struct ftp_session));
+
+	session->next = session_list;
+	session_list = session;
+
+	return session;
+}
+
+void ftp_close_session(struct ftp_session* session)
+{
+	struct ftp_session* list;
+
+	if (session_list == session)
+	{
+		session_list = session_list->next;
+		session->next = NULL;
+	}
+	else
+	{
+		list = session_list;
+		while (list->next != session) list = list->next;
+
+		list->next = session->next;
+		session->next = NULL;
+	}
+
+	rt_free(session);
+}
+
/**
 * Establish the data channel for the current transfer.
 *
 * Only the PASV path is live: if a PASV listening socket exists, block in
 * accept() until the client connects and store the data socket in
 * session->pasv_sockfd.  The PORT (active-mode) path is kept under
 * `#if 0` as a reference from the code this was ported from.
 *
 * @return 0 on success (or when no PASV listener is pending), -1 on error.
 */
static int open_data_connection(struct ftp_session* session)
{
	socklen_t len = sizeof(struct sockaddr);
	struct sockaddr_in sin;
#if 0
	/* Previous PORT command from client */
	if (ctrl->data_address[0]) {
		ctrl->data_sd = socket(AF_INET, SOCK_STREAM, 0);
		if (-1 == ctrl->data_sd) {
			printf("Failed creating data socket");
			return -1;
		}

		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_port = htons(ctrl->data_port);
		inet_aton(ctrl->data_address, &(sin.sin_addr));

		if (connect(ctrl->data_sd, (struct sockaddr *)&sin, len) == -1) {
			printf("Failed connecting data socket to client");
			close(ctrl->data_sd);
			ctrl->data_sd = -1;

			return -1;
		}

		DBG("Connected successfully to client's previously requested address:PORT %s:%d", ctrl->data_address, ctrl->data_port);
		return 0;
	}
#endif
	/* Previous PASV command, accept connect from client */
	if (session->pasv_listen_sockfd > 0) {
		char client_ip[100]; /* NOTE(review): unused -- leftover from the ported code */

		session->pasv_sockfd = accept(session->pasv_listen_sockfd, (struct sockaddr *)&sin, &len);
		if (-1 == session->pasv_sockfd) {
			printf("Failed accepting connection from client");
			return -1;
		}

		/* accept() may clobber 'len'; reset before getpeername() */
		len = sizeof(struct sockaddr);
		if (-1 == getpeername(session->pasv_sockfd, (struct sockaddr *)&sin, &len)) {
			printf("Cannot determine client address");
			closesocket(session->pasv_sockfd);
			session->pasv_sockfd = -1;
			return -1;
		}

		printf("Client PASV data connection from %s\n", inet_ntoa(sin.sin_addr));
	}

	return 0;
}
+
+static void close_data_connection(struct ftp_session* session)
+{
+	/* PASV server listening socket */
+	if (session->pasv_listen_sockfd > 0) {
+		closesocket(session->pasv_listen_sockfd);
+		session->pasv_listen_sockfd = -1;
+	}
+
+	/* PASV client socket */
+	if (session->pasv_sockfd > 0) {
+		closesocket(session->pasv_sockfd);
+		session->pasv_sockfd = -1;
+	}
+#if 0
+	/* PORT */
+	if (ctrl->data_address[0]) {
+		ctrl->data_address[0] = 0;
+		ctrl->data_port = 0;
+	}
+#endif
+}
+
+
/**
 * Return the size of 'filename' in bytes, or -1 if it cannot be opened.
 * The file is opened read-only and the length taken from a seek to the
 * end; the descriptor is closed before returning.
 */
int ftp_get_filesize(char * filename)
{
	int fd;
	int size;

	fd = open(filename, O_RDONLY, 0);
	if (fd < 0)
		return -1;

	/* offset after seeking to the end equals the file length */
	size = lseek(fd, 0, SEEK_END);
	close(fd);

	return size;
}
+
+rt_bool_t is_absolute_path(char* path)
+{
+#ifdef _WIN32
+	if (path[0] == '\\' ||
+		(path[1] == ':' && path[2] == '\\'))
+		return RT_TRUE;
+#else
+	if (path[0] == '/') return RT_TRUE;
+#endif
+
+	return RT_FALSE;
+}
+
+int build_full_path(struct ftp_session* session, char* path, char* new_path, size_t size)
+{
+	if (is_absolute_path(path) == RT_TRUE)
+		strcpy(new_path, path);
+	else
+	{
+		rt_sprintf(new_path, "%s/%s", session->currentdir, path);
+	}
+
+	return 0;
+}
+
+void ftpd_thread_entry(void* parameter)
+{
+	int numbytes;
+	int sockfd, maxfdp1;
+	struct sockaddr_in local;
+	fd_set readfds, tmpfds;
+	struct ftp_session* session;
+	rt_uint32_t addr_len = sizeof(struct sockaddr);
+	char * buffer = (char *) rt_malloc(FTP_BUFFER_SIZE);
+
+	local.sin_port=htons(FTP_PORT);
+	local.sin_family=PF_INET;
+	local.sin_addr.s_addr=INADDR_ANY;
+
+	FD_ZERO(&readfds);
+	FD_ZERO(&tmpfds);
+
+	sockfd=socket(AF_INET, SOCK_STREAM, 0);
+	if(sockfd < 0)
+	{
+		rt_kprintf("create socket failed\n");
+		return ;
+	}
+
+	bind(sockfd, (struct sockaddr *)&local, addr_len);
+	listen(sockfd, FTP_MAX_CONNECTION);
+
+	FD_SET(sockfd, &readfds);
+	for(;;)
+	{
+	    /* get maximum fd */
+	    maxfdp1 = sockfd + 1;
+        session = session_list;
+	    while (session != RT_NULL)
+	    {
+	        if (maxfdp1 < session->sockfd + 1)
+                maxfdp1 = session->sockfd + 1;
+
+            FD_SET(session->sockfd, &readfds);
+            session = session->next;
+	    }
+
+		tmpfds=readfds;
+		if (select(maxfdp1, &tmpfds, 0, 0, 0) == 0) continue;
+
+		if(FD_ISSET(sockfd, &tmpfds))
+		{
+			int com_socket;
+			struct sockaddr_in remote;
+
+			com_socket = accept(sockfd, (struct sockaddr*)&remote, &addr_len);
+			if(com_socket == -1)
+			{
+				rt_kprintf("Error on accept()\nContinuing...\n");
+				continue;
+			}
+			else
+			{
+				rt_kprintf("Got connection from %s\n", inet_ntoa(remote.sin_addr));
+				send(com_socket, FTP_WELCOME_MSG, strlen(FTP_WELCOME_MSG), 0);
+				FD_SET(com_socket, &readfds);
+
+				/* new session */
+				session = ftp_new_session();
+				if (session != NULL)
+				{
+					if (-1 == getsockname(com_socket, (struct sockaddr *)&session->server, &addr_len)) {
+						printf("Cannot determine our address, need it if client should connect to us\n");
+					}
+					ipaddr_ntoa_r(&(session->server.sin_addr), session->serveraddr, sizeof(session->serveraddr));
+					strcpy(session->currentdir, FTP_SRV_ROOT);
+					session->sockfd = com_socket;
+					session->remote = remote;
+				}
+			}
+		}
+
+		{
+			struct ftp_session* next;
+
+			session = session_list;
+			while (session != NULL)
+			{
+				next = session->next;
+				if (FD_ISSET(session->sockfd, &tmpfds))
+				{
+					numbytes=recv(session->sockfd, buffer, FTP_BUFFER_SIZE, 0);
+					if(numbytes==0 || numbytes==-1)
+					{
+						rt_kprintf("Client %s disconnected\n", inet_ntoa(session->remote.sin_addr));
+						FD_CLR(session->sockfd, &readfds);
+						closesocket(session->sockfd);
+						session->sockfd = -1;
+						ftp_close_session(session);
+					}
+					else
+					{
+						buffer[numbytes]=0;
+						if(ftp_process_request(session, buffer)==-1)
+						{
+							rt_kprintf("Client %s disconnected\r\n", inet_ntoa(session->remote.sin_addr));
+							closesocket(session->sockfd);
+							session->sockfd = -1;
+							ftp_close_session(session);
+						}
+					}
+				}
+
+				session = next;
+			}
+		}
+	}
+
+	// rt_free(buffer);
+}
+
+int do_list(char* directory, int sockfd)
+{
+	DIR* dirp;
+	struct dirent* entry;
+	char line_buffer[256], line_length;
+#ifdef _WIN32
+	struct _stat s;
+#else
+	struct stat s;
+#endif
+
+	dirp = opendir(directory);
+	if (dirp == NULL)
+	{
+		line_length = rt_sprintf(line_buffer, "500 Internal Error\r\n");
+		send(sockfd, line_buffer, line_length, 0);
+		return -1;
+	}
+
+	while (1)
+	{
+		entry = readdir(dirp);
+		if (entry == NULL) break;
+
+		rt_sprintf(line_buffer, "%s/%s", directory, entry->d_name);
+#ifdef _WIN32
+		if (_stat(line_buffer, &s) ==0)
+#else
+		if (stat(line_buffer, &s) == 0)
+#endif
+		{
+			if (s.st_mode & S_IFDIR)
+				line_length = rt_sprintf(line_buffer, "drw-r--r-- 1 admin admin %d Jan 1 2000 %s\r\n", 0, entry->d_name);
+			else
+				line_length = rt_sprintf(line_buffer, "-rw-r--r-- 1 admin admin %d Jan 1 2000 %s\r\n", s.st_size, entry->d_name);
+
+			send(sockfd, line_buffer, line_length, 0);
+		}
+		else
+		{
+			rt_kprintf("Get directory entry error\n");
+			break;
+		}
+	}
+
+	closedir(dirp);
+	return 0;
+}
+
+int do_simple_list(char* directory, int sockfd)
+{
+	DIR* dirp;
+	struct dirent* entry;
+	char line_buffer[256], line_length;
+
+	dirp = opendir(directory);
+	if (dirp == NULL)
+	{
+		line_length = rt_sprintf(line_buffer, "500 Internal Error\r\n");
+		send(sockfd, line_buffer, line_length, 0);
+		return -1;
+	}
+
+	while (1)
+	{
+		entry = readdir(dirp);
+		if (entry == NULL) break;
+
+		line_length = rt_sprintf(line_buffer, "%s\r\n", entry->d_name);
+		send(sockfd, line_buffer, line_length, 0);
+	}
+
+	closedir(dirp);
+	return 0;
+}
+
/**
 * Test whether 'src' begins with the prefix 'match'.
 *
 * @return 0 when 'src' starts with 'match' (an empty 'match' always
 *         matches), -1 otherwise.
 */
int str_begin_with(char* src, char* match)
{
	for (; *match != '\0'; src++, match++)
	{
		/* source exhausted or characters differ: not a prefix */
		if (*src == '\0' || *src != *match)
			return -1;
	}

	return 0;
}
+
+int ftp_process_request(struct ftp_session* session, char *buf)
+{
+	int  fd;
+	struct timeval tv;
+	fd_set readfds;
+	char filename[256];
+	int  numbytes;
+	char *sbuf;
+	char *parameter_ptr, *ptr;
+	rt_uint32_t addr_len = sizeof(struct sockaddr_in);
+	struct sockaddr_in local, pasvremote;
+
+	sbuf =(char *)rt_malloc(FTP_BUFFER_SIZE);
+
+	tv.tv_sec=3, tv.tv_usec=0;
+	local.sin_family=PF_INET;
+	local.sin_addr.s_addr=INADDR_ANY;
+
+	/* remove \r\n */
+	ptr = buf;
+	while (*ptr)
+	{
+		if (*ptr == '\r' || *ptr == '\n') *ptr = 0;
+		ptr ++;
+	}
+
+	/* get request parameter */
+	parameter_ptr = strchr(buf, ' '); if (parameter_ptr != NULL) parameter_ptr ++;
+
+	// debug:
+	rt_kprintf("%s requested: \"%s\"\n", inet_ntoa(session->remote.sin_addr), buf);
+
+	//
+	//-----------------------
+	if(str_begin_with(buf, "USER")==0)
+	{
+		rt_kprintf("%s sent login \"%s\"\n", inet_ntoa(session->remote.sin_addr), parameter_ptr);
+		// login correct
+		if(strcmp(parameter_ptr, "anonymous") == 0)
+		{
+			session->is_anonymous = RT_TRUE;
+			rt_sprintf(sbuf, "331 Anonymous login OK send e-mail address for password.\r\n", parameter_ptr);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else if (strcmp(parameter_ptr, FTP_USER) == 0)
+		{
+			session->is_anonymous = RT_FALSE;
+			rt_sprintf(sbuf, "331 Password required for %s\r\n", parameter_ptr);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else
+		{
+			// incorrect login
+			rt_sprintf(sbuf, "530 Login incorrect. Bye.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return -1;
+		}
+		return 0;
+	}
+	else if(str_begin_with(buf, "PASS")==0)
+	{
+		rt_kprintf("%s sent password \"%s\"\n", inet_ntoa(session->remote.sin_addr), parameter_ptr);
+		if (strcmp(parameter_ptr, FTP_PASSWORD)==0 ||
+			session->is_anonymous == RT_TRUE)
+		{
+			// password correct
+			rt_sprintf(sbuf, "230 User logged in.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return 0;
+		}
+
+		// incorrect password
+		rt_sprintf(sbuf, "530 Login or Password incorrect. Bye!\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		rt_free(sbuf);
+		return -1;
+	}
+	else if(str_begin_with(buf, "LIST")==0  )
+	{
+		memset(sbuf,0,FTP_BUFFER_SIZE);
+		open_data_connection(session);
+		rt_sprintf(sbuf, "150 Opening Binary mode connection for file list.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		do_list(session->currentdir, session->pasv_sockfd);
+		close_data_connection(session);
+		session->pasv_active = 0;
+		rt_sprintf(sbuf, "226 Transfert Complete.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "NLST")==0 )
+	{
+		memset(sbuf, 0, FTP_BUFFER_SIZE);
+		rt_sprintf(sbuf, "150 Opening Binary mode connection for file list.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		open_data_connection(session);
+		do_simple_list(session->currentdir, session->pasv_sockfd);
+		close_data_connection(session);
+		session->pasv_active = 0;
+		rt_sprintf(sbuf, "226 Transfert Complete.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "PWD")==0 || str_begin_with(buf, "XPWD")==0)
+	{
+		rt_sprintf(sbuf, "257 \"%s\" is current directory.\r\n", session->currentdir);
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "TYPE")==0)
+	{
+		// Ignore it
+		if(strcmp(parameter_ptr, "I")==0)
+		{
+			rt_sprintf(sbuf, "200 Type set to binary.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else
+		{
+			rt_sprintf(sbuf, "200 Type set to ascii.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+	}
+	else if(str_begin_with(buf, "PASV")==0)
+	{
+		int dig1, dig2;
+		//int sockfd;
+		int optval=1;
+		int port;
+		struct sockaddr_in data;
+		socklen_t len = sizeof(struct sockaddr);
+		char *msg, *p;
+
+		if (session->pasv_sockfd > 0) {
+			closesocket(session->pasv_sockfd);
+			session->pasv_sockfd = -1;
+		}
+
+		if (session->pasv_listen_sockfd > 0)
+			closesocket(session->pasv_listen_sockfd);
+
+		session->pasv_port = 10000;
+		session->pasv_active = 1;
+		local.sin_port=htons(session->pasv_port);
+		local.sin_addr.s_addr=INADDR_ANY;
+
+		dig1 = (int)(session->pasv_port/256);
+		dig2 = session->pasv_port % 256;
+
+		FD_ZERO(&readfds);
+		if((session->pasv_listen_sockfd=socket(PF_INET, SOCK_STREAM, 0))==-1)
+		{
+			rt_sprintf(sbuf, "425 Can't open data connection0.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			goto err1;
+		}
+		if(setsockopt(session->pasv_listen_sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval))==-1)
+		{
+			rt_sprintf(sbuf, "425 Can't open data connection1.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			goto err1;
+		}
+		if(bind(session->pasv_listen_sockfd, (struct sockaddr *)&local, addr_len)==-1)
+		{
+			rt_sprintf(sbuf, "425 Can't open data connection2.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			goto err1;
+		}
+		if(listen(session->pasv_listen_sockfd, 1)==-1)
+		{
+			rt_sprintf(sbuf, "425 Can't open data connection3.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			goto err1;
+		}
+		if (-1 == getsockname(session->pasv_listen_sockfd, (struct sockaddr *)&data, &len)) {
+			rt_kprintf("Cannot determine our address, need it if client should connect to us\n");
+			goto err1;
+		}
+
+		port = ntohs(data.sin_port);
+		rt_kprintf("Port %d\n", port);
+
+		/* Convert server IP address and port to comma separated list */
+		msg = strdup(session->serveraddr);
+		if (!msg) {
+			rt_sprintf(sbuf, "426 Internal server error.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			goto err1;
+		}
+		p = msg;
+		while ((p = strchr(p, '.')))
+			*p++ = ',';
+	
+		rt_kprintf("Listening %d seconds @ port %d\n", tv.tv_sec, session->pasv_port);
+		rt_sprintf(sbuf, "227 Entering passive mode (%s,%d,%d)\r\n", msg, port / 256, port % 256);
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		rt_free(sbuf);
+		rt_free(msg);
+		return 0;
+	#if 0
+		FD_SET(sockfd, &readfds);
+		select(0, &readfds, 0, 0, &tv);
+		if(FD_ISSET(sockfd, &readfds))
+		{
+			if((session->pasv_sockfd = accept(sockfd, (struct sockaddr*)&pasvremote, &addr_len))==-1)
+			{
+				rt_sprintf(sbuf, "425 Can't open data connection4.\r\n");
+				send(session->sockfd, sbuf, strlen(sbuf), 0);
+				goto err1;
+			}
+			else
+			{
+				rt_kprintf("Got Data(PASV) connection from %s\n", inet_ntoa(pasvremote.sin_addr));
+				session->pasv_active = 1;
+				closesocket(sockfd);
+			}
+		}
+		else
+		{
+err1:
+			closesocket(session->pasv_sockfd);
+			session->pasv_active = 0;
+			rt_free(sbuf);
+			return 0;
+		}
+	#endif
+err1:
+		close_data_connection(session);
+		session->pasv_active = 0;
+		rt_free(sbuf);
+		rt_free(msg);
+		return 0;
+
+	}
+	else if (str_begin_with(buf, "RETR")==0)
+	{
+		int file_size;
+
+		open_data_connection(session);
+
+		strcpy(filename, buf + 5);
+
+		build_full_path(session, parameter_ptr, filename, 256);
+		file_size = ftp_get_filesize(filename);
+		if (file_size == -1)
+		{
+			rt_sprintf(sbuf, "550 \"%s\" : not a regular file\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			session->offset=0;
+			close_data_connection(session);
+			rt_free(sbuf);
+			return 0;
+		}
+
+		fd = open(filename, O_RDONLY, 0);
+		if (fd < 0)
+		{
+			close_data_connection(session);
+			rt_free(sbuf);
+			return 0;
+		}
+
+		if(session->offset>0 && session->offset < file_size)
+		{
+			lseek(fd, session->offset, SEEK_SET);
+			rt_sprintf(sbuf, "150 Opening binary mode data connection for partial \"%s\" (%d/%d bytes).\r\n",
+				filename, file_size - session->offset, file_size);
+		}
+		else
+		{
+			rt_sprintf(sbuf, "150 Opening binary mode data connection for \"%s\" (%d bytes).\r\n", filename, file_size);
+		}
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		while((numbytes = read(fd, sbuf, FTP_BUFFER_SIZE))>0)
+		{
+			send(session->pasv_sockfd, sbuf, numbytes, 0);
+		}
+		rt_sprintf(sbuf, "226 Finished.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		close(fd);
+		close_data_connection(session);
+	}
+	else if (str_begin_with(buf, "STOR")==0)
+	{
+		open_data_connection(session);
+		if(session->is_anonymous == RT_TRUE)
+		{
+			rt_sprintf(sbuf, "550 Permission denied.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			close_data_connection(session);
+			rt_free(sbuf);
+			return 0;
+		}
+
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0);
+		if(fd < 0)
+		{
+			rt_sprintf(sbuf, "550 Cannot open \"%s\" for writing.\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			close_data_connection(session);
+			rt_free(sbuf);
+			return 0;
+		}
+		rt_sprintf(sbuf, "150 Opening binary mode data connection for \"%s\".\r\n", filename);
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		FD_ZERO(&readfds);
+		FD_SET(session->pasv_sockfd, &readfds);
+		rt_kprintf("Waiting %d seconds for data...\n", tv.tv_sec);
+		while(select(session->pasv_sockfd+1, &readfds, 0, 0, &tv)>0 )
+		{
+			if((numbytes=recv(session->pasv_sockfd, sbuf, FTP_BUFFER_SIZE, 0))>0)
+			{
+				write(fd, sbuf, numbytes);
+			}
+			else if(numbytes==0)
+			{
+				close(fd);
+				close_data_connection(session);
+				rt_sprintf(sbuf, "226 Finished.\r\n");
+				send(session->sockfd, sbuf, strlen(sbuf), 0);
+				break;
+			}
+			else if(numbytes==-1)
+			{
+				close(fd);
+				close_data_connection(session);
+				rt_free(sbuf);
+				return -1;
+			}
+		}
+		close_data_connection(session);
+	}
+	else if(str_begin_with(buf, "SIZE")==0)
+	{
+		int file_size;
+
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		file_size = ftp_get_filesize(filename);
+		if( file_size == -1)
+		{
+			rt_sprintf(sbuf, "550 \"%s\" : not a regular file\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else
+		{
+			rt_sprintf(sbuf, "213 %d\r\n", file_size);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+	}
+	else if(str_begin_with(buf, "MDTM")==0)
+	{
+		rt_sprintf(sbuf, "550 \"/\" : not a regular file\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "SYST")==0)
+	{
+		rt_sprintf(sbuf, "215 %s\r\n", "RT-Thread RTOS");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "CWD")==0)
+	{
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		rt_sprintf(sbuf, "250 Changed to directory \"%s\"\r\n", filename);
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		strcpy(session->currentdir, filename);
+		rt_kprintf("Changed to directory %s", filename);
+	}
+	else if(str_begin_with(buf, "CDUP")==0)
+	{
+		rt_sprintf(filename, "%s/%s", session->currentdir, "..");
+
+		rt_sprintf(sbuf, "250 Changed to directory \"%s\"\r\n", filename);
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		strcpy(session->currentdir, filename);
+		rt_kprintf("Changed to directory %s", filename);
+	}
+	else if(str_begin_with(buf, "PORT")==0)
+	{
+		int i;
+		int portcom[6];
+		char tmpip[100];
+
+		i=0;
+		portcom[i++]=atoi(strtok(parameter_ptr, ".,;()"));
+		for(;i<6;i++)
+			portcom[i]=atoi(strtok(0, ".,;()"));
+		rt_sprintf(tmpip, "%d.%d.%d.%d", portcom[0], portcom[1], portcom[2], portcom[3]);
+
+		FD_ZERO(&readfds);
+		if((session->pasv_sockfd=socket(AF_INET, SOCK_STREAM, 0))==-1)
+		{
+			rt_sprintf(sbuf, "425 Can't open data connection.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			closesocket(session->pasv_sockfd);
+			session->pasv_active = 0;
+			rt_free(sbuf);
+			return 0;
+		}
+		pasvremote.sin_addr.s_addr=inet_addr(tmpip);
+		pasvremote.sin_port=htons(portcom[4] * 256 + portcom[5]);
+		pasvremote.sin_family=PF_INET;
+		if(connect(session->pasv_sockfd, (struct sockaddr *)&pasvremote, addr_len)==-1)
+		{
+			// is it only local address?try using gloal ip addr
+			pasvremote.sin_addr=session->remote.sin_addr;
+			if(connect(session->pasv_sockfd, (struct sockaddr *)&pasvremote, addr_len)==-1)
+			{
+				rt_sprintf(sbuf, "425 Can't open data connection.\r\n");
+				send(session->sockfd, sbuf, strlen(sbuf), 0);
+				closesocket(session->pasv_sockfd);
+				rt_free(sbuf);
+				return 0;
+			}
+		}
+		session->pasv_active=1;
+		session->pasv_port = portcom[4] * 256 + portcom[5];
+		rt_kprintf("Connected to Data(PORT) %s @ %d\n", tmpip, portcom[4] * 256 + portcom[5]);
+		rt_sprintf(sbuf, "200 Port Command Successful.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "REST")==0)
+	{
+		if(atoi(parameter_ptr)>=0)
+		{
+			session->offset=atoi(parameter_ptr);
+			rt_sprintf(sbuf, "350 Send RETR or STOR to start transfert.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+	}
+	else if(str_begin_with(buf, "MKD")==0)
+	{
+		if (session->is_anonymous == RT_TRUE)
+		{
+			rt_sprintf(sbuf, "530 Permission denied.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return 0;
+		}
+
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		if(mkdir(filename, 0) == -1)
+		{
+			rt_sprintf(sbuf, "550 File \"%s\" exists.\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else
+		{
+			rt_sprintf(sbuf, "257 directory \"%s\" successfully created.\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+	}
+	else if(str_begin_with(buf, "DELE")==0)
+	{
+		if (session->is_anonymous == RT_TRUE)
+		{
+			rt_sprintf(sbuf, "530 Permission denied.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return 0;
+		}
+
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		if(unlink(filename)==0)
+			rt_sprintf(sbuf, "250 Successfully deleted file \"%s\".\r\n", filename);
+		else
+		{
+			rt_sprintf(sbuf, "550 Not such file or directory: %s.\r\n", filename);
+		}
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "RMD")==0)
+	{
+		if (session->is_anonymous == RT_TRUE)
+		{
+			rt_sprintf(sbuf, "530 Permission denied.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return 0;
+		}
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		if(unlink(filename) == -1)
+		{
+			rt_sprintf(sbuf, "550 Directory \"%s\" doesn't exist.\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else
+		{
+			rt_sprintf(sbuf, "257 directory \"%s\" successfully deleted.\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+	}
+	else if(str_begin_with(buf, "RNFR")==0)
+	{
+		if (session->is_anonymous == RT_TRUE)
+		{
+			rt_sprintf(sbuf, "530 Permission denied.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return 0;
+		}
+		build_full_path(session, parameter_ptr, filename, 256);
+
+		rt_sprintf(sbuf, "350 Successfully rececive old file \"%s\".\r\n", filename);
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "RNTO")==0)
+	{
+		char new_filename[256];
+		if (session->is_anonymous == RT_TRUE)
+		{
+			rt_sprintf(sbuf, "530 Permission denied.\r\n");
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+			rt_free(sbuf);
+			return 0;
+		}
+		build_full_path(session, parameter_ptr, new_filename, 256);
+
+		if(rename(filename, new_filename) == -1)
+		{
+			rt_sprintf(sbuf, "553 rename file \"%s\" error.\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+		else
+		{
+			rt_sprintf(sbuf, "250 Successfully rename to new file \"%s\".\r\n", filename);
+			send(session->sockfd, sbuf, strlen(sbuf), 0);
+		}
+	}
+	else if((str_begin_with(buf, "NOOP")==0) || str_begin_with(buf, "noop")==0)
+	{
+		rt_sprintf(sbuf, "200 noop!\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	else if(str_begin_with(buf, "QUIT")==0)
+	{
+		rt_sprintf(sbuf, "221 Bye!\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+		rt_free(sbuf);
+		return -1;
+	}
+	else
+	{
+		rt_sprintf(sbuf, "502 Not Implemented.\r\n");
+		send(session->sockfd, sbuf, strlen(sbuf), 0);
+	}
+	rt_free(sbuf);
+	return 0;
+}
+
+/* Create and start the FTP server thread (stack 4096, prio 30, tick 5). */
+void ftpd_start()
+{
+	rt_thread_t ftpd_tid;
+
+	ftpd_tid = rt_thread_create("ftpd", ftpd_thread_entry, RT_NULL,
+				    4096, 30, 5);
+	if (ftpd_tid != RT_NULL)
+	{
+		rt_thread_startup(ftpd_tid);
+	}
+}
+
+#ifdef RT_USING_FINSH
+#include <finsh.h>
+FINSH_FUNCTION_EXPORT(ftpd_start, start ftp server);
+
+#ifdef FINSH_USING_MSH
+/* msh command wrapper: arguments are accepted but ignored. */
+int cmd_ftpd_start(int argc, char** argv)
+{
+	ftpd_start();
+
+	return 0;
+}
+FINSH_FUNCTION_EXPORT_ALIAS(cmd_ftpd_start, __cmd_ftpd_start, start ftp server.);
+#endif
+
+#endif

+ 161 - 0
bsp/dm365/applications/startup.c

@@ -0,0 +1,161 @@
+/*
+ * File      : startup.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Develop Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rt-thread.org/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2010-11-13     weety      first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+
+#include <dm36x.h>
+#ifdef RT_USING_FINSH
+#include <finsh.h>
+#endif
+
+extern void rt_hw_interrupt_init(void);
+extern void rt_hw_board_init(void);
+extern void rt_serial_init(void);
+extern void rt_system_timer_init(void);
+extern void rt_system_scheduler_init(void);
+extern void rt_thread_idle_init(void);
+extern void rt_hw_cpu_icache_enable(void);
+extern void rt_show_version(void);
+extern void rt_system_heap_init(void*, void*);
+extern void rt_hw_finsh_init(void);
+extern void rt_application_init(void);
+
+/**
+ * @addtogroup dm365
+ */
+
+/*@{*/
+#if defined(__CC_ARM)
+	extern int Image$$ER_ZI$$ZI$$Base;
+	extern int Image$$ER_ZI$$ZI$$Length;
+	extern int Image$$ER_ZI$$ZI$$Limit;
+#elif (defined (__GNUC__))
+	rt_uint8_t _irq_stack_start[1024];
+	rt_uint8_t _fiq_stack_start[1024];
+	rt_uint8_t _undefined_stack_start[512];
+	rt_uint8_t _abort_stack_start[512];
+	rt_uint8_t _svc_stack_start[1024] SECTION(".nobss");
+	extern unsigned char __bss_start;
+	extern unsigned char __bss_end;
+#endif
+
+#ifdef RT_USING_FINSH
+extern void finsh_system_init(void);
+#endif
+
+/* Debug helper: intended to issue a software interrupt (SWI) to exercise
+ * the trap handler.  The actual instruction is commented out, so at
+ * present this only prints the two markers. */
+rt_inline void rt_swi(void)
+{
+	rt_kprintf("before swi\n");
+	//asm ("swi 0x2");
+	rt_kprintf("after swi\n");
+}
+
+#define	BREAKPOINT()			asm(".word 	0xe7ffdeff")
+void breakpoint(void) 
+{ 
+	//#define BP 0xe7fddefe 
+	//*(unsigned long *)breakinst = BP; 
+	//__asm__ __volatile__(" 
+	//.globl breakinst 
+	//nop 
+	//breakinst: nop 
+	//nop 
+	//nop 
+	//"); 
+	rt_kprintf("before breakpoint\n");
+	BREAKPOINT();
+	rt_kprintf("after breakpoint\n");
+} 
+
+/**
+ * This function will startup RT-Thread RTOS.
+ *
+ * Brings the kernel up in the required order (interrupts -> board ->
+ * tick/object/timer -> heap -> scheduler -> application and shell
+ * threads) and finally hands control to the scheduler, so it never
+ * returns.
+ *
+ * Heap range: from the end of the loaded image (__bss_end, or the ARM
+ * linker's ER_ZI limit) up to 0x88000000 -- presumably the top of the
+ * 128MB DDR window starting at 0x80000000 (see dm365_ram.ld); TODO
+ * confirm against the board's actual RAM size.
+ */
+void rtthread_startup(void)
+{
+	/* enable cpu cache */
+	//rt_hw_cpu_icache_enable();
+	//rt_hw_cpu_dcache_enable();
+
+	/* initialize hardware interrupt */
+	rt_hw_interrupt_init();
+
+	/* initialize board */
+	rt_hw_board_init();
+
+	/* show version */
+	rt_show_version();
+	
+	/* initialize tick */
+	rt_system_tick_init();
+
+	/* initialize kernel object */
+	rt_system_object_init();
+
+	/* initialize timer system */
+	rt_system_timer_init();
+
+	/* initialize heap memory system */
+#ifdef __CC_ARM
+	rt_system_heap_init((void*)&Image$$ER_ZI$$ZI$$Limit, (void*)0x88000000);
+#else
+	rt_system_heap_init((void*)&__bss_end, (void*)0x88000000);
+#endif
+
+#ifdef RT_USING_MODULE
+	/* initialize module system*/
+	rt_system_module_init();
+#endif
+
+	/* initialize scheduler system */
+	rt_system_scheduler_init();
+
+	/* initialize application */
+	rt_application_init();
+
+#ifdef RT_USING_FINSH
+	/* initialize finsh */
+	finsh_system_init();
+#ifdef RT_USING_DEVICE
+	finsh_set_device(RT_CONSOLE_DEVICE_NAME);
+#endif
+#endif
+
+	/* initialize system timer thread */
+	rt_system_timer_thread_init();
+
+	/* initialize idle thread */
+	rt_thread_idle_init();
+
+	/* start scheduler; does not return */
+	rt_system_scheduler_start();
+
+	/* never reach here */
+	return ;
+}
+
+/* C entry point.  Interrupts are disabled here and never explicitly
+ * re-enabled: rt_system_scheduler_start() (called from
+ * rtthread_startup) switches to the first thread, which restores its
+ * own interrupt state from its saved context. */
+int main(void)
+{
+	rt_uint32_t UNUSED level;
+
+	/* disable interrupt first */
+	level = rt_hw_interrupt_disable();
+
+	/* startup RT-Thread RTOS */
+	rtthread_startup();
+
+	/* not reached: rtthread_startup() does not return */
+	return 0;
+}
+
+/*@}*/

+ 85 - 0
bsp/dm365/dm365_ram.ld

@@ -0,0 +1,85 @@
+/* RT-Thread DM365 RAM link script: image linked and loaded at the base
+ * of DDR (0x80000000).  __bss_start/__bss_end bound the region the
+ * startup code zeroes; the kernel heap begins after __bss_end. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+	. = 0x80000000;
+
+	. = ALIGN(4);
+	.text :
+	{
+		*(.init)
+		*(.text)
+		*(.gnu.linkonce.t*)
+
+		/* section information for finsh shell */
+		. = ALIGN(4);
+		__fsymtab_start = .;
+		KEEP(*(FSymTab))
+		__fsymtab_end = .;
+		. = ALIGN(4);
+		__vsymtab_start = .;
+		KEEP(*(VSymTab))
+		__vsymtab_end = .;
+		. = ALIGN(4);
+
+		/* section information for modules */
+		. = ALIGN(4);
+		__rtmsymtab_start = .;
+		KEEP(*(RTMSymTab))
+		__rtmsymtab_end = .;
+	}
+
+	. = ALIGN(4);
+	.rodata : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r*) *(.eh_frame) }
+
+	. = ALIGN(4);
+	.ctors :
+	{
+		PROVIDE(__ctors_start__ = .);
+		KEEP(*(SORT(.ctors.*)))
+		KEEP(*(.ctors))
+		PROVIDE(__ctors_end__ = .);
+	}
+
+	.dtors :
+	{
+		PROVIDE(__dtors_start__ = .);
+		KEEP(*(SORT(.dtors.*)))
+		KEEP(*(.dtors))
+		PROVIDE(__dtors_end__ = .);
+	}
+
+	. = ALIGN(4);
+	.data :
+	{
+		*(.data)
+		*(.data.*)
+		*(.gnu.linkonce.d*)
+	}
+
+	/* .nobss holds data (e.g. the SVC stack) that must survive the
+	 * BSS-clear loop, so it is placed outside __bss_start/__bss_end */
+	. = ALIGN(4);
+	.nobss : { *(.nobss) }
+
+	. = ALIGN(4);
+	__bss_start = .;
+	.bss :
+	{
+		*(.bss)
+		*(.bss.*)
+		*(COMMON)	/* pick up common symbols so the clear loop zeroes them */
+	}
+	. = ALIGN(4);	/* keep __bss_end word-aligned for the word-store clear loop */
+	__bss_end = .;
+
+	/* stabs debugging sections. */
+	.stab 0 : { *(.stab) }
+	.stabstr 0 : { *(.stabstr) }
+	.stab.excl 0 : { *(.stab.excl) }
+	.stab.exclstr 0 : { *(.stab.exclstr) }
+	.stab.index 0 : { *(.stab.index) }
+	.stab.indexstr 0 : { *(.stab.indexstr) }
+	.comment 0 : { *(.comment) }
+	.debug_abbrev 0 : { *(.debug_abbrev) }
+	.debug_info 0 : { *(.debug_info) }
+	.debug_line 0 : { *(.debug_line) }
+	.debug_pubnames 0 : { *(.debug_pubnames) }
+	.debug_aranges 0 : { *(.debug_aranges) }
+
+	_end = .;
+}

+ 31 - 0
bsp/dm365/drivers/SConscript

@@ -0,0 +1,31 @@
+Import('RTT_ROOT')
+from building import *
+
+cwd = GetCurrentDir()
+#src_drv = ['']
+
+# The set of source files associated with this SConscript file.
+path = [cwd]
+
+src = Split("""
+davinci_serial.c
+""")
+
+if GetDepend('RT_USING_GPIO'):
+	src += ['gpio.c']
+
+if GetDepend('RT_USING_I2C'):
+	src += ['i2c-davinci.c']
+
+if GetDepend('RT_USING_SDIO'):
+	src += ['mmcsd.c']
+
+if GetDepend('RT_USING_SPI'):
+	src += ['spi-davinci.c']
+
+if GetDepend('RT_USING_LWIP'):
+	src += ['davinci_emac.c']
+
+group = DefineGroup('Startup', src, depend = [''], CPPPATH = path)
+
+Return('group')

+ 1771 - 0
bsp/dm365/drivers/davinci_emac.c

@@ -0,0 +1,1771 @@
+/*
+ * File      : davinci_emac.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Develop Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rt-thread.org/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2013-01-30     weety      first version
+ */
+
+#include <rtthread.h>
+#include <netif/ethernetif.h>
+#include <lwipopts.h>
+#include <dm36x.h>
+#include "davinci_emac.h"
+
+#define MMU_NOCACHE_ADDR(a)      	((rt_uint32_t)a | (1UL<<29))
+#define CACHE_LINE_SIZE	32
+
+extern void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size);
+extern void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size);
+
+/* EMAC internal utility function.  This port runs with a flat 1:1
+ * memory map, so a descriptor/buffer physical address equals its
+ * virtual address; cast explicitly instead of relying on an implicit
+ * pointer-to-integer conversion (a constraint violation in ISO C). */
+static inline rt_uint32_t emac_virt_to_phys(void *addr)
+{
+	return (rt_uint32_t)addr;
+}
+
+/* Identity virtual-to-physical translation (flat memory map); explicit
+ * cast avoids the implicit pointer-to-integer conversion warning. */
+static inline rt_uint32_t virt_to_phys(void *addr)
+{
+	return (rt_uint32_t)addr;
+}
+
+/* Cache macros - Packet buffers would be from pbuf pool which is cached */
+#define EMAC_VIRT_NOCACHE(addr) (addr)
+#define EMAC_CACHE_INVALIDATE(addr, size) \
+	mmu_invalidate_dcache(addr, size)
+#define EMAC_CACHE_WRITEBACK(addr, size) \
+	mmu_clean_dcache(addr, size)
+#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
+	mmu_clean_invalidated_dcache(addr, size)
+
+/* DM644x does not have BD's in cached memory - so no cache functions */
+#define BD_CACHE_INVALIDATE(addr, size)
+#define BD_CACHE_WRITEBACK(addr, size)
+#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
+
+
+static struct emac_priv davinci_emac_device;
+static struct rt_semaphore sem_ack;
+
+/* clock frequency for EMAC */
+static unsigned long emac_bus_frequency;
+static unsigned long mdio_max_freq;
+
+#define EMAC_AUTONEG_TIMEOUT	5000000	
+#define EMAC_LINK_TIMEOUT	        500000  
+
+
+/* EMAC TX Host Error description strings */
+static char *emac_txhost_errcodes[16] = {
+	"No error", "SOP error", "Ownership bit not set in SOP buffer",
+	"Zero Next Buffer Descriptor Pointer Without EOP",
+	"Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
+	"Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+	"Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* EMAC RX Host Error description strings */
+static char *emac_rxhost_errcodes[16] = {
+	"No error", "Reserved", "Ownership bit not set in input buffer",
+	"Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
+	"Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+	"Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+#define emac_read(reg)		  davinci_readl(priv->emac_base + (reg))
+#define emac_write(reg, val)      davinci_writel(val, priv->emac_base + (reg))
+
+#define emac_ctrl_read(reg)	  davinci_readl((priv->ctrl_base + (reg)))
+#define emac_ctrl_write(reg, val) davinci_writel(val, (priv->ctrl_base + (reg)))
+
+#define emac_mdio_read(reg)	  davinci_readl(priv->mdio_base + (reg))
+#define emac_mdio_write(reg, val) davinci_writel(val, (priv->mdio_base + (reg)))
+
+static void emac_int_enable(struct emac_priv *priv);
+static void emac_int_disable(struct emac_priv *priv);
+static int emac_init_txch(struct emac_priv *priv, rt_uint32_t ch);
+
+/* PHY/MII bus related */
+
+/* Wait until mdio is ready for next command */
+#define MDIO_WAIT_FOR_USER_ACCESS\
+		while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
+			MDIO_USERACCESS_GO) != 0)
+
+/* Read one 16-bit PHY register over the MDIO bus.  Busy-waits until the
+ * USERACCESS slot is free, issues the read (GO + phy/reg address), then
+ * busy-waits for completion and returns the DATA field of the result. */
+static int emac_mii_read(struct emac_priv *priv, int phy_id, int phy_reg)
+{
+	unsigned int phy_data = 0;
+	unsigned int phy_control;
+
+	/* Wait until mdio is ready for next command */
+	MDIO_WAIT_FOR_USER_ACCESS;
+
+	phy_control = (MDIO_USERACCESS_GO |
+		       MDIO_USERACCESS_READ |
+		       ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
+		       ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
+		       (phy_data & MDIO_USERACCESS_DATA));
+	emac_mdio_write(MDIO_USERACCESS(0), phy_control);
+
+	/* Wait until mdio is ready for next command */
+	MDIO_WAIT_FOR_USER_ACCESS;
+
+	return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
+
+}
+
+/* Write one 16-bit value to a PHY register over MDIO.  Waits for the
+ * USERACCESS slot to be free, issues the write and returns immediately;
+ * completion is awaited implicitly by the next MDIO access. */
+static int emac_mii_write(struct emac_priv *priv, int phy_id,
+			  int phy_reg, rt_uint16_t phy_data)
+{
+
+	unsigned int control;
+
+	/* Wait until mdio is ready for next command */
+	MDIO_WAIT_FOR_USER_ACCESS;
+
+	control = (MDIO_USERACCESS_GO |
+		   MDIO_USERACCESS_WRITE |
+		   ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
+		   ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
+		   (phy_data & MDIO_USERACCESS_DATA));
+	emac_mdio_write(MDIO_USERACCESS(0), control);
+
+	return 0;
+}
+
+/* Bring up the MDIO state machine: derive the clock divider from the
+ * peripheral bus frequency (falling back to the slowest setting when
+ * either frequency is unknown) and enable the controller. */
+static int emac_mii_reset(struct emac_priv *priv)
+{
+	int bus_freq = emac_bus_frequency;
+	unsigned int divider = 0xFF;	/* slowest MDIO clock by default */
+
+	if (mdio_max_freq && bus_freq)
+		divider = (bus_freq / mdio_max_freq) - 1;
+
+	divider &= MDIO_CONTROL_CLKDIV;
+
+	/* Set enable and clock divider in MDIOControl */
+	emac_mdio_write(MDIO_CONTROL, divider | MDIO_CONTROL_ENABLE);
+
+	return 0;
+}
+
+
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF		0x00
+#define DUPLEX_FULL		0x01
+
+/* Crude busy-wait delay, roughly calibrated in microseconds.  The loop
+ * counter is volatile so the compiler cannot optimize the empty inner
+ * loop away (the original non-volatile loops could be deleted entirely
+ * at -O2, collapsing the delay to zero). */
+static void udelay(rt_uint32_t us)
+{
+    volatile rt_uint32_t len;
+    for (; us > 0; us--)
+        for (len = 0; len < 10; len++)
+            ;
+}
+
+/* Restart autonegotiation on the PHY, advertising all 10/100 modes, and
+ * poll BMSR until negotiation completes or EMAC_AUTONEG_TIMEOUT expires.
+ * Fixes vs. original: stray double semicolon removed, and `status` is
+ * zero-initialized so the post-loop test never reads an indeterminate
+ * value. */
+static void davinci_emac_phy_reset(rt_device_t dev)
+{
+	int i;
+	rt_uint16_t status = 0, adv;
+	struct emac_priv *priv = dev->user_data;
+
+	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
+	emac_mii_write(priv, priv->phy_addr, MII_ADVERTISE, adv);
+	rt_kprintf("%s: Starting autonegotiation...\n", dev->parent.name);
+	emac_mii_write(priv, priv->phy_addr, MII_BMCR, (BMCR_ANENABLE
+					 | BMCR_ANRESTART));
+
+	for (i = 0; i < EMAC_AUTONEG_TIMEOUT / 100; i++) {
+		status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
+		if (status & BMSR_ANEGCOMPLETE)
+			break;
+		udelay(100);
+	}
+
+	if (status & BMSR_ANEGCOMPLETE)
+		rt_kprintf("%s: Autonegotiation complete\n", dev->parent.name);
+	else
+		rt_kprintf("%s: Autonegotiation timed out (status=0x%04x)\n",
+		       dev->parent.name, status);
+}
+
+
+/* Probe and bring up the PHY.  Returns 1 and fills priv->speed/duplex/
+ * link when a link is established; returns 0 when no PHY responds or
+ * the link stays down after an autonegotiation attempt.
+ * NOTE(review): speed is stored here as 1 (100M) / 0 (10M), while
+ * davinci_emac_update_link stores 100/10 -- confirm which encoding
+ * emac_update_phystatus expects. */
+static int davinci_emac_phy_init(rt_device_t dev)
+{
+	struct emac_priv *priv = dev->user_data;
+	rt_uint16_t phy_id, status, adv, lpa;
+	int media, speed, duplex;
+	int i;
+
+	/* Check if the PHY is up to snuff... PHYSID1 reads 0xffff when
+	 * nothing answers at this address */
+	phy_id = emac_mii_read(priv, priv->phy_addr, MII_PHYSID1);
+	if (phy_id == 0xffff) {
+		rt_kprintf("%s: No PHY present\n", dev->parent.name);
+		return 0;
+	}
+
+	status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
+	if (!(status & BMSR_LSTATUS)) {
+		/* Try to re-negotiate if we don't have link already. */
+		davinci_emac_phy_reset(dev);
+
+		for (i = 0; i < EMAC_LINK_TIMEOUT / 100; i++) {
+			status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
+			if (status & BMSR_LSTATUS)
+				break;
+			udelay(100);
+		}
+	}
+
+	if (!(status & BMSR_LSTATUS)) {
+		rt_kprintf("%s: link down (status: 0x%04x)\n",
+		       dev->parent.name, status);
+		priv->link = 0;
+		return 0;
+	} else {
+		/* resolve the negotiated mode from our advertisement
+		 * ANDed with the link partner's abilities */
+		adv = emac_mii_read(priv, priv->phy_addr, MII_ADVERTISE);
+		lpa = emac_mii_read(priv, priv->phy_addr, MII_LPA);
+		media = mii_nway_result(lpa & adv);
+		speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
+			 ? 1 : 0);
+		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+		rt_kprintf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
+		       dev->parent.name,
+		       speed ? "100" : "10",
+		       duplex ? "full" : "half",
+		       lpa);
+		priv->speed = speed;
+		priv->duplex = duplex;
+		priv->link = 1;
+
+		return 1;
+	}
+}
+
+/**
+ * emac_update_phystatus: Update Phy status
+ * @priv: The DaVinci EMAC driver private structure
+ *
+ * Propagates the PHY state (duplex, speed) into the MACCONTROL
+ * register.  Called only when the link speed/duplex/status changed.
+ *
+ * NOTE(review): in the SPEED_1000 branch MACCONTROL is re-read, which
+ * discards the duplex bit update computed just above; this looks
+ * inherited from the upstream Linux driver and is harmless on DM365
+ * (not EMAC_VERSION_2, no gigabit) -- confirm if ever reused on DM646x.
+ */
+static void emac_update_phystatus(struct emac_priv *priv)
+{
+	rt_uint32_t mac_control;
+	rt_uint32_t new_duplex;
+	rt_uint32_t cur_duplex;
+
+	mac_control = emac_read(EMAC_MACCONTROL);
+	cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+	/* with a PHY attached use the negotiated duplex, else force full */
+	if (priv->phy_mask)
+		new_duplex = priv->duplex;
+	else
+		new_duplex = DUPLEX_FULL;
+
+	/* We get called only if link has changed (speed/duplex/status) */
+	if ((priv->link) && (new_duplex != cur_duplex)) {
+		priv->duplex = new_duplex;
+		if (DUPLEX_FULL == priv->duplex)
+			mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
+		else
+			mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
+	}
+
+	if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
+		mac_control = emac_read(EMAC_MACCONTROL);
+		mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
+				EMAC_DM646X_MACCONTORL_GIGFORCE);
+	} else {
+		/* Clear the GIG bit and GIGFORCE bit */
+		mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
+					EMAC_DM646X_MACCONTORL_GIG);
+
+		if (priv->rmii_en && (priv->speed == SPEED_100))
+			mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
+		else
+			mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
+	}
+
+	/* Update mac_control if changed */
+	emac_write(EMAC_MACCONTROL, mac_control);
+#if 0
+	if (priv->link) {
+		/* link ON */
+	/* reactivate the transmit queue if it is stopped */
+	} else {
+		/* link OFF */
+	}
+#endif
+}
+
+
+/* Periodic link monitor (intended as a timer/thread callback).  Polls
+ * BMSR; on a link-state transition it re-resolves speed/duplex from the
+ * negotiation registers, reports the change to the lwIP glue via
+ * eth_device_linkchange(), and refreshes MACCONTROL.
+ * NOTE(review): stores speed as 100/10 while davinci_emac_phy_init
+ * stores 1/0 -- confirm the intended encoding. */
+void davinci_emac_update_link(void *param)
+{
+	struct emac_priv *priv = param;
+	rt_device_t dev = &(priv->parent.parent);
+	rt_uint32_t status, status_change = 0;
+	rt_uint32_t link;
+	rt_uint32_t media;
+	rt_uint16_t adv, lpa;
+
+	status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
+	if ((status & BMSR_LSTATUS) == 0)
+		link = 0;
+	else
+		link = 1;
+
+	if (link != priv->link) {
+		priv->link = link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (priv->link) {
+			adv = emac_mii_read(priv, priv->phy_addr, MII_ADVERTISE);
+			lpa = emac_mii_read(priv, priv->phy_addr, MII_LPA);
+			media = mii_nway_result(lpa & adv);
+			priv->speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
+				 ? 100 : 10);
+			priv->duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+			rt_kprintf("%s: link up (%dMbps/%s-duplex)\n",
+					dev->parent.name, priv->speed,
+					DUPLEX_FULL == priv->duplex ? "Full":"Half");
+			eth_device_linkchange(&priv->parent, RT_TRUE);
+		} else {
+			rt_kprintf("%s: link down\n", dev->parent.name);
+			eth_device_linkchange(&priv->parent, RT_FALSE);
+		}
+		emac_update_phystatus(priv);
+
+	}
+
+}
+
+
+/**
+ * emac_net_tx_complete: TX packet completion function
+ * @priv: The DaVinci EMAC driver private structure
+ * @net_data_tokens: array of packet tokens (pbuf pointers)
+ * @num_tokens: number of tokens in the array
+ * @ch: TX channel number
+ *
+ * Accounts transmitted packets/bytes and releases sem_ack so the
+ * blocked transmitter (rt_davinci_emac_tx) can return.  The pbufs are
+ * NOT freed here -- presumably the lwIP caller frees them after the tx
+ * call returns; verify against the ethernetif glue.
+ */
+static int emac_net_tx_complete(struct emac_priv *priv,
+				void **net_data_tokens,
+				int num_tokens, rt_uint32_t ch)
+{
+	/* NOTE(review): unsigned cnt compared against signed num_tokens;
+	 * callers always pass >= 0 counts here */
+	rt_uint32_t cnt;
+
+	for (cnt = 0; cnt < num_tokens; cnt++) {
+		struct pbuf *p = (struct pbuf *)net_data_tokens[cnt];
+		if (p == RT_NULL)
+			continue;
+		priv->net_dev_stats.tx_packets++;
+		priv->net_dev_stats.tx_bytes += p->len;
+		//free pbuf
+	}
+	rt_sem_release(&sem_ack);
+	return 0;
+}
+
+/**
+ * emac_txch_teardown: TX channel teardown
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: TX channel number
+ *
+ * Waits (bounded busy-poll) for the hardware to acknowledge the
+ * teardown via TXCP, writes the teardown-complete acknowledgement,
+ * then walks the active descriptor queue completing every in-flight
+ * packet and returning all descriptors to the free pool.
+ */
+static void emac_txch_teardown(struct emac_priv *priv, rt_uint32_t ch)
+{
+	rt_uint32_t teardown_cnt = 0xFFFFFFF0; /* Some high value */
+	struct emac_txch *txch = priv->txch[ch];
+	struct emac_tx_bd __iomem *curr_bd;
+
+	while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
+	       EMAC_TEARDOWN_VALUE) {
+		/* wait till tx teardown complete */
+		--teardown_cnt;
+		if (0 == teardown_cnt) {
+			rt_kprintf("EMAC: TX teardown aborted\n");
+			break;
+		}
+	}
+	/* acknowledge teardown completion to the hardware */
+	emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
+
+	/* process sent packets and return pbuf's to upper layer */
+	if (1 == txch->queue_active) {
+		curr_bd = txch->active_queue_head;
+		while (curr_bd != RT_NULL) {
+			emac_net_tx_complete(priv, (void *)
+					&curr_bd->buf_token, 1, ch);
+			if (curr_bd != txch->active_queue_tail)
+				curr_bd = curr_bd->next;
+			else
+				break;
+		}
+		/* recycle the whole active chain back into the free pool */
+		txch->bd_pool_head = txch->active_queue_head;
+		txch->active_queue_head =
+		txch->active_queue_tail = RT_NULL;
+	}
+}
+
+
+/**
+ * emac_stop_txch: Stop TX channel operation
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: TX channel number
+ *
+ * Requests a hardware TX teardown (the TXTEARDOWN register takes the
+ * channel number; 0 here selects channel 0, the only channel this
+ * driver uses), completes it via emac_txch_teardown(), then masks the
+ * channel's TX interrupt.  teardown_pending gates the BD processing
+ * path while the teardown is in flight.
+ */
+static void emac_stop_txch(struct emac_priv *priv, rt_uint32_t ch)
+{
+	struct emac_txch *txch = priv->txch[ch];
+
+	if (txch) {
+		txch->teardown_pending = 1;
+		emac_write(EMAC_TXTEARDOWN, 0);
+		emac_txch_teardown(priv, ch);
+		txch->teardown_pending = 0;
+		emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
+	}
+}
+
+
+
+/**
+ * emac_tx_bdproc: TX buffer descriptor (packet) processing
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: TX channel number to process buffer descriptors for
+ * @budget: number of packets allowed to process
+ *
+ * Processes TX buffer descriptors after packets are transmitted - checks
+ * ownership bit on the TX descriptor and requeues it to the free pool &
+ * completes the packet token.  Only "budget" number of packets are
+ * processed per call.  Serialized against emac_send() by tx_lock.
+ *
+ * (Fix vs. original: removed the unused local `flags`, a leftover from
+ * the Linux driver's spin_lock_irqsave which this port replaced with a
+ * semaphore.)
+ *
+ * Returns number of packets processed
+ */
+static int emac_tx_bdproc(struct emac_priv *priv, rt_uint32_t ch, rt_uint32_t budget)
+{
+	rt_uint32_t frame_status;
+	rt_uint32_t pkts_processed = 0;
+	rt_uint32_t tx_complete_cnt = 0;
+	struct emac_tx_bd __iomem *curr_bd;
+	struct emac_txch *txch = priv->txch[ch];
+	rt_uint32_t *tx_complete_ptr = txch->tx_complete;
+
+	if (1 == txch->teardown_pending) {
+		rt_kprintf("DaVinci EMAC:emac_tx_bdproc: "\
+				"teardown pending\n");
+		return 0;  /* dont handle any pkt completions */
+	}
+
+	++txch->proc_count;
+	rt_sem_take(&priv->tx_lock, RT_WAITING_FOREVER);
+	curr_bd = txch->active_queue_head;
+	if (RT_NULL == curr_bd) {
+		emac_write(EMAC_TXCP(ch),
+			   emac_virt_to_phys(txch->last_hw_bdprocessed));
+		txch->no_active_pkts++;
+		rt_sem_release(&priv->tx_lock);
+		return 0;
+	}
+	BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+	frame_status = curr_bd->mode;
+	/* walk completed descriptors: hardware clears OWNERSHIP when done */
+	while ((curr_bd) &&
+	      ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
+	      (pkts_processed < budget)) {
+		emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd));
+		txch->active_queue_head = curr_bd->next;
+		if (frame_status & EMAC_CPPI_EOQ_BIT) {
+			if (curr_bd->next) {	/* misqueued packet */
+				emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
+				++txch->mis_queued_packets;
+			} else {
+				txch->queue_active = 0; /* end of queue */
+			}
+		}
+		*tx_complete_ptr = (rt_uint32_t) curr_bd->buf_token;
+		++tx_complete_ptr;
+		++tx_complete_cnt;
+		curr_bd->next = txch->bd_pool_head;
+		txch->bd_pool_head = curr_bd;
+		--txch->active_queue_count;
+		pkts_processed++;
+		txch->last_hw_bdprocessed = curr_bd;
+		curr_bd = txch->active_queue_head;
+		if (curr_bd) {
+			BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+			frame_status = curr_bd->mode;
+		}
+	} /* end of pkt processing loop */
+
+	emac_net_tx_complete(priv,
+			     (void *)&txch->tx_complete[0],
+			     tx_complete_cnt, ch);
+	rt_sem_release(&priv->tx_lock);
+	return pkts_processed;
+}
+
+
+#define EMAC_ERR_TX_OUT_OF_BD -1
+
+/**
+ * emac_send: EMAC Transmit function (internal)
+ * @priv: The DaVinci EMAC driver private structure
+ * @pkt: packet pointer (contains pbuf ptr)
+ * @ch: TX channel number
+ *
+ * Called by the transmit function to queue the packet in EMAC hardware
+ * queue.  Serialized against emac_tx_bdproc() by tx_lock.
+ *
+ * (Fix vs. original: removed the unused local `flags`, a leftover from
+ * the Linux driver's spin_lock_irqsave which this port replaced with a
+ * semaphore.)
+ *
+ * Returns success(0) or error code (typically out of desc's)
+ */
+static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, rt_uint32_t ch)
+{
+	struct emac_tx_bd __iomem *curr_bd;
+	struct emac_txch *txch;
+	struct emac_netbufobj *buf_list;
+
+	txch = priv->txch[ch];
+	buf_list = pkt->buf_list;   /* get handle to the buffer array */
+
+	/* check packet size and pad if short */
+	if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
+		buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
+		pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
+	}
+
+	rt_sem_take(&priv->tx_lock, RT_WAITING_FOREVER);
+	curr_bd = txch->bd_pool_head;
+	if (curr_bd == RT_NULL) {
+		txch->out_of_tx_bd++;
+		rt_sem_release(&priv->tx_lock);
+		return EMAC_ERR_TX_OUT_OF_BD;
+	}
+
+	/* fill the descriptor: single-fragment frame, SOP+EOP on one BD */
+	txch->bd_pool_head = curr_bd->next;
+	curr_bd->buf_token = buf_list->buf_token;
+	curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr);
+	curr_bd->off_b_len = buf_list->length;
+	curr_bd->h_next = 0;
+	curr_bd->next = RT_NULL;
+	curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
+			 EMAC_CPPI_EOP_BIT | pkt->pkt_length);
+
+	/* flush the packet from cache if write back cache is present */
+	BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+
+	/* send the packet */
+	if (txch->active_queue_head == RT_NULL) {
+		txch->active_queue_head = curr_bd;
+		txch->active_queue_tail = curr_bd;
+		if (1 != txch->queue_active) {
+			emac_write(EMAC_TXHDP(ch),
+					emac_virt_to_phys(curr_bd));
+			txch->queue_active = 1;
+		}
+		++txch->queue_reinit;
+	} else {
+		register struct emac_tx_bd __iomem *tail_bd;
+		register rt_uint32_t frame_status;
+
+		/* append to the live chain; if hardware already hit EOQ on
+		 * the old tail, restart the channel from the new BD */
+		tail_bd = txch->active_queue_tail;
+		tail_bd->next = curr_bd;
+		txch->active_queue_tail = curr_bd;
+		tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
+		tail_bd->h_next = (int)emac_virt_to_phys(curr_bd);
+		frame_status = tail_bd->mode;
+		if (frame_status & EMAC_CPPI_EOQ_BIT) {
+			emac_write(EMAC_TXHDP(ch), emac_virt_to_phys(curr_bd));
+			frame_status &= ~(EMAC_CPPI_EOQ_BIT);
+			tail_bd->mode = frame_status;
+			++txch->end_of_queue_add;
+		}
+	}
+	txch->active_queue_count++;
+	rt_sem_release(&priv->tx_lock);
+	return 0;
+}
+
+/**
+ * emac_dev_xmit: EMAC Transmit function
+ * @p: PBUF pointer
+ * @priv: The DaVinci EMAC driver private structure
+ *
+ * Builds single-fragment buffer/packet objects around the pbuf, flushes
+ * the payload from the data cache, and queues it on the hardware TX
+ * channel via emac_send().
+ *
+ * NOTE(review): only p->len (the first fragment) is transmitted;
+ * chained pbufs (p->next) are ignored -- confirm the lwIP glue
+ * guarantees tot_len == len here (e.g. by coalescing before tx).
+ *
+ * Returns success(RT_EOK) or error code (typically out of desc's)
+ */
+static int emac_dev_xmit(struct pbuf *p, struct emac_priv *priv)
+{
+	int ret_code;
+	struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
+	struct emac_netpktobj tx_packet;  /* packet object */
+
+	/* If no link, return */
+	if (!priv->link) {
+			rt_kprintf("DaVinci EMAC: No link to transmit\n");
+		return -RT_EBUSY;
+	}
+
+	/* Build the buffer and packet objects - Since only single fragment is
+	 * supported, need not set length and token in both packet & object.
+	 * Doing so for completeness sake & to show that this needs to be done
+	 * in multifragment case
+	 */
+	tx_packet.buf_list = &tx_buf;
+	tx_packet.num_bufs = 1; /* only single fragment supported */
+	tx_packet.pkt_length = p->len;
+	tx_packet.pkt_token = (void *)p;
+	tx_buf.length = p->len;
+	tx_buf.buf_token = (void *)p;
+	tx_buf.data_ptr = p->payload;
+	/* write the payload back to memory so DMA sees current data */
+	EMAC_CACHE_WRITEBACK((unsigned long)p->payload, p->len);
+	ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
+	if (ret_code != 0) {
+		if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
+			rt_kprintf("DaVinci EMAC: xmit() fatal"\
+					" err. Out of TX BD's\n");
+		}
+		priv->net_dev_stats.tx_dropped++;
+		return -RT_EBUSY;
+	}
+
+	return RT_EOK;
+}
+
+/**
+ * emac_cleanup_txch: release all bookkeeping for a TX channel
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: TX channel number
+ *
+ * Drops the descriptor-memory reference, frees the completion array and
+ * the channel structure itself, then clears the channel slot.
+ */
+static void emac_cleanup_txch(struct emac_priv *priv, rt_uint32_t ch)
+{
+	struct emac_txch *txch = priv->txch[ch];
+
+	if (txch == NULL)
+		return;
+
+	txch->bd_mem = NULL;	/* descriptor RAM is not heap-allocated */
+	rt_free(txch->tx_complete);
+	rt_free(txch);
+	priv->txch[ch] = NULL;
+}
+
+
+/**
+ * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * @priv: The DaVinci EMAC driver private structure
+ *
+ * Called when a TX completion was not signalled in time (see the
+ * sem_ack timeout in rt_davinci_emac_tx), suggesting the adapter
+ * wedged.  Assumes a hardware fault: tears down and re-initializes the
+ * TX channel with interrupts masked, then re-arms the channel's TX
+ * interrupt.
+ */
+static void emac_dev_tx_timeout(struct emac_priv *priv)
+{
+	rt_kprintf("emac tx timeout.\n");
+	priv->net_dev_stats.tx_errors++;
+	emac_int_disable(priv);
+	emac_stop_txch(priv, EMAC_DEF_TX_CH);
+	emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
+	emac_init_txch(priv, EMAC_DEF_TX_CH);
+	/* clear head-descriptor pointer and unmask the channel interrupt */
+	emac_write(EMAC_TXHDP(0), 0);
+	emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
+	emac_int_enable(priv);
+}
+
+
+/* ethernet device interface */
+/* transmit packet. */
+/**
+ * rt_davinci_emac_tx: lwIP transmit hook for the EMAC device
+ * @dev: RT-Thread device handle (user_data holds struct emac_priv)
+ * @p: pbuf to transmit (only single-fragment pbufs are queued by
+ *     emac_dev_xmit)
+ *
+ * Queues the packet on the hardware TX channel, then blocks on the
+ * TX-complete semaphore released from the TX BD processing path.  If
+ * queuing fails the error is returned immediately instead of stalling
+ * for the 5 second ack timeout; an ack timeout is treated as a
+ * hardware fault and handled by emac_dev_tx_timeout().
+ */
+rt_err_t rt_davinci_emac_tx( rt_device_t dev, struct pbuf* p)
+{
+	rt_err_t err;
+	struct emac_priv *priv = dev->user_data;
+
+	/* hand the packet to the DMA engine; bail out early on failure
+	 * rather than waiting for an ack that will never arrive */
+	err = emac_dev_xmit(p, priv);
+	if (err != RT_EOK)
+		return err;
+
+	/* wait for the TX-complete ack */
+	err = rt_sem_take(&sem_ack, RT_TICK_PER_SECOND*5);
+	if (err != RT_EOK)
+	{
+		/* assume a hardware fault: tear down and re-init the TX channel */
+		emac_dev_tx_timeout(priv);
+	}
+
+	return RT_EOK;
+}
+
+/**
+ * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number to process buffer descriptors for
+ * @curr_bd: current buffer descriptor
+ * @buffer: buffer pointer for descriptor
+ * @buf_token: buffer token (stores pbuf information)
+ *
+ * Prepares the recycled buffer descriptor and addes it to hardware
+ * receive queue - if queue empty this descriptor becomes the head
+ * else addes the descriptor to end of queue
+ *
+ */
+static void emac_addbd_to_rx_queue(struct emac_priv *priv, rt_uint32_t ch,
+		struct emac_rx_bd __iomem *curr_bd,
+		char *buffer, void *buf_token)
+{
+	struct emac_rxch *rxch = priv->rxch[ch];
+
+	/* populate the hardware descriptor */
+	curr_bd->h_next = 0;
+	curr_bd->buff_ptr = virt_to_phys(buffer);
+	curr_bd->off_b_len = rxch->buf_size;
+	curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
+	/* next/data_ptr/buf_token are software-only bookkeeping fields
+	 * stored alongside the hardware descriptor */
+	curr_bd->next = RT_NULL;
+	curr_bd->data_ptr = buffer;
+	curr_bd->buf_token = buf_token;
+
+	/* write back  */
+	BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+	if (rxch->active_queue_head == RT_NULL) {
+		rxch->active_queue_head = curr_bd;
+		rxch->active_queue_tail = curr_bd;
+		/* NOTE(review): DMA is restarted only when queue_active is
+		 * already non-zero, then queue_active is re-assigned 1 -
+		 * confirm this condition against the CPPI queue restart
+		 * rules in the EMAC documentation */
+		if (0 != rxch->queue_active) {
+			emac_write(EMAC_RXHDP(ch),
+				   emac_virt_to_phys(rxch->active_queue_head));
+			rxch->queue_active = 1;
+		}
+	} else {
+		struct emac_rx_bd __iomem *tail_bd;
+		rt_uint32_t frame_status;
+
+		/* chain the recycled descriptor after the current tail */
+		tail_bd = rxch->active_queue_tail;
+		rxch->active_queue_tail = curr_bd;
+		tail_bd->next = curr_bd;
+		tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
+		tail_bd->h_next = emac_virt_to_phys(curr_bd);
+		frame_status = tail_bd->mode;
+		/* if the hardware already reached end-of-queue, restart DMA
+		 * at the new descriptor and clear the EOQ flag */
+		if (frame_status & EMAC_CPPI_EOQ_BIT) {
+			emac_write(EMAC_RXHDP(ch),
+					emac_virt_to_phys(curr_bd));
+			frame_status &= ~(EMAC_CPPI_EOQ_BIT);
+			tail_bd->mode = frame_status;
+			++rxch->end_of_queue_add;
+		}
+	}
+	++rxch->recycled_bd;
+}
+
+/**
+ * emac_net_rx_cb: Prepares packet and sends to upper layer
+ * @priv: The DaVinci EMAC driver private structure
+ * @net_pkt_list: Network packet list (received packets)
+ *
+ * Invalidates packet buffer memory and sends the received packet to upper
+ * layer
+ *
+ * Returns success or appropriate error code (none as of now)
+ */
+static int emac_net_rx_cb(struct emac_priv *priv,
+			  struct emac_netpktobj *net_pkt_list)
+{
+	struct eth_device *device = &priv->parent;
+	struct pbuf *p;
+	/* pkt_token was set to the pbuf handle when the buffer was allocated */
+	p = (struct pbuf *)net_pkt_list->pkt_token;
+	/* set length of packet */
+	p->tot_len = net_pkt_list->pkt_length;
+	p->len = net_pkt_list->pkt_length;
+	/* buffer was filled by DMA; invalidate before the CPU reads it */
+	EMAC_CACHE_INVALIDATE((unsigned long)p->payload, p->len);
+	/* hand the pbuf to lwIP; ownership passes to the stack on success */
+	if (device->netif->input(p, device->netif) != RT_EOK)
+	{
+		pbuf_free(p);
+	}
+	priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
+	priv->net_dev_stats.rx_packets++;
+	return 0;
+}
+
+/**
+ * emac_net_alloc_rx_buf: Allocate a pbuf for RX
+ * @priv: The DaVinci EMAC driver private structure
+ * @buf_size: size of PBUF data buffer to allocate
+ * @data_token: data token returned (pbuf handle for storing in buffer desc)
+ * @ch: RX channel number
+ *
+ * Called during RX channel setup - allocates pbuf buffer of required size
+ * and provides the pbuf handle and allocated buffer data pointer to caller
+ *
+ * Returns pbuf data pointer or 0 on failure to alloc pbuf
+ */
+static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
+		void **data_token, rt_uint32_t ch)
+{
+	struct pbuf* p;
+
+	/* PBUF_RAM gives one contiguous buffer, which matches the driver's
+	 * single-fragment DMA model */
+	p = pbuf_alloc(PBUF_LINK, buf_size, PBUF_RAM);
+	if (RT_NULL == p) {
+		rt_kprintf("DaVinci EMAC: failed to alloc pbuf\n");
+		return RT_NULL;
+	}
+
+	/* set device pointer in p and reserve space for extra bytes */
+	*data_token = (void *) p;
+	/* flush stale cache lines before the buffer is handed to DMA */
+	EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p->payload, buf_size);
+	return p->payload;
+}
+
+
+/**
+ * emac_rx_bdproc: RX buffer descriptor (packet) processing
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number to process buffer descriptors for
+ * @budget: number of packets allowed to process
+ *
+ * Processes RX buffer descriptors - checks ownership bit on the RX buffer
+ * descriptor, sends the receive packet to upper layer, allocates a new PBUF
+ * and recycles the buffer descriptor (requeues it in hardware RX queue).
+ * Only "budget" number of packets are processed and indication of pending
+ * packets provided to the caller.
+ *
+ * Returns number of packets processed (and indication of pending packets)
+ */
+static int emac_rx_bdproc(struct emac_priv *priv, rt_uint32_t ch, rt_uint32_t budget)
+{
+	unsigned long flags;	/* NOTE(review): unused - leftover from the Linux spin_lock_irqsave port */
+	rt_uint32_t frame_status;
+	rt_uint32_t pkts_processed = 0;
+	char *new_buffer;
+	struct emac_rx_bd __iomem *curr_bd;
+	struct emac_rx_bd __iomem *last_bd;
+	struct emac_netpktobj *curr_pkt, pkt_obj;
+	struct emac_netbufobj buf_obj;
+	struct emac_netbufobj *rx_buf_obj;
+	void *new_buf_token;
+	struct emac_rxch *rxch = priv->rxch[ch];
+
+	if (1 == rxch->teardown_pending)
+		return 0;
+	++rxch->proc_count;
+	rt_sem_take(&priv->rx_lock, RT_WAITING_FOREVER);
+	pkt_obj.buf_list = &buf_obj;
+	curr_pkt = &pkt_obj;
+	curr_bd = rxch->active_queue_head;
+	/* descriptor was written by DMA; invalidate before reading it */
+	BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+	frame_status = curr_bd->mode;
+
+	/* walk CPU-owned descriptors until the budget is exhausted */
+	while ((curr_bd) &&
+	       ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
+	       (pkts_processed < budget)) {
+
+		/* allocate the replacement buffer first - the old one is
+		 * handed upstream and must not be reused */
+		new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
+					&new_buf_token, EMAC_DEF_RX_CH);
+		if (RT_NULL == new_buffer) {
+			++rxch->out_of_rx_buffers;
+			goto end_emac_rx_bdproc;
+		}
+
+		/* populate received packet data structure */
+		rx_buf_obj = &curr_pkt->buf_list[0];
+		rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
+		rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
+		rx_buf_obj->buf_token = curr_bd->buf_token;
+		curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
+		curr_pkt->num_bufs = 1;
+		curr_pkt->pkt_length =
+			(frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
+		/* ack the descriptor via the completion pointer register */
+		emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd));
+		++rxch->processed_bd;
+		last_bd = curr_bd;
+		curr_bd = last_bd->next;
+		rxch->active_queue_head = curr_bd;
+
+		/* check if end of RX queue ? */
+		if (frame_status & EMAC_CPPI_EOQ_BIT) {
+			if (curr_bd) {
+				++rxch->mis_queued_packets;
+				emac_write(EMAC_RXHDP(ch),
+					   emac_virt_to_phys(curr_bd));
+			} else {
+				++rxch->end_of_queue;
+				rxch->queue_active = 0;
+			}
+		}
+
+		/* recycle BD */
+		emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
+				       new_buf_token);
+
+		/* return the packet to the user - BD ptr passed in
+		 * last parameter for potential *future* use */
+		/* drop rx_lock across the upcall so the stack does not run
+		 * with the descriptor lock held */
+		rt_sem_release(&priv->rx_lock);
+		emac_net_rx_cb(priv, curr_pkt);//???
+		rt_sem_take(&priv->rx_lock, RT_WAITING_FOREVER);
+		/* re-read the head: it may have changed while unlocked */
+		curr_bd = rxch->active_queue_head;
+		if (curr_bd) {
+			BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+			frame_status = curr_bd->mode;
+		}
+		++pkts_processed;
+	}
+
+end_emac_rx_bdproc:
+	rt_sem_release(&priv->rx_lock);
+	return pkts_processed;
+}
+
+
+/* reception packet. */
+/**
+ * rt_davinci_emac_rx: lwIP receive hook for the EMAC device
+ * @dev: RT-Thread device handle (user_data holds struct emac_priv)
+ *
+ * Runs in the ethernet RX thread after emac_irq() masked the EMAC
+ * interrupts and signalled work.  Reads EMAC_MACINVECTOR to decide
+ * which queues need service, runs the TX and RX buffer-descriptor
+ * processors, reports host errors, and finally re-enables the EMAC
+ * interrupts.
+ *
+ * Received packets are delivered to lwIP inside emac_net_rx_cb(), so
+ * this function always returns RT_NULL to the caller.
+ */
+struct pbuf *rt_davinci_emac_rx(rt_device_t dev)
+{
+	struct emac_priv *priv = dev->user_data;
+	unsigned int mask;
+	rt_uint32_t status = 0;
+	rt_uint32_t num_pkts = 0;
+
+	/* Check interrupt vectors and call packet processing */
+	status = emac_read(EMAC_MACINVECTOR);
+
+	mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
+
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
+
+	if (status & mask) {
+		num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
+					  EMAC_DEF_TX_MAX_SERVICE);
+	} /* TX processing */
+
+	mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
+
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
+
+	if (status & mask) {
+		num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, EMAC_DEF_RX_MAX_SERVICE);
+	} /* RX processing */
+	(void)num_pkts; /* processed counts are kept for future pacing use */
+
+	mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+	if (status & mask) {
+		rt_uint32_t ch, cause;
+		rt_kprintf("DaVinci EMAC: Fatal Hardware Error\n");
+
+		/* decode and report the failing TX/RX error code/channel
+		 * (previously computed but never printed) */
+		status = emac_read(EMAC_MACSTATUS);
+		cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
+			 EMAC_MACSTATUS_TXERRCODE_SHIFT);
+		if (cause) {
+			ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
+			      EMAC_MACSTATUS_TXERRCH_SHIFT);
+			rt_kprintf("DaVinci EMAC: TX err code %d ch %d\n",
+				   cause, ch);
+		}
+		cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
+			 EMAC_MACSTATUS_RXERRCODE_SHIFT);
+		if (cause) {
+			ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
+			      EMAC_MACSTATUS_RXERRCH_SHIFT);
+			rt_kprintf("DaVinci EMAC: RX err code %d ch %d\n",
+				   cause, ch);
+		}
+	} /* Host error processing */
+
+	/* re-enable the interrupts masked in emac_irq() */
+	emac_int_enable(priv);
+	return RT_NULL;
+}
+
+
+/**
+ * emac_set_type0addr: Set EMAC Type0 mac address
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type0 mac address of the Device
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type0addr(struct emac_priv *priv, rt_uint32_t ch, char *mac_addr)
+{
+	rt_uint32_t val;
+	/* source address registers: LO holds bytes 5:4, HI bytes 3:0 */
+	val = ((mac_addr[5] << 8) | (mac_addr[4]));
+	emac_write(EMAC_MACSRCADDRLO, val);
+
+	val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+	       (mac_addr[1] << 8) | (mac_addr[0]));
+	emac_write(EMAC_MACSRCADDRHI, val);
+	/* enable unicast reception on this channel and make sure the
+	 * matching clear bit is not set */
+	val = emac_read(EMAC_RXUNICASTSET);
+	val |= BIT(ch);
+	emac_write(EMAC_RXUNICASTSET, val);
+	val = emac_read(EMAC_RXUNICASTCLEAR);
+	val &= ~BIT(ch);
+	emac_write(EMAC_RXUNICASTCLEAR, val);
+}
+
+/**
+ * emac_set_type1addr: Set EMAC Type1 mac address
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type1 mac address of the Device
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type1addr(struct emac_priv *priv, rt_uint32_t ch, char *mac_addr)
+{
+	rt_uint32_t val;
+	/* select the per-channel address slot, then program LO/HI halves */
+	emac_write(EMAC_MACINDEX, ch);
+	val = ((mac_addr[5] << 8) | mac_addr[4]);
+	emac_write(EMAC_MACADDRLO, val);
+	val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+	       (mac_addr[1] << 8) | (mac_addr[0]));
+	emac_write(EMAC_MACADDRHI, val);
+	/* also program the source address / unicast enable path */
+	emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_set_type2addr: Set EMAC Type2 mac address
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ * @index: index into RX address entries
+ * @match: match parameter for RX address matching logic
+ *
+ * Called internally to set Type2 mac address of the Device
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type2addr(struct emac_priv *priv, rt_uint32_t ch,
+			       char *mac_addr, int index, int match)
+{
+	rt_uint32_t val;
+	emac_write(EMAC_MACINDEX, index);
+	val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+	       (mac_addr[1] << 8) | (mac_addr[0]));
+	emac_write(EMAC_MACADDRHI, val);
+	/* LO word also carries the channel (bits 18:16), the match flag
+	 * (bit 19) and the valid bit (bit 20) */
+	val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \
+	       (match << 19) | BIT(20));
+	emac_write(EMAC_MACADDRLO, val);
+	emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_setmac: Set mac address in the adapter (internal function)
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set the mac address of the Device
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_setmac(struct emac_priv *priv, rt_uint32_t ch, char *mac_addr)
+{
+	/* rx_addr_type is read from EMAC_MACCONFIG in emac_hw_enable() */
+	if (priv->rx_addr_type == 0) {
+		emac_set_type0addr(priv, ch, mac_addr);
+	} else if (priv->rx_addr_type == 1) {
+		rt_uint32_t cnt;
+		/* program the same address/channel pair once per channel slot */
+		for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
+			emac_set_type1addr(priv, ch, mac_addr);
+	} else if (priv->rx_addr_type == 2) {
+		emac_set_type2addr(priv, ch, mac_addr, ch, 1);
+		emac_set_type0addr(priv, ch, mac_addr);
+	} else {
+		rt_kprintf("DaVinci EMAC: Wrong addressing\n");
+	}
+}
+
+
+/** EMAC on-chip buffer descriptor memory
+ *
+ * WARNING: Please note that the on chip memory is used for both TX and RX
+ * buffer descriptor queues and is equally divided between TX and RX desc's
+ * If the number of TX or RX descriptors change this memory pointers need
+ * to be adjusted. If external memory is allocated then these pointers can
+ * pointer to the memory
+ *
+ */
+#define EMAC_TX_BD_MEM(priv)	((priv)->emac_ctrl_ram)
+#define EMAC_RX_BD_MEM(priv)	((priv)->emac_ctrl_ram + \
+				(((priv)->ctrl_ram_size) >> 1))
+
+/**
+ * emac_init_txch: TX channel initialization
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: TX channel number
+ *
+ * Called during device init to setup a TX channel (allocate buffer desc
+ * create free pool and keep ready for transmission
+ *
+ * Returns success(0) or mem alloc failure error code (-RT_ENOMEM)
+ */
+static int emac_init_txch(struct emac_priv *priv, rt_uint32_t ch)
+{
+	rt_uint32_t cnt, bd_size;
+	void __iomem *mem;
+	struct emac_tx_bd __iomem *curr_bd;
+	struct emac_txch *txch = RT_NULL;
+
+	txch = rt_malloc(sizeof(struct emac_txch));
+	if (RT_NULL == txch) {
+		rt_kprintf("DaVinci EMAC: TX Ch mem alloc failed");
+		return -RT_ENOMEM;
+	}
+	rt_memset(txch, 0, sizeof(struct emac_txch));
+	priv->txch[ch] = txch;
+	txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
+	txch->active_queue_head = RT_NULL;
+	txch->active_queue_tail = RT_NULL;
+	txch->queue_active = 0;
+	txch->teardown_pending = 0;
+
+	/* allocate memory for TX CPPI channel on a 4 byte boundry */
+	txch->tx_complete = rt_malloc(txch->service_max * sizeof(rt_uint32_t));
+	if (RT_NULL == txch->tx_complete) {
+		rt_kprintf("DaVinci EMAC: Tx service mem alloc failed");
+		/* clear the channel slot so no dangling pointer remains
+		 * for a later emac_cleanup_txch() to double-free */
+		priv->txch[ch] = RT_NULL;
+		rt_free(txch);
+		return -RT_ENOMEM;
+	}
+	/* rt_memset for consistency with the rest of the driver */
+	rt_memset(txch->tx_complete, 0, txch->service_max * sizeof(rt_uint32_t));
+
+	/* allocate buffer descriptor pool align every BD on four word
+	 * boundry for future requirements */
+	bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
+	txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
+	txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
+
+	/* TX BDs live in the first half of the on-chip control RAM */
+	txch->bd_mem = EMAC_TX_BD_MEM(priv);
+	rt_memset((void *)txch->bd_mem, 0, txch->alloc_size);
+
+	/* initialize the free-pool BD linked list */
+	mem = (void __iomem *)
+			(((rt_uint32_t) txch->bd_mem + 0xF) & ~0xF);
+	txch->bd_pool_head = RT_NULL;
+	for (cnt = 0; cnt < txch->num_bd; cnt++) {
+		curr_bd = mem + (cnt * bd_size);
+		curr_bd->next = txch->bd_pool_head;
+		txch->bd_pool_head = curr_bd;
+	}
+
+	/* reset statistics counters */
+	txch->out_of_tx_bd = 0;
+	txch->no_active_pkts = 0;
+	txch->active_queue_count = 0;
+
+	return 0;
+}
+
+/**
+ * emac_init_rxch: RX channel initialization
+ * @priv: The DaVinci EMAC driver private structure
+ * @ch: RX channel number
+ * @param: mac address for RX channel
+ *
+ * Called during device init to setup a RX channel (allocate buffers and
+ * buffer descriptors, create queue and keep ready for reception
+ *
+ * Returns success(0) or mem alloc failure error code (-RT_ENOMEM)
+ */
+static int emac_init_rxch(struct emac_priv *priv, rt_uint32_t ch, char *param)
+{
+	rt_uint32_t cnt, bd_size;
+	void __iomem *mem;
+	struct emac_rx_bd __iomem *curr_bd;
+	struct emac_rxch *rxch = RT_NULL;
+
+	rxch = rt_malloc(sizeof(struct emac_rxch));
+	if (RT_NULL == rxch) {
+		rt_kprintf("DaVinci EMAC: RX Ch mem alloc failed");
+		/* was -ENOMEM; use the RT-Thread error code consistently
+		 * with the other failure paths in this function/driver */
+		return -RT_ENOMEM;
+	}
+	rt_memset(rxch, 0, sizeof(struct emac_rxch));
+	priv->rxch[ch] = rxch;
+	rxch->buf_size = priv->rx_buf_size;
+	rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
+	rxch->queue_active = 0;
+	rxch->teardown_pending = 0;
+
+	/* save mac address */
+	for (cnt = 0; cnt < 6; cnt++)
+		rxch->mac_addr[cnt] = param[cnt];
+
+	/* allocate buffer descriptor pool align every BD on four word
+	 * boundry for future requirements */
+	bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
+	rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
+	rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
+	/* RX BDs live in the second half of the on-chip control RAM */
+	rxch->bd_mem = EMAC_RX_BD_MEM(priv);
+
+	rt_memset((void *)rxch->bd_mem, 0, rxch->alloc_size);
+	rxch->pkt_queue.buf_list = &rxch->buf_queue;
+
+	/* allocate RX buffer and initialize the BD linked list */
+	mem = (void __iomem *)
+			(((rt_uint32_t) rxch->bd_mem + 0xF) & ~0xF);
+	rxch->active_queue_head = RT_NULL;
+	rxch->active_queue_tail = mem;
+	for (cnt = 0; cnt < rxch->num_bd; cnt++) {
+		curr_bd = mem + (cnt * bd_size);
+		/* for future use the last parameter contains the BD ptr */
+		curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
+				    rxch->buf_size,
+				    (void **)&curr_bd->buf_token,
+				    EMAC_DEF_RX_CH);
+		if (curr_bd->data_ptr == RT_NULL) {
+			rt_kprintf("DaVinci EMAC: RX buf mem alloc " \
+				"failed for ch %d\n", ch);
+			/* clear the channel slot so no dangling pointer is
+			 * left behind.  NOTE(review): pbufs allocated in
+			 * earlier iterations are not released here -
+			 * confirm whether a teardown pass is required */
+			priv->rxch[ch] = RT_NULL;
+			rt_free(rxch);
+			return -RT_ENOMEM;
+		}
+
+		/* populate the hardware descriptor; the list is built in
+		 * reverse so the last BD initialized becomes the head */
+		curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head);
+		curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
+		curr_bd->off_b_len = rxch->buf_size;
+		curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
+
+		/* write back to hardware memory */
+		BD_CACHE_WRITEBACK_INVALIDATE((rt_uint32_t) curr_bd,
+					      EMAC_BD_LENGTH_FOR_CACHE);
+		curr_bd->next = rxch->active_queue_head;
+		rxch->active_queue_head = curr_bd;
+	}
+
+	/* At this point rxCppi->activeQueueHead points to the first
+	   RX BD ready to be given to RX HDP and rxch->active_queue_tail
+	   points to the last RX BD
+	 */
+	return 0;
+}
+
+
+/**
+ * emac_int_disable: Disable EMAC module interrupt
+ * @priv: The DaVinci EMAC driver private structure
+ *
+ * Disable EMAC interrupt (masks the C0 RX/TX interrupt enables on
+ * version-2 wrappers; clears EWCTL on DM644x-style wrappers)
+ *
+ */
+static void emac_int_disable(struct emac_priv *priv)
+{
+	if (priv->version == EMAC_VERSION_2) {
+		rt_base_t level;
+
+		/* mask CPU interrupts so the two wrapper writes appear
+		 * atomic.  The original code called rt_interrupt_enter()/
+		 * rt_interrupt_leave(), which only maintain the kernel's
+		 * ISR nesting counter and do not mask anything. */
+		level = rt_hw_interrupt_disable();
+
+		/* Program C0_Int_En to zero to turn off
+		* interrupts to the CPU */
+		emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
+		emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
+		/* NOTE: Rx Threshold and Misc interrupts are not disabled */
+
+		rt_hw_interrupt_enable(level);
+
+	} else {
+		/* Set DM644x control registers for interrupt control */
+		emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
+	}
+}
+
+
+/**
+ * emac_int_enable: Enable EMAC module interrupt
+ * @priv: The DaVinci EMAC driver private structure
+ *
+ * Enable EMAC interrupt
+ *
+ */
+static void emac_int_enable(struct emac_priv *priv)
+{
+	if (priv->version == EMAC_VERSION_2) {
+		/*if (priv->int_enable)
+			priv->int_enable();*/
+
+		/* enable all eight RX/TX channel interrupts at the wrapper */
+		emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
+		emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
+
+		/* In addition to turning on interrupt Enable, we need
+		 * ack by writing appropriate values to the EOI
+		 * register */
+
+		/* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+		/* ack rxen only then a new pulse will be generated */
+		emac_write(EMAC_DM646X_MACEOIVECTOR,
+			EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+		/* ack txen- only then a new pulse will be generated */
+		emac_write(EMAC_DM646X_MACEOIVECTOR,
+			EMAC_DM646X_MAC_EOI_C0_TXEN);
+
+	} else {
+		/* Set DM644x control registers for interrupt control */
+		emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
+	}
+}
+
+
+/**
+ * emac_irq: EMAC interrupt handler
+ * @irq: interrupt number
+ * @param: EMAC isr parameters (struct emac_priv *)
+ *
+ * Shared by all four EMAC interrupt lines: masks further EMAC
+ * interrupts and wakes the ethernet RX thread; the actual BD
+ * processing and interrupt re-enable happen in rt_davinci_emac_rx().
+ */
+static void emac_irq(int irq, void *param)
+{
+	struct emac_priv *priv = param;
+
+	++priv->isr_count;
+	emac_int_disable(priv);
+	eth_device_ready(&priv->parent);
+}
+
+
+
+/**
+ * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enables EMAC hardware for packet processing - enables PHY, enables RX
+ * for packet reception and enables device interrupts
+ *
+ * Returns success (0) or appropriate error code (none right now)
+ */
+static int emac_hw_enable(struct emac_priv *priv)
+{
+	rt_uint32_t ch, val, mbp_enable, mac_control;
+
+	/* Soft reset */
+	emac_write(EMAC_SOFTRESET, 1);
+	while (emac_read(EMAC_SOFTRESET));
+
+	/* Disable interrupt & Set pacing for more interrupts initially */
+	emac_int_disable(priv);
+
+	/* Full duplex enable bit set when auto negotiation happens */
+	mac_control =
+		(((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
+		((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
+		((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
+		((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
+	emac_write(EMAC_MACCONTROL, mac_control);
+
+	/* RX MBP (multicast/broadcast/promiscuous) configuration */
+	mbp_enable =
+		(((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
+		((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
+		 ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
+		 ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \
+			EMAC_RXMBP_PROMCH_SHIFT) |
+		 ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
+		 ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \
+			EMAC_RXMBP_BROADCH_SHIFT) |
+		 ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
+		 ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \
+			EMAC_RXMBP_MULTICH_SHIFT));
+	emac_write(EMAC_RXMBPENABLE, mbp_enable);
+	emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
+				   EMAC_RX_MAX_LEN_MASK));
+	emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
+					 EMAC_RX_BUFFER_OFFSET_MASK));
+	emac_write(EMAC_RXFILTERLOWTHRESH, 0);
+	emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
+	priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
+
+	val = emac_read(EMAC_TXCONTROL);
+	val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
+	emac_write(EMAC_TXCONTROL, val);
+	val = emac_read(EMAC_RXCONTROL);
+	val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
+	emac_write(EMAC_RXCONTROL, val);
+	emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
+
+	for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
+		emac_write(EMAC_TXHDP(ch), 0);
+		emac_write(EMAC_TXINTMASKSET, BIT(ch));
+	}
+	for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
+		struct emac_rxch *rxch = priv->rxch[ch];
+		emac_setmac(priv, ch, rxch->mac_addr);
+		emac_write(EMAC_RXINTMASKSET, BIT(ch));
+		rxch->queue_active = 1;
+		/* RXHDP must be loaded with a physical address: convert as
+		 * every other head/completion pointer write in this driver
+		 * does (the old code wrote the virtual pointer directly) */
+		emac_write(EMAC_RXHDP(ch),
+			   emac_virt_to_phys(rxch->active_queue_head));
+	}
+
+	/* Enable MII */
+	val = emac_read(EMAC_MACCONTROL);
+	val |= (EMAC_MACCONTROL_GMIIEN);
+	emac_write(EMAC_MACCONTROL, val);
+
+	/* Enable interrupts */
+	emac_int_enable(priv);
+	return 0;
+
+}
+
+/**
+ * emac_dev_getnetstats: EMAC get statistics function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to get statistics from the device.
+ *
+ * We return the statistics in net_device_stats structure pulled from emac
+ */
+static struct net_device_stats *emac_dev_getnetstats(struct emac_priv *priv)
+{
+	rt_uint32_t mac_control;
+	rt_uint32_t stats_clear_mask;
+
+	/* update emac hardware stats and reset the registers*/
+
+	mac_control = emac_read(EMAC_MACCONTROL);
+
+	/* NOTE(review): the clear mask is applied only while GMII is
+	 * enabled - presumably the statistics registers only accept the
+	 * write-to-clear value in that state; confirm against the EMAC
+	 * register documentation */
+	if (mac_control & EMAC_MACCONTROL_GMIIEN)
+		stats_clear_mask = EMAC_STATS_CLR_MASK;
+	else
+		stats_clear_mask = 0;
+
+	priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+	emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
+
+	priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+					   emac_read(EMAC_TXSINGLECOLL) +
+					   emac_read(EMAC_TXMULTICOLL));
+	emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+	emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+	emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
+
+	priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+						emac_read(EMAC_RXJABBER) +
+						emac_read(EMAC_RXUNDERSIZED));
+	emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+	emac_write(EMAC_RXJABBER, stats_clear_mask);
+	emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
+
+	priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+					       emac_read(EMAC_RXMOFOVERRUNS));
+	emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+	emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
+
+	priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+	emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
+
+	priv->net_dev_stats.tx_carrier_errors +=
+		emac_read(EMAC_TXCARRIERSENSE);
+	emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
+
+	/* note: assigned (not accumulated), matching the original code */
+	priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
+	emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+
+	return &priv->net_dev_stats;
+}
+
+
+/* RT-Thread Device Interface */
+/* initialize the interface */
+
+/**
+ * rt_davinci_emac_init: RT-Thread device init hook for the EMAC
+ * @dev: device handle (user_data holds struct emac_priv)
+ *
+ * Clears the hardware queues, sets up one TX and one RX channel,
+ * installs the four EMAC interrupt handlers, resets the MII and PHY,
+ * enables the hardware and starts the periodic link-status timer.
+ *
+ * Returns RT_EOK on success or the channel-init error code.
+ */
+static rt_err_t rt_davinci_emac_init(rt_device_t dev)
+{
+	struct emac_priv *priv = dev->user_data;
+	rt_uint32_t ch;
+	rt_err_t rc;
+
+	/* Configuration items */
+	priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
+
+	/* Clear basic hardware */
+	for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
+		emac_write(EMAC_TXHDP(ch), 0);
+		emac_write(EMAC_RXHDP(ch), 0);
+		emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
+		emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
+	}
+	priv->mac_hash1 = 0;
+	priv->mac_hash2 = 0;
+	emac_write(EMAC_MACHASH1, 0);
+	emac_write(EMAC_MACHASH2, 0);
+
+	/* multi ch not supported - open 1 TX, 1RX ch by default */
+	rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
+	if (RT_EOK != rc) {
+		rt_kprintf("DaVinci EMAC: emac_init_txch() failed");
+		return rc;
+	}
+	rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+	if (RT_EOK != rc) {
+		rt_kprintf("DaVinci EMAC: emac_init_rxch() failed");
+		return rc;
+	}
+
+	/* one shared handler for all four EMAC interrupt lines */
+	rt_hw_interrupt_install(IRQ_DM365_EMAC_RXPULSE, emac_irq,
+							(void *)priv, "EMAC_RXPULSE");
+	rt_hw_interrupt_umask(IRQ_DM365_EMAC_RXPULSE);
+	rt_hw_interrupt_install(IRQ_DM365_EMAC_TXPULSE, emac_irq,
+							(void *)priv, "EMAC_TXPULSE");
+	rt_hw_interrupt_umask(IRQ_DM365_EMAC_TXPULSE);
+	rt_hw_interrupt_install(IRQ_DM365_EMAC_RXTHRESH, emac_irq,
+							(void *)priv, "EMAC_RXTHRESH");
+	rt_hw_interrupt_umask(IRQ_DM365_EMAC_RXTHRESH);
+	rt_hw_interrupt_install(IRQ_DM365_EMAC_MISCPULSE, emac_irq,
+							(void *)priv, "EMAC_MISCPULSE");
+	rt_hw_interrupt_umask(IRQ_DM365_EMAC_MISCPULSE);
+
+	emac_mii_reset(priv);
+	davinci_emac_phy_init(dev);
+
+	/* Start/Enable EMAC hardware */
+	emac_hw_enable(priv);
+
+	/* poll link state once per second */
+	rt_timer_init(&priv->timer, "link_timer",
+		davinci_emac_update_link,
+		(void *)priv,
+		RT_TICK_PER_SECOND,
+		RT_TIMER_FLAG_PERIODIC);
+
+	rt_timer_start(&priv->timer);
+	rt_kprintf("davinci emac initialized\n");
+
+	return RT_EOK;
+}
+
+/* open: nothing to do - the device is fully started in init */
+static rt_err_t rt_davinci_emac_open(rt_device_t dev, rt_uint16_t oflag)
+{
+	return RT_EOK;
+}
+
+/* close: no shutdown path implemented; the EMAC keeps running */
+static rt_err_t rt_davinci_emac_close(rt_device_t dev)
+{
+	return RT_EOK;
+}
+
+/* read: not supported - packet I/O goes through the eth_rx/eth_tx hooks */
+static rt_size_t rt_davinci_emac_read(rt_device_t dev, rt_off_t pos, void* buffer, rt_size_t size)
+{
+	rt_set_errno(-RT_ENOSYS);
+	return 0;
+}
+
+/* write: not supported - packet I/O goes through the eth_rx/eth_tx hooks */
+static rt_size_t rt_davinci_emac_write (rt_device_t dev, rt_off_t pos, const void* buffer, rt_size_t size)
+{
+	rt_set_errno(-RT_ENOSYS);
+	return 0;
+}
+
+/**
+ * rt_davinci_emac_control: RT-Thread device control hook
+ * @dev: device handle (user_data holds struct emac_priv)
+ * @cmd: control command; only NIOCTL_GADDR (get MAC address) is handled
+ * @args: output buffer for the 6-byte MAC address
+ *
+ * Returns RT_EOK, or -RT_ERROR when NIOCTL_GADDR is passed a NULL buffer.
+ */
+static rt_err_t rt_davinci_emac_control(rt_device_t dev, rt_uint8_t cmd, void *args)
+{
+	struct emac_priv *priv = dev->user_data;
+	switch(cmd)
+	{
+	case NIOCTL_GADDR:
+		/* get mac address */
+		if(args) rt_memcpy(args, priv->mac_addr, 6);
+		else return -RT_ERROR;
+		break;
+
+	default :
+		break;
+	}
+
+	return RT_EOK;
+}
+
+/**
+ * dm365_emac_gpio_init: route EMAC interrupts on the DM365
+ *
+ * Sets ARM_INTMUX bits 14-17 so the four EMAC interrupt lines are
+ * selected instead of the GPIO interrupts they are multiplexed with.
+ */
+void dm365_emac_gpio_init(void)
+{
+	rt_uint32_t arm_intmux;
+
+	/*
+	 * EMAC interrupts are multiplexed with GPIO interrupts
+	 * Details are available at the DM365 ARM
+	 * Subsystem Users Guide(sprufg5.pdf) pages 133 - 134
+	 */
+	arm_intmux = davinci_readl(DM365_ARM_INTMUX);
+	arm_intmux |= (1 << 14)|(1 << 15)|(1 << 16)|(1 << 17);
+	davinci_writel(arm_intmux, DM365_ARM_INTMUX);
+}
+
+/**
+ * rt_hw_davinci_emac_init: create and register the "e0" ethernet device
+ *
+ * Powers up the CPGMAC domain, routes the EMAC interrupts, fills in the
+ * DM365 register bases in the driver private structure, creates the TX
+ * ack and lock semaphores, and registers the device with the RT-Thread
+ * ethernet layer.  The heavy initialization runs later from the
+ * rt_davinci_emac_init() device hook.
+ *
+ * NOTE(review): the MAC address and PHY address (0x09) are hard-coded
+ * here - confirm against the target board configuration.
+ */
+void rt_hw_davinci_emac_init()
+{
+	struct emac_priv *priv = &davinci_emac_device;
+	struct clk  *emac_clk;
+	emac_clk = clk_get("EMACCLK");
+	emac_bus_frequency = clk_get_rate(emac_clk);
+	/* power up the MAC before touching any of its registers */
+	psc_change_state(DAVINCI_DM365_LPSC_CPGMAC, PSC_ENABLE);
+	dm365_emac_gpio_init();
+	/* zero the private structure before filling in the fields below */
+	rt_memset(&davinci_emac_device, 0, sizeof(davinci_emac_device));
+	davinci_emac_device.emac_base = DM365_EMAC_CNTRL_BASE;
+	davinci_emac_device.ctrl_base = DM365_EMAC_WRAP_CNTRL_BASE;
+	davinci_emac_device.ctrl_ram_size = DM365_EMAC_CNTRL_RAM_SIZE;
+	davinci_emac_device.emac_ctrl_ram = DM365_EMAC_WRAP_RAM_BASE;
+	davinci_emac_device.mdio_base = DM365_EMAC_MDIO_BASE;
+	davinci_emac_device.version = EMAC_VERSION_2;
+	davinci_emac_device.rmii_en = 0;
+	davinci_emac_device.phy_addr = 0x09;
+	rt_sem_init(&sem_ack, "tx_ack", 0, RT_IPC_FLAG_FIFO);
+	rt_sem_init(&priv->tx_lock, "tx_lock", 1, RT_IPC_FLAG_FIFO);
+	rt_sem_init(&priv->rx_lock, "rx_lock", 1, RT_IPC_FLAG_FIFO);
+
+	davinci_emac_device.mac_addr[0] = 0x00;
+	davinci_emac_device.mac_addr[1] = 0x60;
+	davinci_emac_device.mac_addr[2] = 0x6E;
+	davinci_emac_device.mac_addr[3] = 0x11;
+	davinci_emac_device.mac_addr[4] = 0x22;
+	davinci_emac_device.mac_addr[5] = 0x33;
+	
+	davinci_emac_device.parent.parent.init       = rt_davinci_emac_init;
+	davinci_emac_device.parent.parent.open       = rt_davinci_emac_open;
+	davinci_emac_device.parent.parent.close      = rt_davinci_emac_close;
+	davinci_emac_device.parent.parent.read       = rt_davinci_emac_read;
+	davinci_emac_device.parent.parent.write      = rt_davinci_emac_write;
+	davinci_emac_device.parent.parent.control    = rt_davinci_emac_control;
+	davinci_emac_device.parent.parent.user_data  = &davinci_emac_device;
+
+	davinci_emac_device.parent.eth_rx     = rt_davinci_emac_rx;
+	davinci_emac_device.parent.eth_tx     = rt_davinci_emac_tx;
+
+	eth_device_init(&(davinci_emac_device.parent), "e0");
+	
+}
+
+
+#ifdef RT_USING_FINSH
+#include <finsh.h>
+
+/**
+ * dump_emac_stats: finsh command - print EMAC statistics for device "e0"
+ *
+ * Looks up the "e0" device, pulls the hardware counters into the
+ * net_device_stats structure via emac_dev_getnetstats() and prints
+ * every field.  Silently does nothing when the device is not found.
+ */
+void dump_emac_stats(void)
+{
+	struct emac_priv *emac;
+	struct net_device_stats *stats;
+	rt_device_t dev = rt_device_find("e0");
+
+	if(dev == RT_NULL)
+		return;
+
+	emac = (struct emac_priv *)dev->user_data;
+
+	stats = emac_dev_getnetstats(emac);
+	rt_kprintf("rx_packets      = %d\n"
+				"tx_packets      = %d\n"
+				"rx_bytes        = %d\n"
+				"tx_bytes        = %d\n"
+				"rx_errors       = %d\n"
+				"tx_errors       = %d\n"
+				"rx_dropped      = %d\n"
+				"tx_dropped      = %d\n"
+				"multicast       = %d\n"
+				"collisions      = %d\n",
+		stats->rx_packets,
+		stats->tx_packets,
+		stats->rx_bytes,
+		stats->tx_bytes,
+		stats->rx_errors,
+		stats->tx_errors,
+		stats->rx_dropped,
+		stats->tx_dropped,
+		stats->multicast,
+		stats->collisions);
+
+	rt_kprintf("rx_length_errors = %d\n"
+				"rx_over_errors   = %d\n"
+				"rx_crc_errors    = %d\n"
+				"rx_frame_errors  = %d\n"
+				"rx_fifo_errors   = %d\n"
+				"rx_missed_errors = %d\n",
+		stats->rx_length_errors,
+		stats->rx_over_errors,
+		stats->rx_crc_errors,
+		stats->rx_frame_errors,
+		stats->rx_fifo_errors,
+		stats->rx_missed_errors);
+
+	rt_kprintf("tx_aborted_errors   = %d\n"
+				"tx_carrier_errors   = %d\n"
+				"tx_fifo_errors      = %d\n"
+				"tx_heartbeat_errors = %d\n"
+				"tx_window_errors    = %d\n",
+		stats->tx_aborted_errors,
+		stats->tx_carrier_errors,
+		stats->tx_fifo_errors,
+		stats->tx_heartbeat_errors,
+		stats->tx_window_errors);
+
+	rt_kprintf("rx_compressed  = %d\n"
+				"tx_compressed  = %d\n",
+		stats->rx_compressed,
+		stats->tx_compressed);
+
+	rt_kprintf("\n");
+}
+
+FINSH_FUNCTION_EXPORT(dump_emac_stats, dump emac statistics);
+
+#ifdef FINSH_USING_MSH
+/* msh wrapper for dump_emac_stats (arguments are ignored) */
+int cmd_dump_emac_stats(int argc, char** argv)
+{
+	dump_emac_stats();
+	return 0;
+}
+FINSH_FUNCTION_EXPORT_ALIAS(cmd_dump_emac_stats, __cmd_dump_emac_stats, dump emac statistics.);
+#endif
+
+#endif

+ 467 - 0
bsp/dm365/drivers/davinci_emac.h

@@ -0,0 +1,467 @@
+/*
+ * File      : davinci_emac.h
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Develop Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rt-thread.org/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2013-01-30     weety      first version
+ */
+#ifndef _DAVINCI_EMAC_H
+#define _DAVINCI_EMAC_H
+#include <mii.h>
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+enum {
+	EMAC_VERSION_1,	/* DM644x */
+	EMAC_VERSION_2,	/* DM646x */
+};
+
+
+#define __iomem
+
+#define BIT(nr)			(1UL << (nr))
+
+
+/* Configuration items */
+#define EMAC_DEF_PASS_CRC		(0) /* Do not pass CRC upto frames */
+#define EMAC_DEF_QOS_EN			(0) /* EMAC proprietary QoS disabled */
+#define EMAC_DEF_NO_BUFF_CHAIN		(0) /* No buffer chain */
+#define EMAC_DEF_MACCTRL_FRAME_EN	(0) /* Discard Maccontrol frames */
+#define EMAC_DEF_SHORT_FRAME_EN		(0) /* Discard short frames */
+#define EMAC_DEF_ERROR_FRAME_EN		(0) /* Discard error frames */
+#define EMAC_DEF_PROM_EN		(0) /* Promiscous disabled */
+#define EMAC_DEF_PROM_CH		(0) /* Promiscous channel is 0 */
+#define EMAC_DEF_BCAST_EN		(1) /* Broadcast enabled */
+#define EMAC_DEF_BCAST_CH		(0) /* Broadcast channel is 0 */
+#define EMAC_DEF_MCAST_EN		(1) /* Multicast enabled */
+#define EMAC_DEF_MCAST_CH		(0) /* Multicast channel is 0 */
+
+#define EMAC_DEF_TXPRIO_FIXED		(1) /* TX Priority is fixed */
+#define EMAC_DEF_TXPACING_EN		(0) /* TX pacing NOT supported*/
+
+#define EMAC_DEF_BUFFER_OFFSET		(0) /* Buffer offset to DMA (future) */
+#define EMAC_DEF_MIN_ETHPKTSIZE		(60) /* Minimum ethernet pkt size */
+#define EMAC_DEF_MAX_FRAME_SIZE		(1500 + 14 + 4 + 4)
+#define EMAC_DEF_TX_CH			(0) /* Default 0th channel */
+#define EMAC_DEF_RX_CH			(0) /* Default 0th channel */
+#define EMAC_DEF_MDIO_TICK_MS		(10) /* typically 1 tick=1 ms) */
+#define EMAC_DEF_MAX_TX_CH		(1) /* Max TX channels configured */
+#define EMAC_DEF_MAX_RX_CH		(1) /* Max RX channels configured */
+#define EMAC_POLL_WEIGHT		(64) /* Default NAPI poll weight */
+
+/* Buffer descriptor parameters */
+#define EMAC_DEF_TX_MAX_SERVICE		(32) /* TX max service BD's */
+#define EMAC_DEF_RX_MAX_SERVICE		(64) /* should = netdev->weight */
+
+/* EMAC register related defines */
+#define EMAC_ALL_MULTI_REG_VALUE	(0xFFFFFFFF)
+#define EMAC_NUM_MULTICAST_BITS		(64)
+#define EMAC_TEARDOWN_VALUE		(0xFFFFFFFC)
+#define EMAC_TX_CONTROL_TX_ENABLE_VAL	(0x1)
+#define EMAC_RX_CONTROL_RX_ENABLE_VAL	(0x1)
+#define EMAC_MAC_HOST_ERR_INTMASK_VAL	(0x2)
+#define EMAC_RX_UNICAST_CLEAR_ALL	(0xFF)
+#define EMAC_INT_MASK_CLEAR		(0xFF)
+
+/* RX MBP register bit positions */
+#define EMAC_RXMBP_PASSCRC_MASK		BIT(30)
+#define EMAC_RXMBP_QOSEN_MASK		BIT(29)
+#define EMAC_RXMBP_NOCHAIN_MASK		BIT(28)
+#define EMAC_RXMBP_CMFEN_MASK		BIT(24)
+#define EMAC_RXMBP_CSFEN_MASK		BIT(23)
+#define EMAC_RXMBP_CEFEN_MASK		BIT(22)
+#define EMAC_RXMBP_CAFEN_MASK		BIT(21)
+#define EMAC_RXMBP_PROMCH_SHIFT		(16)
+#define EMAC_RXMBP_PROMCH_MASK		(0x7 << 16)
+#define EMAC_RXMBP_BROADEN_MASK		BIT(13)
+#define EMAC_RXMBP_BROADCH_SHIFT	(8)
+#define EMAC_RXMBP_BROADCH_MASK		(0x7 << 8)
+#define EMAC_RXMBP_MULTIEN_MASK		BIT(5)
+#define EMAC_RXMBP_MULTICH_SHIFT	(0)
+#define EMAC_RXMBP_MULTICH_MASK		(0x7)
+#define EMAC_RXMBP_CHMASK		(0x7)
+
+/* EMAC register definitions/bit maps used */
+# define EMAC_MBP_RXPROMISC		(0x00200000)
+# define EMAC_MBP_PROMISCCH(ch)		(((ch) & 0x7) << 16)
+# define EMAC_MBP_RXBCAST		(0x00002000)
+# define EMAC_MBP_BCASTCHAN(ch)		(((ch) & 0x7) << 8)
+# define EMAC_MBP_RXMCAST		(0x00000020)
+# define EMAC_MBP_MCASTCHAN(ch)		((ch) & 0x7)
+
+/* EMAC mac_control register */
+#define EMAC_MACCONTROL_TXPTYPE		BIT(9)
+#define EMAC_MACCONTROL_TXPACEEN	BIT(6)
+#define EMAC_MACCONTROL_GMIIEN		BIT(5)
+#define EMAC_MACCONTROL_GIGABITEN	BIT(7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN	BIT(0)
+#define EMAC_MACCONTROL_RMIISPEED_MASK	BIT(15)
+
+/* GIGABIT MODE related bits */
+#define EMAC_DM646X_MACCONTORL_GIG	BIT(7)
+#define EMAC_DM646X_MACCONTORL_GIGFORCE	BIT(17)
+
+/* EMAC mac_status register */
+#define EMAC_MACSTATUS_TXERRCODE_MASK	(0xF00000)
+#define EMAC_MACSTATUS_TXERRCODE_SHIFT	(20)
+#define EMAC_MACSTATUS_TXERRCH_MASK	(0x7)
+#define EMAC_MACSTATUS_TXERRCH_SHIFT	(16)
+#define EMAC_MACSTATUS_RXERRCODE_MASK	(0xF000)
+#define EMAC_MACSTATUS_RXERRCODE_SHIFT	(12)
+#define EMAC_MACSTATUS_RXERRCH_MASK	(0x7)
+#define EMAC_MACSTATUS_RXERRCH_SHIFT	(8)
+
+/* EMAC RX register masks */
+#define EMAC_RX_MAX_LEN_MASK		(0xFFFF)
+#define EMAC_RX_BUFFER_OFFSET_MASK	(0xFFFF)
+
+/* MAC_IN_VECTOR (0x180) register bit fields */
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT	BIT(17)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT	BIT(16)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC	BIT(8)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC	BIT(0)
+
+/** NOTE:: For DM646x the IN_VECTOR has changed */
+#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC	BIT(EMAC_DEF_RX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC	BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT	BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT	BIT(27)
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT		BIT(31)
+#define EMAC_CPPI_EOP_BIT		BIT(30)
+#define EMAC_CPPI_OWNERSHIP_BIT		BIT(29)
+#define EMAC_CPPI_EOQ_BIT		BIT(28)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27)
+#define EMAC_CPPI_PASS_CRC_BIT		BIT(26)
+#define EMAC_RX_BD_BUF_SIZE		(0xFFFF)
+#define EMAC_BD_LENGTH_FOR_CACHE	(16) /* only CPPI bytes */
+#define EMAC_RX_BD_PKT_LENGTH_MASK	(0xFFFF)
+
+/* Max hardware defines */
+#define EMAC_MAX_TXRX_CHANNELS		 (8)  /* Max hardware channels */
+#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
+
+/* EMAC Peripheral Device Register Memory Layout structure */
+#define EMAC_TXIDVER		0x0
+#define EMAC_TXCONTROL		0x4
+#define EMAC_TXTEARDOWN		0x8
+#define EMAC_RXIDVER		0x10
+#define EMAC_RXCONTROL		0x14
+#define EMAC_RXTEARDOWN		0x18
+#define EMAC_TXINTSTATRAW	0x80
+#define EMAC_TXINTSTATMASKED	0x84
+#define EMAC_TXINTMASKSET	0x88
+#define EMAC_TXINTMASKCLEAR	0x8C
+#define EMAC_MACINVECTOR	0x90
+
+#define EMAC_DM646X_MACEOIVECTOR	0x94
+
+#define EMAC_RXINTSTATRAW	0xA0
+#define EMAC_RXINTSTATMASKED	0xA4
+#define EMAC_RXINTMASKSET	0xA8
+#define EMAC_RXINTMASKCLEAR	0xAC
+#define EMAC_MACINTSTATRAW	0xB0
+#define EMAC_MACINTSTATMASKED	0xB4
+#define EMAC_MACINTMASKSET	0xB8
+#define EMAC_MACINTMASKCLEAR	0xBC
+
+#define EMAC_RXMBPENABLE	0x100
+#define EMAC_RXUNICASTSET	0x104
+#define EMAC_RXUNICASTCLEAR	0x108
+#define EMAC_RXMAXLEN		0x10C
+#define EMAC_RXBUFFEROFFSET	0x110
+#define EMAC_RXFILTERLOWTHRESH	0x114
+
+#define EMAC_MACCONTROL		0x160
+#define EMAC_MACSTATUS		0x164
+#define EMAC_EMCONTROL		0x168
+#define EMAC_FIFOCONTROL	0x16C
+#define EMAC_MACCONFIG		0x170
+#define EMAC_SOFTRESET		0x174
+#define EMAC_MACSRCADDRLO	0x1D0
+#define EMAC_MACSRCADDRHI	0x1D4
+#define EMAC_MACHASH1		0x1D8
+#define EMAC_MACHASH2		0x1DC
+#define EMAC_MACADDRLO		0x500
+#define EMAC_MACADDRHI		0x504
+#define EMAC_MACINDEX		0x508
+
+/* EMAC HDP and Completion registors */
+#define EMAC_TXHDP(ch)		(0x600 + (ch * 4))
+#define EMAC_RXHDP(ch)		(0x620 + (ch * 4))
+#define EMAC_TXCP(ch)		(0x640 + (ch * 4))
+#define EMAC_RXCP(ch)		(0x660 + (ch * 4))
+
+/* EMAC statistics registers */
+#define EMAC_RXGOODFRAMES	0x200
+#define EMAC_RXBCASTFRAMES	0x204
+#define EMAC_RXMCASTFRAMES	0x208
+#define EMAC_RXPAUSEFRAMES	0x20C
+#define EMAC_RXCRCERRORS	0x210
+#define EMAC_RXALIGNCODEERRORS	0x214
+#define EMAC_RXOVERSIZED	0x218
+#define EMAC_RXJABBER		0x21C
+#define EMAC_RXUNDERSIZED	0x220
+#define EMAC_RXFRAGMENTS	0x224
+#define EMAC_RXFILTERED		0x228
+#define EMAC_RXQOSFILTERED	0x22C
+#define EMAC_RXOCTETS		0x230
+#define EMAC_TXGOODFRAMES	0x234
+#define EMAC_TXBCASTFRAMES	0x238
+#define EMAC_TXMCASTFRAMES	0x23C
+#define EMAC_TXPAUSEFRAMES	0x240
+#define EMAC_TXDEFERRED		0x244
+#define EMAC_TXCOLLISION	0x248
+#define EMAC_TXSINGLECOLL	0x24C
+#define EMAC_TXMULTICOLL	0x250
+#define EMAC_TXEXCESSIVECOLL	0x254
+#define EMAC_TXLATECOLL		0x258
+#define EMAC_TXUNDERRUN		0x25C
+#define EMAC_TXCARRIERSENSE	0x260
+#define EMAC_TXOCTETS		0x264
+#define EMAC_NETOCTETS		0x280
+#define EMAC_RXSOFOVERRUNS	0x284
+#define EMAC_RXMOFOVERRUNS	0x288
+#define EMAC_RXDMAOVERRUNS	0x28C
+
+/* EMAC DM644x control registers */
+#define EMAC_CTRL_EWCTL		(0x4)
+#define EMAC_CTRL_EWINTTCNT	(0x8)
+
+/* EMAC MDIO related */
+/* Mask & Control defines */
+#define MDIO_CONTROL_CLKDIV	(0xFF)
+#define MDIO_CONTROL_ENABLE	BIT(30)
+#define MDIO_USERACCESS_GO	BIT(31)
+#define MDIO_USERACCESS_WRITE	BIT(30)
+#define MDIO_USERACCESS_READ	(0)
+#define MDIO_USERACCESS_REGADR	(0x1F << 21)
+#define MDIO_USERACCESS_PHYADR	(0x1F << 16)
+#define MDIO_USERACCESS_DATA	(0xFFFF)
+#define MDIO_USERPHYSEL_LINKSEL	BIT(7)
+#define MDIO_VER_MODID		(0xFFFF << 16)
+#define MDIO_VER_REVMAJ		(0xFF   << 8)
+#define MDIO_VER_REVMIN		(0xFF)
+
+#define MDIO_USERACCESS(inst)	(0x80 + (inst * 8))
+#define MDIO_USERPHYSEL(inst)	(0x84 + (inst * 8))
+#define MDIO_CONTROL		(0x04)
+
+/* EMAC DM646X control module registers */
+#define EMAC_DM646X_CMRXINTEN	(0x14)
+#define EMAC_DM646X_CMTXINTEN	(0x18)
+
+/* EMAC EOI codes for C0 */
+#define EMAC_DM646X_MAC_EOI_C0_RXEN	(0x01)
+#define EMAC_DM646X_MAC_EOI_C0_TXEN	(0x02)
+
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK    (0xFFFFFFFF)
+
+/** net_buf_obj: EMAC network bufferdata structure
+ *
+ * EMAC network buffer data structure
+ */
+struct emac_netbufobj {
+	void *buf_token;
+	char *data_ptr;
+	int length;
+};
+
+/** net_pkt_obj: EMAC network packet data structure
+ *
+ * EMAC network packet data structure - supports buffer list (for future)
+ */
+struct emac_netpktobj {
+	void *pkt_token; /* data token may hold tx/rx chan id */
+	struct emac_netbufobj *buf_list; /* array of network buffer objects */
+	int num_bufs;
+	int pkt_length;
+};
+
+/** emac_tx_bd: EMAC TX Buffer descriptor data structure
+ *
+ * EMAC TX Buffer descriptor data structure
+ */
+struct emac_tx_bd {
+	/* first four words appear to form the CPPI descriptor consumed by
+	 * the EMAC DMA (see EMAC_CPPI_* bit defines for 'mode') - confirm
+	 * against the driver's descriptor setup code */
+	int h_next;
+	int buff_ptr;
+	int off_b_len;
+	int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */
+	struct emac_tx_bd __iomem *next;	/* software-side chain pointer */
+	void *buf_token;			/* software-side buffer handle */
+};
+
+/** emac_txch: EMAC TX Channel data structure
+ *
+ * EMAC TX Channel data structure
+ */
+struct emac_txch {
+	/* Config related */
+	rt_uint32_t num_bd;
+	rt_uint32_t service_max;
+
+	/* CPPI specific */
+	rt_uint32_t alloc_size;
+	void __iomem *bd_mem;
+	struct emac_tx_bd __iomem *bd_pool_head;
+	struct emac_tx_bd __iomem *active_queue_head;
+	struct emac_tx_bd __iomem *active_queue_tail;
+	struct emac_tx_bd __iomem *last_hw_bdprocessed;
+	rt_uint32_t queue_active;
+	rt_uint32_t teardown_pending;
+	rt_uint32_t *tx_complete;
+
+	/** statistics */
+	rt_uint32_t proc_count;     /* TX: # of times emac_tx_bdproc is called */
+	rt_uint32_t mis_queued_packets;
+	rt_uint32_t queue_reinit;
+	rt_uint32_t end_of_queue_add;
+	rt_uint32_t out_of_tx_bd;
+	rt_uint32_t no_active_pkts; /* IRQ when there were no packets to process */
+	rt_uint32_t active_queue_count;
+};
+
+/** emac_rx_bd: EMAC RX Buffer descriptor data structure
+ *
+ * EMAC RX Buffer descriptor data structure
+ */
+struct emac_rx_bd {
+	/* first four words appear to form the CPPI descriptor consumed by
+	 * the EMAC DMA (see EMAC_CPPI_* bit defines for 'mode') - confirm
+	 * against the driver's descriptor setup code */
+	int h_next;
+	int buff_ptr;
+	int off_b_len;
+	int mode;
+	struct emac_rx_bd __iomem *next;	/* software-side chain pointer */
+	void *data_ptr;				/* software-side data pointer */
+	void *buf_token;			/* software-side buffer handle */
+};
+
+/** emac_rxch: EMAC RX Channel data structure
+ *
+ * EMAC RX Channel data structure
+ */
+struct emac_rxch {
+	/* configuration info */
+	rt_uint32_t num_bd;
+	rt_uint32_t service_max;
+	rt_uint32_t buf_size;
+	char mac_addr[6];
+
+	/** CPPI specific */
+	rt_uint32_t alloc_size;
+	void __iomem *bd_mem;
+	struct emac_rx_bd __iomem *bd_pool_head;
+	struct emac_rx_bd __iomem *active_queue_head;
+	struct emac_rx_bd __iomem *active_queue_tail;
+	rt_uint32_t queue_active;
+	rt_uint32_t teardown_pending;
+
+	/* packet and buffer objects */
+	struct emac_netpktobj pkt_queue;
+	struct emac_netbufobj buf_queue;
+
+	/** statistics */
+	rt_uint32_t proc_count; /* number of times emac_rx_bdproc is called */
+	rt_uint32_t processed_bd;
+	rt_uint32_t recycled_bd;
+	rt_uint32_t out_of_rx_bd;
+	rt_uint32_t out_of_rx_buffers;
+	rt_uint32_t queue_reinit;
+	rt_uint32_t end_of_queue_add;
+	rt_uint32_t end_of_queue;
+	rt_uint32_t mis_queued_packets;
+};
+
+struct net_device_stats
+{
+	unsigned long	rx_packets;		/* total packets received	*/
+	unsigned long	tx_packets;		/* total packets transmitted	*/
+	unsigned long	rx_bytes;		/* total bytes received 	*/
+	unsigned long	tx_bytes;		/* total bytes transmitted	*/
+	unsigned long	rx_errors;		/* bad packets received		*/
+	unsigned long	tx_errors;		/* packet transmit problems	*/
+	unsigned long	rx_dropped;		/* no space in linux buffers	*/
+	unsigned long	tx_dropped;		/* no space available in linux	*/
+	unsigned long	multicast;		/* multicast packets received	*/
+	unsigned long	collisions;
+
+	/* detailed rx_errors: */
+	unsigned long	rx_length_errors;
+	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
+	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
+	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
+	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
+	unsigned long	rx_missed_errors;	/* receiver missed packet	*/
+
+	/* detailed tx_errors */
+	unsigned long	tx_aborted_errors;
+	unsigned long	tx_carrier_errors;
+	unsigned long	tx_fifo_errors;
+	unsigned long	tx_heartbeat_errors;
+	unsigned long	tx_window_errors;
+	
+	/* for cslip etc */
+	unsigned long	rx_compressed;
+	unsigned long	tx_compressed;
+};
+
+
+/* emac_priv: EMAC private data structure
+ *
+ * EMAC adapter private data structure
+ */
+#define MAX_ADDR_LEN 6
+
+struct emac_priv {
+	/* inherit from ethernet device */
+	struct eth_device parent;
+
+	/* interface address info. */
+	rt_uint8_t  mac_addr[MAX_ADDR_LEN];		/* hw address	*/
+	unsigned short		phy_addr;
+	
+	struct rt_semaphore tx_lock;
+	struct rt_semaphore rx_lock;
+	void __iomem *remap_addr;
+	rt_uint32_t emac_base_phys;
+	void __iomem *emac_base;
+	void __iomem *ctrl_base;
+	void __iomem *emac_ctrl_ram;
+	void __iomem *mdio_base;
+	rt_uint32_t ctrl_ram_size;
+	rt_uint32_t hw_ram_addr;
+	struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
+	struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
+	rt_uint32_t link; /* 1=link on, 0=link off */
+	rt_uint32_t speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
+	rt_uint32_t duplex; /* Link duplex: 0=Half, 1=Full */
+	rt_uint32_t rx_buf_size;
+	rt_uint32_t isr_count;
+	rt_uint8_t rmii_en;
+	rt_uint8_t version;
+	struct net_device_stats net_dev_stats;
+	rt_uint32_t mac_hash1;
+	rt_uint32_t mac_hash2;
+	rt_uint32_t multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+	rt_uint32_t rx_addr_type;
+	/* periodic timer required for MDIO polling */
+	struct rt_timer  timer;
+	rt_uint32_t periodic_ticks;
+	rt_uint32_t timer_active;
+	rt_uint32_t phy_mask;
+	/* mii_bus,phy members */
+	struct rt_semaphore lock;
+};
+
+
+#endif /* _DAVINCI_EMAC_H */
+

+ 268 - 0
bsp/dm365/drivers/davinci_serial.c

@@ -0,0 +1,268 @@
+#include <rtthread.h>
+#include <rthw.h>
+#include <dm36x.h>
+#include <rtdevice.h>
+
+static struct rt_serial_device davinci_serial_dev0;
+
+static struct rt_serial_device davinci_serial_dev1;
+
+
+#define LSR_DR		0x01		/* Data ready */
+#define LSR_THRE	0x20		/* Xmit holding register empty */
+//#define	USTAT_TXB_EMPTY		0x02   	/* tx buffer empty */
+#define BPS					115200	/* serial baudrate */
+
+/* Register layout of a DaVinci UART module (32-bit register stride).
+ * rbr doubles as THR and fcr as IIR via the '#define thr rbr' /
+ * '#define iir fcr' aliases below.
+ * NOTE(review): dll/dlh sit at their own offsets (0x20/0x24) here,
+ * yet the init code also toggles LCR bit 7 (DLAB) 16550-style -
+ * confirm against the DM36x TRM that both access paths are valid. */
+typedef struct uartport
+{
+	volatile rt_uint32_t rbr;	/* 0x00 RX buffer (read) / TX holding (write) */
+	volatile rt_uint32_t ier;	/* 0x04 interrupt enable */
+	volatile rt_uint32_t fcr;	/* 0x08 FIFO control (write) / intr id (read) */
+	volatile rt_uint32_t lcr;	/* 0x0C line control */
+	volatile rt_uint32_t mcr;	/* 0x10 modem control */
+	volatile rt_uint32_t lsr;	/* 0x14 line status */
+	volatile rt_uint32_t msr;	/* 0x18 modem status */
+	volatile rt_uint32_t scr;	/* 0x1C scratchpad */
+	volatile rt_uint32_t dll;	/* 0x20 divisor latch LSB */
+	volatile rt_uint32_t dlh;	/* 0x24 divisor latch MSB */
+	
+	volatile rt_uint32_t res[2];		/* 0x28-0x2C reserved */
+	volatile rt_uint32_t pwremu_mgmt;	/* 0x30 power/emulation management */
+	volatile rt_uint32_t mdr;		/* 0x34 mode (over-sampling select) */
+}uartport;
+
+#define thr rbr
+#define iir fcr
+
+#define UART0	((struct uartport *)DAVINCI_UART0_BASE)
+
+#define UART1	((struct uartport *)DM365_UART1_BASE)
+
+
+/**
+ * This function will handle serial
+ */
+/* Shared UART interrupt handler. 'param' is the rt_serial_device that
+ * was registered with rt_hw_interrupt_install(); the RX event is simply
+ * forwarded to the RT-Thread serial framework. */
+void rt_davinci_serial_handler(int vector, void *param)
+{
+	rt_hw_serial_isr((struct rt_serial_device *)param, RT_SERIAL_EVENT_RX_IND);
+}
+
+/**
+* UART device in RT-Thread
+*/
+/* Serial ops hook: line-configuration requests are accepted but ignored;
+ * the hardware is fully programmed in davinci_uart0_init()/uart1_init().
+ * NOTE(review): runtime baud-rate/format changes therefore have no effect. */
+static rt_err_t davinci_uart_configure(struct rt_serial_device *serial,
+                                struct serial_configure *cfg)
+{
+    return RT_EOK;
+}
+
+/* Serial ops hook: mask/unmask the UART's RX interrupt line.
+ * The UART instance is identified by comparing the user_data register
+ * base against UART0/UART1. Unknown commands fall through and return
+ * RT_EOK unchanged. */
+static rt_err_t davinci_uart_control(struct rt_serial_device *serial,
+                              int cmd, void *arg)
+{
+	uartport *uart = serial->parent.user_data;
+
+    switch (cmd)
+    {
+    case RT_DEVICE_CTRL_CLR_INT:
+        /* disable rx irq */
+		if (uart == UART0)
+			rt_hw_interrupt_mask(IRQ_UARTINT0);
+		else if (uart == UART1)
+			rt_hw_interrupt_mask(IRQ_UARTINT1);
+        break;
+    case RT_DEVICE_CTRL_SET_INT:
+        /* enable rx irq */
+		if (uart == UART0)
+			rt_hw_interrupt_umask(IRQ_UARTINT0);
+		else if (uart == UART1)
+			rt_hw_interrupt_umask(IRQ_UARTINT1);
+        break;
+    }
+
+    return RT_EOK;
+}
+
+/* Serial ops hook: blocking single-character transmit.
+ * Busy-waits until the transmit holding register is empty (LSR_THRE),
+ * then writes the character. Returns 1 (characters written).
+ * Fix: removed the unused local 'level'. */
+static int davinci_uart_putc(struct rt_serial_device *serial, char c)
+{
+	uartport *uart = serial->parent.user_data;
+
+	/* polled transmit: spin until the THR is empty */
+	while (!(uart->lsr & LSR_THRE));
+	uart->thr = c;
+
+	return 1;
+}
+
+/* Serial ops hook: non-blocking single-character receive.
+ * Returns the received byte when LSR reports data ready, else -1. */
+static int davinci_uart_getc(struct rt_serial_device *serial)
+{
+	uartport *uart = serial->parent.user_data;
+
+	if (!(uart->lsr & LSR_DR))
+		return -1;
+
+	return uart->rbr & 0xff;
+}
+
+static const struct rt_uart_ops davinci_uart_ops =
+{
+    davinci_uart_configure,
+    davinci_uart_control,
+    davinci_uart_putc,
+    davinci_uart_getc,
+};
+
+/* Initialize UART0: program the divisor for BPS baud, 8 data bits /
+ * no parity / 1 stop bit, enable the FIFOs, and install the RX
+ * interrupt handler (left masked until the serial core enables it via
+ * RT_DEVICE_CTRL_SET_INT).
+ * Fix: baud rate now comes from the BPS macro (same 115200 value)
+ * instead of being hard-coded twice.
+ * NOTE(review): assumes a 24 MHz UART input clock - confirm for board. */
+void davinci_uart0_init(void)
+{
+	rt_uint32_t divisor;
+
+	/* divisor = clk / (16 * baud), rounded to the nearest integer */
+	divisor = (24000000 + (BPS * (16 / 2))) / (16 * BPS);
+	UART0->ier = 0;			/* mask UART interrupts while configuring */
+	UART0->lcr = 0x83;		/* DLAB set, 8N1 */
+	UART0->dll = 0;
+	UART0->dlh = 0;
+	UART0->lcr = 0x03;		/* DLAB clear, 8N1 */
+	UART0->mcr = 0x03;		/* RTS,CTS */
+	UART0->fcr = 0x07;		/* enable and reset both FIFOs */
+	UART0->lcr = 0x83;		/* DLAB set to load the divisor */
+	UART0->dll = divisor & 0xff;
+	UART0->dlh = (divisor >> 8) & 0xff;
+	UART0->lcr = 0x03;		/* back to normal operation */
+	UART0->mdr = 0;			/* 16x over-sampling */
+	UART0->pwremu_mgmt = 0x6000;	/* release TX/RX - confirm bits vs TRM */
+	rt_hw_interrupt_install(IRQ_UARTINT0, rt_davinci_serial_handler, 
+							(void *)&davinci_serial_dev0, "UART0");
+	rt_hw_interrupt_mask(IRQ_UARTINT0);
+	UART0->ier = 0x05;		/* enable RX-data and line-status interrupts */
+}
+
+/* Route UART1 signals through the pin multiplexer and set up the
+ * RS-485 direction-control GPIO.
+ * NOTE(review): register values follow the inline comments (gio23 =
+ * RS485_CTRL, gio25 = UART1_TXD, gio34 = UART1_RXD) - confirm against
+ * the DM365 PINMUX3/PINMUX4 documentation. */
+void davinci_uart_gpio_init()
+{
+	rt_uint32_t val;
+
+	val = davinci_readl(PINMUX3);
+	val &= 0xf3ffffff; /* gio23 RS485_CTRL */
+	val |= 0x60000000; /*UART1_TXD (gio25)*/
+	davinci_writel(val, PINMUX3);
+	val = davinci_readl(PINMUX4);
+	val |= 0x0000c000; /* UART1_RXD (gio34) */
+	davinci_writel(val, PINMUX4);
+
+	/* make gio23 an output (+0x10 = bank-0 direction register) and
+	 * drive it low (+0x1C = bank-0 clear-data register, per gpio.c) */
+	val = davinci_readl(DAVINCI_GPIO_BASE + 0x10);
+	val &= ~(1 << 23);
+	davinci_writel(val, DAVINCI_GPIO_BASE + 0x10);
+	davinci_writel((1<<23), DAVINCI_GPIO_BASE + 0x1C);
+}
+
+/* Initialize UART1 (RS-485 port): configure the pin mux, power the
+ * module up through the PSC, derive the divisor from the actual module
+ * clock, and program 9600 baud, 8 data bits, 2 stop bits (LCR 0x87).
+ * The RX interrupt handler is installed but left masked until the
+ * serial core enables it via RT_DEVICE_CTRL_SET_INT. */
+void davinci_uart1_init(void)
+{
+	rt_uint32_t divisor;
+	rt_uint32_t freq;
+	rt_uint32_t baudrate;
+	struct clk *clk;
+
+	davinci_uart_gpio_init();
+	psc_change_state(DAVINCI_DM365_LPSC_UART1, PSC_ENABLE);
+	clk = clk_get("UART1");
+	freq = clk_get_rate(clk);
+
+	baudrate = 9600;
+	/* divisor = clk / (16 * baud), rounded to the nearest integer */
+	divisor = (freq + (baudrate * (16 / 2))) / (16 * baudrate);
+	UART1->ier = 0;
+	UART1->lcr = 0x87; //8N2, 0x83 8N1
+	UART1->dll = 0;
+	UART1->dlh = 0;
+	UART1->lcr = 0x07;
+	UART1->mcr = 0x03; //RTS,CTS
+	UART1->fcr = 0x07; //FIFO
+	UART1->lcr = 0x87;
+	UART1->dll = divisor & 0xff;
+	UART1->dlh = (divisor >> 8) & 0xff;
+	UART1->lcr = 0x07;
+	UART1->mdr = 0; //16x over-sampling
+	UART1->pwremu_mgmt = 0x6000;
+	
+	rt_hw_interrupt_install(IRQ_UARTINT1, rt_davinci_serial_handler, 
+							(void *)&davinci_serial_dev1, "UART1");
+	rt_hw_interrupt_mask(IRQ_UARTINT1);
+	UART1->ier = 0x05;
+}
+
+
+/**
+ * This function will handle init uart
+ */
+/* Register both UARTs with the RT-Thread serial framework and program
+ * the hardware.
+ * NOTE(review): the advertised config for uart1 (115200, 1 stop bit)
+ * does not match what davinci_uart1_init() programs (9600, 2 stop
+ * bits); since davinci_uart_configure() is a no-op the config struct
+ * is informational only - consider aligning it with the hardware. */
+void rt_hw_uart_init(void)
+{
+	davinci_serial_dev0.ops = &davinci_uart_ops;
+    //davinci_serial_dev0.config = RT_SERIAL_CONFIG_DEFAULT;
+	davinci_serial_dev0.config.baud_rate = BAUD_RATE_115200;
+    davinci_serial_dev0.config.bit_order = BIT_ORDER_LSB;
+    davinci_serial_dev0.config.data_bits = DATA_BITS_8;
+    davinci_serial_dev0.config.parity = PARITY_NONE;
+    davinci_serial_dev0.config.stop_bits = STOP_BITS_1;
+    davinci_serial_dev0.config.invert = NRZ_NORMAL;
+	davinci_serial_dev0.config.bufsz = RT_SERIAL_RB_BUFSZ;
+
+    /* register uart0 device */
+    rt_hw_serial_register(&davinci_serial_dev0, "uart0",
+                          RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX | RT_DEVICE_FLAG_STREAM,
+                          UART0);
+	davinci_uart0_init();
+
+	davinci_serial_dev1.ops = &davinci_uart_ops;
+    //davinci_serial_dev1.config = RT_SERIAL_CONFIG_DEFAULT;
+	davinci_serial_dev1.config.baud_rate = BAUD_RATE_115200;
+    davinci_serial_dev1.config.bit_order = BIT_ORDER_LSB;
+    davinci_serial_dev1.config.data_bits = DATA_BITS_8;
+    davinci_serial_dev1.config.parity = PARITY_NONE;
+    davinci_serial_dev1.config.stop_bits = STOP_BITS_1;
+    davinci_serial_dev1.config.invert = NRZ_NORMAL;
+	davinci_serial_dev1.config.bufsz = RT_SERIAL_RB_BUFSZ;
+
+    /* register uart1 device */
+    rt_hw_serial_register(&davinci_serial_dev1, "uart1",
+                          RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX | RT_DEVICE_FLAG_STREAM,
+                          UART1);
+	davinci_uart1_init();
+
+}
+
+#include <fcntl.h>
+/* Debug helper (finsh-exported): continuously read single bytes from
+ * /dev/uart1 (the RS-485 port) and print them in hex. Never returns
+ * once the port is opened.
+ * Fix: on open() failure the original fell through and called read()
+ * on a negative descriptor forever; now it returns immediately. */
+void read_485(void)
+{
+	char value;
+	int ret;
+	int uart1_fd = open("/dev/uart1", O_RDWR, 0777);
+
+	if (uart1_fd < 0)
+	{
+		rt_kprintf("open uart1 error\n");
+		return;
+	}
+
+	while (1)
+	{
+		ret = read(uart1_fd, &value, 1);
+		if (ret == 1)
+		{
+			rt_kprintf("0x%02x\n", value);
+		}
+	}
+	/* not reached; kept in case the loop ever gains an exit condition */
+	close(uart1_fd);
+}
+
+#ifdef RT_USING_FINSH
+#include <finsh.h>
+FINSH_FUNCTION_EXPORT(read_485, read 485 data);
+
+#endif
+
+
+

+ 152 - 0
bsp/dm365/drivers/gpio.c

@@ -0,0 +1,152 @@
+#include <rtthread.h>
+#include "gpio.h"
+
+#define GPIO0_BASE			(DAVINCI_GPIO_BASE + 0x10)
+#define GPIO1_BASE			(DAVINCI_GPIO_BASE + 0x38)
+#define GPIO2_BASE			(DAVINCI_GPIO_BASE + 0x60)
+#define GPIO3_BASE			(DAVINCI_GPIO_BASE + 0x88)
+
+
+static unsigned int dm365_gpio_base = (unsigned int)GPIO0_BASE;
+
+#define GPIO_OE			(dm365_gpio_base + 0x00)
+#define GPIO_DATAIN		(dm365_gpio_base + 0x10)
+#define GPIO_DATAOUT		(dm365_gpio_base + 0x04)
+#define GPIO_CLROUT		(dm365_gpio_base + 0x0C)
+#define GPIO_SETOUT		(dm365_gpio_base + 0x08)
+
+#define gpio_dirin(n)		*(volatile unsigned int *)((GPIO_OE)) |= 1<<(n)
+#define gpio_dirout(n)	*(volatile unsigned int *)((GPIO_OE)) &= ~(1u<<(n))
+#define gpio_set(n)		*(volatile unsigned int *)((GPIO_SETOUT)) = 1<<(n)
+#define gpio_clr(n)		*(volatile unsigned int *)((GPIO_CLROUT)) = 1<<(n)
+#define gpio_get(n)		( ( *(volatile unsigned int *)((GPIO_DATAIN)) & (1<<(n)) ) ? 1 : 0 )
+
+ #define GPIO_GRP_MASK (5)
+ 
+/* Select the register bank for the group containing 'gpio' by updating
+ * the file-scope dm365_gpio_base that the gpio_* macros dereference.
+ * Returns 0 on success, or -RT_EIO when the group index is out of
+ * range (in which case dm365_gpio_base is left unchanged). */
+static int gpio_to_base(unsigned int gpio)
+{
+	/* GPIO_GRP_MASK is a shift count: 32 pins per bank */
+	switch (gpio >> GPIO_GRP_MASK) {
+	case 0:
+		dm365_gpio_base = (unsigned int)GPIO0_BASE;
+		return 0;
+	case 1:
+		dm365_gpio_base = (unsigned int)GPIO1_BASE;
+		return 0;
+	case 2:
+		dm365_gpio_base = (unsigned int)GPIO2_BASE;
+		return 0;
+	case 3:
+		dm365_gpio_base = (unsigned int)GPIO3_BASE;
+		return 0;
+	default:
+		return -RT_EIO;
+	}
+}
+
+
+/* Configure pin 'gpio' (global numbering, 32 pins per bank) as input.
+ * Returns 0 on success or -RT_EIO for an invalid pin number.
+ * Interrupts are disabled around the sequence because gpio_to_base()
+ * parks the selected bank in the shared dm365_gpio_base global. */
+int gpio_direction_input(unsigned int gpio)
+{
+	unsigned int offset;
+	int ret=0;
+
+	rt_ubase_t temp = rt_hw_interrupt_disable();
+	ret = gpio_to_base(gpio); 
+	if (ret < 0) {
+		goto gpio_free;
+	}
+	offset =  gpio & ((1 << GPIO_GRP_MASK) -1); /* pin index within bank */
+
+	gpio_dirin(offset);
+
+gpio_free:
+	rt_hw_interrupt_enable(temp);
+
+	return ret;
+}
+
+/* Configure pin 'gpio' as output, pre-loading its level to 'value'
+ * (non-zero = high) before switching direction so the pin never
+ * glitches to the wrong state. Returns 0 or -RT_EIO on a bad pin.
+ * Runs with interrupts disabled (shared dm365_gpio_base global). */
+int gpio_direction_output(unsigned int gpio, int value)
+{
+	unsigned int offset;
+	int ret=0;
+
+	rt_ubase_t temp = rt_hw_interrupt_disable();
+	ret = gpio_to_base(gpio); 
+	if (ret < 0) {
+		goto gpio_free;
+	}
+
+	offset =  gpio & ((1 << GPIO_GRP_MASK) -1); /* pin index within bank */
+
+	if (value) {
+		gpio_set(offset);
+	}
+	else {
+		gpio_clr(offset);
+	}
+
+	gpio_dirout(offset);
+
+gpio_free:
+	rt_hw_interrupt_enable(temp);
+
+	return ret;
+}
+
+/* Drive output pin 'gpio' high (value non-zero) or low via the bank's
+ * set/clear data registers. Returns 0 or -RT_EIO on a bad pin number.
+ * Runs with interrupts disabled (shared dm365_gpio_base global). */
+int  gpio_set_value(unsigned int gpio, int value)
+{
+	unsigned int offset;
+	int ret=0;
+
+	rt_ubase_t temp = rt_hw_interrupt_disable();
+	ret = gpio_to_base(gpio); 
+	if (ret < 0) {
+		goto gpio_free;
+	}
+
+	offset =  gpio & ((1 << GPIO_GRP_MASK) -1); /* pin index within bank */
+
+	if (value) {
+		gpio_set(offset);
+	}
+	else {
+		gpio_clr(offset);
+	}
+
+gpio_free:
+	rt_hw_interrupt_enable(temp);
+
+	return ret;
+}
+
+/* Read the input level of pin 'gpio'. Returns 1 (high) or 0 (low) on
+ * success, or -RT_EIO for an invalid pin number - callers should treat
+ * negative results as errors. Runs with interrupts disabled. */
+int gpio_get_value(unsigned int gpio)
+{
+	unsigned int offset;
+	int ret=0;
+
+	rt_ubase_t temp = rt_hw_interrupt_disable();
+	ret = gpio_to_base(gpio); 
+	if (ret < 0) {
+		goto gpio_free;
+	}
+
+	offset =  gpio & ((1 << GPIO_GRP_MASK) -1); /* pin index within bank */
+	ret = gpio_get(offset);
+
+gpio_free:
+	rt_hw_interrupt_enable(temp);
+
+	return ret;
+}
+
+

+ 61 - 0
bsp/dm365/drivers/gpio.h

@@ -0,0 +1,61 @@
+/*
+ * TI DaVinci GPIO Support
+ *
+ * Copyright (c) 2006 David Brownell
+ * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef	__DM365_GPIO_H
+#define	__DM365_GPIO_H
+#include <dm36x.h>
+
+#define	GPIO(X)		(X)		
+
+
+#define get_io(r)			*((volatile u_int *)(TI81XX_L4_SLOW_IO_ADDRESS(r)))
+#define set_io(r,v)		*((volatile u_int *)(TI81XX_L4_SLOW_IO_ADDRESS(r))) = (v)
+#define and_io(r,v)		*((volatile u_int *)(TI81XX_L4_SLOW_IO_ADDRESS(r))) &= (v)
+#define or_io(r,v)			*((volatile u_int *)(TI81XX_L4_SLOW_IO_ADDRESS(r))) |= (v)
+
+#define v_get_io(r)			*((volatile u_int *)(r))
+#define v_set_io(r,v)		*((volatile u_int *)(r)) = (v)
+#define v_and_io(r,v)		*((volatile u_int *)(r)) &= (v)
+#define v_or_io(r,v)			*((volatile u_int *)(r)) |= (v)
+
+enum gpio_intr_mode
+{
+    LEVELDETECT_LOW = 0,
+    LEVELDETECT_HIGH,
+    RISINGDETECT,  
+    FALLINGDETECT,
+    EDGEDETECT	//both rising-edge and falling-edge detect
+};
+
+enum gpio_intr_req
+{
+    INTR_REQ_A = 0,
+    INTR_REQ_B
+};
+
+enum gpio_intr_num
+{
+	GPIOINT0A = 96,
+	GPIOINT0B,
+	GPIOINT1A,
+	GPIOINT1B, 
+};
+
+enum pin_func_mod
+{
+    GPIO_MOD = 0x80,
+    SPI_MOD =0x01,
+    VP_MOD=0x04,
+    IIC_MOD=0x20
+};
+
+#endif				/* __DM365_GPIO_H */

+ 626 - 0
bsp/dm365/drivers/i2c-davinci.c

@@ -0,0 +1,626 @@
+#include <rtthread.h>
+#include <drivers/i2c.h>
+#include <dm36x.h>
+
+/* ----- global defines ----------------------------------------------- */
+#define BIT(nr)			(1UL << (nr))
+
+#define DAVINCI_I2C_TIMEOUT	(1*RT_TICK_PER_SECOND)
+#define DAVINCI_I2C_MAX_TRIES	2
+#define I2C_DAVINCI_INTR_ALL    (DAVINCI_I2C_IMR_AAS | \
+				 DAVINCI_I2C_IMR_SCD | \
+				 DAVINCI_I2C_IMR_ARDY | \
+				 DAVINCI_I2C_IMR_NACK | \
+				 DAVINCI_I2C_IMR_AL)
+
+#define DAVINCI_I2C_OAR_REG	0x00
+#define DAVINCI_I2C_IMR_REG	0x04
+#define DAVINCI_I2C_STR_REG	0x08
+#define DAVINCI_I2C_CLKL_REG	0x0c
+#define DAVINCI_I2C_CLKH_REG	0x10
+#define DAVINCI_I2C_CNT_REG	0x14
+#define DAVINCI_I2C_DRR_REG	0x18
+#define DAVINCI_I2C_SAR_REG	0x1c
+#define DAVINCI_I2C_DXR_REG	0x20
+#define DAVINCI_I2C_MDR_REG	0x24
+#define DAVINCI_I2C_IVR_REG	0x28
+#define DAVINCI_I2C_EMDR_REG	0x2c
+#define DAVINCI_I2C_PSC_REG	0x30
+
+#define DAVINCI_I2C_IVR_AAS	0x07
+#define DAVINCI_I2C_IVR_SCD	0x06
+#define DAVINCI_I2C_IVR_XRDY	0x05
+#define DAVINCI_I2C_IVR_RDR	0x04
+#define DAVINCI_I2C_IVR_ARDY	0x03
+#define DAVINCI_I2C_IVR_NACK	0x02
+#define DAVINCI_I2C_IVR_AL	0x01
+
+#define DAVINCI_I2C_STR_BB	BIT(12)
+#define DAVINCI_I2C_STR_RSFULL	BIT(11)
+#define DAVINCI_I2C_STR_SCD	BIT(5)
+#define DAVINCI_I2C_STR_ARDY	BIT(2)
+#define DAVINCI_I2C_STR_NACK	BIT(1)
+#define DAVINCI_I2C_STR_AL	BIT(0)
+
+#define DAVINCI_I2C_MDR_NACK	BIT(15)
+#define DAVINCI_I2C_MDR_STT	BIT(13)
+#define DAVINCI_I2C_MDR_STP	BIT(11)
+#define DAVINCI_I2C_MDR_MST	BIT(10)
+#define DAVINCI_I2C_MDR_TRX	BIT(9)
+#define DAVINCI_I2C_MDR_XA	BIT(8)
+#define DAVINCI_I2C_MDR_RM	BIT(7)
+#define DAVINCI_I2C_MDR_IRS	BIT(5)
+
+#define DAVINCI_I2C_IMR_AAS	BIT(6)
+#define DAVINCI_I2C_IMR_SCD	BIT(5)
+#define DAVINCI_I2C_IMR_XRDY	BIT(4)
+#define DAVINCI_I2C_IMR_RRDY	BIT(3)
+#define DAVINCI_I2C_IMR_ARDY	BIT(2)
+#define DAVINCI_I2C_IMR_NACK	BIT(1)
+#define DAVINCI_I2C_IMR_AL	BIT(0)
+
+#ifdef RT_EDMA_DEBUG
+#define i2c_dbg(fmt, ...)  rt_kprintf(fmt, ##__VA_ARGS__)
+#else
+#define i2c_dbg(fmt, ...)
+#endif
+
+
/* Per-controller driver state, shared between the transfer path and the ISR. */
struct davinci_i2c_dev {
	void		*base;		/* controller MMIO base address */
	struct rt_semaphore  completion; /* released by the ISR when a message ends */
	struct clk  *clk;		/* module functional clock ("I2CCLK") */
	int         cmd_err;		/* DAVINCI_I2C_STR_* error bits set by the ISR */
	rt_uint8_t  *buf;		/* current position in the message buffer */
	rt_uint32_t      buf_len;	/* bytes still to transfer */
	int         irq;		/* controller interrupt number */
	int         stop;		/* nonzero: issue STOP after current message */
	rt_uint8_t  terminate;		/* set when the driver aborts a transfer */
	rt_uint32_t bus_freq;		/* requested SCL rate, in kHz */
	rt_uint32_t bus_delay;		/* delay before each message, in us */
	struct rt_i2c_bus_device *bus;	/* back-pointer to the registered rt_i2c bus */
};
+
/* Write the 16-bit value @val to the controller register at byte offset @reg. */
static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
					 int reg, rt_uint16_t val)
{
	davinci_writew(val, i2c_dev->base + reg);
}

/* Read the 16-bit controller register at byte offset @reg. */
static inline rt_uint16_t davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
{
	return davinci_readw(i2c_dev->base + reg);
}
+
+static void udelay (rt_uint32_t us)
+{
+	rt_int32_t i;
+	for (; us > 0; us--)
+	{
+		i = 50000;
+		while(i > 0)
+		{
+			i--;
+		}
+	}
+}
+
+
+#if 0
+/* Generate a pulse on the i2c clock pin. */
+static void generic_i2c_clock_pulse(unsigned int scl_pin)
+{
+	rt_uint16_t i;
+
+	if (scl_pin) {
+		/* Send high and low on the SCL line */
+		for (i = 0; i < 9; i++) {
+			gpio_set_value(scl_pin, 0);
+			udelay(20);
+			gpio_set_value(scl_pin, 1);
+			udelay(20);
+		}
+	}
+}
+#endif
+
+/* This routine does i2c bus recovery as specified in the
+ * i2c protocol Rev. 03 section 3.16 titled "Bus clear"
+ */
+static void i2c_recover_bus(struct davinci_i2c_dev *dev)
+{
+	rt_uint32_t flag = 0;
+
+	i2c_dbg("initiating i2c bus recovery\n");
+	/* Send NACK to the slave */
+	flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+	flag |=  DAVINCI_I2C_MDR_NACK;
+	/* write the data into mode register */
+	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+#if 0
+	if (pdata)
+		generic_i2c_clock_pulse(pdata->scl_pin);
+#endif
+	/* Send STOP */
+	flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+	flag |= DAVINCI_I2C_MDR_STP;
+	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+}
+
+static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
+								int val)
+{
+	rt_uint16_t w;
+
+	w = davinci_i2c_read_reg(i2c_dev, DAVINCI_I2C_MDR_REG);
+	if (!val)	/* put I2C into reset */
+		w &= ~DAVINCI_I2C_MDR_IRS;
+	else		/* take I2C out of reset */
+		w |= DAVINCI_I2C_MDR_IRS;
+
+	davinci_i2c_write_reg(i2c_dev, DAVINCI_I2C_MDR_REG, w);
+}
+
/*
 * i2c_davinci_calc_clk_dividers - program PSC/CLKH/CLKL so the module
 * clock falls in the 7-12 MHz window and SCL runs at dev->bus_freq kHz.
 */
static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
{
	rt_uint16_t psc;
	rt_uint32_t clk;
	rt_uint32_t d;
	rt_uint32_t clkh;
	rt_uint32_t clkl;
	rt_uint32_t input_clock = clk_get_rate(dev->clk);

	/* NOTE: I2C Clock divider programming info
	 * As per I2C specs the following formulas provide prescaler
	 * and low/high divider values
	 * input clk --> PSC Div -----------> ICCL/H Div --> output clock
	 *                       module clk
	 *
	 * output clk = module clk / (PSC + 1) [ (ICCL + d) + (ICCH + d) ]
	 *
	 * Thus,
	 * (ICCL + ICCH) = clk = (input clk / ((psc +1) * output clk)) - 2d;
	 *
	 * where if PSC == 0, d = 7,
	 *       if PSC == 1, d = 6
	 *       if PSC > 1 , d = 5
	 */

	/* get minimum of 7 MHz clock, but max of 12 MHz */
	psc = (input_clock / 7000000) - 1;
	if ((input_clock / (psc + 1)) > 12000000)
		psc++;	/* better to run under spec than over */
	d = (psc >= 2) ? 5 : 7 - psc;

	/* split the cycle budget roughly evenly between SCL high and low */
	clk = ((input_clock / (psc + 1)) / (dev->bus_freq * 1000)) - (d << 1);
	clkh = clk >> 1;
	clkl = clk - clkh;

	davinci_i2c_write_reg(dev, DAVINCI_I2C_PSC_REG, psc);
	davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh);
	davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl);

	i2c_dbg("input_clock = %d, CLK = %d\n", input_clock, clk);
}
+
/*
 * This function configures I2C and brings I2C out of reset.
 * This function is called during I2C init function. This function
 * also gets called if I2C encounters any errors.
 *
 * Sequence: hold the controller in reset, program the clock dividers
 * and own-address register, release reset, then unmask all interrupts
 * except the data-ready ones (those are enabled per transfer).
 * Always returns 0.
 */
static int i2c_davinci_init(struct davinci_i2c_dev *dev)
{
	/* put I2C into reset */
	davinci_i2c_reset_ctrl(dev, 0);

	/* compute clock dividers */
	i2c_davinci_calc_clk_dividers(dev);

	/* Respond at reserved "SMBus Host" slave address" (and zero);
	 * we seem to have no option to not respond...
	 */
	davinci_i2c_write_reg(dev, DAVINCI_I2C_OAR_REG, 0x08);

	i2c_dbg("PSC  = %d\n",
		davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG));
	i2c_dbg("CLKL = %d\n",
		davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKL_REG));
	i2c_dbg("CLKH = %d\n",
		davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKH_REG));
	i2c_dbg("bus_freq = %dkHz, bus_delay = %d\n",
		dev->bus_freq, dev->bus_delay);

	/* Take the I2C module out of reset: */
	davinci_i2c_reset_ctrl(dev, 1);

	/* Enable interrupts */
	davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, I2C_DAVINCI_INTR_ALL);

	return 0;
}
+
/*
 * Waiting for bus not busy
 *
 * Polls STR.BB until the bus is free or bus->timeout ticks elapse.
 * While the deadline has not passed it repeatedly attempts bus
 * recovery + controller reinit; once DAVINCI_I2C_MAX_TRIES consecutive
 * timeouts have accumulated, recovery is no longer attempted.
 * Returns 0 when the bus is free, -RT_ETIMEOUT otherwise.
 *
 * NOTE(review): to_cnt is function-static, so the retry budget is
 * shared across all controller instances and callers — confirm this
 * is intended.
 */
static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev,
					 char allow_sleep)
{
	unsigned long timeout;
	static rt_uint16_t to_cnt;
	RT_ASSERT(dev != RT_NULL);
	RT_ASSERT(dev->bus != RT_NULL);

	timeout = rt_tick_get() + dev->bus->timeout;
	while (davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG)
	       & DAVINCI_I2C_STR_BB) {
		if (to_cnt <= DAVINCI_I2C_MAX_TRIES) {
			if (rt_tick_get() >= timeout) {
				rt_kprintf("timeout waiting for bus ready\n");
				to_cnt++;
				return -RT_ETIMEOUT;
			} else {
				to_cnt = 0;
				i2c_recover_bus(dev);
				i2c_davinci_init(dev);
			}
		}
		if (allow_sleep)
			rt_thread_delay(2);	/* yield while polling */
	}

	return 0;
}
+
/*
 * Low level master read/write transaction. This function is called
 * from i2c_davinci_xfer.
 *
 * Programs one rt_i2c_msg into the controller, starts it, and blocks
 * on dev->completion until the ISR signals the end of the message or
 * bus->timeout expires.  @stop requests a STOP condition after the
 * message.  Returns msg->len on success or a negative rt_err_t.
 *
 * The MDR/DXR write ordering below is deliberate — do not reorder.
 */
static int
i2c_davinci_xfer_msg(struct rt_i2c_bus_device *bus, struct rt_i2c_msg *msg, int stop)
{
	struct davinci_i2c_dev *dev = bus->priv;
	rt_uint32_t flag;
	rt_uint16_t w;
	int r;

	/* Introduce a delay, required for some boards (e.g Davinci EVM) */
	if (dev->bus_delay)
		udelay(dev->bus_delay);

	/* set the slave address */
	davinci_i2c_write_reg(dev, DAVINCI_I2C_SAR_REG, msg->addr);

	/* transfer bookkeeping consumed by the ISR */
	dev->buf = msg->buf;
	dev->buf_len = msg->len;
	dev->stop = stop;

	davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len);

	//INIT_COMPLETION(dev->cmd_complete);
	dev->cmd_err = 0;

	/* Take I2C out of reset and configure it as master */
	flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST;

	/* if the slave address is ten bit address, enable XA bit */
	if (msg->flags & RT_I2C_ADDR_10BIT)
		flag |= DAVINCI_I2C_MDR_XA;
	if (!(msg->flags & RT_I2C_RD))
		flag |= DAVINCI_I2C_MDR_TRX;
	if (msg->len == 0)
		flag |= DAVINCI_I2C_MDR_RM;	/* zero-length: repeat mode */

	/* Enable receive or transmit interrupts */
	w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
	if (msg->flags & RT_I2C_RD)
		w |= DAVINCI_I2C_IMR_RRDY;
	else
		w |= DAVINCI_I2C_IMR_XRDY;
	davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w);

	dev->terminate = 0;

	/*
	 * Write mode register first as needed for correct behaviour
	 * on OMAP-L138, but don't set STT yet to avoid a race with XRDY
	 * occurring before we have loaded DXR
	 */
	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);

	/*
	 * First byte should be set here, not after interrupt,
	 * because transmit-data-ready interrupt can come before
	 * NACK-interrupt during sending of previous message and
	 * ICDXR may have wrong data
	 * It also saves us one interrupt, slightly faster
	 */
	if ((!(msg->flags & RT_I2C_RD)) && dev->buf_len)
	{
		davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
		dev->buf_len--;
	}

	/* Set STT to begin transmit now DXR is loaded */
	flag |= DAVINCI_I2C_MDR_STT;
	if (stop && msg->len != 0)
		flag |= DAVINCI_I2C_MDR_STP;
	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);

	/* sleep until the ISR releases the completion semaphore */
	r = rt_sem_take(&dev->completion, dev->bus->timeout);
	if (r == -RT_ETIMEOUT)
	{
		rt_kprintf("controller timed out\n");
		i2c_recover_bus(dev);
		i2c_davinci_init(dev);
		dev->buf_len = 0;
		return -RT_ETIMEOUT;
	}
	if (dev->buf_len)
	{
		/* This should be 0 if all bytes were transferred
		 * or dev->cmd_err denotes an error.
		 * A signal may have aborted the transfer.
		 */
		if (r == RT_EOK)
		{
			rt_kprintf("abnormal termination buf_len=%i\n",
				dev->buf_len);
			r = -RT_EIO;
		}
		dev->terminate = 1;
		dev->buf_len = 0;
	}
	if (r < 0)
		return r;

	/* no error */
	if (!dev->cmd_err)
		return msg->len;

	/* We have an error */
	if (dev->cmd_err & DAVINCI_I2C_STR_AL)
	{
		/* arbitration lost: reinit the controller and report I/O error */
		i2c_davinci_init(dev);
		return -RT_EIO;
	}

	if (dev->cmd_err & DAVINCI_I2C_STR_NACK)
	{
		if (msg->flags & RT_I2C_IGNORE_NACK)
			return msg->len;
		if (stop)
		{
			/* make sure a STOP still goes out after the NACK */
			w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
			w |= DAVINCI_I2C_MDR_STP;
			davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
		}
		return -RT_EIO;
	}
	return -RT_EIO;
}
+
+/*
+ * Prepare controller for a transaction and call i2c_davinci_xfer_msg
+ */
+static int
+i2c_davinci_xfer(struct rt_i2c_bus_device *bus, struct rt_i2c_msg msgs[], int num)
+{
+	struct davinci_i2c_dev *dev = bus->priv;
+	int i;
+	int ret;
+
+	i2c_dbg("%s: msgs: %d\n", __func__, num);
+
+	ret = i2c_davinci_wait_bus_not_busy(dev, 1);
+	if (ret < 0)
+	{
+		i2c_dbg("timeout waiting for bus ready\n");
+		return ret;
+	}
+
+	for (i = 0; i < num; i++)
+	{
+		ret = i2c_davinci_xfer_msg(bus, &msgs[i], (i == (num - 1)));
+		i2c_dbg("%s [%d/%d] ret: %d\n", __func__, i + 1, num,
+			ret);
+		if (ret < 0)
+			return ret;
+	}
+
+
+	return num;
+}
+
+
+static void terminate_read(struct davinci_i2c_dev *dev)
+{
+	rt_uint16_t w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+	w |= DAVINCI_I2C_MDR_NACK;
+	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
+
+	/* Throw away data */
+	davinci_i2c_read_reg(dev, DAVINCI_I2C_DRR_REG);
+	if (!dev->terminate)
+		rt_kprintf("RDR IRQ while no data requested\n");
+}
+static void terminate_write(struct davinci_i2c_dev *dev)
+{
+	rt_uint16_t w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+	w |= DAVINCI_I2C_MDR_RM | DAVINCI_I2C_MDR_STP;
+	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
+
+	if (!dev->terminate)
+		i2c_dbg("TDR IRQ while no data to send\n");
+}
+
/*
 * Interrupt service routine. This gets called whenever an I2C interrupt
 * occurs.
 *
 * Drains IVR until it reads 0, dispatching each pending event.  The
 * buffer bookkeeping in @param (struct davinci_i2c_dev) is shared with
 * i2c_davinci_xfer_msg(), which sleeps on dev->completion until this
 * handler releases it.  Bails out after 100 events in one invocation.
 */
static void i2c_davinci_isr(int irq, void *param)
{
	struct davinci_i2c_dev *dev = (struct davinci_i2c_dev *)param;
	rt_uint32_t stat;
	int count = 0;
	rt_uint16_t w;

	while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) {
		i2c_dbg("%s: stat=0x%x\n", __func__, stat);
		if (count++ == 100) {
			rt_kprintf("Too much work in one IRQ\n");
			break;
		}

		switch (stat) {
		case DAVINCI_I2C_IVR_AL:
			/* Arbitration lost, must retry */
			dev->cmd_err |= DAVINCI_I2C_STR_AL;
			dev->buf_len = 0;
			rt_sem_release(&dev->completion);
			break;

		case DAVINCI_I2C_IVR_NACK:
			/* slave did not acknowledge — end the transfer */
			dev->cmd_err |= DAVINCI_I2C_STR_NACK;
			dev->buf_len = 0;
			rt_sem_release(&dev->completion);
			break;

		case DAVINCI_I2C_IVR_ARDY:
			/* registers ready for access; ack the status bit */
			davinci_i2c_write_reg(dev,
				DAVINCI_I2C_STR_REG, DAVINCI_I2C_STR_ARDY);
			if (((dev->buf_len == 0) && (dev->stop != 0)) ||
			    (dev->cmd_err & DAVINCI_I2C_STR_NACK)) {
				w = davinci_i2c_read_reg(dev,
							 DAVINCI_I2C_MDR_REG);
				w |= DAVINCI_I2C_MDR_STP;
				davinci_i2c_write_reg(dev,
						      DAVINCI_I2C_MDR_REG, w);
			}
			rt_sem_release(&dev->completion);
			break;

		case DAVINCI_I2C_IVR_RDR:
			/* receive data ready */
			if (dev->buf_len) {
				*dev->buf++ =
				    davinci_i2c_read_reg(dev,
							 DAVINCI_I2C_DRR_REG);
				dev->buf_len--;
				if (dev->buf_len)
					continue;

				davinci_i2c_write_reg(dev,
					DAVINCI_I2C_STR_REG,
					DAVINCI_I2C_IMR_RRDY);
			} else {
				/* signal can terminate transfer */
				terminate_read(dev);
			}
			break;

		case DAVINCI_I2C_IVR_XRDY:
			/* transmit data ready */
			if (dev->buf_len) {
				davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG,
						      *dev->buf++);
				dev->buf_len--;
				if (dev->buf_len)
					continue;

				/* last byte queued: mask further XRDY */
				w = davinci_i2c_read_reg(dev,
							 DAVINCI_I2C_IMR_REG);
				w &= ~DAVINCI_I2C_IMR_XRDY;
				davinci_i2c_write_reg(dev,
						      DAVINCI_I2C_IMR_REG,
						      w);
			} else {
				/* signal can terminate transfer */
				terminate_write(dev);
			}
			break;

		case DAVINCI_I2C_IVR_SCD:
			/* stop condition detected: transfer fully complete */
			davinci_i2c_write_reg(dev,
				DAVINCI_I2C_STR_REG, DAVINCI_I2C_STR_SCD);
			rt_sem_release(&dev->completion);
			break;

		case DAVINCI_I2C_IVR_AAS:
			i2c_dbg("Address as slave interrupt\n");
			break;

		default:
			i2c_dbg("Unrecognized irq stat %d\n", stat);
			break;
		}
	}

}
+
+
+
/* rt_i2c bus operations: only master transfers are implemented. */
static struct rt_i2c_bus_device_ops bus_ops = {
	.master_xfer	= i2c_davinci_xfer,
};
+
+int davinci_i2c_init(char *bus_name)
+{
+	struct rt_i2c_bus_device *bus;
+	struct davinci_i2c_dev *dev;
+	int r;
+
+	bus = rt_malloc(sizeof(struct rt_i2c_bus_device));
+	if (bus == RT_NULL)
+	{
+		rt_kprintf("rt_malloc failed\n");
+		return -RT_ENOMEM;
+	}
+	
+	rt_memset((void *)bus, 0, sizeof(struct rt_i2c_bus_device));
+
+	bus->ops = &bus_ops;
+	bus->timeout = DAVINCI_I2C_TIMEOUT;
+
+	dev = rt_malloc(sizeof(struct davinci_i2c_dev));
+	if (!dev) 
+	{
+		r = -RT_ENOMEM;
+		goto err;
+	}
+
+	rt_memset((void *)dev, 0, sizeof(struct davinci_i2c_dev));
+
+	rt_sem_init(&dev->completion, "i2c_ack", 0, RT_IPC_FLAG_FIFO);
+
+	dev->irq = IRQ_I2C;
+
+	dev->clk = clk_get("I2CCLK");
+	if (dev->clk == RT_NULL) {
+		r = -RT_ERROR;
+		goto err1;
+	}
+
+	psc_change_state(DAVINCI_DM365_LPSC_I2C, 3);
+
+	dev->base = DAVINCI_I2C_BASE;
+	dev->bus_freq = 100;
+	dev->bus_delay = 0;
+	dev->bus = bus;
+
+	bus->priv = dev;
+
+	i2c_davinci_init(dev);
+
+	rt_hw_interrupt_install(dev->irq, i2c_davinci_isr, (void *)dev, "I2C");
+	rt_hw_interrupt_umask(dev->irq);
+
+	return rt_i2c_bus_device_register(bus, bus_name);
+
+err1:
+	rt_free(dev);
+
+err:
+	rt_free(bus);
+
+	return r;
+}
+
+

+ 188 - 0
bsp/dm365/drivers/mii.h

@@ -0,0 +1,188 @@
+/*
+ * File      : mii.h
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Develop Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rt-thread.org/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2011-03-18     weety      first version
+ */
+
+#ifndef __MII_H__
+#define __MII_H__
+
+/* Generic MII registers. */
+
+#define MII_BMCR            0x00        /* Basic mode control register */
+#define MII_BMSR            0x01        /* Basic mode status register  */
+#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
+#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
+#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
+#define MII_LPA             0x05        /* Link partner ability reg    */
+#define MII_EXPANSION       0x06        /* Expansion register          */
+#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
+#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
+#define MII_ESTATUS	    0x0f	/* Extended Status */
+#define MII_DCOUNTER        0x12        /* Disconnect counter          */
+#define MII_FCSCOUNTER      0x13        /* False carrier counter       */
+#define MII_NWAYTEST        0x14        /* N-way auto-neg test reg     */
+#define MII_RERRCOUNTER     0x15        /* Receive error counter       */
+#define MII_SREVISION       0x16        /* Silicon revision            */
+#define MII_RESV1           0x17        /* Reserved...                 */
+#define MII_LBRERROR        0x18        /* Lpback, rx, bypass error    */
+#define MII_PHYADDR         0x19        /* PHY address                 */
+#define MII_RESV2           0x1a        /* Reserved...                 */
+#define MII_TPISTATUS       0x1b        /* TPI status for 10mbps       */
+#define MII_NCONFIG         0x1c        /* Network interface config    */
+
+/* Basic mode control register. */
+#define BMCR_RESV               0x003f  /* Unused...                   */
+#define BMCR_SPEED1000		0x0040  /* MSB of Speed (1000)         */
+#define BMCR_CTST               0x0080  /* Collision test              */
+#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
+#define BMCR_ANRESTART          0x0200  /* Auto negotiation restart    */
+#define BMCR_ISOLATE            0x0400  /* Disconnect DP83840 from MII */
+#define BMCR_PDOWN              0x0800  /* Powerdown the DP83840       */
+#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
+#define BMCR_SPEED100           0x2000  /* Select 100Mbps              */
+#define BMCR_LOOPBACK           0x4000  /* TXD loopback bits           */
+#define BMCR_RESET              0x8000  /* Reset the DP83840           */
+
+/* Basic mode status register. */
+#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
+#define BMSR_JCD                0x0002  /* Jabber detected             */
+#define BMSR_LSTATUS            0x0004  /* Link status                 */
+#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
+#define BMSR_RFAULT             0x0010  /* Remote fault detected       */
+#define BMSR_ANEGCOMPLETE       0x0020  /* Auto-negotiation complete   */
+#define BMSR_RESV               0x00c0  /* Unused...                   */
+#define BMSR_ESTATEN		0x0100	/* Extended Status in R15 */
+#define BMSR_100HALF2           0x0200  /* Can do 100BASE-T2 HDX */
+#define BMSR_100FULL2           0x0400  /* Can do 100BASE-T2 FDX */
+#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
+#define BMSR_100BASE4           0x8000  /* Can do 100mbps, 4k packets  */
+
+/* Advertisement control register. */
+#define ADVERTISE_SLCT          0x001f  /* Selector bits               */
+#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
+#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_1000XFULL     0x0020  /* Try for 1000BASE-X full-duplex */
+#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_1000XHALF     0x0040  /* Try for 1000BASE-X half-duplex */
+#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_1000XPAUSE    0x0080  /* Try for 1000BASE-X pause    */
+#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_1000XPSE_ASYM 0x0100  /* Try for 1000BASE-X asym pause */
+#define ADVERTISE_100BASE4      0x0200  /* Try for 100mbps 4k packets  */
+#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymetric pause     */
+#define ADVERTISE_RESV          0x1000  /* Unused...                   */
+#define ADVERTISE_RFAULT        0x2000  /* Say we can detect faults    */
+#define ADVERTISE_LPACK         0x4000  /* Ack link partners response  */
+#define ADVERTISE_NPAGE         0x8000  /* Next page bit               */
+
+#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
+			ADVERTISE_CSMA)
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                       ADVERTISE_100HALF | ADVERTISE_100FULL)
+
+/* Link partner ability register. */
+#define LPA_SLCT                0x001f  /* Same as advertise selector  */
+#define LPA_10HALF              0x0020  /* Can do 10mbps half-duplex   */
+#define LPA_1000XFULL           0x0020  /* Can do 1000BASE-X full-duplex */
+#define LPA_10FULL              0x0040  /* Can do 10mbps full-duplex   */
+#define LPA_1000XHALF           0x0040  /* Can do 1000BASE-X half-duplex */
+#define LPA_100HALF             0x0080  /* Can do 100mbps half-duplex  */
+#define LPA_1000XPAUSE          0x0080  /* Can do 1000BASE-X pause     */
+#define LPA_100FULL             0x0100  /* Can do 100mbps full-duplex  */
+#define LPA_1000XPAUSE_ASYM     0x0100  /* Can do 1000BASE-X pause asym*/
+#define LPA_100BASE4            0x0200  /* Can do 100mbps 4k packets   */
+#define LPA_PAUSE_CAP           0x0400  /* Can pause                   */
+#define LPA_PAUSE_ASYM          0x0800  /* Can pause asymetrically     */
+#define LPA_RESV                0x1000  /* Unused...                   */
+#define LPA_RFAULT              0x2000  /* Link partner faulted        */
+#define LPA_LPACK               0x4000  /* Link partner acked us       */
+#define LPA_NPAGE               0x8000  /* Next page bit               */
+
+#define LPA_DUPLEX		(LPA_10FULL | LPA_100FULL)
+#define LPA_100			(LPA_100FULL | LPA_100HALF | LPA_100BASE4)
+
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_NWAY          0x0001  /* Can do N-way auto-nego      */
+#define EXPANSION_LCWP          0x0002  /* Got new RX page code word   */
+#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
+#define EXPANSION_NPCAPABLE     0x0008  /* Link partner supports npage */
+#define EXPANSION_MFAULTS       0x0010  /* Multiple faults detected    */
+#define EXPANSION_RESV          0xffe0  /* Unused...                   */
+
+#define ESTATUS_1000_TFULL	0x2000	/* Can do 1000BT Full */
+#define ESTATUS_1000_THALF	0x1000	/* Can do 1000BT Half */
+
+/* N-way test register. */
+#define NWAYTEST_RESV1          0x00ff  /* Unused...                   */
+#define NWAYTEST_LOOPBACK       0x0100  /* Enable loopback for N-way   */
+#define NWAYTEST_RESV2          0xfe00  /* Unused...                   */
+
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF      0x0100  /* Advertise 1000BASE-T half duplex */
+
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK       0x2000  /* Link partner local receiver status */
+#define LPA_1000REMRXOK         0x1000  /* Link partner remote receiver status */
+#define LPA_1000FULL            0x0800  /* Link partner 1000BASE-T full duplex */
+#define LPA_1000HALF            0x0400  /* Link partner 1000BASE-T half duplex */
+
+/* Flow control flags */
+#define FLOW_CTRL_TX		0x01
+#define FLOW_CTRL_RX		0x02
+
+/**
+ * mii_nway_result
+ * @negotiated: value of MII ANAR and'd with ANLPAR
+ *
+ * Given a set of MII abilities, check each bit and returns the
+ * currently supported media, in the priority order defined by
+ * IEEE 802.3u.  We use LPA_xxx constants but note this is not the
+ * value of LPA solely, as described above.
+ *
+ * The one exception to IEEE 802.3u is that 100baseT4 is placed
+ * between 100T-full and 100T-half.  If your phy does not support
+ * 100T4 this is fine.  If your phy places 100T4 elsewhere in the
+ * priority order, you will need to roll your own function.
+ */
+rt_inline unsigned int mii_nway_result (unsigned int negotiated)
+{
+        unsigned int ret;
+
+        if (negotiated & LPA_100FULL)
+                ret = LPA_100FULL;
+        else if (negotiated & LPA_100BASE4)
+                ret = LPA_100BASE4;
+        else if (negotiated & LPA_100HALF)
+                ret = LPA_100HALF;
+        else if (negotiated & LPA_10FULL)
+                ret = LPA_10FULL;
+        else
+                ret = LPA_10HALF;
+
+        return ret;
+}
+
+/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
+#define SPEED_10		10
+#define SPEED_100		100
+#define SPEED_1000		1000
+#define SPEED_2500		2500
+#define SPEED_10000		10000
+
+
+#endif /* __MII_H__ */
+

+ 1456 - 0
bsp/dm365/drivers/mmcsd.c

@@ -0,0 +1,1456 @@
+#include <rtthread.h>
+#include <rthw.h>
+#include <string.h>
+#include <drivers/mmcsd_core.h>
+#include <dm36x.h>
+#include "mmcsd.h"
+//#include "pinmux.h"
+#include "edma.h"
+
+#define RT_USING_MMCSD0
+
+#define MMCSD_DEBUG			0	
+#if MMCSD_DEBUG
+#define mmc_dbg(fmt, ...)	rt_kprintf(fmt, ##__VA_ARGS__)
+#else
+#define mmc_dbg(fmt, ...)
+#endif
+
+#define MMU_NOCACHE_ADDR(a)      	((rt_uint32_t)a | (1UL<<29))
+#define CACHE_LINE_SIZE	32
+
+
+extern void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size);
+extern void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size);
+
+#define EVT_QUEUE_NUM				0	/* EDMA3 Event queue number. */
+
+static unsigned rw_threshold = 32;
+static rt_bool_t use_dma = RT_TRUE;
+
+enum DATA_DIR_TYPE
+{
+	DM365_MMC_DATADIR_NONE = 0,
+	DM365_MMC_DATADIR_READ,
+	DM365_MMC_DATADIR_WRITE,
+};
+
/* Per-controller driver state for the DM365 MMC/SD host. */
struct mmc_dm365_host 
{
	struct rt_mmcsd_host		*mmc;	/* core host object */
	struct rt_mmcsd_req			*req;	/* request in flight */
	struct rt_mmcsd_data		*data;	/* data phase of current request */
	struct rt_mmcsd_cmd			*cmd;	/* command phase of current request */
	struct edmacc_param 	tx_template;	/* EDMA parameter template, write direction */
	struct edmacc_param 	rx_template;	/* EDMA parameter template, read direction */

	rt_uint32_t	mmc_input_clk;	/* controller input clock, Hz */
	rt_uint32_t ns_in_one_cycle;	/* for ns in one cycle calculation */

	mmcsd_regs_t *mmcsd_regs;	/* memory-mapped controller registers */
	rt_uint8_t	bus_mode;	/* MMCSD_BUSMODE_* from the last set_ios */

	enum DATA_DIR_TYPE	data_dir;	/* direction of the current data phase */

	rt_uint32_t	rxdma;	/* EDMA channel numbers (rx / tx) */
	rt_uint32_t	txdma;
	rt_bool_t	use_dma;	/* DMA enabled for this host */
	rt_bool_t	do_dma;		/* current transfer actually uses DMA */

	rt_uint8_t *buffer;	/* PIO cursor into the current data buffer */
	rt_uint32_t buffer_bytes_left;	/* bytes left in the current buffer */
	rt_uint32_t bytes_left;	/* bytes left in the whole data phase */

	rt_uint8_t *dma_buffer;	/* bounce buffer for DMA (when used) */
	rt_bool_t   use_dma_buffer;	/* transfer goes through dma_buffer */
	rt_bool_t   sdio_int;	/* SDIO interrupt flag (usage outside this chunk) */
};
+
+void *mmc_priv(struct rt_mmcsd_host *host)
+{
+	return (void *)host->private_data;
+}
+
+static void delay_us(rt_uint32_t us)
+{
+    rt_uint32_t len;
+    for (;us > 0; us --)
+        for (len = 0; len < 10; len++ );
+}
+
/*******************************************************************************************************
** Name:     calculate_freq_for_card()
** Purpose:  Compute the push/pull clock divisor that produces at most
**           @mmc_req_freq from the controller input clock, and update
**           host->ns_in_one_cycle (ns per MMC clock cycle) accordingly.
**
** Input:    host          -> DM365 MMC host handle
**           mmc_req_freq  -> requested card clock, in Hz
**
** Output:   divisor value to program into the MMCCLK register
********************************************************************************************************/
static rt_uint32_t calculate_freq_for_card(struct mmc_dm365_host *host, rt_uint32_t mmc_req_freq)
{
	rt_uint32_t mmc_freq = 0;
	rt_uint32_t mmc_pclk = 0;
	rt_uint32_t mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;

	/* card clock = pclk / (2 * (divisor + 1)) */
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((rt_uint32_t)mmc_pclk / (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (rt_uint32_t)mmc_pclk / (2 * (mmc_push_pull_divisor + 1));

	/* never exceed the requested frequency — round the divisor up */
	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
	
	/* Convert ns to clock cycles */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000)/(((mmc_pclk/(2*(mmc_push_pull_divisor+1)))/1000));
	else
		host->ns_in_one_cycle = (1000000)/(((mmc_pclk/(2*(mmc_push_pull_divisor+1)))/1000000));

	return mmc_push_pull_divisor;
}
+
/*******************************************************************************************************
** Name:     calculate_clk_divider()
**           (the original header mis-named this as calculate_freq_for_card)
** Purpose:  Program the MMCCLK divider for the clock rate requested in @ios.
**           In open-drain (card identification) mode the clock is fixed at
**           MMCSD_INIT_CLOCK; otherwise the divisor comes from
**           calculate_freq_for_card().  The clock is disabled while the
**           divider is changed and re-enabled afterwards.
**
** Input:    mmc  -> MMC host handle
**           ios  -> requested I/O configuration
**
** Output:   none
********************************************************************************************************/
static void calculate_clk_divider(struct rt_mmcsd_host *mmc, struct rt_mmcsd_io_cfg *ios)
{
	rt_uint32_t temp = 0;
	rt_uint32_t mmc_pclk = 0;
	rt_uint32_t open_drain_freq = 0;
	rt_uint32_t mmc_push_pull_freq = 0;
	struct mmc_dm365_host *host = mmc_priv(mmc);

	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMCSD_BUSMODE_OPENDRAIN) 
	{
		/* Ignoring the init clock value passed for fixing the inter
		 * operability with different cards.
		 */
		open_drain_freq = ((rt_uint32_t)mmc_pclk / (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = host->mmcsd_regs->MMCCLK & ~MMCCLK_CLKRT_MASK; 
		temp |= open_drain_freq;
		host->mmcsd_regs->MMCCLK = temp;

		
		/* Convert ns to clock cycles */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK / 1000);
	} 
	else 
	{
		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		/* gate the clock off while the divider changes */
		temp = host->mmcsd_regs->MMCCLK & ~MMCCLK_CLKEN;
		host->mmcsd_regs->MMCCLK = temp;
		
		delay_us(10);

		temp = host->mmcsd_regs->MMCCLK & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		host->mmcsd_regs->MMCCLK = temp;
		
		host->mmcsd_regs->MMCCLK = temp | MMCCLK_CLKEN;

		delay_us(10);
	}
}
+
/*******************************************************************************************************
** Name:     mmc_dm365_set_ios()
** Purpose:  Apply the bus width, clock and power settings requested by
**           the mmcsd core.  On MMCSD_POWER_UP, issue the card
**           initialization sequence (MMCCMD_INITCK) and wait up to
**           ~500 ticks for the response-done flag.
**
** Input:    mmc  -> MMC host handle
**           ios  -> requested I/O configuration
**
** Output:   none
********************************************************************************************************/
static void mmc_dm365_set_ios(struct rt_mmcsd_host *mmc, struct rt_mmcsd_io_cfg *ios)
{
	struct mmc_dm365_host *host = mmc_priv(mmc);

	mmc_dbg("clock %dHz busmode %d powermode %d Vdd %04x\n", ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);

	switch (ios->bus_width) 
    {
	case MMCSD_BUS_WIDTH_8:
        mmc_dbg("Enabling 8 bit mode\n");
		host->mmcsd_regs->MMCCTL = (host->mmcsd_regs->MMCCTL & ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT;
		break;
	case MMCSD_BUS_WIDTH_4:
        mmc_dbg("Enabling 4 bit mode\n");
		host->mmcsd_regs->MMCCTL = (host->mmcsd_regs->MMCCTL & ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT;
		break;
	case MMCSD_BUS_WIDTH_1:
        mmc_dbg("Enabling 1 bit mode\n");
		host->mmcsd_regs->MMCCTL = host->mmcsd_regs->MMCCTL & ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMCSD_POWER_UP) 
    {
		unsigned long timeout = rt_tick_get() + 500;	/* ~500-tick deadline */
		rt_bool_t lose = 1;

		/* send the 80-clock-cycle initialization stream */
		host->mmcsd_regs->MMCARGHL = 0;
		host->mmcsd_regs->MMCCMD = MMCCMD_INITCK;
		
		while (rt_tick_get() < timeout) 
		{
			rt_uint32_t tmp = host->mmcsd_regs->MMCST0;

			if (tmp & MMCST0_RSPDNE) 
			{
				lose = 0;
				break;
			}
		}
		if (lose)
			mmc_dbg("powerup timeout\n");
	}
}
+
/*******************************************************************************************************
** Name:     dm365_fifo_data_trans()
** Purpose:  PIO transfer of up to @n bytes between host->buffer and the
**           controller FIFO, in the direction given by host->data_dir.
**           Advances host->buffer and decrements host->bytes_left.
**           Tail fragments that are not a multiple of 4 bytes are not
**           implemented yet (prints "to do").
**
** Input:    host -> DM365 MMC host handle
**           n    -> maximum number of bytes to move this call
**
** Output:   none
********************************************************************************************************/
static void dm365_fifo_data_trans(struct mmc_dm365_host *host, rt_uint32_t n)
{
	rt_uint8_t *p;
	rt_uint32_t i;

	p = host->buffer;

	/* clamp to what is left in the data phase */
	if (host->bytes_left < n)
		n = host->bytes_left;
	
	host->bytes_left -= n;

	/* NOTE:  we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width( i.e. ACCWD) is 4 bytes
	 */
	if (host->data_dir == DM365_MMC_DATADIR_WRITE) 
	{
		for (i = 0; i < (n >> 2); i++) 
		{
			host->mmcsd_regs->MMCDXR = *((rt_uint32_t *)p);
			p = p + 4;
		}
		if (n & 3) 
		{
			rt_kprintf("to do ... \n");
//			iowrite8_rep(host->base + MMCSD_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} 
	else 
	{		
		for (i = 0; i < (n >> 2); i++) 
		{
			*((rt_uint32_t *)p) = host->mmcsd_regs->MMCDRR;	
			p  = p + 4;
		}
		if (n & 3) 
		{
            rt_kprintf("to do ... \n");
//			ioread8_rep(host->base + MMCSD_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_start_command()
+** Description: Program and issue one SD/MMC command: build the MMCCMD word from
+**              the response type and transfer flags, set timeouts, pre-fill the
+**              FIFO for non-DMA writes, then write ARG/CMD and unmask interrupts.
+**
+** Input:       host -> DM365 mmc host handle
+**              cmd  -> SD command handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_start_command(struct mmc_dm365_host *host, struct rt_mmcsd_cmd *cmd)
+{
+	rt_uint32_t cmd_reg = 0;
+	rt_uint32_t im_val;
+	
+	host->cmd = cmd;
+
+	/* Map the framework response type onto the controller's RSPFMT encoding. */
+	switch (resp_type(cmd)) 
+	{
+	case RESP_R1B:
+		/* There's some spec confusion about when R1B is
+		 * allowed, but if the card doesn't issue a BUSY
+		 * then it's harmless for us to allow it.
+		 */
+		cmd_reg |= MMCCMD_BSYEXP;
+		/* FALLTHROUGH */
+	case RESP_R1:		/* 48 bits, CRC */
+    case RESP_R4:
+	case RESP_R5:
+    case RESP_R6:
+    case RESP_R7:        
+		cmd_reg |= MMCCMD_RSPFMT_R1456;
+		break;
+	case RESP_R2:		/* 136 bits, CRC */
+		cmd_reg |= MMCCMD_RSPFMT_R2;
+		break;
+	case RESP_R3:		/* 48 bits, no CRC */
+		cmd_reg |= MMCCMD_RSPFMT_R3;
+		break;
+	default:
+		cmd_reg |= MMCCMD_RSPFMT_NONE;
+		mmc_dbg("unknown resp_type %04x\n", resp_type(cmd));
+		break;
+	}
+
+	/* Set command index */
+	cmd_reg |= cmd->cmd_code;
+
+	/* Enable EDMA transfer triggers */
+	if (host->do_dma == RT_TRUE)
+		cmd_reg |= MMCCMD_DMATRIG;
+
+	if (host->data != RT_NULL && host->data_dir == DM365_MMC_DATADIR_READ)
+		cmd_reg |= MMCCMD_DMATRIG;
+
+	/* Setting whether command involves data transfer or not */
+	if (cmd->data)
+		cmd_reg |= MMCCMD_WDATX;
+
+	/* Setting whether stream or block transfer */
+	if (cmd->flags & MMC_DATA_STREAM)
+	{
+		mmc_dbg("to do\n");
+		cmd_reg |= MMCCMD_STRMTP;
+	}
+
+	/* Setting whether data read or write */
+	if (host->data_dir == DM365_MMC_DATADIR_WRITE)
+		cmd_reg |= MMCCMD_DTRW;
+
+	if (host->bus_mode == MMCSD_BUSMODE_PUSHPULL)
+	{
+		cmd_reg |= MMCCMD_PPLEN;
+	}
+	
+	/* set Command timeout */
+	host->mmcsd_regs->MMCTOR = 0x1FFF;
+	
+	/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
+	im_val =  MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
+	if (host->data_dir == DM365_MMC_DATADIR_WRITE) 
+	{
+		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
+
+		if (host->do_dma == RT_FALSE)
+			im_val |= MMCST0_DXRDY;
+	} 
+	else if (host->data_dir == DM365_MMC_DATADIR_READ) 
+	{
+		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
+
+		if (host->do_dma == RT_FALSE)
+			im_val |= MMCST0_DRRDY;
+	}
+
+	/*
+	 * Before non-DMA WRITE commands the controller needs priming:
+	 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size
+	 */
+	if ((host->do_dma == RT_FALSE) && (host->data_dir == DM365_MMC_DATADIR_WRITE))
+		dm365_fifo_data_trans(host, rw_threshold);
+
+	host->mmcsd_regs->MMCARGHL = cmd->arg;
+	host->mmcsd_regs->MMCCMD = cmd_reg;
+	host->mmcsd_regs->MMCIM = im_val;
+}
+
+/*******************************************************************************************************
+** Function:    dm365_abort_dma()
+** Description: Stop and clean the EDMA channel for the current transfer direction.
+**
+** Input:       host -> DM365 mmc host handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void dm365_abort_dma(struct mmc_dm365_host *host)
+{
+	int sync_dev;
+
+	/* Pick the channel that matches the direction of the in-flight transfer. */
+	if (host->data_dir == DM365_MMC_DATADIR_READ)
+		sync_dev = host->rxdma;
+	else
+		sync_dev = host->txdma;
+
+	//EDMA3DisableTransfer(EDMA0CC0_REG_BASE, sync_dev, EDMA3_TRIG_MODE_EVENT);
+	edma_stop(sync_dev);
+	edma_clean_channel(sync_dev);
+}
+
+/*******************************************************************************************************
+** Function:    mmc_request_done()
+** Description: Finish one MMC request: retry the command if it failed and
+**              retries remain, otherwise log the result and complete the
+**              request toward the mmcsd core.
+**
+** Input:       host -> mmc host handle
+**              mrq  -> request handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+void mmc_request_done(struct rt_mmcsd_host *host, struct rt_mmcsd_req *mrq)
+{
+	struct rt_mmcsd_cmd *cmd = mrq->cmd;
+	int err = cmd->err;
+
+	/* On error with retries left, clear the error and re-submit the request. */
+	if (err && cmd->retries) 
+	{
+		mmc_dbg("req failed (CMD%u): %d, retrying...\n", cmd->cmd_code, err);
+
+		cmd->retries--;
+		cmd->err = 0;
+		host->ops->request(host, mrq);
+	} 
+	else 
+	{
+		mmc_dbg("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
+			"dm365 host", cmd->cmd_code, err,
+			cmd->resp[0], cmd->resp[1],
+			cmd->resp[2], cmd->resp[3]);
+
+		if (mrq->data) 
+		{
+			mmc_dbg("%s:     %d bytes transferred: %d\n",
+				"dm365 host",
+				mrq->data->bytes_xfered, mrq->data->err);
+		}
+
+		if (mrq->stop) 
+		{
+			mmc_dbg("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
+				"dm365 host", mrq->stop->cmd_code,
+				mrq->stop->err,
+				mrq->stop->resp[0], mrq->stop->resp[1],
+				mrq->stop->resp[2], mrq->stop->resp[3]);
+		}
+
+		mmcsd_req_complete(host);
+	}
+}
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_xfer_done()
+** Description: Called when a data transfer completes: handle the SDIO-interrupt
+**              errata work-around, tear down DMA (copying out of the bounce
+**              buffer on reads), then finish the request or issue the STOP command.
+**
+** Input:       host -> DM365 mmc host handle
+**              data -> data handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_xfer_done(struct mmc_dm365_host *host, struct rt_mmcsd_data *data)
+{
+	host->data = RT_NULL;
+
+	if (host->mmc->flags & MMCSD_SUP_SDIO_IRQ) {
+		/*
+		 * SDIO Interrupt Detection work-around as suggested by
+		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+		 */
+		if (host->sdio_int && !(host->mmcsd_regs->SDIOST0 &
+					SDIOST0_DAT1_HI)) {
+			host->mmcsd_regs->SDIOIST = SDIOIST_IOINT;
+			sdio_irq_wakeup(host->mmc);
+		}
+	}
+
+	if (host->do_dma == RT_TRUE)
+	{
+		dm365_abort_dma(host);
+
+		if (data->flags & DATA_DIR_READ)
+		{
+			/* read operation */
+			if (host->use_dma_buffer == RT_TRUE)
+			{
+				/* copy DMA buffer to read buffer */
+				memcpy(data->buf, (void*)MMU_NOCACHE_ADDR(host->dma_buffer), data->blks * data->blksize);
+			}
+			/*else
+			{
+				mmu_invalidate_dcache((rt_uint32_t)data->buf, data->blks * data->blksize);
+			}*/
+		}
+
+		host->do_dma = RT_FALSE;
+	}
+	
+	host->data_dir = DM365_MMC_DATADIR_NONE;
+
+	/* No STOP to send (or the command already failed): complete the request;
+	 * otherwise issue the STOP command that ends this data transfer. */
+	if (!data->stop || (host->cmd && host->cmd->err)) 
+	{
+		mmc_request_done(host->mmc, data->mrq);
+		host->mmcsd_regs->MMCIM = 0;
+	} 
+	else
+		mmc_dm365_start_command(host, data->stop);
+}
+
+/* EDMA channel callback: any status other than DMA_COMPLETE (e.g. a missed
+ * event or a null transfer request) is treated as an I/O error and the data
+ * phase is terminated early via mmc_dm365_xfer_done(). */
+static void mmc_dm365_dma_cb(unsigned channel, rt_uint16_t ch_status, void *data)
+{
+	if (DMA_COMPLETE != ch_status) {
+		struct mmc_dm365_host *host = data;
+
+		/* Currently means:  DMA Event Missed, or "null" transfer
+		 * request was seen.  In the future, TC errors (like bad
+		 * addresses) might be presented too.
+		 */
+		mmc_dbg("DMA %s error\n",
+			(host->data->flags & MMC_DATA_WRITE)
+				? "write" : "read");
+		host->data->err = -RT_EIO;
+		mmc_dm365_xfer_done(host, host->data);
+	}
+}
+
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_dma_setup()
+** Description: Pre-build the EDMA PaRAM template for one transfer direction
+**              (A-B synchronized, 4-byte chunks, rw_threshold-byte frames).
+**
+** Input:       host     -> DM365 mmc host handle
+**              tx       -> RT_TRUE to set up the transmit channel, RT_FALSE for receive
+**              template -> receives a copy of the configured PaRAM set
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_dma_setup(struct mmc_dm365_host *host, rt_bool_t tx, struct edmacc_param *template)
+{
+	rt_uint32_t       sync_dev;
+	const rt_uint16_t acnt = 4;
+	const rt_uint16_t bcnt = rw_threshold >> 2;
+	const rt_uint16_t	ccnt = 0;
+	rt_uint32_t		src_port = 0;
+	rt_uint32_t		dst_port = 0;
+	rt_int16_t		src_bidx, dst_bidx;
+	rt_int16_t		src_cidx, dst_cidx;
+
+    //edmacc_param paramSet;
+
+	/*
+	 * A-B Sync transfer:  each DMA request is for one "frame" of
+	 * rw_threshold bytes, broken into "acnt"-size chunks repeated
+	 * "bcnt" times.  Each segment needs "ccnt" such frames; since
+	 * we tell the block layer our mmc->max_seg_size limit, we can
+	 * trust (later) that it's within bounds.
+	 *
+	 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
+	 * EDMA will optimize memory operations to use larger bursts.
+	 */
+	if (tx) 
+	{
+		sync_dev = host->txdma;
+
+		/* src_prt, ccnt, and link to be set up later */
+		/*paramSet.srcBIdx = acnt;
+		paramSet.srcCIdx = acnt * bcnt;
+
+		paramSet.destAddr = (rt_uint32_t)&(host->mmcsd_regs->MMCDXR);
+		paramSet.destBIdx = 0;
+		paramSet.destCIdx = 0;*/
+		/* src_prt, ccnt, and link to be set up later */
+		src_bidx = acnt;
+		src_cidx = acnt * bcnt;
+
+		/* Destination is the fixed transmit-FIFO register: zero indexes. */
+		dst_port = (rt_uint32_t)&(host->mmcsd_regs->MMCDXR);
+		dst_bidx = 0;
+		dst_cidx = 0;
+	} 
+	else 
+	{
+		sync_dev = host->rxdma;
+
+		/* dst_prt, ccnt, and link to be set up later */
+		/*paramSet.srcAddr = (rt_uint32_t)&(host->mmcsd_regs->MMCDRR);
+		paramSet.srcBIdx = 0;
+		paramSet.srcCIdx = 0;
+
+		paramSet.destBIdx = acnt;
+		paramSet.destCIdx = acnt * bcnt;*/
+		/* Source is the fixed receive-FIFO register: zero indexes. */
+		src_port = (rt_uint32_t)&(host->mmcsd_regs->MMCDRR);
+		src_bidx = 0;
+		src_cidx = 0;
+
+		/* dst_prt, ccnt, and link to be set up later */
+		dst_bidx = acnt;
+		dst_cidx = acnt * bcnt;
+	}
+	/*
+	 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
+	 * are not 256-bit (32-byte) aligned.  So we use INCR, and the W8BIT
+	 * parameter is ignored.
+	 */
+	edma_set_src(sync_dev, src_port, INCR, W8BIT);
+	edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
+
+	edma_set_src_index(sync_dev, src_bidx, src_cidx);
+	edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
+
+	edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
+
+	/* Snapshot the programmed PaRAM set so it can be re-used per request. */
+	edma_read_slot(sync_dev, template);
+
+	/* don't bother with irqs or chaining */
+	template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
+	
+#if 0
+	paramSet.opt = 0u;
+	/* Src & Dest are in INCR modes */
+	paramSet.opt &= 0xFFFFFFFCu;
+	/* Program the TCC */
+	paramSet.opt |= ((sync_dev << EDMA3CC_OPT_TCC_SHIFT) & EDMA3CC_OPT_TCC);
+	
+	paramSet.aCnt = acnt;
+	paramSet.bCnt = bcnt;
+	
+	/* AB Sync Transfer Mode */
+	paramSet.opt |= (1 << EDMA3CC_OPT_SYNCDIM_SHIFT);
+	
+	/* Now, write the PaRAM Set. */
+	EDMA3SetPaRAM(EDMA0CC0_REG_BASE, sync_dev, &paramSet);
+
+	EDMA3GetPaRAM(EDMA0CC0_REG_BASE, sync_dev, template);
+#endif
+}
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_send_dma_request()
+** Description: Fill in the pre-built PaRAM template (buffer address and frame
+**              count) for the current direction, write it to the channel slot
+**              and start the EDMA transfer.
+**
+** Input:       host -> DM365 mmc host handle
+**              data -> DMA transfer data handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_send_dma_request(struct mmc_dm365_host *host, struct rt_mmcsd_data *data)
+{
+	//struct EDMA3CCPaRAMEntry *template;
+	struct edmacc_param	*template;
+	rt_uint32_t	buf_ptr;
+	rt_uint32_t	channel;
+	rt_uint32_t bytes_left = host->bytes_left;
+	rt_uint32_t count = host->bytes_left;	
+	const rt_uint32_t shift = ffs(rw_threshold) - 1;	/* log2(rw_threshold) */
+
+	/* Use the bounce buffer when the caller's buffer is unsuitable for DMA. */
+	if (host->use_dma_buffer == RT_TRUE)
+		buf_ptr = host->dma_buffer;//MMU_NOCACHE_ADDR(host->dma_buffer);
+	else
+		buf_ptr = (rt_uint32_t)data->buf;
+
+	if (host->data_dir == DM365_MMC_DATADIR_WRITE) 
+	{
+		template = &host->tx_template;
+		channel = host->txdma;
+	} 
+	else 
+	{
+		template = &host->rx_template;
+		channel = host->rxdma;
+	}
+
+	template->link_bcntrld = 0xffff;
+	//template->bCntReload = 0x0;
+
+	if (count > bytes_left)
+		count = bytes_left;
+	bytes_left -= count;
+
+	if (host->data_dir == DM365_MMC_DATADIR_WRITE)
+		template->src = buf_ptr;
+	else
+		template->dst = buf_ptr;
+	template->ccnt = count >> shift;	/* number of rw_threshold-byte frames */
+
+	edma_write_slot(channel, template);
+
+	edma_clear_event(channel);
+
+	/*EDMA3SetPaRAM(EDMA0CC0_REG_BASE, channel, template);
+	EDMA3ClrEvt(EDMA0CC0_REG_BASE, channel);
+	EDMA3EnableTransfer(EDMA0CC0_REG_BASE, channel, EDMA3_TRIG_MODE_EVENT);*/
+	edma_start(channel);
+}
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_start_dma_transfer()
+** Description: Prepare cache/bounce-buffer handling for a DMA transfer and
+**              kick off the EDMA request.
+**
+** Input:       host -> DM365 mmc host handle
+**              data -> DMA transfer data handle
+**
+** Output:      0 on success
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static int mmc_dm365_start_dma_transfer(struct mmc_dm365_host *host, struct rt_mmcsd_data *data)
+{
+	/* set initial value */
+	host->use_dma_buffer = RT_FALSE;
+
+	if (!(data->flags & DATA_DIR_READ))
+	{
+		/* Write path: misaligned source data is staged through the
+		 * non-cached bounce buffer; aligned data only needs a cache clean. */
+		if ((rt_uint32_t)data->buf & (RT_ALIGN_SIZE - 1))
+		{
+			/* not align to basic size, use DMA buffer */
+			host->use_dma_buffer = RT_TRUE;
+			memcpy((void*)MMU_NOCACHE_ADDR(host->dma_buffer), data->buf, data->blks * data->blksize);
+		}
+		else
+		{
+			rt_uint32_t addr;
+			addr = ((rt_uint32_t)data->buf & ~(CACHE_LINE_SIZE - 1));
+			/* write data case, always clean DCache */
+			mmu_clean_dcache(addr, (data->blks + 1)* data->blksize);
+		}
+	}
+	else
+	{
+		/* whether align to cache line in read operation */
+		if (((rt_uint32_t)data->buf) & (CACHE_LINE_SIZE - 1))
+			host->use_dma_buffer = RT_TRUE;
+		else
+			mmu_invalidate_dcache((rt_uint32_t)data->buf, data->blks * data->blksize);
+	}
+
+	host->do_dma = RT_TRUE;
+	mmc_dm365_send_dma_request(host, data);
+
+	return 0;
+}
+
+#if 0
+/*******************************************************************************************************
+** Function:    acquire_dma_channels()
+** Description: Disabled legacy variant based on the StarterWare EDMA3 API
+**              (EDMA3RequestChannel/EDMA3FreeChannel); superseded by the
+**              edma_alloc_channel() version below.
+**
+** Input:       host -> DM365 mmc host handle
+**
+** Output:      0 on success, negative error code on failure
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static int acquire_dma_channels(struct mmc_dm365_host *host)
+{
+	int r;
+
+	/* Acquire master DMA write channel */
+	r = EDMA3RequestChannel(EDMA0CC0_REG_BASE, EDMA3_CHANNEL_TYPE_DMA, host->txdma, host->txdma, EVT_QUEUE_NUM);
+	if (r < 0) 
+	{
+		rt_kprintf("alloc %s channel err %d\n", "tx", r);
+		return r;
+	}
+	mmc_dm365_dma_setup(host, RT_TRUE, &host->tx_template);
+
+	/* Acquire master DMA read channel */
+	r = EDMA3RequestChannel(EDMA0CC0_REG_BASE, EDMA3_CHANNEL_TYPE_DMA, host->rxdma, host->rxdma, EVT_QUEUE_NUM);
+	if (r < 0) 
+	{
+		rt_kprintf("alloc %s channel err %d\n", "rx", r);
+		goto free_master_write;
+	}
+	mmc_dm365_dma_setup(host, RT_FALSE, &host->rx_template);
+
+	return 0;
+
+free_master_write:
+	EDMA3FreeChannel(EDMA0CC0_REG_BASE, EDMA3_CHANNEL_TYPE_DMA, host->txdma, EDMA3_TRIG_MODE_EVENT, host->txdma, EVT_QUEUE_NUM);
+
+	return r;
+}
+#endif
+/* Acquire the master EDMA write (tx) and read (rx) channels with
+ * mmc_dm365_dma_cb as the completion callback, and pre-build their PaRAM
+ * templates.  Returns 0 on success or a negative edma error code; on rx
+ * failure the already-acquired tx channel is released. */
+static int acquire_dma_channels(struct mmc_dm365_host *host)
+{
+	//u32 link_size;
+	int r, i;	/* i is only used by the disabled slot-allocation code below */
+
+	/* Acquire master DMA write channel */
+	r = edma_alloc_channel(host->txdma, mmc_dm365_dma_cb, host,
+			EVENTQ_DEFAULT);
+	if (r < 0) {
+		mmc_dbg("alloc %s channel err %d\n",
+				"tx", r);
+		return r;
+	}
+	mmc_dm365_dma_setup(host, RT_TRUE, &host->tx_template);
+
+	/* Acquire master DMA read channel */
+	r = edma_alloc_channel(host->rxdma, mmc_dm365_dma_cb, host,
+			EVENTQ_DEFAULT);
+	if (r < 0) {
+		mmc_dbg("alloc %s channel err %d\n",
+				"rx", r);
+		goto free_master_write;
+	}
+	mmc_dm365_dma_setup(host, RT_FALSE, &host->rx_template);
+
+	/* Allocate parameter RAM slots, which will later be bound to a
+	 * channel as needed to handle a scatterlist.
+	 */
+#if 0
+	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+	for (i = 0; i < link_size; i++) {
+		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
+		if (r < 0) {
+			mmc_dbg("dma PaRAM alloc --> %d\n",
+				r);
+			break;
+		}
+		host->links[i] = r;
+	}
+	host->n_link = i;
+#endif
+
+	return 0;
+
+free_master_write:
+	edma_free_channel(host->txdma);
+
+	return r;
+}
+
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_prepare_data()
+** Description: Prepare the data phase of a request: program timeout, block
+**              count/size and FIFO direction, then start DMA when possible or
+**              fall back to CPU (PIO) copy.
+**
+** Input:       host -> DM365 mmc host handle
+**              req  -> SD request handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_prepare_data(struct mmc_dm365_host *host, struct rt_mmcsd_req *req)
+{
+	int timeout;
+	int fifo_lev;
+	struct rt_mmcsd_data *data = req->data;
+
+	fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
+
+	host->data = data;
+	if (data == RT_NULL) 
+	{
+		/* Command without data phase: clear block registers and return. */
+		host->data_dir = DM365_MMC_DATADIR_NONE;
+		host->mmcsd_regs->MMCBLEN = 0;
+		host->mmcsd_regs->MMCNBLK = 0;
+		return;
+	}
+
+	mmc_dbg("%s %s, %d blocks of %d bytes\n",
+		(data->flags & DATA_STREAM) ? "stream" : "block",
+		(data->flags & DATA_DIR_WRITE) ? "write" : "read",
+		data->blks, data->blksize);
+	mmc_dbg("  DTO %d cycles + %d ns\n",
+		data->timeout_clks, data->timeout_ns);
+	/* Data timeout in clock cycles, clamped to the 16-bit MMCTOD register. */
+	timeout = data->timeout_clks + (data->timeout_ns / host->ns_in_one_cycle);
+	if (timeout > 0xffff)
+		timeout = 0xffff;
+
+	host->mmcsd_regs->MMCTOD = timeout;
+	host->mmcsd_regs->MMCNBLK = data->blks;
+	host->mmcsd_regs->MMCBLEN = data->blksize;
+	
+	/* Configure the FIFO */
+	switch (data->flags & DATA_DIR_WRITE) 
+	{
+	case DATA_DIR_WRITE:
+		host->data_dir = DM365_MMC_DATADIR_WRITE;
+		host->mmcsd_regs->MMCFIFOCTL = fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST;
+		host->mmcsd_regs->MMCFIFOCTL = fifo_lev | MMCFIFOCTL_FIFODIR_WR;
+		break;
+
+	default:
+		host->data_dir = DM365_MMC_DATADIR_READ;
+		host->mmcsd_regs->MMCFIFOCTL = fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST;
+		host->mmcsd_regs->MMCFIFOCTL = fifo_lev | MMCFIFOCTL_FIFODIR_RD;
+		break;
+	}
+
+	host->buffer = RT_NULL;
+	host->bytes_left = data->blks * data->blksize;
+
+	/* For now we try to use DMA whenever we won't need partial FIFO
+	 * reads or writes, either for the whole transfer (as tested here)
+	 * or for any individual scatterlist segment (tested when we call
+	 * start_dma_transfer).
+	 *
+	 * While we *could* change that, unusual block sizes are rarely
+	 * used.  The occasional fallback to PIO shouldn't hurt.
+	 */
+	if ((host->use_dma == RT_TRUE) && (host->bytes_left & (rw_threshold - 1)) == 0 &&
+			mmc_dm365_start_dma_transfer(host, data) == 0)
+	{
+		/* zero this to ensure we take no PIO paths */
+		host->bytes_left = 0;
+	} 
+	else 
+	{
+		/* Revert to CPU Copy */
+		host->buffer = (rt_uint8_t*)req->data->buf;
+	}
+}
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_request()
+** Description: Host "request" entry point: wait for the card to leave the BUSY
+**              state, then prepare the data phase and issue the command.
+**
+** Input:       mmc -> mmc host handle
+**              req -> SD request handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_request(struct rt_mmcsd_host *mmc, struct rt_mmcsd_req *req)
+{
+	struct mmc_dm365_host *host = mmc_priv(mmc);
+	unsigned long timeout = rt_tick_get() + 900;
+	rt_uint32_t mmcst1 = 0;
+
+	/* Card may still be sending BUSY after a previous operation,
+	 * typically some kind of write.  If so, we can't proceed yet.
+	 */
+	while (rt_tick_get() < timeout) 
+    {
+		mmcst1  = host->mmcsd_regs->MMCST1;
+		if (!(mmcst1 & MMCST1_BUSY))
+			break;
+	}
+	if (mmcst1 & MMCST1_BUSY) 
+    {
+		/* Still busy after the timeout window: fail the request. */
+		mmc_dbg("still BUSY? bad ... \n");
+		req->cmd->err = -RT_ETIMEOUT;
+		mmc_request_done(mmc, req);
+		return;
+	}
+
+	host->do_dma = RT_FALSE;
+	mmc_dm365_prepare_data(host, req);
+	mmc_dm365_start_command(host, req->cmd);
+}
+
+/* Enable/disable SDIO card interrupt delivery.  When enabling while DAT1 is
+ * already low (interrupt pending), acknowledge it and wake the SDIO layer
+ * immediately instead of arming the interrupt enable bit. */
+static void mmc_dm365_enable_sdio_irq(struct rt_mmcsd_host *mmc, rt_int32_t enable)
+{
+	struct mmc_dm365_host *host = mmc_priv(mmc);
+
+	if (enable) 
+	{
+		if (!(host->mmcsd_regs->SDIOST0 & SDIOST0_DAT1_HI)) 
+		{
+			host->mmcsd_regs->SDIOIST = SDIOIST_IOINT;
+			sdio_irq_wakeup(host->mmc);
+		} 
+		else 
+		{
+			host->sdio_int = RT_TRUE;
+			host->mmcsd_regs->SDIOIEN |= SDIOIEN_IOINTEN;
+		}
+	} 
+	else 
+	{
+		host->sdio_int = RT_FALSE;
+		host->mmcsd_regs->SDIOIEN &= ~SDIOIEN_IOINTEN;
+	}
+}
+
+
+/* Host operations registered with the RT-Thread mmcsd core. */
+static const struct rt_mmcsd_host_ops mmc_dm365_ops = 
+{
+	mmc_dm365_request,		/* submit a request */
+	mmc_dm365_set_ios,		/* apply I/O configuration */
+    RT_NULL,				/* third op unused by this driver */
+    mmc_dm365_enable_sdio_irq	/* enable/disable SDIO card interrupt */
+};
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_reset_ctrl()
+** Description: Assert (val != 0) or release (val == 0) the command and data
+**              line reset bits of the MMC controller.
+**
+** Input:       host -> DM365 mmc host handle
+**              val  -> non-zero to reset, zero to enable
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_reset_ctrl(struct mmc_dm365_host *host, int val)
+{
+	rt_uint32_t temp;
+
+	temp = host->mmcsd_regs->MMCCTL;
+	
+	if (val)	/* reset */
+		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+	else		/* enable */
+		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
+
+	host->mmcsd_regs->MMCCTL = temp;
+
+	/* Short settle delay after toggling the reset bits. */
+	delay_us(10);
+}
+
+/*******************************************************************************************************
+** Function:    init_mmcsd_host()
+** Description: Initialize the DM365 MMCSD controller: hold it in reset, enable
+**              the clock, program maximum command/data timeouts, then release reset.
+**
+** Input:       host -> DM365 mmc host handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void init_mmcsd_host(struct mmc_dm365_host *host)
+{
+	mmc_dm365_reset_ctrl(host, 1);
+
+	host->mmcsd_regs->MMCCLK = 0;
+	host->mmcsd_regs->MMCCLK = MMCCLK_CLKEN;
+
+	/* Maximum command (MMCTOR) and data (MMCTOD) timeout values. */
+	host->mmcsd_regs->MMCTOR = 0x1FFF;
+	host->mmcsd_regs->MMCTOD = 0xFFFF;
+	
+	mmc_dm365_reset_ctrl(host, 0);
+}
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_cmd_done()
+** Description: Called when the command phase ends: read back the response
+**              registers and, if there is no data phase (or the command failed),
+**              complete the request.
+**
+** Input:       host -> DM365 mmc host handle
+**              cmd  -> SD command handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_cmd_done(struct mmc_dm365_host *host, struct rt_mmcsd_cmd *cmd)
+{
+	host->cmd = RT_NULL;
+
+	if (resp_type(cmd) != RESP_NONE) 
+	{
+		if (resp_type(cmd) == RESP_R2) 
+		{
+			/* response type 2 */
+			cmd->resp[3] = host->mmcsd_regs->MMCRSP01;
+			cmd->resp[2] = host->mmcsd_regs->MMCRSP23;
+			cmd->resp[1] = host->mmcsd_regs->MMCRSP45;
+			cmd->resp[0] = host->mmcsd_regs->MMCRSP67;
+		} 
+		else 
+		{
+			/* response types 1, 1b, 3, 4, 5, 6 */
+			cmd->resp[0] = host->mmcsd_regs->MMCRSP67;
+		}
+	}
+
+	if (host->data == RT_NULL || cmd->err) 
+	{
+		/* A timed-out command is not worth retrying. */
+		if (cmd->err == -RT_ETIMEOUT)
+			cmd->mrq->cmd->retries = 0;
+		mmc_request_done(host->mmc, cmd->mrq);
+		host->mmcsd_regs->MMCIM = 0;
+	}
+}
+
+/*******************************************************************************************************
+** Function:    dm365_abort_data()
+** Description: Abort a failed data transfer by pulsing the controller's
+**              command/data reset bits.  The data parameter is currently unused.
+**
+** Input:       host -> DM365 mmc host handle
+**              data -> data handle (unused)
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void dm365_abort_data(struct mmc_dm365_host *host, struct rt_mmcsd_data *data)
+{
+	mmc_dm365_reset_ctrl(host, 1);
+	mmc_dm365_reset_ctrl(host, 0);
+}
+
+/* SDIO interrupt handler: on a pending card interrupt, acknowledge it in
+ * SDIOIST and wake the SDIO interrupt thread. */
+static void mmc_dm365_sdio_irq(int irq, void *param)
+{
+	struct mmc_dm365_host *host = (struct mmc_dm365_host *)param;
+	rt_uint32_t status;
+
+	status = host->mmcsd_regs->SDIOIST;//readl(host->base + DAVINCI_SDIOIST);
+	if (status & SDIOIST_IOINT) {
+		mmc_dbg("SDIO interrupt status %x\n", status);
+		//writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+		host->mmcsd_regs->SDIOIST = status | SDIOIST_IOINT;
+		sdio_irq_wakeup(host->mmc);
+	}
+}
+
+
+/*******************************************************************************************************
+** Function:    mmc_dm365_irq()
+** Description: MMCSD interrupt handler: drain the FIFO for PIO transfers,
+**              then decode the accumulated MMCST0 status bits into
+**              transfer-done, timeout, CRC-error and command-done events.
+**
+** Input:       irq   -> interrupt vector number
+**              param -> DM365 mmc host handle
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void mmc_dm365_irq(int irq, void *param)
+{
+	struct mmc_dm365_host *host = (struct mmc_dm365_host *)param;
+	rt_uint32_t status, qstatus;
+	int end_command = 0;
+	int end_transfer = 0;
+	struct rt_mmcsd_data *data = host->data;
+
+	if (host->cmd == RT_NULL && host->data == RT_NULL) 
+	{
+		status = host->mmcsd_regs->MMCST0;
+		mmc_dbg("Spurious interrupt 0x%04x\n", status);
+		/* Disable the interrupt from mmcsd */
+		host->mmcsd_regs->MMCIM = 0;
+		return;
+	}
+
+	status = host->mmcsd_regs->MMCST0;
+	qstatus = status;	/* qstatus accumulates all bits seen across FIFO iterations */
+	
+	/* handle FIFO first when using PIO for data.
+	 * bytes_left will decrease to zero as I/O progress and status will
+	 * read zero over iteration because this controller status
+	 * register(MMCST0) reports any status only once and it is cleared
+	 * by read. So, it is not unbouned loop even in the case of
+	 * non-dma.
+	 */
+	while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) 
+	{
+		dm365_fifo_data_trans(host, rw_threshold);
+		status = host->mmcsd_regs->MMCST0;
+		if (!status)
+			break;
+		qstatus |= status;
+	}
+
+	if (qstatus & MMCST0_DATDNE) 
+	{
+		/* All blocks sent/received, and CRC checks passed */
+		if (data != RT_NULL) 
+		{
+			if ((host->do_dma == RT_FALSE) && (host->bytes_left > 0))
+			{
+				/* if datasize < rw_threshold
+				 * no RX ints are generated
+				 */
+				rt_kprintf("to do! host->bytes_left=0x%x\n", host->bytes_left);
+				dm365_fifo_data_trans(host, host->bytes_left);
+			}
+			end_transfer = 1;
+			data->bytes_xfered = data->blks* data->blksize;
+		} 
+		else 
+		{
+			mmc_dbg("DATDNE with no host->data\n");
+		}
+	}
+
+	if (qstatus & MMCST0_TOUTRD) 
+	{
+		/* Read data timeout */
+		/* NOTE(review): data is dereferenced without an RT_NULL check here
+		 * (unlike the DATDNE branch above) — verify TOUTRD cannot be raised
+		 * for a command without a data phase. */
+		data->err = -RT_ETIMEOUT;
+		end_transfer = 1;
+
+		mmc_dbg("read data timeout, status %x\n", qstatus);
+		rt_kprintf("read data timeout, status %x\n", qstatus);
+
+		dm365_abort_data(host, data);
+	}
+
+	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) 
+	{
+		/* Data CRC error */
+		/* NOTE(review): data is dereferenced without an RT_NULL check here as well. */
+		data->err = -RT_ERROR;
+		end_transfer = 1;
+
+		/* NOTE:  this controller uses CRCWR to report both CRC
+		 * errors and timeouts (on writes).  MMCDRSP values are
+		 * only weakly documented, but 0x9f was clearly a timeout
+		 * case and the two three-bit patterns in various SD specs
+		 * (101, 010) aren't part of it ...
+		 */
+		if (qstatus & MMCST0_CRCWR) 
+		{
+			rt_uint32_t temp = host->mmcsd_regs->MMCDRSP;
+
+			if (temp == 0x9f)
+				data->err = -RT_ETIMEOUT;
+		}
+		mmc_dbg("data %s %s error\n", (qstatus & MMCST0_CRCWR) ? "write" : "read", (data->err == -110) ? "timeout" : "CRC");
+
+		rt_kprintf("data %s %s error\n", (qstatus & MMCST0_CRCWR) ? "write" : "read", (data->err == -110) ? "timeout" : "CRC");
+
+		dm365_abort_data(host, data);
+	}
+
+	if (qstatus & MMCST0_TOUTRS) 
+	{
+		/* Command timeout */
+		if (host->cmd) 
+		{
+			mmc_dbg("CMD%d timeout, status %x\n", host->cmd->cmd_code, qstatus);
+			host->cmd->err = -RT_ETIMEOUT;
+			if (data) 
+			{
+				end_transfer = 1;
+				dm365_abort_data(host, data);
+			}
+			else
+				end_command = 1;
+		}
+	}
+
+	if (qstatus & MMCST0_CRCRS) 
+	{
+		/* Command CRC error */
+		mmc_dbg("Command CRC error\n");
+		if (host->cmd) 
+		{
+			host->cmd->err = -RT_ERROR;
+			end_command = 1;
+		}
+	}
+
+	if (qstatus & MMCST0_RSPDNE) 
+	{
+		/* End of command phase */
+		end_command = (int) host->cmd;
+	}
+
+	if (end_command)
+		mmc_dm365_cmd_done(host, host->cmd);
+	if (end_transfer)
+		mmc_dm365_xfer_done(host, data);
+
+	return;
+}
+#if 0
+
+/*******************************************************************************************************
+** Function:    rt_hw_edma_init()
+** Description: Disabled legacy EDMA3 initialization (power up the TPCC/TPTC0
+**              power domains and initialize the EDMA3 controller).
+**
+** Input:       none
+**
+** Output:      none
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+static void rt_hw_edma_init(void)
+{
+	psc_transition(PSC0, DOMAIN0, LPSC_TPCC, PSC_ENABLE);
+	psc_transition(PSC0, DOMAIN0, LPSC_TPTC0, PSC_ENABLE);
+
+    /* Initialization of EDMA3 */    
+    edma3_init(EDMA0CC0_REG_BASE, EVT_QUEUE_NUM);
+   
+    /* Register EDMA3 Interrupts */
+//	ConfigureAINTCIntEDMA3();
+}
+#endif
+/*******************************************************************************************************
+** Function:    rt_hw_mmcsd_init()
+** Description: Initialize the MMC driver module: allocate the host objects,
+**              power on the controller, set up DMA channels and the bounce
+**              buffer, register interrupt handlers and announce the host to
+**              the mmcsd core.
+**
+** Input:       none
+**
+** Output:      0 on success, -RT_ERROR if host allocation fails,
+**              -RT_ENOMEM if the private host structure cannot be allocated
+**
+** Globals:     none
+** Modules:     none
+**
+********************************************************************************************************/
+rt_int32_t rt_hw_mmcsd_init(void)
+{
+	struct clk  *clk;
+	struct mmc_dm365_host *dm365_host;
+	struct rt_mmcsd_host *mmc = RT_NULL;
+
+	mmc = mmcsd_alloc_host();
+	if (!mmc) 
+	{
+		mmc_dbg("alloc mmc failed\n");
+		return -RT_ERROR;
+	}
+
+	dm365_host = rt_malloc(sizeof(struct mmc_dm365_host));
+	if (!dm365_host) 
+	{
+		mmc_dbg("alloc mci failed\n");
+		goto err;
+	}
+
+	rt_memset(dm365_host, 0, sizeof(struct mmc_dm365_host));
+
+#ifdef RT_USING_MMCSD0
+	//psc_transition(PSC0, DOMAIN0, LPSC_MMCSD0, PSC_ENABLE);
+	//pinmux_config(PINMUX_MMCSD0_REG, PINMUX_MMCSD0_MASK, PINMUX_MMCSD0_VAL);
+	psc_change_state(DAVINCI_DM365_LPSC_MMC_SD0, PSC_ENABLE);
+	dm365_host->mmcsd_regs = (mmcsd_regs_t *)DM365_MMC_SD0_BASE;
+#else
+#ifdef RT_USING_MMCSD1
+	psc_transition(PSC1, DOMAIN0, LPSC_MMCSD1, PSC_ENABLE);
+	pinmux_config(PINMUX_MMCSD1_REG, PINMUX_MMCSD1_MASK, PINMUX_MMCSD1_VAL);
+	dm365_host->mmcsd_regs = MMCSD1;
+#endif
+#endif
+
+	//rt_hw_edma_init();
+
+	clk = clk_get("MMCSDCLK0");
+	dm365_host->mmc_input_clk = clk_get_rate(clk);
+	dm365_host->rxdma = DM365_DMA_MMC0RXEVT;
+	dm365_host->txdma = DM365_DMA_MMC0TXEVT;
+	dm365_host->use_dma = use_dma;
+	/* NOTE(review): when use_dma is initially RT_FALSE the else branch still
+	 * allocates the 64 KiB bounce buffer — confirm this is intended. */
+	if ((dm365_host->use_dma == RT_TRUE)&& acquire_dma_channels(dm365_host) != 0)
+	{
+		dm365_host->use_dma = RT_FALSE;
+	}
+	else
+	{
+		dm365_host->dma_buffer = (rt_uint8_t*)rt_malloc_align(64*1024, 32);
+		if (dm365_host->dma_buffer == RT_NULL)
+			dm365_host->use_dma = RT_FALSE;
+	}
+
+	mmc->ops = &mmc_dm365_ops;
+	mmc->freq_min = 312500;
+	mmc->freq_max = 25000000;
+	mmc->valid_ocr = VDD_32_33 | VDD_33_34;
+	mmc->flags = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE;
+	mmc->flags |= MMCSD_SUP_SDIO_IRQ;
+
+	dm365_host->mmc = mmc;
+	mmc->private_data = dm365_host;
+
+	/* install interrupt */
+#ifdef RT_USING_MMCSD0
+	rt_hw_interrupt_install(IRQ_DM3XX_MMCINT0, mmc_dm365_irq, 
+							(void *)dm365_host, "MMC0");
+	rt_hw_interrupt_umask(IRQ_DM3XX_MMCINT0);
+	rt_hw_interrupt_install(IRQ_DM3XX_SDIOINT0, mmc_dm365_sdio_irq, 
+							(void *)dm365_host, "SDIO0");
+	rt_hw_interrupt_umask(IRQ_DM3XX_SDIOINT0);
+#endif
+#ifdef RT_USING_MMCSD1
+	rt_hw_interrupt_install(MMCSD_INT1, mmc_dm365_irq, 
+							(void *)dm365_host, "MMC1");
+	rt_hw_interrupt_umask(MMCSD_INT1);
+#endif
+
+	init_mmcsd_host(dm365_host);
+
+	mmcsd_change(mmc);
+
+	return 0;
+
+err:
+	mmcsd_free_host(mmc);
+
+	return -RT_ENOMEM;
+}

+ 122 - 0
bsp/dm365/drivers/mmcsd.h

@@ -0,0 +1,122 @@
+
+#ifndef __DAVINCI_MMC_H__
+#define __DAVINCI_MMC_H__
+
+/* DAVINCI_MMCCTL definitions */
+#define MMCCTL_DATRST         (1 << 0)
+#define MMCCTL_CMDRST         (1 << 1)
+#define MMCCTL_WIDTH_8_BIT    (1 << 8)
+#define MMCCTL_WIDTH_4_BIT    (1 << 2)
+#define MMCCTL_DATEG_DISABLED (0 << 6)
+#define MMCCTL_DATEG_RISING   (1 << 6)
+#define MMCCTL_DATEG_FALLING  (2 << 6)
+#define MMCCTL_DATEG_BOTH     (3 << 6)
+#define MMCCTL_PERMDR_LE      (0 << 9)
+#define MMCCTL_PERMDR_BE      (1 << 9)
+#define MMCCTL_PERMDX_LE      (0 << 10)
+#define MMCCTL_PERMDX_BE      (1 << 10)
+
+/* DAVINCI_MMCCLK definitions */
+#define MMCCLK_CLKEN          (1 << 8)
+#define MMCCLK_CLKRT_MASK     (0xFF << 0)
+
+/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
+#define MMCST0_DATDNE         (1 << 0)	/* data done */
+#define MMCST0_BSYDNE         (1 << 1)	/* busy done */
+#define MMCST0_RSPDNE         (1 << 2)	/* command done */
+#define MMCST0_TOUTRD         (1 << 3)	/* data read timeout */
+#define MMCST0_TOUTRS         (1 << 4)	/* command response timeout */
+#define MMCST0_CRCWR          (1 << 5)	/* data write CRC error */
+#define MMCST0_CRCRD          (1 << 6)	/* data read CRC error */
+#define MMCST0_CRCRS          (1 << 7)	/* command response CRC error */
+#define MMCST0_DXRDY          (1 << 9)	/* data transmit ready (fifo empty) */
+#define MMCST0_DRRDY          (1 << 10)	/* data receive ready (data in fifo)*/
+#define MMCST0_DATED          (1 << 11)	/* DAT3 edge detect */
+#define MMCST0_TRNDNE         (1 << 12)	/* transfer done */
+
+/* DAVINCI_MMCST1 definitions */
+#define MMCST1_BUSY           (1 << 0)
+
+/* DAVINCI_MMCCMD definitions */
+#define MMCCMD_CMD_MASK       (0x3F << 0)
+#define MMCCMD_PPLEN          (1 << 7)
+#define MMCCMD_BSYEXP         (1 << 8)
+#define MMCCMD_RSPFMT_MASK    (3 << 9)
+#define MMCCMD_RSPFMT_NONE    (0 << 9)
+#define MMCCMD_RSPFMT_R1456   (1 << 9)
+#define MMCCMD_RSPFMT_R2      (2 << 9)
+#define MMCCMD_RSPFMT_R3      (3 << 9)
+#define MMCCMD_DTRW           (1 << 11)
+#define MMCCMD_STRMTP         (1 << 12)
+#define MMCCMD_WDATX          (1 << 13)
+#define MMCCMD_INITCK         (1 << 14)
+#define MMCCMD_DCLR           (1 << 15)
+#define MMCCMD_DMATRIG        (1 << 16)
+
+/* DAVINCI_MMCFIFOCTL definitions */
+#define MMCFIFOCTL_FIFORST    (1 << 0)
+#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
+#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
+#define MMCFIFOCTL_FIFOLEV    (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
+#define MMCFIFOCTL_ACCWD_4    (0 << 3) /* access width of 4 bytes    */
+#define MMCFIFOCTL_ACCWD_3    (1 << 3) /* access width of 3 bytes    */
+#define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
+#define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */
+
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI       (1 << 0)
+#define SDIOST0_INTPRD        (1 << 1)
+#define SDIOST0_RDWTST        (1 << 2)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN       (1 << 0)
+#define SDIOIEN_RWSEN         (1 << 1)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT         (1 << 0)
+#define SDIOIST_RWS           (1 << 1)
+
+/* MMCSD Init clock in Hz in opendrain mode */
+#define MMCSD_INIT_CLOCK		200000
+
+#define MAX_CCNT	((1 << 16) - 1)
+
+#define MAX_NR_SG	16
+
+#define MMC_DATA_WRITE	(1 << 8)
+#define MMC_DATA_READ	(1 << 9)
+#define MMC_DATA_STREAM	(1 << 10)
+
/*
 * Memory-mapped register layout of the DaVinci MMC/SD controller.
 * An instance of this struct is overlaid directly on the peripheral base
 * address (e.g. DM365_MMC_SD0_BASE), so the field order and the reserved
 * padding words must exactly mirror the hardware register map — do not
 * reorder or resize fields.  Bit definitions for the individual registers
 * are the MMCCTL/MMCCLK/MMCST0/... macros above.
 */
typedef struct {
	volatile rt_uint32_t MMCCTL;     /* control: resets, bus width, endianness */
	volatile rt_uint32_t MMCCLK;     /* clock enable + divider (MMCCLK_* bits) */
	volatile rt_uint32_t MMCST0;     /* interrupt status (MMCST0_* bits) */
	volatile rt_uint32_t MMCST1;     /* status, e.g. MMCST1_BUSY */
	volatile rt_uint32_t MMCIM;      /* interrupt mask, same bits as MMCST0 */
	volatile rt_uint32_t MMCTOR;     /* response time-out register */
	volatile rt_uint32_t MMCTOD;     /* data time-out register */
	volatile rt_uint32_t MMCBLEN;    /* block length */
	volatile rt_uint32_t MMCNBLK;    /* number of blocks */
	volatile rt_uint32_t MMCNBLC;    /* blocks remaining counter */
	volatile rt_uint32_t MMCDRR;     /* data receive register */
	volatile rt_uint32_t MMCDXR;     /* data transmit register */
	volatile rt_uint32_t MMCCMD;     /* command register (MMCCMD_* bits) */
	volatile rt_uint32_t MMCARGHL;   /* command argument */
	volatile rt_uint32_t MMCRSP01;   /* response words 0/1 */
	volatile rt_uint32_t MMCRSP23;   /* response words 2/3 */
	volatile rt_uint32_t MMCRSP45;   /* response words 4/5 */
	volatile rt_uint32_t MMCRSP67;   /* response words 6/7 */
	volatile rt_uint32_t MMCDRSP;    /* data response */
	volatile rt_uint32_t reserved0;
	volatile rt_uint32_t MMCCIDX;    /* command index */
	volatile rt_uint32_t reserved1[4];
	volatile rt_uint32_t SDIOCTL;    /* SDIO control */
	volatile rt_uint32_t SDIOST0;    /* SDIO status (SDIOST0_* bits) */
	volatile rt_uint32_t SDIOIEN;    /* SDIO interrupt enable (SDIOIEN_* bits) */
	volatile rt_uint32_t SDIOIST;    /* SDIO interrupt status (SDIOIST_* bits) */
	volatile rt_uint32_t MMCFIFOCTL; /* FIFO control (MMCFIFOCTL_* bits) */
}mmcsd_regs_t;
+
+extern rt_int32_t rt_hw_mmcsd_init(void);
+
+#endif

+ 939 - 0
bsp/dm365/drivers/spi-davinci.c

@@ -0,0 +1,939 @@
+#include <rthw.h>
+#include <rtthread.h>
+#include <rtdevice.h>
+#include <dm36x.h>
+#include <edma.h>
+#include "spi-davinci.h"
+
+#define unlikely(x) x
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+#define cpu_relax() barrier()
+
+#define SPI_DEBUG			0	
+#if SPI_DEBUG
+#define spi_dbg(dev, fmt, ...) \
+	do { \
+		rt_kprintf("%s:", dev->parent.name); \
+		rt_kprintf(fmt, ##__VA_ARGS__); \
+	} while(0)
+#else
+#define spi_dbg(dev, fmt, ...)
+#endif
+
+#define SZ_64K 0x10000
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define SPI_NO_RESOURCE		((resource_size_t)-1)
+
+#define SPI_MAX_CHIPSELECT	2
+
+#define CS_DEFAULT	0xFF
+
+#define __iomem
+#define BIT(nr)			(1UL << (nr))
+
+#define SPIFMT_PHASE_MASK	BIT(16)
+#define SPIFMT_POLARITY_MASK	BIT(17)
+#define SPIFMT_DISTIMER_MASK	BIT(18)
+#define SPIFMT_SHIFTDIR_MASK	BIT(20)
+#define SPIFMT_WAITENA_MASK	BIT(21)
+#define SPIFMT_PARITYENA_MASK	BIT(22)
+#define SPIFMT_ODD_PARITY_MASK	BIT(23)
+#define SPIFMT_WDELAY_MASK	0x3f000000u
+#define SPIFMT_WDELAY_SHIFT	24
+#define SPIFMT_PRESCALE_SHIFT	8
+
+/* SPIPC0 */
+#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
+#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
+#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
+#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */
+
+#define SPIINT_MASKALL		0x0101035F
+#define SPIINT_MASKINT		0x0000015F
+#define SPI_INTLVL_1		0x000001FF
+#define SPI_INTLVL_0		0x00000000
+
+/* SPIDAT1 (upper 16 bit defines) */
+#define SPIDAT1_CSHOLD_MASK	BIT(12)
+
+/* SPIGCR1 */
+#define SPIGCR1_CLKMOD_MASK	BIT(1)
+#define SPIGCR1_MASTER_MASK     BIT(0)
+#define SPIGCR1_POWERDOWN_MASK	BIT(8)
+#define SPIGCR1_LOOPBACK_MASK	BIT(16)
+#define SPIGCR1_SPIENA_MASK	BIT(24)
+
+/* SPIBUF */
+#define SPIBUF_TXFULL_MASK	BIT(29)
+#define SPIBUF_RXEMPTY_MASK	BIT(31)
+
+/* SPIDELAY */
+#define SPIDELAY_C2TDELAY_SHIFT 24
+#define SPIDELAY_C2TDELAY_MASK  (0xFF << SPIDELAY_C2TDELAY_SHIFT)
+#define SPIDELAY_T2CDELAY_SHIFT 16
+#define SPIDELAY_T2CDELAY_MASK  (0xFF << SPIDELAY_T2CDELAY_SHIFT)
+#define SPIDELAY_T2EDELAY_SHIFT 8
+#define SPIDELAY_T2EDELAY_MASK  (0xFF << SPIDELAY_T2EDELAY_SHIFT)
+#define SPIDELAY_C2EDELAY_SHIFT 0
+#define SPIDELAY_C2EDELAY_MASK  0xFF
+
+/* Error Masks */
+#define SPIFLG_DLEN_ERR_MASK		BIT(0)
+#define SPIFLG_TIMEOUT_MASK		BIT(1)
+#define SPIFLG_PARERR_MASK		BIT(2)
+#define SPIFLG_DESYNC_MASK		BIT(3)
+#define SPIFLG_BITERR_MASK		BIT(4)
+#define SPIFLG_OVRRUN_MASK		BIT(6)
+#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
+#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
+				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
+				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
+				| SPIFLG_OVRRUN_MASK)
+
+#define SPIINT_DMA_REQ_EN	BIT(16)
+
+/* SPI Controller registers */
+#define SPIGCR0		0x00
+#define SPIGCR1		0x04
+#define SPIINT		0x08
+#define SPILVL		0x0c
+#define SPIFLG		0x10
+#define SPIPC0		0x14
+#define SPIDAT1		0x3c
+#define SPIBUF		0x40
+#define SPIDELAY	0x48
+#define SPIDEF		0x4c
+#define SPIFMT0		0x50
+
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int			tx_channel;        /* EDMA channel that feeds SPIDAT1 */
	int			rx_channel;        /* EDMA channel that drains SPIBUF */
	int			dummy_param_slot;  /* self-linked PaRAM slot the TX channel parks on */
	enum dma_event_q	eventq;    /* EDMA event queue used by both channels */
};
+
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct rt_spi_bus parent;      /* RT-Thread SPI bus object */
	struct clk		*clk;          /* module functional clock (for prescale calc) */

	u8			version;           /* SPI_VERSION_1 or SPI_VERSION_2 */
	void __iomem		*base;     /* controller register base address */
	u32			irq;               /* controller interrupt number */
	struct rt_completion	done;  /* signalled when a transfer completes */

	/* Cursors into the caller's buffers, advanced by get_rx/get_tx. */
	const void		*tx;
	void			*rx;
#define SMP_CACHE_BYTES 32
#define SPI_TMP_BUFSZ	(SMP_CACHE_BYTES + 1)
	/* RX DMA sink when the caller supplies no receive buffer (bidx=0,
	 * so the address never increments past this scratch area). */
	u8			rx_tmp_buf[SPI_TMP_BUFSZ];
	int			rcount;            /* words still to receive */
	int			wcount;            /* words still to transmit */
	struct davinci_spi_dma	dma;

	/* Per-word move helpers, selected by configured data width. */
	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	u8			bytes_per_word[SPI_MAX_CHIPSELECT]; /* 1 or 2, per chip select */
	u8			chip_sel[SPI_MAX_CHIPSELECT];       /* GPIO number, or SPI_INTERN_CS */
	struct davinci_spi_config *controller_data; /* optional per-controller config */
	int			cshold_bug;        /* if set: pre-write CS half of SPIDAT1 before DMA */
};
+
+static struct davinci_spi_config davinci_spi_default_cfg;
+
+extern void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size);
+extern void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size);
+
+static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
+{
+	if (dspi->rx) {
+		u8 *rx = dspi->rx;
+		*rx++ = (u8)data;
+		dspi->rx = rx;
+	}
+}
+
+static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
+{
+	if (dspi->rx) {
+		u16 *rx = dspi->rx;
+		*rx++ = (u16)data;
+		dspi->rx = rx;
+	}
+}
+
+static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
+{
+	u32 data = 0;
+	if (dspi->tx) {
+		const u8 *tx = dspi->tx;
+		data = *tx++;
+		dspi->tx = tx;
+	}
+	return data;
+}
+
+static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
+{
+	u32 data = 0;
+	if (dspi->tx) {
+		const u16 *tx = dspi->tx;
+		data = *tx++;
+		dspi->tx = tx;
+	}
+	return data;
+}
+
+static inline void set_io_bits(void __iomem *addr, u32 bits)
+{
+	u32 v = readl(addr);
+
+	v |= bits;
+	writel(v, addr);
+}
+
+static inline void clear_io_bits(void __iomem *addr, u32 bits)
+{
+	u32 v = readl(addr);
+
+	v &= ~bits;
+	writel(v, addr);
+}
+
+/*
+ * Interface to control the chip select signal
+ */
+static void davinci_spi_chipselect(struct rt_spi_device *spi, int value)
+{
+	struct davinci_spi *dspi;
+	u8 chip_sel = (u8)spi->parent.user_data;
+	u16 spidat1 = CS_DEFAULT;
+	bool gpio_chipsel = RT_FALSE;
+
+	dspi = spi->bus->parent.user_data;
+
+	if (chip_sel < SPI_MAX_CHIPSELECT &&
+				dspi->chip_sel[chip_sel] != SPI_INTERN_CS)
+		gpio_chipsel = RT_TRUE;
+
+	/*
+	 * Board specific chip select logic decides the polarity and cs
+	 * line for the controller
+	 */
+	if (gpio_chipsel) {
+		if (value == 0)
+			gpio_set_value(dspi->chip_sel[chip_sel], 0);
+		else
+			gpio_set_value(dspi->chip_sel[chip_sel], 1);
+	} else {
+		spidat1 = readw(dspi->base + SPIDAT1 + 2);
+		if (value == 0) {
+			spidat1 |= SPIDAT1_CSHOLD_MASK;
+			spidat1 &= ~(0x1 << chip_sel);
+		} else {
+			spidat1 &= ~SPIDAT1_CSHOLD_MASK;
+			spidat1 |= 0x03;
+		}
+		rt_kprintf("0x%04x\n", spidat1);
+
+		writew(spidat1, dspi->base + SPIDAT1 + 2);
+	}
+}
+
+/**
+ * davinci_spi_get_prescale - Calculates the correct prescale value
+ * @maxspeed_hz: the maximum rate the SPI clock can run at
+ *
+ * This function calculates the prescale value that generates a clock rate
+ * less than or equal to the specified maximum.
+ *
+ * Returns: calculated prescale - 1 for easy programming into SPI registers
+ * or negative error number if valid prescalar cannot be updated.
+ */
+static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
+							u32 max_speed_hz)
+{
+	int ret;
+
+	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
+
+	if (ret < 3) {
+		rt_kprintf("spi clock freq too high\n");
+		ret = 3;
+	}
+	if (ret > 256) {
+		rt_kprintf("spi clock freq too litter\n");
+		ret = 256;
+	}
+
+	/*if (ret < 3 || ret > 256)
+		return -RT_ERROR;*/
+
+	return ret - 1;
+}
+
+/**
+ * davinci_spi_setup_transfer - This functions will determine transfer method
+ * @spi: spi device on which data transfer to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function determines data transfer method (8/16/32 bit transfer).
+ * It will also set the SPI Clock Control register according to
+ * SPI slave device freq.
+ */
+static int davinci_spi_setup_transfer(struct rt_spi_device *spi,
+		struct rt_spi_configuration *cfg)
+{
+
+	struct davinci_spi *dspi;
+	struct davinci_spi_config *spicfg;
+	u8 bits_per_word = 0;
+	u32 hz = 0, spifmt = 0, prescale = 0;
+	u8 chip_select = (u8)spi->parent.user_data;
+
+	dspi = spi->bus->parent.user_data;
+
+	bits_per_word = cfg->data_width;
+	hz = cfg->max_hz;
+
+	/*
+	 * Assign function pointer to appropriate transfer method
+	 * 8bit, 16bit or 32bit transfer
+	 */
+	if (bits_per_word <= 8 && bits_per_word >= 2) {
+		dspi->get_rx = davinci_spi_rx_buf_u8;
+		dspi->get_tx = davinci_spi_tx_buf_u8;
+		dspi->bytes_per_word[chip_select] = 1;
+	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
+		dspi->get_rx = davinci_spi_rx_buf_u16;
+		dspi->get_tx = davinci_spi_tx_buf_u16;
+		dspi->bytes_per_word[chip_select] = 2;
+	} else
+		return -RT_ERROR;
+
+	/* Set up SPIFMTn register, unique to this chipselect. */
+
+	prescale = davinci_spi_get_prescale(dspi, hz);
+	if (prescale < 0)
+		return prescale;
+
+	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
+
+	if (!(cfg->mode & RT_SPI_MSB))
+		spifmt |= SPIFMT_SHIFTDIR_MASK;
+
+	if (cfg->mode & RT_SPI_CPOL)
+		spifmt |= SPIFMT_POLARITY_MASK;
+
+	if (!(cfg->mode & RT_SPI_CPHA))
+		spifmt |= SPIFMT_PHASE_MASK;
+
+	/*
+	 * Version 1 hardware supports two basic SPI modes:
+	 *  - Standard SPI mode uses 4 pins, with chipselect
+	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
+	 *	(distinct from SPI_3WIRE, with just one data wire;
+	 *	or similar variants without MOSI or without MISO)
+	 *
+	 * Version 2 hardware supports an optional handshaking signal,
+	 * so it can support two more modes:
+	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
+	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
+	 */
+
+	if (dspi->version == SPI_VERSION_2) {
+
+		u32 delay = 0;
+
+		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
+							& SPIFMT_WDELAY_MASK);
+
+		if (spicfg->odd_parity)
+			spifmt |= SPIFMT_ODD_PARITY_MASK;
+
+		if (spicfg->parity_enable)
+			spifmt |= SPIFMT_PARITYENA_MASK;
+
+		if (spicfg->timer_disable) {
+			spifmt |= SPIFMT_DISTIMER_MASK;
+		} else {
+			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
+						& SPIDELAY_C2TDELAY_MASK;
+			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
+						& SPIDELAY_T2CDELAY_MASK;
+		}
+
+		if (cfg->mode & RT_SPI_READY) {
+			spifmt |= SPIFMT_WAITENA_MASK;
+			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
+						& SPIDELAY_T2EDELAY_MASK;
+			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
+						& SPIDELAY_C2EDELAY_MASK;
+		}
+
+		writel(delay, dspi->base + SPIDELAY);
+	}
+
+	writel(spifmt, dspi->base + SPIFMT0);
+
+	return 0;
+}
+
#if 0
/**
 * davinci_spi_setup - This functions will set default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This functions sets the default transfer method.
 *
 * NOTE(review): compiled out.  Carried over verbatim from the Linux
 * spi-davinci driver; it uses Linux-only APIs (spi_device, spi_master,
 * spi_master_get_devdata) that do not exist in this RT-Thread port.
 * Kept for reference only — candidate for deletion.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval = 0;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;

	dspi = spi_master_get_devdata(spi->master);
	pdata = dspi->pdata;

	/* if bits per word length is zero then set it default 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!(spi->mode & SPI_NO_CS)) {
		if ((pdata->chip_sel == NULL) ||
		    (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);

	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return retval;
}
#endif
+
+static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
+{
+	struct rt_device *sdev = &dspi->parent.parent;
+
+	if (int_status & SPIFLG_TIMEOUT_MASK) {
+		spi_dbg(sdev, "SPI Time-out Error\n");
+		return -RT_ETIMEOUT;
+	}
+	if (int_status & SPIFLG_DESYNC_MASK) {
+		spi_dbg(sdev, "SPI Desynchronization Error\n");
+		return -RT_EIO;
+	}
+	if (int_status & SPIFLG_BITERR_MASK) {
+		spi_dbg(sdev, "SPI Bit error\n");
+		return -RT_EIO;
+	}
+
+	if (dspi->version == SPI_VERSION_2) {
+		if (int_status & SPIFLG_DLEN_ERR_MASK) {
+			spi_dbg(sdev, "SPI Data Length Error\n");
+			return -RT_EIO;
+		}
+		if (int_status & SPIFLG_PARERR_MASK) {
+			spi_dbg(sdev, "SPI Parity Error\n");
+			return -RT_EIO;
+		}
+		if (int_status & SPIFLG_OVRRUN_MASK) {
+			spi_dbg(sdev, "SPI Data Overrun error\n");
+			return -RT_EIO;
+		}
+		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
+			spi_dbg(sdev, "SPI Buffer Init Active\n");
+			return -RT_EBUSY;
+		}
+	}
+
+	return 0;
+}
+
/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there.  At most one word is drained from SPIBUF and at most one
 * word is pushed into SPIDAT1 per call; the caller (poll loop or ISR) keeps
 * invoking it until rcount and wcount reach zero.
 *
 * Returns 0, or the raw SPIFLG error bits if any error flag was set.
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	/* Reading SPIBUF also reports RXEMPTY/TXFULL in the top bits. */
	buf = readl(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = readl(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	/* Preserve the chip-select upper halfword, replace only the data. */
	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = readl(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		writel(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}
+
/*
 * EDMA completion callback, shared by the RX and TX channels.
 * Runs in interrupt context.  A transfer is considered finished only when
 * BOTH directions have completed — or as soon as either channel reports an
 * error — at which point the thread blocked in davinci_spi_bufs() is
 * released via the completion.
 */
static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
{
	struct davinci_spi *dspi = data;
	struct davinci_spi_dma *dma = &dspi->dma;

	edma_stop(lch);

	if (status == DMA_COMPLETE) {
		/* Mark the finished direction as fully drained. */
		if (lch == dma->rx_channel)
			dspi->rcount = 0;
		if (lch == dma->tx_channel)
			dspi->wcount = 0;
	}

	if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
		rt_completion_done(&dspi->done);
}
+
+/**
+ * davinci_spi_bufs - functions which will handle transfer data
+ * @spi: spi device on which data transfer to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function will put data to be transferred into data register
+ * of SPI controller and then wait until the completion will be marked
+ * by the IRQ Handler.
+ */
+static int davinci_spi_bufs(struct rt_spi_device *spi, struct rt_spi_message *msg)
+{
+	struct davinci_spi *dspi;
+	int data_type, ret;
+	u32 tx_data, spidat1;
+	u32 errors = 0;
+	struct davinci_spi_config *spicfg;
+	unsigned rx_buf_count;
+	struct rt_device *sdev;
+	u8 chip_select = (u8)spi->parent.user_data;
+
+	dspi = spi->bus->parent.user_data;
+	spicfg = (struct davinci_spi_config *)dspi->controller_data;
+	if (!spicfg)
+		spicfg = &davinci_spi_default_cfg;
+	sdev = &dspi->parent.parent;
+
+	/* convert len to words based on bits_per_word */
+	data_type = dspi->bytes_per_word[chip_select];
+
+	dspi->tx = msg->send_buf;
+	dspi->rx = msg->recv_buf;
+	dspi->wcount = msg->length / data_type;
+	dspi->rcount = dspi->wcount;
+
+	spidat1 = readl(dspi->base + SPIDAT1);
+
+	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
+	rt_completion_init(&(dspi->done));
+
+	if (msg->cs_take)
+		davinci_spi_chipselect(spi, 0);
+
+	if (spicfg->io_type == SPI_IO_TYPE_INTR)
+		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
+
+	if (msg->length > 0) {
+		if (spicfg->io_type != SPI_IO_TYPE_DMA) {
+			/* start the transfer */
+			dspi->wcount--;
+			tx_data = dspi->get_tx(dspi);
+			spidat1 &= 0xFFFF0000;
+			spidat1 |= tx_data & 0xFFFF;
+			writel(spidat1, dspi->base + SPIDAT1);
+		} else {
+			struct davinci_spi_dma *dma;
+			unsigned long tx_reg, rx_reg;
+			struct edmacc_param param;
+			void *rx_buf;
+			int b, c;
+
+			dma = &dspi->dma;
+
+			tx_reg = (unsigned long)dspi->base + SPIDAT1;
+			rx_reg = (unsigned long)dspi->base + SPIBUF;
+
+			/*
+			 * Transmit DMA setup
+			 *
+			 * If there is transmit data, map the transmit buffer, set it
+			 * as the source of data and set the source B index to data
+			 * size. If there is no transmit data, set the transmit register
+			 * as the source of data, and set the source B index to zero.
+			 *
+			 * The destination is always the transmit register itself. And
+			 * the destination never increments.
+			 */
+
+			if (msg->send_buf) {
+				mmu_clean_dcache((rt_uint32_t)msg->send_buf, (rt_uint32_t)msg->length);
+			}
+
+			/*
+			 * If number of words is greater than 65535, then we need
+			 * to configure a 3 dimension transfer.  Use the BCNTRLD
+			 * feature to allow for transfers that aren't even multiples
+			 * of 65535 (or any other possible b size) by first transferring
+			 * the remainder amount then grabbing the next N blocks of
+			 * 65535 words.
+			 */
+
+			c = dspi->wcount / (SZ_64K - 1);	/* N 65535 Blocks */
+			b = dspi->wcount - c * (SZ_64K - 1);	/* Remainder */
+			if (b)
+				c++;
+			else
+				b = SZ_64K - 1;
+
+			param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
+			param.src = msg->send_buf ? msg->send_buf : tx_reg;
+			param.a_b_cnt = b << 16 | data_type;
+			param.dst = tx_reg;
+			param.src_dst_bidx = msg->send_buf ? data_type : 0;
+			param.link_bcntrld = 0xffffffff;
+			param.src_dst_cidx = msg->send_buf ? data_type : 0;
+			param.ccnt = c;
+			edma_write_slot(dma->tx_channel, &param);
+			edma_link(dma->tx_channel, dma->dummy_param_slot);
+
+			/*
+			 * Receive DMA setup
+			 *
+			 * If there is receive buffer, use it to receive data. If there
+			 * is none provided, use a temporary receive buffer. Set the
+			 * destination B index to 0 so effectively only one byte is used
+			 * in the temporary buffer (address does not increment).
+			 *
+			 * The source of receive data is the receive data register. The
+			 * source address never increments.
+			 */
+
+			if (msg->recv_buf) {
+				rx_buf = msg->recv_buf;
+				rx_buf_count = msg->length;
+			} else {
+				rx_buf = dspi->rx_tmp_buf;
+				rx_buf_count = sizeof(dspi->rx_tmp_buf);
+			}
+
+			mmu_invalidate_dcache((rt_uint32_t)rx_buf, (rt_uint32_t)rx_buf_count);
+
+			param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
+			param.src = rx_reg;
+			param.a_b_cnt = b << 16 | data_type;
+			param.dst = rx_buf;
+			param.src_dst_bidx = (msg->recv_buf ? data_type : 0) << 16;
+			param.link_bcntrld = 0xffffffff;
+			param.src_dst_cidx = (msg->recv_buf ? data_type : 0) << 16;
+			param.ccnt = c;
+			edma_write_slot(dma->rx_channel, &param);
+
+			if (dspi->cshold_bug)
+				writew(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
+
+			edma_start(dma->rx_channel);
+			edma_start(dma->tx_channel);
+			set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+		}
+
+		/* Wait for the transfer to complete */
+		if (spicfg->io_type != SPI_IO_TYPE_POLL) {
+			rt_completion_wait(&(dspi->done), RT_WAITING_FOREVER);
+		} else {
+			while (dspi->rcount > 0 || dspi->wcount > 0) {
+				errors = davinci_spi_process_events(dspi);
+				if (errors)
+					break;
+				cpu_relax();
+			}
+		}
+	}
+
+	if (msg->cs_release)
+		davinci_spi_chipselect(spi, 1);
+
+	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
+	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
+		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+	}
+
+	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
+	/*
+	 * Check for bit error, desync error,parity error,timeout error and
+	 * receive overflow errors
+	 */
+	if (errors) {
+		ret = davinci_spi_check_error(dspi, errors);
+		rt_kprintf("%s: error reported but no error found!\n",
+							spi->bus->parent.parent.name);
+		return ret;
+	}
+
+	if (dspi->rcount != 0 || dspi->wcount != 0) {
+		spi_dbg(sdev, "SPI data transfer error\n");
+		return -RT_EIO;
+	}
+
+	return msg->length;
+}
+
/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: controller private data (struct davinci_spi *)
 *
 * ISR will determine that interrupt arrives either for READ or WRITE command.
 * According to command it will do the appropriate action. It will check
 * transfer length and if it is not zero then dispatch transfer command again.
 * If transfer length is zero then it will indicate the COMPLETION so that
 * davinci_spi_bufs function can go ahead.
 */
static void davinci_spi_irq(int irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	/* Move one word each way; non-zero status means SPIFLG error bits. */
	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		/* On error, stop further SPI interrupts for this transfer. */
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	/* Wake the waiting thread when done or on error. */
	if ((!dspi->rcount && !dspi->wcount) || status)
		rt_completion_done(&dspi->done);
}
+
+static int davinci_spi_request_dma(struct davinci_spi *dspi)
+{
+	int r;
+	struct davinci_spi_dma *dma = &dspi->dma;
+
+	r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
+								dma->eventq);
+	if (r < 0) {
+		rt_kprintf("Unable to request DMA channel for SPI RX\n");
+		r = -RT_EFULL;
+		goto rx_dma_failed;
+	}
+
+	r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
+								dma->eventq);
+	if (r < 0) {
+		rt_kprintf("Unable to request DMA channel for SPI TX\n");
+		r = -RT_EFULL;
+		goto tx_dma_failed;
+	}
+
+	r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
+	if (r < 0) {
+		rt_kprintf("Unable to request SPI TX DMA param slot\n");
+		r = -RT_EFULL;
+		goto param_failed;
+	}
+	dma->dummy_param_slot = r;
+	edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
+
+	return 0;
+param_failed:
+	edma_free_channel(dma->tx_channel);
+tx_dma_failed:
+	edma_free_channel(dma->rx_channel);
+rx_dma_failed:
+	return r;
+}
+
+static rt_err_t configure(struct rt_spi_device *device,
+                          struct rt_spi_configuration *configuration)
+{
+	return davinci_spi_setup_transfer(device, configuration);
+}
+
+static rt_uint32_t xfer(struct rt_spi_device *device, struct rt_spi_message *message)
+{
+	return davinci_spi_bufs(device, message);
+};
+
+
+
+static struct rt_spi_ops davinci_spi_ops =
+{
+    configure,
+    xfer
+};
+
+static void udelay (volatile rt_uint32_t us)
+{
+	volatile rt_int32_t i;
+	for (; us > 0; us--)
+	{
+		i = 5000;
+		while(i > 0)
+		{
+			i--;
+		}
+	}
+}
+
/*
 * Route the DM365 pin multiplexer to the SPI1 function: set PINMUX3
 * bit 31 and program PINMUX4 bits [5:0].
 * NOTE(review): the magic values are taken as-is from board bring-up;
 * verify against the TMS320DM36x pinmux register documentation.
 */
void spi_pin_cfg(void)
{
	rt_uint32_t val;

	val = davinci_readl(PINMUX3);
	val |= 0x80000000; /* SPI1 */
	davinci_writel(val, PINMUX3);

	val = davinci_readl(PINMUX4);
	val &= 0xffffffc0; /* SPI1 */
	val |= 0x05;//0x00000015; /* SPI1 */
	davinci_writel(val, PINMUX4);
}
+
+/**
+ * davinci_spi_probe - probe function for SPI Master Controller
+ * @pdev: platform_device structure which contains plateform specific data
+ *
+ * According to Linux Device Model this function will be invoked by Linux
+ * with platform_device struct which contains the device specific info.
+ * This function will map the SPI controller's memory, register IRQ,
+ * Reset SPI controller and setting its registers to default value.
+ * It will invoke spi_bitbang_start to create work queue so that client driver
+ * can register transfer method to work queue.
+ */
+static int davinci_spi_probe(struct davinci_spi *dspi, char *spi_bus_name)
+{
+	int i = 0, ret = 0;
+	u32 spipc0;
+
+	spi_pin_cfg();
+	psc_change_state(DAVINCI_DM365_LPSC_SPI1, PSC_ENABLE);
+
+	dspi->base = DM3XX_SPI1_BASE;//spi;
+
+	dspi->irq = IRQ_DM3XX_SPINT1_0;
+
+	rt_hw_interrupt_install(dspi->irq, davinci_spi_irq, dspi, spi_bus_name);
+	rt_hw_interrupt_umask(dspi->irq);
+
+	dspi->clk = clk_get("SPICLK");
+
+	dspi->version = SPI_VERSION_1;
+	dspi->chip_sel[0] = 29;//SPI_INTERN_CS;
+	dspi->chip_sel[1] = 0;//GPIO0
+
+	dspi->dma.rx_channel = 15;
+	dspi->dma.tx_channel = 14;
+	dspi->dma.eventq = EVENTQ_3;
+
+	ret = davinci_spi_request_dma(dspi);
+	if (ret)
+		goto err;
+
+	rt_kprintf("%s: DMA: supported\n", spi_bus_name);
+	rt_kprintf("%s: DMA: RX channel: %d, TX channel: %d, "
+			"event queue: %d\n", spi_bus_name, dspi->dma.rx_channel, 
+			dspi->dma.tx_channel, dspi->dma.eventq);
+
+	dspi->get_rx = davinci_spi_rx_buf_u8;
+	dspi->get_tx = davinci_spi_tx_buf_u8;
+
+	rt_completion_init(&dspi->done);
+
+	/* Reset In/OUT SPI module */
+	writel(0, dspi->base + SPIGCR0);
+	udelay(100);
+	writel(1, dspi->base + SPIGCR0);
+
+	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
+	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
+	writel(spipc0, dspi->base + SPIPC0);
+
+	/* initialize chip selects */
+	for (i = 0; i < SPI_MAX_CHIPSELECT; i++) {
+		if (dspi->chip_sel[i] != SPI_INTERN_CS)
+			gpio_direction_output(dspi->chip_sel[i], 1);
+	}
+
+	if (0)
+		writel(SPI_INTLVL_1, dspi->base + SPILVL);
+	else
+		writel(SPI_INTLVL_0, dspi->base + SPILVL);
+
+	writel(CS_DEFAULT, dspi->base + SPIDEF);
+
+	/* master mode default */
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
+	//set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);//LOOP BACK mode
+
+	rt_kprintf("%s: Controller at 0x%p\n", spi_bus_name, dspi->base);
+
+	dspi->parent.parent.user_data = dspi;
+
+	return rt_spi_bus_register(&dspi->parent, spi_bus_name, &davinci_spi_ops);
+
+	return ret;
+
+free_dma:
+	edma_free_channel(dspi->dma.tx_channel);
+	edma_free_channel(dspi->dma.rx_channel);
+	edma_free_slot(dspi->dma.dummy_param_slot);
+
+err:
+	return ret;
+}
+
+
+int rt_hw_spi_init(void)
+{
+    /* register spi bus */
+    {
+        static struct davinci_spi dspi;
+		rt_memset(&dspi, 0, sizeof(dspi));
+		davinci_spi_probe(&dspi, "spi1");
+    }
+    /* attach cs */
+    {
+        static struct rt_spi_device spi_device;
+        rt_spi_bus_attach_device(&spi_device, "spi10", "spi1", (void *)0);
+    }
+	{
+        static struct rt_spi_device spi_device;
+        rt_spi_bus_attach_device(&spi_device, "spi11", "spi1", (void *)1);
+    }
+
+    return 0;
+}
+
+

+ 51 - 0
bsp/dm365/drivers/spi-davinci.h

@@ -0,0 +1,51 @@
+#ifndef __DAVINCI_SPI_H
+#define __DAVINCI_SPI_H
+
+typedef unsigned long  u32;
+typedef unsigned short u16;
+typedef unsigned char  u8;
+typedef unsigned int   bool;
+
+#define SPI_INTERN_CS	0xFF
+
+enum {
+	SPI_VERSION_1, /* For DM355/DM365/DM6467 */
+	SPI_VERSION_2, /* For DA8xx */
+};
+
+/**
+ * davinci_spi_config - Per-chip-select configuration for SPI slave devices
+ *
+ * @wdelay:	amount of delay between transmissions. Measured in number of
+ *		SPI module clocks.
+ * @odd_parity:	polarity of parity flag at the end of transmit data stream.
+ *		0 - odd parity, 1 - even parity.
+ * @parity_enable: enable transmission of parity at end of each transmit
+ *		data stream.
+ * @io_type:	type of IO transfer. Choose between polled, interrupt and DMA.
+ * @timer_disable: disable chip-select timers (setup and hold)
+ * @c2tdelay:	chip-select setup time. Measured in number of SPI module clocks.
+ * @t2cdelay:	chip-select hold time. Measured in number of SPI module clocks.
+ * @t2edelay:	transmit data finished to SPI ENAn pin inactive time. Measured
+ *		in number of SPI clocks.
+ * @c2edelay:	chip-select active to SPI ENAn signal active time. Measured in
+ *		number of SPI clocks.
+ */
+struct davinci_spi_config {
+	u8	wdelay;
+	u8	odd_parity;
+	u8	parity_enable;
+#define SPI_IO_TYPE_INTR	0
+#define SPI_IO_TYPE_POLL	1
+#define SPI_IO_TYPE_DMA		2
+	u8	io_type;
+	u8	timer_disable;
+	u8	c2tdelay;
+	u8	t2cdelay;
+	u8	t2edelay;
+	u8	c2edelay;
+};
+
+extern int rt_hw_spi_init(void);
+
+#endif	/* __DAVINCI_SPI_H */

+ 22 - 0
bsp/dm365/platform/SConscript

@@ -0,0 +1,22 @@
# Build script for the DM365 platform layer: registers the startup and
# SoC-support sources (boot code, EDMA, interrupt controller, PSC power
# management, reset, system clock, traps) with the RT-Thread SCons build.
Import('RTT_ROOT')
from building import *

cwd = GetCurrentDir()
src = Split("""
start_gcc.S
dm365.c
dma.c
findbit.S
interrupt.c
psc.c
reset.c
system_clock.c
trap.c
""")

# The set of source files associated with this SConscript file.
path = [cwd]

group = DefineGroup('Startup', src, depend = [''], CPPPATH = path)

Return('group')

+ 344 - 0
bsp/dm365/platform/dm365.c

@@ -0,0 +1,344 @@
+#include <edma.h>
+
+/* Element count of a true array (not valid on pointer parameters). */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/* Clock rates (Hz), computed once by davinci_clk_init() and referenced
+ * through the rate_hz pointers of davinci_dm365_clks[] below.
+ * NOTE(review): div_by_four and div_by_six are never written in this file --
+ * confirm they are still needed. */
+static rt_uint32_t commonrate;
+static rt_uint32_t div_by_four;
+static rt_uint32_t div_by_six;
+static rt_uint32_t armrate;
+static rt_uint32_t fixedrate;
+static rt_uint32_t ddrrate;
+static rt_uint32_t voicerate;
+static rt_uint32_t mmcsdrate;
+static rt_uint32_t vpssrate, vencrate_sd, vencrate_hd;
+
+
+/* Four Transfer Controllers on DM365 */
+static const rt_int8_t
+dm365_queue_tc_mapping[][2] = {
+	/* {event queue no, TC no}; {-1, -1} terminates the table */
+	{0, 0},
+	{1, 1},
+	{2, 2},
+	{3, 3},
+	{-1, -1},
+};
+
+static const rt_int8_t
+dm365_queue_priority_mapping[][2] = {
+	/* {event queue no, Priority}; {-1, -1} terminates the table.
+	 * NOTE(review): queue 3 gets priority 0 -- presumably the highest;
+	 * confirm against the DM365 TRM. */
+	{0, 7},
+	{1, 7},
+	{2, 7},
+	{3, 0},
+	{-1, -1},
+};
+
+/* DM365 has a single channel controller: 64 DMA channels, 256 PaRAM
+ * slots, 4 transfer controllers, 4 shadow regions. */
+static struct edma_soc_info edma_cc0_info = {
+	.n_channel		= 64,
+	.n_region		= 4,
+	.n_slot			= 256,
+	.n_tc			= 4,
+	.n_cc			= 1,
+	.queue_tc_mapping	= dm365_queue_tc_mapping,
+	.queue_priority_mapping	= dm365_queue_priority_mapping,
+	.default_queue		= EVENTQ_3,
+};
+
+/* Per-CC info array handed to edma_init() by platform_init(). */
+static struct edma_soc_info *dm365_edma_info[EDMA_MAX_CC] = {
+	&edma_cc0_info,
+};
+
+/* Head of the global clock list (populated by clk_register()). */
+static rt_list_t clocks;
+
+/* Minimal clock descriptor: a name, a pointer to its rate variable, and
+ * an optional parent consulted when rate_hz is NULL (see clk_get_rate). */
+struct clk {
+	char name[32];
+	rt_uint32_t *rate_hz;
+	struct clk *parent;
+	rt_list_t  node;
+};
+
+/* Static DM365 clock table; the pointed-to rate variables are filled in
+ * by davinci_clk_init() before this table is registered. */
+static struct clk davinci_dm365_clks[] = {
+	{
+		.name = "ARMCLK",
+		.rate_hz = &armrate,
+	},
+	{
+		.name = "UART0",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "UART1",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "HPI",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "EMACCLK",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "I2CCLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "McBSPCLK",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "MMCSDCLK0",
+		.rate_hz = &mmcsdrate,
+	},
+	{
+		.name = "MMCSDCLK1",
+		.rate_hz = &mmcsdrate,
+	},
+	{
+		.name = "SPICLK",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "gpio",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "AEMIFCLK",
+		.rate_hz = &commonrate,
+	},
+	{
+		.name = "PWM0_CLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "PWM1_CLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "PWM2_CLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "PWM3_CLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "USBCLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "VOICECODEC_CLK",
+		.rate_hz = &voicerate,
+	},
+	{
+		.name = "RTC_CLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "KEYSCAN_CLK",
+		.rate_hz = &fixedrate,
+	},
+	{
+		.name = "ADCIF_CLK",
+		.rate_hz = &fixedrate,
+	},
+};
+
+/* clocks cannot be de-registered, so no refcounting is necessary;
+ * returns the first registered clock whose name matches @id, or RT_NULL. */
+struct clk *clk_get(const char *id)
+{
+	rt_list_t *iter;
+
+	for (iter = clocks.next; iter != &clocks; iter = iter->next)
+	{
+		struct clk *candidate = rt_list_entry(iter, struct clk, node);
+
+		if (rt_strcmp(id, candidate->name) == 0)
+			return candidate;
+	}
+
+	return RT_NULL;
+}
+
+/**
+ * clk_get_rate - resolve the rate of a clock, walking up the parent chain.
+ * @clk: clock handle from clk_get(); may be RT_NULL.
+ *
+ * Walks towards the root until a node with a non-NULL rate_hz pointer is
+ * found. Returns 0 when @clk is RT_NULL or no node in the chain carries a
+ * rate (the previous version dereferenced a NULL pointer in that case and
+ * also declared an unused "flags" variable).
+ */
+rt_uint32_t clk_get_rate(struct clk *clk)
+{
+	rt_uint32_t *rate = RT_NULL;
+
+	while (clk) {
+		rate = clk->rate_hz;
+		if (rate || !clk->parent)
+			break;
+		clk = clk->parent;
+	}
+
+	return rate ? *rate : 0;
+}
+
+/* Add a clock to the global list. Insertion is at the list head, so
+ * lookups see the most recently registered clock first; clocks are
+ * never removed. */
+void clk_register(struct clk *clk)
+{
+	rt_list_insert_after(&clocks, &clk->node);
+}
+
+/**
+ * davinci_register_clks - register an array of clocks with the clock list.
+ * @clk_list: first element of the clock array.
+ * @num_clks: number of elements in @clk_list.
+ *
+ * Always returns 0. (Dead commented-out debug printouts removed.)
+ */
+int davinci_register_clks(struct clk *clk_list, int num_clks)
+{
+	int i;
+
+	for (i = 0; i < num_clks; i++)
+		clk_register(&clk_list[i]);
+
+	return 0;
+}
+
+/* PLL/Reset register offsets, relative to DAVINCI_PLL_CNTRL0_BASE /
+ * DAVINCI_PLL_CNTRL1_BASE. PLLDIV1 and PLLDIV3 are intentionally absent:
+ * this code does not use them. */
+#define PLLM		0x110
+#define PREDIV		0x114
+#define PLLDIV2		0x11C
+#define POSTDIV		0x128
+#define PLLDIV4		0x160
+#define PLLDIV5		0x164
+#define PLLDIV6		0x168
+#define PLLDIV7		0x16C
+#define PLLDIV8		0x170
+
+
+/**
+ * davinci_clk_init - compute PLL0/PLL1 output rates and register the
+ * DM365 clock table.
+ *
+ * Reads PLLM, PREDIV, POSTDIV and the PLLDIVn dividers of both PLL
+ * controllers, derives the per-subsystem rates that the entries of
+ * davinci_dm365_clks[] point at, then registers the whole table.
+ * Returns the result of davinci_register_clks() (always 0).
+ */
+int davinci_clk_init(void)
+{
+	struct clk *clk_list;
+	int num_clks;
+	rt_uint32_t pll0_mult, pll1_mult;
+
+	unsigned long prediv, postdiv;
+	unsigned long pll_rate;
+	unsigned long pll_div2, pll_div4, pll_div5,
+		pll_div6, pll_div7, pll_div8;
+
+	rt_list_init(&clocks);
+
+	//davinci_psc_register(davinci_psc_base, 1);
+
+	pll0_mult = davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PLLM);
+	pll1_mult = davinci_readl(DAVINCI_PLL_CNTRL1_BASE + PLLM);
+
+	/* NOTE(review): these first estimates of commonrate/armrate are
+	 * overwritten by the divider-based values computed below --
+	 * confirm whether they are still needed. */
+	commonrate = ((pll0_mult + 1) * 27000000) / 6;
+	armrate = ((pll0_mult + 1) * 27000000) / 2;
+
+	/* 24 MHz reference oscillator feeding both PLLs. */
+	fixedrate = 24000000;
+
+	/* Read PLL0 configuration */
+	prediv = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PREDIV) &
+			0x1f) + 1;
+	postdiv = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + POSTDIV) &
+			0x1f) + 1;
+
+	/* PLL0 dividers */
+	pll_div4 = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PLLDIV4) &
+			0x1f) + 1; /* EDMA, EMAC, config, common */
+	pll_div5 = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PLLDIV5) &
+			0x1f) + 1; /* VPSS */
+	pll_div6 = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PLLDIV6) &
+			0x1f) + 1; /* VENC */
+	pll_div7 = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PLLDIV7) &
+			0x1f) + 1; /* DDR */
+	pll_div8 = (davinci_readl(DAVINCI_PLL_CNTRL0_BASE + PLLDIV8) &
+			0x1f) + 1; /* MMC/SD */
+
+	/* Fout = (Fref / prediv) * 2*M / postdiv, per the formula used here. */
+	pll_rate = ((fixedrate / prediv) * (2 * pll0_mult)) / postdiv;
+
+	commonrate = pll_rate / pll_div4; /* 486/4 = 121.5MHz */
+	vpssrate = pll_rate / pll_div5; /* 486/2 = 243MHz */
+	vencrate_sd = pll_rate / pll_div6; /* 486/18 =  27MHz */
+	ddrrate = pll_rate / pll_div7; /* 486/2 = 243MHz */
+	mmcsdrate = pll_rate / pll_div8; /* 486/4 =  121.5MHz */
+
+	rt_kprintf(
+		"PLL0: fixedrate: %d, commonrate: %d, vpssrate: %d\n",
+		fixedrate, commonrate, vpssrate);
+	rt_kprintf(
+		"PLL0: vencrate_sd: %d, ddrrate: %d mmcsdrate: %d\n",
+		vencrate_sd, (ddrrate/2), mmcsdrate);
+
+	/* Read PLL1 configuration */
+	prediv = (davinci_readl(DAVINCI_PLL_CNTRL1_BASE + PREDIV) &
+			0x1f) + 1;
+	postdiv = (davinci_readl(DAVINCI_PLL_CNTRL1_BASE + POSTDIV) &
+			0x1f) + 1;
+	pll_rate = ((fixedrate / prediv) * (2 * pll1_mult)) / postdiv;
+
+	/* PLL1 dividers */
+	pll_div2 = (davinci_readl(DAVINCI_PLL_CNTRL1_BASE + PLLDIV2) &
+			0x1f) + 1; /* ARM */
+	pll_div4 = (davinci_readl(DAVINCI_PLL_CNTRL1_BASE + PLLDIV4) &
+			0x1f) + 1; /* VOICE */
+	pll_div5 = (davinci_readl(DAVINCI_PLL_CNTRL1_BASE + PLLDIV5) &
+			0x1f) + 1; /* VENC */
+
+	armrate = pll_rate / pll_div2; /* 594/2 = 297MHz */
+	voicerate = pll_rate / pll_div4; /* 594/6 = 99MHz */
+	vencrate_hd = pll_rate / pll_div5; /* 594/8 = 74.25MHz */
+
+	rt_kprintf(
+		"PLL1: armrate: %d, voicerate: %d, vencrate_hd: %d\n",
+		armrate, voicerate, vencrate_hd);
+
+	clk_list = davinci_dm365_clks;
+	num_clks = ARRAY_SIZE(davinci_dm365_clks); 
+
+	return davinci_register_clks(clk_list, num_clks);
+}
+
+/* One-time SoC setup: bring up the EDMA controller with the DM365 tables. */
+void platform_init(void)
+{
+	edma_init(dm365_edma_info);
+}
+
+
+/* Reset board using the watchdog timer */
+/* Puts the 64-bit timer into watchdog mode, arms the watchdog with the
+ * WDKEY_SEQ0/WDKEY_SEQ1 unlock sequence, then deliberately writes an
+ * invalid key -- which makes the watchdog fire and reset the chip. */
+void reset_system(void)
+{
+	rt_uint32_t tgcr, wdtcr;
+	rt_uint32_t base = DAVINCI_WDOG_BASE;
+
+	/* Disable, internal clock source */
+	davinci_writel(0, base + TCR);
+
+	/* Reset timer, set mode to 64-bit watchdog, and unreset */
+	davinci_writel(0, base + TGCR);
+	tgcr =	(TGCR_TIMMODE_64BIT_WDOG << TGCR_TIMMODE_SHIFT) |
+		(TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
+		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
+	davinci_writel(tgcr, base + TGCR);
+
+	/* Clear counter and period regs */
+	davinci_writel(0, base + TIM12);
+	davinci_writel(0, base + TIM34);
+	davinci_writel(0, base + PRD12);
+	davinci_writel(0, base + PRD34);
+
+	/* Enable periodic mode */
+	davinci_writel(TCR_ENAMODE_PERIODIC << ENAMODE12_SHIFT, base + TCR);
+
+	/* Put watchdog in pre-active state */
+	wdtcr = (WDTCR_WDKEY_SEQ0 << WDTCR_WDKEY_SHIFT) |
+		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
+	davinci_writel(wdtcr, base + WDTCR);
+
+	/* Put watchdog in active state */
+	wdtcr = (WDTCR_WDKEY_SEQ1 << WDTCR_WDKEY_SHIFT) |
+		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
+	davinci_writel(wdtcr, base + WDTCR);
+
+	/*
+	 * Write an invalid value to the WDKEY field to trigger
+	 * a watchdog reset.
+	 */
+	wdtcr = 0xDEADBEEF;
+	davinci_writel(wdtcr, base + WDTCR);
+}
+
+

+ 61 - 0
bsp/dm365/platform/dm365_timer.h

@@ -0,0 +1,61 @@
+/*
+ * DaVinci timer definitions
+ *
+ * Author: Kevin Hilman, MontaVista Software, Inc.
+ * (C) 2007-2008 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __ASM_ARCH_TIME_H
+#define __ASM_ARCH_TIME_H
+
+/* Timer register offsets */
+#define PID12				0x0
+#define TIM12				0x10
+#define TIM34				0x14
+#define PRD12				0x18
+#define PRD34				0x1c
+#define TCR				0x20
+#define TGCR				0x24
+#define WDTCR				0x28
+#define CMP12(n)			(0x60 + ((n) << 2))
+
+/* Timer register bitfields */
+#define ENAMODE12_SHIFT 		6
+#define ENAMODE34_SHIFT 		22
+#define TCR_ENAMODE_DISABLE		0x0
+#define TCR_ENAMODE_ONESHOT		0x1
+#define TCR_ENAMODE_PERIODIC		0x2
+#define TCR_ENAMODE_MASK		0x3
+
+#define TGCR_TIMMODE_SHIFT		2
+#define TGCR_TIMMODE_64BIT_GP		0x0
+#define TGCR_TIMMODE_32BIT_UNCHAINED	0x1
+#define TGCR_TIMMODE_64BIT_WDOG 	0x2
+#define TGCR_TIMMODE_32BIT_CHAINED	0x3
+
+#define TGCR_TIM12RS_SHIFT		0
+#define TGCR_TIM34RS_SHIFT		1
+#define TGCR_RESET			0x0
+#define TGCR_UNRESET			0x1
+#define TGCR_RESET_MASK 		0x3
+
+#define WDTCR_WDEN_SHIFT		14
+#define WDTCR_WDEN_DISABLE		0x0
+#define WDTCR_WDEN_ENABLE		0x1
+#define WDTCR_WDKEY_SHIFT		16
+#define WDTCR_WDKEY_SEQ0		0xA5C6
+#define WDTCR_WDKEY_SEQ1		0xDA7E
+
+/* Logical timer identifiers: bottom/top 32-bit halves of timers 0 and 1
+ * (cf. the TIM12/TIM34 registers above); NUM_TIMERS is the total count. */
+enum {
+	T0_BOT,
+	T0_TOP,
+	T1_BOT,
+	T1_TOP,
+	NUM_TIMERS
+};
+
+#endif /* __ASM_ARCH_TIME_H__ */
+

+ 228 - 0
bsp/dm365/platform/dm36x.h

@@ -0,0 +1,228 @@
+#ifndef __DM36X_H__
+#define __DM36X_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <rtthread.h>
+#include "psc.h"
+#include "irqs.h"
+#include "dm365_timer.h"
+
+/**
+ * @addtogroup DM36X
+ */
+/*@{*/
+
+/*
+ * Base register addresses
+ */
+#define DAVINCI_DMA_3PCC_BASE			(0x01C00000)
+#define DAVINCI_DMA_3PTC0_BASE			(0x01C10000)
+#define DAVINCI_DMA_3PTC1_BASE			(0x01C10400)
+#define DAVINCI_I2C_BASE			(0x01C21000)
+#define DAVINCI_TIMER0_BASE			(0x01C21400)
+#define DAVINCI_TIMER1_BASE			(0x01C21800)
+#define DAVINCI_WDOG_BASE			(0x01C21C00)
+#define DAVINCI_PWM0_BASE			(0x01C22000)
+#define DAVINCI_PWM1_BASE			(0x01C22400)
+#define DAVINCI_PWM2_BASE			(0x01C22800)
+#define DAVINCI_SYSTEM_MODULE_BASE		(0x01C40000)
+#define DAVINCI_PLL_CNTRL0_BASE			(0x01C40800)
+#define DAVINCI_PLL_CNTRL1_BASE			(0x01C40C00)
+#define DAVINCI_PWR_SLEEP_CNTRL_BASE		(0x01C41000)
+#define DAVINCI_SYSTEM_DFT_BASE			(0x01C42000)
+#define DAVINCI_IEEE1394_BASE			(0x01C60000)
+#define DAVINCI_USB_OTG_BASE			(0x01C64000)
+#define DAVINCI_CFC_ATA_BASE			(0x01C66000)
+#define DAVINCI_SPI_BASE			(0x01C66800)
+#define DAVINCI_GPIO_BASE			(0x01C67000)
+#define DAVINCI_UHPI_BASE			(0x01C67800)
+#define DAVINCI_VPSS_REGS_BASE			(0x01C70000)
+#define DAVINCI_EMAC_CNTRL_REGS_BASE		(0x01C80000)
+#define DAVINCI_EMAC_WRAPPER_CNTRL_REGS_BASE	(0x01C81000)
+#define DAVINCI_EMAC_WRAPPER_RAM_BASE		(0x01C82000)
+#define DAVINCI_MDIO_CNTRL_REGS_BASE		(0x01C84000)
+#define DAVINCI_IMCOP_BASE			(0x01CC0000)
+#define DAVINCI_ASYNC_EMIF_CNTRL_BASE		(0x01E00000)
+#define DAVINCI_VLYNQ_BASE			(0x01E01000)
+#define DAVINCI_MCBSP_BASE			(0x01E02000)
+#define DAVINCI_MMC_SD_BASE			(0x01E10000)
+#define DAVINCI_MS_BASE				(0x01E20000)
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE	(0x02000000)
+#define DAVINCI_ASYNC_EMIF_DATA_CE1_BASE	(0x04000000)
+#define DAVINCI_ASYNC_EMIF_DATA_CE2_BASE	(0x06000000)
+#define DAVINCI_ASYNC_EMIF_DATA_CE3_BASE	(0x08000000)
+#define DAVINCI_VLYNQ_REMOTE_BASE		(0x0C000000)
+
+
+/*
+ * We can have multiple VLYNQ IPs in our system.
+ * Define 'LOW_VLYNQ_CONTROL_BASE' with the VLYNQ
+ * IP having lowest base address.
+ * Define 'HIGH_VLYNQ_CONTROL_BASE' with the VLYNQ
+ * IP having highest base address.
+ * In case of only one VLYNQ IP, define only the
+ * 'LOW_VLYNQ_CONTROL_BASE'.
+ */
+#define LOW_VLYNQ_CONTROL_BASE			DAVINCI_VLYNQ_BASE
+
+#define DM365_EMAC_BASE			(0x01D07000)
+#define DM365_EMAC_CNTRL_OFFSET		(0x0000)
+#define DM365_EMAC_CNTRL_MOD_OFFSET	(0x3000)
+#define DM365_EMAC_CNTRL_RAM_OFFSET	(0x1000)
+#define DM365_EMAC_MDIO_OFFSET		(0x4000)
+#define DM365_EMAC_CNTRL_RAM_SIZE	(0x2000)
+
+
+/*
+ * Macro to access device power control
+ */
+#define DAVINCI_VDD3P3V_PWDN            (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DAVINCI_VSCLKDIS                (DAVINCI_SYSTEM_MODULE_BASE + 0x6c)
+
+/*
+ * System module registers
+ */
+#define PINMUX0		(DAVINCI_SYSTEM_MODULE_BASE + 0x00)
+#define PINMUX1		(DAVINCI_SYSTEM_MODULE_BASE + 0x04)
+#define PINMUX2		(DAVINCI_SYSTEM_MODULE_BASE + 0x08)
+#define PINMUX3		(DAVINCI_SYSTEM_MODULE_BASE + 0x0c)
+#define PINMUX4		(DAVINCI_SYSTEM_MODULE_BASE + 0x10)
+
+#define DM365_ARM_INTMUX	(DAVINCI_SYSTEM_MODULE_BASE + 0x18)
+#define DM365_EDMA_EVTMUX	(DAVINCI_SYSTEM_MODULE_BASE + 0x1C)
+#define DAVINCI_PUPDCTL1	(DAVINCI_SYSTEM_MODULE_BASE + 0x7C)
+
+
+
+#define ASYNC_EMIF_REVID	0x00
+#define ASYNC_EMIF_AWCCR	0x04
+#define ASYNC_EMIF_A1CR 	0x10
+#define ASYNC_EMIF_A2CR 	0x14
+#define ASYNC_EMIF_A3CR 	0x18
+
+/*
+ * Base register addresses common across DM355 and DM365
+ */
+#define DM3XX_TIMER2_BASE		(0x01C20800)
+#define DM3XX_REALTIME_BASE		(0x01C20C00)
+#define DM3XX_PWM3_BASE			(0x01C22C00)
+#define DM3XX_SPI_BASE			(0x01C66000)
+#define DM3XX_SPI0_BASE			DM3XX_SPI_BASE
+#define DM3XX_SPI1_BASE			(0x01C66800)
+#define DM3XX_SPI2_BASE			(0x01C67800)
+
+
+
+/*
+ * DM365 base register address
+ */
+#define DM365_DMA_3PTC2_BASE		(0x01C10800)
+#define DM365_DMA_3PTC3_BASE		(0x01C10C00)
+#define DM365_TIMER3_BASE		(0x01C23800)
+#define DM365_ADCIF_BASE		(0x01C23C00)
+#define DM365_SPI3_BASE			(0x01C68000)
+#define DM365_SPI4_BASE			(0x01C23000)
+#define DM365_RTC_BASE			(0x01C69000)
+#define DM365_KEYSCAN_BASE		(0x01C69400)
+#define DM365_UHPI_BASE			(0x01C69800)
+#define DM365_IMCOP_BASE		(0x01CA0000)
+#define DM365_MMC_SD1_BASE		(0x01D00000)
+#define DM365_MCBSP_BASE		(0x01D02000)
+#define DM365_UART1_BASE		(0x01D06000)
+#define DM365_EMAC_CNTRL_BASE		(0x01D07000)
+#define DM365_EMAC_WRAP_RAM_BASE	(0x01D08000)
+#define DM365_EMAC_WRAP_CNTRL_BASE	(0x01D0A000)
+#define DM365_EMAC_MDIO_BASE		(0x01D0B000)
+#define DM365_VOICE_CODEC_BASE		(0x01D0C000)
+#define DM365_ASYNC_EMIF_CNTRL_BASE	(0x01D10000)
+#define DM365_MMC_SD0_BASE		(0x01D11000)
+#define DM365_MS_BASE			(0x01D20000)
+#define DM365_KALEIDO_BASE		(0x01E00000)
+
+#define DAVINCI_UART0_BASE		(0x01C20000)
+
+#define PSC_MDCTL_BASE			(0x01c41a00)
+#define PSC_MDSTAT_BASE			(0x01c41800)
+#define PSC_PTCMD			(0x01c41120)
+#define PSC_PTSTAT			(0x01c41128)
+
+#define DM365_EINT_ENABLE0		0x01c48018
+#define DM365_EINT_ENABLE1		0x01c4801c
+
+#define davinci_readb(a)	(*(volatile unsigned char  *)(a))
+#define davinci_readw(a)	(*(volatile unsigned short *)(a))
+#define davinci_readl(a)	(*(volatile unsigned int   *)(a))
+
+#define davinci_writeb(v,a)	(*(volatile unsigned char  *)(a) = (v))
+#define davinci_writew(v,a)	(*(volatile unsigned short *)(a) = (v))
+#define davinci_writel(v,a)	(*(volatile unsigned int   *)(a) = (v))
+
+/* Short MMIO accessor aliases in the usual readb/readw/readl style. */
+#define readb(a)	davinci_readb(a)
+#define readw(a)	davinci_readw(a)
+#define readl(a)	davinci_readl(a)
+
+/* Byte write accessor, named consistently with readb()/writew()/writel(). */
+#define writeb(v,a)	davinci_writeb(v,a)
+/* Backward-compat alias: the original header spelled this "write", which
+ * also shadows the POSIX write(); kept so existing callers still build. */
+#define write(v,a)	davinci_writeb(v,a)
+#define writew(v,a)	davinci_writew(v,a)
+#define writel(v,a)	davinci_writel(v,a)
+
+/* define timer register struct*/
+/* Memory-mapped layout of a DaVinci 64-bit timer block; the comment on
+ * each field is its byte offset from the timer base address. */
+typedef struct timer_regs_s {
+	rt_uint32_t pid12;            /* 0x0 */
+	rt_uint32_t emumgt_clksped;   /* 0x4 */
+	rt_uint32_t gpint_en;         /* 0x8 */
+	rt_uint32_t gpdir_dat;        /* 0xC */
+	rt_uint32_t tim12;            /* 0x10 */
+	rt_uint32_t tim34;            /* 0x14 */
+	rt_uint32_t prd12;            /* 0x18 */
+	rt_uint32_t prd34;            /* 0x1C */
+	rt_uint32_t tcr;              /* 0x20 */
+	rt_uint32_t tgcr;             /* 0x24 */
+	rt_uint32_t wdtcr;            /* 0x28 */
+	rt_uint32_t tlgc;             /* 0x2C */
+	rt_uint32_t tlmr;             /* 0x30 */
+} timer_regs_t;
+
+/*****************************/
+/* CPU Mode                  */
+/*****************************/
+/* ARM CPSR mode-field values plus the combined IRQ/FIQ disable mask. */
+#define USERMODE		0x10
+#define FIQMODE			0x11
+#define IRQMODE			0x12
+#define SVCMODE			0x13
+#define ABORTMODE		0x17
+#define UNDEFMODE		0x1b
+#define MODEMASK		0x1f
+#define NOINT			0xc0
+
+/* Snapshot of the ARM register file: cpsr first, then r0-r10, fp(r11),
+ * ip(r12), sp, lr, pc. Presumably filled by the exception handlers in
+ * trap.c -- confirm against that file. */
+struct rt_hw_register
+{
+    rt_uint32_t cpsr;
+    rt_uint32_t r0;
+    rt_uint32_t r1;
+    rt_uint32_t r2;
+    rt_uint32_t r3;
+    rt_uint32_t r4;
+    rt_uint32_t r5;
+    rt_uint32_t r6;
+    rt_uint32_t r7;
+    rt_uint32_t r8;
+    rt_uint32_t r9;
+    rt_uint32_t r10;
+    rt_uint32_t fp;
+    rt_uint32_t ip;
+    rt_uint32_t sp;
+    rt_uint32_t lr;
+    rt_uint32_t pc;
+};
+
+/*@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif

+ 1587 - 0
bsp/dm365/platform/dma.c

@@ -0,0 +1,1587 @@
+/*
+ * EDMA3 support for DaVinci
+ *
+ * Copyright (C) 2006-2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <edma.h>
+
+/* Offsets matching "struct edmacc_param" */
+#define PARM_OPT		0x00
+#define PARM_SRC		0x04
+#define PARM_A_B_CNT		0x08
+#define PARM_DST		0x0c
+#define PARM_SRC_DST_BIDX	0x10
+#define PARM_LINK_BCNTRLD	0x14
+#define PARM_SRC_DST_CIDX	0x18
+#define PARM_CCNT		0x1c
+
+#define PARM_SIZE		0x20
+
+/* Offsets for EDMA CC global channel registers and their shadows */
+#define SH_ER		0x00	/* 64 bits */
+#define SH_ECR		0x08	/* 64 bits */
+#define SH_ESR		0x10	/* 64 bits */
+#define SH_CER		0x18	/* 64 bits */
+#define SH_EER		0x20	/* 64 bits */
+#define SH_EECR		0x28	/* 64 bits */
+#define SH_EESR		0x30	/* 64 bits */
+#define SH_SER		0x38	/* 64 bits */
+#define SH_SECR		0x40	/* 64 bits */
+#define SH_IER		0x50	/* 64 bits */
+#define SH_IECR		0x58	/* 64 bits */
+#define SH_IESR		0x60	/* 64 bits */
+#define SH_IPR		0x68	/* 64 bits */
+#define SH_ICR		0x70	/* 64 bits */
+#define SH_IEVAL	0x78
+#define SH_QER		0x80
+#define SH_QEER		0x84
+#define SH_QEECR	0x88
+#define SH_QEESR	0x8c
+#define SH_QSER		0x90
+#define SH_QSECR	0x94
+#define SH_SIZE		0x200
+
+/* Offsets for EDMA CC global registers */
+#define EDMA_REV	0x0000
+#define EDMA_CCCFG	0x0004
+#define EDMA_QCHMAP	0x0200	/* 8 registers */
+#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
+#define EDMA_QDMAQNUM	0x0260
+#define EDMA_QUETCMAP	0x0280
+#define EDMA_QUEPRI	0x0284
+#define EDMA_EMR	0x0300	/* 64 bits */
+#define EDMA_EMCR	0x0308	/* 64 bits */
+#define EDMA_QEMR	0x0310
+#define EDMA_QEMCR	0x0314
+#define EDMA_CCERR	0x0318
+#define EDMA_CCERRCLR	0x031c
+#define EDMA_EEVAL	0x0320
+#define EDMA_DRAE	0x0340	/* 4 x 64 bits*/
+#define EDMA_QRAE	0x0380	/* 4 registers */
+#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
+#define EDMA_QSTAT	0x0600	/* 2 registers */
+#define EDMA_QWMTHRA	0x0620
+#define EDMA_QWMTHRB	0x0624
+#define EDMA_CCSTAT	0x0640
+
+#define EDMA_M		0x1000	/* global channel registers */
+#define EDMA_ECR	0x1008
+#define EDMA_ECRH	0x100C
+#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
+#define EDMA_PARM	0x4000	/* 128 param entries */
+
+#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
+
+#define EDMA_DCHMAP	0x0100  /* 64 registers */
+#define CHMAP_EXIST	BIT(24)
+
+#define EDMA_MAX_DMACH           64
+#define EDMA_MAX_PARAMENTRY     512
+
+#define EDMA_CC0_BASE_REG	0x01c00000
+#define EDMA_TC0_BASE_REG	0x01c10000
+#define EDMA_TC1_BASE_REG	0x01c10400
+#define EDMA_TC2_BASE_REG	0x01c10800
+#define EDMA_TC3_BASE_REG	0x01c10c00
+
+#define min_t(type, x, y) ({			\
+	type __min1 = (x);			\
+	type __min2 = (y);			\
+	__min1 < __min2 ? __min1: __min2; })
+
+
+
+/*****************************************************************************/
+
+/* Register window base of each EDMA channel controller, indexed by CC
+ * number; set up during controller initialization. */
+static void volatile *edmacc_regs_base[EDMA_MAX_CC];
+
+/* 32-bit register read/write primitives. NOTE: the pointer arithmetic on
+ * "void *" below is a GCC extension (byte granularity). */
+static inline unsigned int edma_read(unsigned ctlr, int offset)
+{
+	return (unsigned int)davinci_readl(edmacc_regs_base[ctlr] + offset);
+}
+
+static inline void edma_write(unsigned ctlr, int offset, int val)
+{
+	davinci_writel(val, edmacc_regs_base[ctlr] + offset);
+}
+/* Read-modify-write: reg = (reg & and) | or. */
+static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
+		unsigned or)
+{
+	unsigned val = edma_read(ctlr, offset);
+	val &= and;
+	val |= or;
+	edma_write(ctlr, offset, val);
+}
+static inline void edma_and(unsigned ctlr, int offset, unsigned and)
+{
+	unsigned val = edma_read(ctlr, offset);
+	val &= and;
+	edma_write(ctlr, offset, val);
+}
+static inline void edma_or(unsigned ctlr, int offset, unsigned or)
+{
+	unsigned val = edma_read(ctlr, offset);
+	val |= or;
+	edma_write(ctlr, offset, val);
+}
+/* "_array" variants index consecutive 32-bit registers: (i << 2) bytes. */
+static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
+{
+	return edma_read(ctlr, offset + (i << 2));
+}
+static inline void edma_write_array(unsigned ctlr, int offset, int i,
+		unsigned val)
+{
+	edma_write(ctlr, offset + (i << 2), val);
+}
+static inline void edma_modify_array(unsigned ctlr, int offset, int i,
+		unsigned and, unsigned or)
+{
+	edma_modify(ctlr, offset + (i << 2), and, or);
+}
+static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
+{
+	edma_or(ctlr, offset + (i << 2), or);
+}
+/* Two-dimensional variants: register index = i*2 + j. */
+static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
+		unsigned or)
+{
+	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
+}
+static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
+		unsigned val)
+{
+	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
+}
+/* Shadow-region-0 accessors (registers inside the EDMA_SHADOW0 window). */
+static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
+{
+	return edma_read(ctlr, EDMA_SHADOW0 + offset);
+}
+static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
+		int i)
+{
+	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
+}
+static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
+{
+	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
+}
+static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
+		unsigned val)
+{
+	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
+}
+/* PaRAM accessors: each PaRAM entry is 32 bytes, hence (param_no << 5). */
+static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
+		int param_no)
+{
+	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
+}
+static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
+		unsigned val)
+{
+	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
+}
+static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
+		unsigned and, unsigned or)
+{
+	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
+}
+static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
+		unsigned and)
+{
+	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
+}
+static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
+		unsigned or)
+{
+	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
+}
+/*
+ * Dead code removed: set_bits()/clear_bits() were disabled with "#if 0"
+ * and have no callers in this file; the live helpers are the set_bit()
+ * and clear_bit() functions defined below.
+ */
+/*****************************************************************************/
+
+/* Linux-style bitmap helpers for a 32-bit long. */
+#define BIT(nr)			(1UL << (nr))
+#define BITS_PER_LONG 32
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BITS_PER_BYTE		8
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+
+/* Declare an unsigned-long array large enough to hold 'bits' bits. */
+#define DECLARE_BITMAP(name,bits) \
+	unsigned long name[BITS_TO_LONGS(bits)]
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/* Atomically clear bit @nr in the bitmap at @addr; atomicity is obtained
+ * by masking interrupts (sufficient on this single-core part). */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	rt_base_t level;
+
+	level = rt_hw_interrupt_disable();
+	*p &= ~mask;
+	rt_hw_interrupt_enable(level);
+}
+
+/* Atomically set bit @nr and return its previous value (non-zero if the
+ * bit was already set). */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	rt_base_t level;
+
+	level = rt_hw_interrupt_disable();
+	old = *p;
+	*p = old | mask;
+	rt_hw_interrupt_enable(level);
+
+	return (old & mask) != 0;
+}
+
+/* Atomically set bit @nr (interrupt-masked read-modify-write). */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	rt_base_t level;
+
+	level = rt_hw_interrupt_disable();
+	*p  |= mask;
+	rt_hw_interrupt_enable(level);
+}
+
+/*
+ * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
+ * Implemented in findbit.S.
+ */
+extern int _find_first_zero_bit_le(const void * p, unsigned size);
+extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)
+
+
+
+/* actual number of DMA channels and slots on this silicon */
+struct edma {
+	/* how many dma resources of each type */
+	unsigned	num_channels;
+	unsigned	num_region;
+	unsigned	num_slots;
+	unsigned	num_tc;
+	unsigned	num_cc;
+	enum dma_event_q 	default_queue;
+
+	/* list of channels with no even trigger; terminated by "-1" */
+	const rt_int8_t	*noevent;
+
+	/* The edma_inuse bit for each PaRAM slot is clear unless the
+	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
+	 */
+	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
+
+	/* The edma_unused bit for each channel is clear unless
+	 * it is not being used on this platform. It uses a bit
+	 * of SOC-specific initialization code.
+	 */
+	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
+
+	/* Interrupt-number range owned by this CC (used by irq2ctlr()). */
+	unsigned	irq_res_start;
+	unsigned	irq_res_end;
+
+	/* Per-channel completion/error callback and its opaque argument. */
+	struct dma_interrupt_data {
+		void (*callback)(unsigned channel, unsigned short ch_status,
+				void *data);
+		void *data;
+	} intr_data[EDMA_MAX_DMACH];
+};
+
+/* Per-controller state, indexed by CC number; set up during init. */
+static struct edma *edma_cc[EDMA_MAX_CC];
+/* Number of channel controllers actually present on this SoC. */
+static int arch_num_cc;
+
+/* dummy param set used to (re)initialize parameter RAM slots */
+static const struct edmacc_param dummy_paramset = {
+	.link_bcntrld = 0xffff,
+	.ccnt = 1,
+};
+
+/*****************************************************************************/
+
+/* Route DMA channel @ch_no to event queue @queue_no. Each EDMA_DMAQNUM
+ * register packs eight 4-bit queue fields: (ch_no >> 3) selects the
+ * register, (ch_no & 7) * 4 the field within it. */
+static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
+		enum dma_event_q queue_no)
+{
+	int bit = (ch_no & 0x7) * 4;
+
+	/* default to low priority queue */
+	if (queue_no == EVENTQ_DEFAULT)
+		queue_no = edma_cc[ctlr]->default_queue;
+
+	queue_no &= 7;
+	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
+			~(0x7 << bit), queue_no << bit);
+}
+
+/* Bind event queue @queue_no to transfer controller @tc_no (QUETCMAP). */
+static void map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
+{
+	int bit = queue_no * 4;
+	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
+}
+
+/* Program the 3-bit priority field of @queue_no in EDMA_QUEPRI. */
+static void assign_priority_to_queue(unsigned ctlr, int queue_no,
+		int priority)
+{
+	int bit = queue_no * 4;
+	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
+			((priority & 0x7) << bit));
+}
+
+/**
+ * map_dmach_param - Maps channel number to param entry number
+ *
+ * This maps the dma channel number to param entry number. In
+ * other words using the DMA channel mapping registers a param entry
+ * can be mapped to any channel
+ *
+ * Callers are responsible for ensuring the channel mapping logic is
+ * included in that particular EDMA variant (Eg : dm646x)
+ *
+ */
+static void map_dmach_param(unsigned ctlr)
+{
+	int i;
+	/* Identity mapping: channel i -> PaRAM entry i (entry no. in
+	 * DCHMAP lives at bit 5 and up, hence (i << 5)). */
+	for (i = 0; i < EDMA_MAX_DMACH; i++)
+		edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
+}
+
+/* Install (or remove, when @callback is NULL) the completion handler for
+ * logical channel @lch: NULL disables the channel's interrupt (IECR);
+ * a non-NULL callback first acks any stale pending bit (ICR) and then
+ * enables the interrupt (IESR). */
+static inline void
+setup_dma_interrupt(unsigned lch,
+	void (*callback)(unsigned channel, rt_uint16_t ch_status, void *data),
+	void *data)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(lch);
+	lch = EDMA_CHAN_SLOT(lch);
+
+	if (!callback)
+		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
+				BIT(lch & 0x1f));
+
+	edma_cc[ctlr]->intr_data[lch].callback = callback;
+	edma_cc[ctlr]->intr_data[lch].data = data;
+
+	if (callback) {
+		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
+				BIT(lch & 0x1f));
+		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
+				BIT(lch & 0x1f));
+	}
+}
+
+/**
+ * irq2ctlr - map an EDMA interrupt number to its channel-controller index.
+ * @irq: interrupt number passed to the EDMA ISRs.
+ *
+ * Returns 0 or 1 on a match, -1 when the irq belongs to neither CC.
+ * The previous version unconditionally dereferenced edma_cc[1], which is
+ * NULL on single-CC parts such as the DM365 (n_cc == 1).
+ */
+static int irq2ctlr(int irq)
+{
+	if (edma_cc[0] && irq >= edma_cc[0]->irq_res_start &&
+	    irq <= edma_cc[0]->irq_res_end)
+		return 0;
+	else if (edma_cc[1] && irq >= edma_cc[1]->irq_res_start &&
+		 irq <= edma_cc[1]->irq_res_end)
+		return 1;
+
+	return -1;
+}
+
+/******************************************************************************
+ *
+ * DMA interrupt handler
+ *
+ *****************************************************************************/
+/* Completion ISR: scans the shadow-region-0 pending registers (IPR0/1)
+ * against the enabled set (IER0/1), acks each pending channel via ICR and
+ * invokes the registered callback with DMA_COMPLETE. */
+static void dma_irq_handler(int irq, void *data)
+{
+	int i;
+	int ctlr;
+	unsigned int cnt = 0;
+
+	ctlr = irq2ctlr(irq);
+	if (ctlr < 0)
+		return ;
+
+	edma_dbg("dma_irq_handler\n");
+
+	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
+	    (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
+		return ;
+
+	while (1) {
+		int j;
+		if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
+				edma_shadow0_read_array(ctlr, SH_IER, 0))
+			j = 0;
+		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
+				edma_shadow0_read_array(ctlr, SH_IER, 1))
+			j = 1;
+		else
+			break;
+		edma_dbg("IPR%d %08x\n", j,
+				edma_shadow0_read_array(ctlr, SH_IPR, j));
+		for (i = 0; i < 32; i++) {
+			int k = (j << 5) + i;	/* global channel number */
+			if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
+					&& (edma_shadow0_read_array(ctlr,
+							SH_IER, j) & BIT(i))) {
+				/* Clear the corresponding IPR bits */
+				edma_shadow0_write_array(ctlr, SH_ICR, j,
+							BIT(i));
+				if (edma_cc[ctlr]->intr_data[k].callback)
+					edma_cc[ctlr]->intr_data[k].callback(
+						k, DMA_COMPLETE,
+						edma_cc[ctlr]->intr_data[k].
+						data);
+			}
+		}
+		cnt++;
+		/* Bail out after 10 passes so a stuck source cannot hang us. */
+		if (cnt > 10)
+			break;
+	}
+	/* Ask the CC to re-evaluate any interrupts still pending. */
+	edma_shadow0_write(ctlr, SH_IEVAL, 1);
+	return ;
+}
+
+/******************************************************************************
+ *
+ * DMA error interrupt handler
+ *
+ *****************************************************************************/
+/* Error ISR: drains channel event-miss flags (EMR0/1), QDMA event-miss
+ * flags (QEMR) and CC error flags (CCERR). Channel misses are acked
+ * (EMCR/SECR) and reported to the channel callback as DMA_CC_ERROR;
+ * QEMR/CCERR conditions are only cleared, not reported. */
+static void dma_ccerr_handler(int irq, void *data)
+{
+	int i;
+	int ctlr;
+	unsigned int cnt = 0;
+
+	ctlr = irq2ctlr(irq);
+	if (ctlr < 0)
+		return ;
+
+	edma_dbg("dma_ccerr_handler\n");
+
+	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
+	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
+	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
+	    (edma_read(ctlr, EDMA_CCERR) == 0))
+		return ;
+
+	while (1) {
+		int j = -1;
+		if (edma_read_array(ctlr, EDMA_EMR, 0))
+			j = 0;
+		else if (edma_read_array(ctlr, EDMA_EMR, 1))
+			j = 1;
+		if (j >= 0) {
+			edma_dbg("EMR%d %08x\n", j,
+					edma_read_array(ctlr, EDMA_EMR, j));
+			for (i = 0; i < 32; i++) {
+				int k = (j << 5) + i;	/* global channel no */
+				if (edma_read_array(ctlr, EDMA_EMR, j) &
+							BIT(i)) {
+					/* Clear the corresponding EMR bits */
+					edma_write_array(ctlr, EDMA_EMCR, j,
+							BIT(i));
+					/* Clear any SER */
+					edma_shadow0_write_array(ctlr, SH_SECR,
+								j, BIT(i));
+					if (edma_cc[ctlr]->intr_data[k].
+								callback) {
+						edma_cc[ctlr]->intr_data[k].
+						callback(k,
+						DMA_CC_ERROR,
+						edma_cc[ctlr]->intr_data
+						[k].data);
+					}
+				}
+			}
+		} else if (edma_read(ctlr, EDMA_QEMR)) {
+			edma_dbg("QEMR %02x\n",
+				edma_read(ctlr, EDMA_QEMR));
+			for (i = 0; i < 8; i++) {
+				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
+					/* Clear the corresponding IPR bits */
+					edma_write(ctlr, EDMA_QEMCR, BIT(i));
+					edma_shadow0_write(ctlr, SH_QSECR,
+								BIT(i));
+
+					/* NOTE:  not reported!! */
+				}
+			}
+		} else if (edma_read(ctlr, EDMA_CCERR)) {
+			edma_dbg("CCERR %08x\n",
+				edma_read(ctlr, EDMA_CCERR));
+			/* FIXME:  CCERR.BIT(16) ignored!  much better
+			 * to just write CCERRCLR with CCERR value...
+			 */
+			for (i = 0; i < 8; i++) {
+				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
+					/* Clear the corresponding IPR bits */
+					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
+
+					/* NOTE:  not reported!! */
+				}
+			}
+		}
+		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
+		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
+		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
+		    (edma_read(ctlr, EDMA_CCERR) == 0))
+			break;
+		cnt++;
+		/* Bounded loop: give up after 10 passes. */
+		if (cnt > 10)
+			break;
+	}
+	/* Ask the CC to re-evaluate any error events still pending. */
+	edma_write(ctlr, EDMA_EEVAL, 1);
+	return ;
+}
+
+/******************************************************************************
+ *
+ * Transfer controller error interrupt handlers
+ *
+ *****************************************************************************/
+
+#define tc_errs_handled	RT_FALSE/* disabled as long as they're NOPs */
+
+static void dma_tc0err_handler(int irq, void *data)
+{
+	edma_dbg("dma_tc0err_handler\n");
+	return ;
+}
+
+static void dma_tc1err_handler(int irq, void *data)
+{
+	edma_dbg("dma_tc1err_handler\n");
+	return ;
+}
+
+/*
+ * Reserve @num_slots contiguous PaRAM slots, searching upward from
+ * @start_slot.  @id selects the policy: EDMA_CONT_PARAMS_FIXED_EXACT fails
+ * unless the run begins exactly at @start_slot; the other ids restart the
+ * search after each occupied slot.  Returns EDMA_CTLR_CHAN() of the first
+ * slot of the reserved run, or -RT_EBUSY if no suitable run exists.
+ */
+static int reserve_contiguous_slots(int ctlr, unsigned int id,
+				     unsigned int num_slots,
+				     unsigned int start_slot)
+{
+	int i, j;
+	unsigned int count = num_slots;
+	int stop_slot = start_slot;
+	/* Tracks which bits of edma_inuse *we* set, so a failed attempt
+	 * can be rolled back without touching pre-existing reservations. */
+	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
+
+	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
+		j = EDMA_CHAN_SLOT(i);
+		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
+			/* Record our current beginning slot */
+			if (count == num_slots)
+				stop_slot = i;
+
+			count--;
+			set_bit(j, tmp_inuse);
+
+			if (count == 0)
+				break;
+		} else {
+			clear_bit(j, tmp_inuse);
+
+			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
+				stop_slot = i;
+				break;
+			} else {
+				/* Run broken: restart the count. */
+				count = num_slots;
+			}
+		}
+	}
+
+	/*
+	 * We have to clear any bits that we set
+	 * if we run out of parameter RAM slots, i.e we do find a set
+	 * of contiguous parameter RAM slots but do not find the exact number
+	 * requested as we may reach the total number of parameter RAM slots
+	 */
+	if (i == edma_cc[ctlr]->num_slots)
+		stop_slot = i;
+
+	for (j = start_slot; j < stop_slot; j++)
+		if (test_bit(j, tmp_inuse))
+			clear_bit(j, edma_cc[ctlr]->edma_inuse);
+
+	if (count)
+		return -RT_EBUSY;
+
+	/* Initialize every reserved slot with the dummy PaRAM set. */
+	for (j = i - num_slots + 1; j <= i; ++j)
+		rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(j)),
+			&dummy_paramset, PARM_SIZE);
+
+	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
+}
+
+/*
+ * Disabled Linux-derived helper: it walked platform devices to mark their
+ * DMA resources as in-use.  RT-Thread has no platform bus, so the scan is
+ * compiled out; kept for reference until an equivalent is needed.
+ */
+#if 0
+static int prepare_unused_channel_list(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, ctlr;
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
+				(int)pdev->resource[i].start >= 0) {
+			ctlr = EDMA_CTLR(pdev->resource[i].start);
+			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
+					edma_cc[ctlr]->edma_unused);
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*-----------------------------------------------------------------------*/
+
+/* Set once the unused-channel scan has run; only referenced by the
+ * disabled prepare_unused_channel_list() path above. */
+static rt_bool_t unused_chan_list_done;
+
+/* Resource alloc/free:  dma channels, parameter RAM slots */
+
+/**
+ * edma_alloc_channel - allocate DMA channel and paired parameter RAM
+ * @channel: specific channel to allocate; negative for "any unmapped channel"
+ * @callback: optional; to be issued on DMA completion or errors
+ * @data: passed to callback
+ * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
+ *	Controller (TC) executes requests using this channel.  Use
+ *	EVENTQ_DEFAULT unless you really need a high priority queue.
+ *
+ * This allocates a DMA channel and its associated parameter RAM slot.
+ * The parameter RAM is initialized to hold a dummy transfer.
+ *
+ * Normal use is to pass a specific channel number as @channel, to make
+ * use of hardware events mapped to that channel.  When the channel will
+ * be used only for software triggering or event chaining, channels not
+ * mapped to hardware events (or mapped to unused events) are preferable.
+ *
+ * DMA transfers start from a channel using edma_start(), or by
+ * chaining.  When the transfer described in that channel's parameter RAM
+ * slot completes, that slot's data may be reloaded through a link.
+ *
+ * DMA errors are only reported to the @callback associated with the
+ * channel driving that transfer, but transfer completion callbacks can
+ * be sent to another channel under control of the TCC field in
+ * the option word of the transfer's parameter RAM set.  Drivers must not
+ * use DMA transfer completion callbacks for channels they did not allocate.
+ * (The same applies to TCC codes used in transfer chaining.)
+ *
+ * Returns the number of the channel, else negative errno.
+ */
+int edma_alloc_channel(int channel,
+		void (*callback)(unsigned channel, rt_uint16_t ch_status, void *data),
+		void *data,
+		enum dma_event_q eventq_no)
+{
+	unsigned i, done = 0, ctlr = 0;
+	int ret = 0;
+#if 0
+	if (!unused_chan_list_done) {
+		/*
+		 * Scan all the platform devices to find out the EDMA channels
+		 * used and clear them in the unused list, making the rest
+		 * available for ARM usage.
+		 */
+		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
+				prepare_unused_channel_list);
+		if (ret < 0)
+			return ret;
+
+		unused_chan_list_done = true;
+	}
+#endif
+
+	/* A specific channel was requested: split the composite id and
+	 * take it out of the "unused" (event-less) map. */
+	if (channel >= 0) {
+		ctlr = EDMA_CTLR(channel);
+		channel = EDMA_CHAN_SLOT(channel);
+ 		clear_bit(channel, edma_cc[ctlr]->edma_unused);
+	}
+
+	/* Negative @channel: find any free channel without an event
+	 * association, scanning every channel controller. */
+	if (channel < 0) {
+		for (i = 0; i < arch_num_cc; i++) {
+			channel = 0;
+			for (;;) {
+				channel = find_next_bit(edma_cc[i]->edma_unused,
+						edma_cc[i]->num_channels,
+						channel);
+				if (channel == edma_cc[i]->num_channels)
+					break;
+				if (!test_and_set_bit(channel,
+						edma_cc[i]->edma_inuse)) {
+					done = 1;
+					ctlr = i;
+					break;
+				}
+				channel++;
+			}
+			if (done)
+				break;
+		}
+		if (!done)
+			return -RT_ENOMEM;
+	} else if (channel >= edma_cc[ctlr]->num_channels) {
+		return -RT_ERROR;
+	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
+		return -RT_EBUSY;
+	}
+
+	/* ensure access through shadow region 0 */
+	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+
+	/* ensure no events are pending */
+	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
+	rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(channel)),
+			&dummy_paramset, PARM_SIZE);
+
+	if (callback)
+		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
+					callback, data);
+
+	/* Route the channel's events to the requested event queue. */
+	map_dmach_queue(ctlr, channel, eventq_no);
+
+	return EDMA_CTLR_CHAN(ctlr, channel);
+}
+
+
+/**
+ * edma_free_channel - deallocate DMA channel
+ * @channel: dma channel returned from edma_alloc_channel()
+ *
+ * This deallocates the DMA channel and associated parameter RAM slot
+ * allocated by edma_alloc_channel().
+ *
+ * Callers are responsible for ensuring the channel is inactive, and
+ * will not be reactivated by linking, chaining, or software calls to
+ * edma_start().
+ */
+void edma_free_channel(unsigned channel)
+{
+	unsigned ctlr = EDMA_CTLR(channel);
+
+	channel = EDMA_CHAN_SLOT(channel);
+	if (channel >= edma_cc[ctlr]->num_channels)
+		return;
+
+	/* Detach any completion/error callback for this channel. */
+	setup_dma_interrupt(channel, RT_NULL, RT_NULL);
+	/* REVISIT should probably take out of shadow region 0 */
+
+	/* Restore the dummy PaRAM set, then mark the channel free. */
+	rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(channel)),
+		&dummy_paramset, PARM_SIZE);
+	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
+}
+
+
+/**
+ * edma_alloc_slot - allocate DMA parameter RAM
+ * @slot: specific slot to allocate; negative for "any unused slot"
+ *
+ * This allocates a parameter RAM slot, initializing it to hold a
+ * dummy transfer.  Slots allocated using this routine have not been
+ * mapped to a hardware DMA channel, and will normally be used by
+ * linking to them from a slot associated with a DMA channel.
+ *
+ * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
+ * slots may be allocated on behalf of DSP firmware.
+ *
+ * Returns the number of the slot, else negative errno.
+ */
+int edma_alloc_slot(unsigned ctlr, int slot)
+{
+	if (slot >= 0)
+		slot = EDMA_CHAN_SLOT(slot);
+
+	/* Negative @slot: search for any unused slot, starting past the
+	 * channel-paired entries (slots 0..num_channels-1 mirror channels). */
+	if (slot < 0) {
+		slot = edma_cc[ctlr]->num_channels;
+		for (;;) {
+			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
+					edma_cc[ctlr]->num_slots, slot);
+			if (slot == edma_cc[ctlr]->num_slots)
+				return -RT_ENOMEM;
+			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
+				break;
+		}
+	} else if (slot < edma_cc[ctlr]->num_channels ||
+			slot >= edma_cc[ctlr]->num_slots) {
+		return -RT_ERROR;
+	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
+		return -RT_EBUSY;
+	}
+
+	/* Initialize the slot with the harmless dummy transfer. */
+	rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
+			&dummy_paramset, PARM_SIZE);
+
+	return EDMA_CTLR_CHAN(ctlr, slot);
+}
+
+
+/**
+ * edma_free_slot - deallocate DMA parameter RAM
+ * @slot: parameter RAM slot returned from edma_alloc_slot()
+ *
+ * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
+ * Callers are responsible for ensuring the slot is inactive, and will
+ * not be activated.
+ */
+void edma_free_slot(unsigned slot)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+
+	slot = EDMA_CHAN_SLOT(slot);
+	/* Channel-paired slots and out-of-range values are not ours to free. */
+	if (slot < edma_cc[ctlr]->num_channels ||
+			slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	/* Restore the dummy PaRAM set before releasing the slot. */
+	rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
+		&dummy_paramset, PARM_SIZE);
+	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
+}
+
+
+
+/**
+ * edma_alloc_cont_slots- alloc contiguous parameter RAM slots
+ * The API will return the starting point of a set of
+ * contiguous parameter RAM slots that have been requested
+ *
+ * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
+ * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
+ * @count: number of contiguous Parameter RAM slots
+ * @slot  - the start value of Parameter RAM slot that should be passed if id
+ * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
+ *
+ * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
+ * contiguous Parameter RAM slots from parameter RAM 64 in the case of
+ * DaVinci SOCs and 32 in the case of DA8xx SOCs.
+ *
+ * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
+ * set of contiguous parameter RAM slots from the "slot" that is passed as an
+ * argument to the API.
+ *
+ * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially
+ * starts looking for a set of contiguous parameter RAM slots from the "slot"
+ * that is passed as an argument to the API. On failure the API will try to
+ * find a set of contiguous Parameter RAM slots from the remaining Parameter
+ * RAM slots
+ */
+int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
+{
+	/*
+	 * The start slot requested should be greater than
+	 * the number of channels and lesser than the total number
+	 * of slots
+	 */
+	if ((id != EDMA_CONT_PARAMS_ANY) &&
+		(slot < edma_cc[ctlr]->num_channels ||
+		slot >= edma_cc[ctlr]->num_slots))
+		return -RT_ERROR;
+
+	/*
+	 * The number of parameter RAM slots requested cannot be less than 1
+	 * and cannot be more than the number of slots minus the number of
+	 * channels
+	 */
+	if (count < 1 || count >
+		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
+		return -RT_ERROR;
+
+	/* Delegate to the searcher; only the starting point differs. */
+	switch (id) {
+	case EDMA_CONT_PARAMS_ANY:
+		return reserve_contiguous_slots(ctlr, id, count,
+						 edma_cc[ctlr]->num_channels);
+	case EDMA_CONT_PARAMS_FIXED_EXACT:
+	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
+		return reserve_contiguous_slots(ctlr, id, count, slot);
+	default:
+		return -RT_ERROR;
+	}
+
+}
+
+
+/**
+ * edma_free_cont_slots - deallocate DMA parameter RAM slots
+ * @slot: first parameter RAM of a set of parameter RAM slots to be freed
+ * @count: the number of contiguous parameter RAM slots to be freed
+ *
+ * This deallocates the parameter RAM slots allocated by
+ * edma_alloc_cont_slots.
+ * Callers/applications need to keep track of sets of contiguous
+ * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
+ * API.
+ * Callers are responsible for ensuring the slots are inactive, and will
+ * not be activated.
+ */
+int edma_free_cont_slots(unsigned slot, int count)
+{
+	unsigned ctlr, slot_to_free;
+	int i;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	/* The range must lie in the non-channel part of PaRAM. */
+	if (slot < edma_cc[ctlr]->num_channels ||
+		slot >= edma_cc[ctlr]->num_slots ||
+		count < 1)
+		return -RT_ERROR;
+
+	for (i = slot; i < slot + count; ++i) {
+		ctlr = EDMA_CTLR(i);
+		slot_to_free = EDMA_CHAN_SLOT(i);
+
+		/* Restore the dummy set, then mark the slot free. */
+		rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free)),
+			&dummy_paramset, PARM_SIZE);
+		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
+	}
+
+	return 0;
+}
+
+
+/*-----------------------------------------------------------------------*/
+
+/* Parameter RAM operations (i) -- read/write partial slots */
+
+/**
+ * edma_set_src - set initial DMA source address in parameter RAM slot
+ * @slot: parameter RAM slot being configured
+ * @src_port: physical address of source (memory, controller FIFO, etc)
+ * @addressMode: INCR, except in very rare cases
+ * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
+ *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
+ *
+ * Note that the source address is modified during the DMA transfer
+ * according to edma_set_src_index().
+ */
+void edma_set_src(unsigned slot, rt_uint32_t src_port,
+				enum address_mode mode, enum fifo_width width)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+	unsigned int opt;
+
+	slot = EDMA_CHAN_SLOT(slot);
+	if (slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	opt = edma_parm_read(ctlr, PARM_OPT, slot);
+	if (mode)
+		/* FIFO addressing: set SAM and program the FIFO width. */
+		opt = (opt & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
+	else
+		/* Incrementing source address: clear SAM. */
+		opt &= ~SAM;
+	edma_parm_write(ctlr, PARM_OPT, slot, opt);
+
+	/* Program the source port address into the PaRAM SRC field. */
+	edma_parm_write(ctlr, PARM_SRC, slot, src_port);
+}
+
+
+/**
+ * edma_set_dest - set initial DMA destination address in parameter RAM slot
+ * @slot: parameter RAM slot being configured
+ * @dest_port: physical address of destination (memory, controller FIFO, etc)
+ * @addressMode: INCR, except in very rare cases
+ * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
+ *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
+ *
+ * Note that the destination address is modified during the DMA transfer
+ * according to edma_set_dest_index().
+ */
+void edma_set_dest(unsigned slot, rt_uint32_t dest_port,
+				 enum address_mode mode, enum fifo_width width)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+	unsigned int opt;
+
+	slot = EDMA_CHAN_SLOT(slot);
+	if (slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	opt = edma_parm_read(ctlr, PARM_OPT, slot);
+	if (mode)
+		/* FIFO addressing: set DAM and program the FIFO width. */
+		opt = (opt & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
+	else
+		/* Incrementing destination address: clear DAM. */
+		opt &= ~DAM;
+	edma_parm_write(ctlr, PARM_OPT, slot, opt);
+
+	/* Program the destination port address into the PaRAM DST field. */
+	edma_parm_write(ctlr, PARM_DST, slot, dest_port);
+}
+
+
+/**
+ * edma_get_position - returns the current transfer points
+ * @slot: parameter RAM slot being examined
+ * @src: pointer to source port position
+ * @dst: pointer to destination port position
+ *
+ * Returns current source and destination addresses for a particular
+ * parameter RAM slot.  Its channel should not be active when this is called.
+ */
+void edma_get_position(unsigned slot, rt_uint32_t *src, rt_uint32_t *dst)
+{
+	struct edmacc_param parm;
+	unsigned ctlr = EDMA_CTLR(slot);
+
+	/* Snapshot the whole PaRAM set and report its address fields. */
+	edma_read_slot(EDMA_CTLR_CHAN(ctlr, EDMA_CHAN_SLOT(slot)), &parm);
+	if (src)
+		*src = parm.src;
+	if (dst)
+		*dst = parm.dst;
+}
+
+
+/**
+ * edma_set_src_index - configure DMA source address indexing
+ * @slot: parameter RAM slot being configured
+ * @src_bidx: byte offset between source arrays in a frame
+ * @src_cidx: byte offset between source frames in a block
+ *
+ * Offsets are specified to support either contiguous or discontiguous
+ * memory transfers, or repeated access to a hardware register, as needed.
+ * When accessing hardware registers, both offsets are normally zero.
+ */
+void edma_set_src_index(unsigned slot, rt_int16_t src_bidx, rt_int16_t src_cidx)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+
+	slot = EDMA_CHAN_SLOT(slot);
+	if (slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	/* Keep the high (destination) halves; rewrite the source halves. */
+	edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, 0xffff0000, src_bidx);
+	edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, 0xffff0000, src_cidx);
+}
+
+
+/**
+ * edma_set_dest_index - configure DMA destination address indexing
+ * @slot: parameter RAM slot being configured
+ * @dest_bidx: byte offset between destination arrays in a frame
+ * @dest_cidx: byte offset between destination frames in a block
+ *
+ * Offsets are specified to support either contiguous or discontiguous
+ * memory transfers, or repeated access to a hardware register, as needed.
+ * When accessing hardware registers, both offsets are normally zero.
+ */
+void edma_set_dest_index(unsigned slot, rt_int16_t dest_bidx, rt_int16_t dest_cidx)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+
+	slot = EDMA_CHAN_SLOT(slot);
+	if (slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	/* Keep the low (source) halves; rewrite the destination halves. */
+	edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, 0x0000ffff,
+			dest_bidx << 16);
+	edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, 0x0000ffff,
+			dest_cidx << 16);
+}
+
+
+/**
+ * edma_set_transfer_params - configure DMA transfer parameters
+ * @slot: parameter RAM slot being configured
+ * @acnt: how many bytes per array (at least one)
+ * @bcnt: how many arrays per frame (at least one)
+ * @ccnt: how many frames per block (at least one)
+ * @bcnt_rld: used only for A-Synchronized transfers; this specifies
+ *	the value to reload into bcnt when it decrements to zero
+ * @sync_mode: ASYNC or ABSYNC
+ *
+ * See the EDMA3 documentation to understand how to configure and link
+ * transfers using the fields in PaRAM slots.  If you are not doing it
+ * all at once with edma_write_slot(), you will use this routine
+ * plus two calls each for source and destination, setting the initial
+ * address and saying how to index that address.
+ *
+ * An example of an A-Synchronized transfer is a serial link using a
+ * single word shift register.  In that case, @acnt would be equal to
+ * that word size; the serial controller issues a DMA synchronization
+ * event to transfer each word, and memory access by the DMA transfer
+ * controller will be word-at-a-time.
+ *
+ * An example of an AB-Synchronized transfer is a device using a FIFO.
+ * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
+ * The controller with the FIFO issues DMA synchronization events when
+ * the FIFO threshold is reached, and the DMA transfer controller will
+ * transfer one frame to (or from) the FIFO.  It will probably use
+ * efficient burst modes to access memory.
+ */
+void edma_set_transfer_params(unsigned slot,
+		rt_uint16_t acnt, rt_uint16_t bcnt, rt_uint16_t ccnt,
+		rt_uint16_t bcnt_rld, enum sync_dimension sync_mode)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_cc[ctlr]->num_slots) {
+		/* BCNTRLD occupies the high half of LINK_BCNTRLD. */
+		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
+				0x0000ffff, bcnt_rld << 16);
+		/* SYNCDIM clear = A-synchronized, set = AB-synchronized. */
+		if (sync_mode == ASYNC)
+			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
+		else
+			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
+		/* Set the acount, bcount, ccount registers */
+		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
+		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
+	}
+}
+
+
+/**
+ * edma_link - link one parameter RAM slot to another
+ * @from: parameter RAM slot originating the link
+ * @to: parameter RAM slot which is the link target
+ *
+ * The originating slot should not be part of any active DMA transfer.
+ */
+void edma_link(unsigned from, unsigned to)
+{
+	unsigned ctlr_from = EDMA_CTLR(from);
+	unsigned ctlr_to = EDMA_CTLR(to);
+
+	from = EDMA_CHAN_SLOT(from);
+	to = EDMA_CHAN_SLOT(to);
+
+	/* Both ends must be valid slots on their respective controllers. */
+	if (from >= edma_cc[ctlr_from]->num_slots ||
+			to >= edma_cc[ctlr_to]->num_slots)
+		return;
+
+	/* Point the 16-bit LINK field of @from at @to's PaRAM offset. */
+	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
+				PARM_OFFSET(to));
+}
+
+
+/**
+ * edma_unlink - cut link from one parameter RAM slot
+ * @from: parameter RAM slot originating the link
+ *
+ * The originating slot should not be part of any active DMA transfer.
+ * Its link is set to 0xffff.
+ */
+void edma_unlink(unsigned from)
+{
+	unsigned ctlr = EDMA_CTLR(from);
+
+	from = EDMA_CHAN_SLOT(from);
+	if (from < edma_cc[ctlr]->num_slots)
+		/* A LINK value of 0xffff terminates the chain. */
+		edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
+}
+
+
+/*-----------------------------------------------------------------------*/
+
+/* Parameter RAM operations (ii) -- read/write whole parameter sets */
+
+/**
+ * edma_write_slot - write parameter RAM data for slot
+ * @slot: number of parameter RAM slot being modified
+ * @param: data to be written into parameter RAM slot
+ *
+ * Use this to assign all parameters of a transfer at once.  This
+ * allows more efficient setup of transfers than issuing multiple
+ * calls to set up those parameters in small pieces, and provides
+ * complete control over all transfer options.
+ */
+void edma_write_slot(unsigned slot, const struct edmacc_param *param)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+
+	slot = EDMA_CHAN_SLOT(slot);
+	if (slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	/* Copy the whole PaRAM set into the controller in one shot. */
+	rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
+		param, PARM_SIZE);
+}
+
+
+/**
+ * edma_read_slot - read parameter RAM data from slot
+ * @slot: number of parameter RAM slot being copied
+ * @param: where to store copy of parameter RAM data
+ *
+ * Use this to read data from a parameter RAM slot, perhaps to
+ * save them as a template for later reuse.
+ */
+void edma_read_slot(unsigned slot, struct edmacc_param *param)
+{
+	unsigned ctlr = EDMA_CTLR(slot);
+
+	slot = EDMA_CHAN_SLOT(slot);
+	if (slot >= edma_cc[ctlr]->num_slots)
+		return;
+
+	/* Snapshot the whole PaRAM set, e.g. as a reusable template. */
+	rt_memcpy(param, (void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
+		PARM_SIZE);
+}
+
+
+/*-----------------------------------------------------------------------*/
+
+/* Various EDMA channel control operations */
+
+/**
+ * edma_pause - pause dma on a channel
+ * @channel: on which edma_start() has been called
+ *
+ * This temporarily disables EDMA hardware events on the specified channel,
+ * preventing them from triggering new transfers on its behalf
+ */
+void edma_pause(unsigned channel)
+{
+	unsigned ctlr = EDMA_CTLR(channel);
+	unsigned int mask;
+
+	channel = EDMA_CHAN_SLOT(channel);
+	if (channel >= edma_cc[ctlr]->num_channels)
+		return;
+
+	/* Disable the hardware event for this channel (EECR). */
+	mask = BIT(channel & 0x1f);
+	edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
+}
+
+
+/**
+ * edma_resume - resumes dma on a paused channel
+ * @channel: on which edma_pause() has been called
+ *
+ * This re-enables EDMA hardware events on the specified channel.
+ */
+void edma_resume(unsigned channel)
+{
+	unsigned ctlr = EDMA_CTLR(channel);
+	unsigned int mask;
+
+	channel = EDMA_CHAN_SLOT(channel);
+	if (channel >= edma_cc[ctlr]->num_channels)
+		return;
+
+	/* Re-enable the hardware event for this channel (EESR). */
+	mask = BIT(channel & 0x1f);
+	edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
+}
+
+
+/**
+ * edma_start - start dma on a channel
+ * @channel: channel being activated
+ *
+ * Channels with event associations will be triggered by their hardware
+ * events, and channels without such associations will be triggered by
+ * software.  (At this writing there is no interface for using software
+ * triggers except with channels that don't support hardware triggers.)
+ *
+ * Returns zero on success, else negative errno.
+ */
+int edma_start(unsigned channel)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_cc[ctlr]->num_channels) {
+		int j = channel >> 5;
+		unsigned int mask = BIT(channel & 0x1f);
+
+		/* EDMA channels without event association */
+		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
+			edma_dbg("EDMA: ESR%d %08x\n", j,
+				edma_shadow0_read_array(ctlr, SH_ESR, j));
+			/* Software trigger via the event set register. */
+			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
+			return 0;
+		}
+
+		/* EDMA channel with event association */
+		edma_dbg("EDMA: ER%d %08x\n", j,
+			edma_shadow0_read_array(ctlr, SH_ER, j));
+		/* Clear any pending event or error */
+		edma_write_array(ctlr, EDMA_ECR, j, mask);
+		edma_write_array(ctlr, EDMA_EMCR, j, mask);
+		/* Clear any SER */
+		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+		/* Enable the hardware event last, once state is clean. */
+		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
+		edma_dbg("EDMA: EER%d %08x\n", j,
+			edma_shadow0_read_array(ctlr, SH_EER, j));
+		return 0;
+	}
+
+	return -RT_ERROR;
+}
+
+
+/**
+ * edma_stop - stops dma on the channel passed
+ * @channel: channel being deactivated
+ *
+ * When @lch is a channel, any active transfer is paused and
+ * all pending hardware events are cleared.  The current transfer
+ * may not be resumed, and the channel's Parameter RAM should be
+ * reinitialized before being reused.
+ */
+void edma_stop(unsigned channel)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_cc[ctlr]->num_channels) {
+		int j = channel >> 5;
+		unsigned int mask = BIT(channel & 0x1f);
+
+		/* Disable the event (EECR), then clear any pending event
+		 * (ECR), secondary event (SECR) and missed event (EMCR). */
+		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+		edma_write_array(ctlr, EDMA_EMCR, j, mask);
+
+		edma_dbg("EDMA: EER%d %08x\n", j,
+				edma_shadow0_read_array(ctlr, SH_EER, j));
+
+		/* REVISIT:  consider guarding against inappropriate event
+		 * chaining by overwriting with dummy_paramset.
+		 */
+	}
+}
+
+
+/******************************************************************************
+ *
+ * It cleans the ParamEntry and brings EDMA back to its initial state if media
+ * has been removed before EDMA has finished. It is useful for removable media.
+ * Arguments:
+ *      ch_no     - channel no
+ *
+ * Return: zero on success, or corresponding error no on failure
+ *
+ * FIXME this should not be needed ... edma_stop() should suffice.
+ *
+ *****************************************************************************/
+
+void edma_clean_channel(unsigned channel)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_cc[ctlr]->num_channels) {
+		int j = (channel >> 5);
+		unsigned int mask = BIT(channel & 0x1f);
+
+		edma_dbg("EDMA: EMR%d %08x\n", j,
+				edma_read_array(ctlr, EDMA_EMR, j));
+		/* Clear any pending event for this channel. */
+		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
+		/* Clear the corresponding EMR bits */
+		edma_write_array(ctlr, EDMA_EMCR, j, mask);
+		/* Clear any SER */
+		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+		/* Clear latched controller errors (queue watermarks, TCC). */
+		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
+	}
+}
+
+
+/*
+ * edma_clear_event - clear an outstanding event on the DMA channel
+ * Arguments:
+ *	channel - channel number
+ */
+void edma_clear_event(unsigned channel)
+{
+	unsigned ctlr = EDMA_CTLR(channel);
+
+	channel = EDMA_CHAN_SLOT(channel);
+	if (channel >= edma_cc[ctlr]->num_channels)
+		return;
+
+	/* ECR clears events 0-31; ECRH clears events 32-63. */
+	if (channel < 32)
+		edma_write(ctlr, EDMA_ECR, BIT(channel));
+	else
+		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
+}
+
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * edma_init - bring up EDMA channel controller 0
+ * @info: per-SoC configuration array (channel/slot/region counts, default
+ *	queue, event-queue to TC and priority mappings); must not be NULL
+ *
+ * Powers up the channel controller and all four transfer controllers,
+ * resets every PaRAM slot to the dummy set, installs the completion and
+ * error interrupt handlers, and programs the queue mappings from @info.
+ *
+ * Returns zero on success, else a negative RT error code.
+ */
+int edma_init(struct edma_soc_info **info)
+{
+	const rt_int8_t		(*queue_priority_mapping)[2];
+	const rt_int8_t		(*queue_tc_mapping)[2];
+	int			i;
+
+	RT_ASSERT(info != RT_NULL);
+
+	/* Power up the channel controller and all transfer controllers. */
+	psc_change_state(DAVINCI_DM365_LPSC_TPCC, PSC_ENABLE);
+	psc_change_state(DAVINCI_DM365_LPSC_TPTC0, PSC_ENABLE);
+	psc_change_state(DAVINCI_DM365_LPSC_TPTC1, PSC_ENABLE);
+	psc_change_state(DAVINCI_DM365_LPSC_TPTC2, PSC_ENABLE);
+	psc_change_state(DAVINCI_DM365_LPSC_TPTC3, PSC_ENABLE);
+
+	edmacc_regs_base[0] = (void *)EDMA_CC0_BASE_REG;
+
+	edma_cc[0] = rt_malloc(sizeof(struct edma));
+	if (!edma_cc[0])
+		return -RT_ENOMEM;	/* was: goto fail1 + rt_free(NULL) */
+	rt_memset(edma_cc[0], 0, sizeof(struct edma));
+
+	/* Clamp platform-provided sizes to what this driver supports. */
+	edma_cc[0]->num_channels = min_t(unsigned, info[0]->n_channel,
+						EDMA_MAX_DMACH);
+	edma_cc[0]->num_slots = min_t(unsigned, info[0]->n_slot,
+						EDMA_MAX_PARAMENTRY);
+	edma_cc[0]->num_cc = min_t(unsigned, info[0]->n_cc,
+						EDMA_MAX_CC);
+
+	edma_cc[0]->default_queue = info[0]->default_queue;
+	if (!edma_cc[0]->default_queue)
+		edma_cc[0]->default_queue = EVENTQ_1;
+
+	/* BUGFIX: previously printed edmacc_regs_base[j] with 'j'
+	 * uninitialized; only controller 0 is set up here. */
+	edma_dbg("DMA REG BASE ADDR=%p\n",
+		edmacc_regs_base[0]);
+
+	/* Reset every PaRAM slot to the harmless dummy transfer. */
+	for (i = 0; i < edma_cc[0]->num_slots; i++)
+		rt_memcpy((void *)(edmacc_regs_base[0] + PARM_OFFSET(i)),
+				&dummy_paramset, PARM_SIZE);
+
+	/* Mark all channels as unused */
+	rt_memset(edma_cc[0]->edma_unused, 0xff,
+		sizeof(edma_cc[0]->edma_unused));
+
+	/* Hook up the transfer-complete and CC-error interrupts. */
+	edma_cc[0]->irq_res_start = IRQ_CCINT0;
+	rt_hw_interrupt_install(IRQ_CCINT0, dma_irq_handler, RT_NULL, "edma");
+	rt_hw_interrupt_umask(IRQ_CCINT0);
+
+	edma_cc[0]->irq_res_end = IRQ_CCERRINT;
+	rt_hw_interrupt_install(IRQ_CCERRINT, dma_ccerr_handler, RT_NULL,
+				"edma_error");
+	rt_hw_interrupt_umask(IRQ_CCERRINT);
+
+	/* Everything lives on transfer controller 1 until otherwise
+	 * specified. This way, long transfers on the low priority queue
+	 * started by the codec engine will not cause audio defects.
+	 */
+	for (i = 0; i < edma_cc[0]->num_channels; i++)
+		map_dmach_queue(0, i, EVENTQ_1);
+
+	queue_tc_mapping = info[0]->queue_tc_mapping;
+	queue_priority_mapping = info[0]->queue_priority_mapping;
+
+	/* Event queue to TC mapping */
+	for (i = 0; queue_tc_mapping[i][0] != -1; i++)
+		map_queue_tc(0, queue_tc_mapping[i][0],
+				queue_tc_mapping[i][1]);
+
+	/* Event queue priority mapping */
+	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+		assign_priority_to_queue(0,
+					queue_priority_mapping[i][0],
+					queue_priority_mapping[i][1]);
+
+	/* Map the channel to param entry if channel mapping logic
+	 * exist
+	 */
+	if (edma_read(0, EDMA_CCCFG) & CHMAP_EXIST)
+		map_dmach_param(0);
+
+	/* No shadow-region access until channels are allocated. */
+	for (i = 0; i < info[0]->n_region; i++) {
+		edma_write_array2(0, EDMA_DRAE, i, 0, 0x0);
+		edma_write_array2(0, EDMA_DRAE, i, 1, 0x0);
+		edma_write_array(0, EDMA_QRAE, i, 0x0);
+	}
+	arch_num_cc++;
+
+	if (tc_errs_handled) {
+		/* BUGFIX: both installs were missing the 'data' argument
+		 * that rt_hw_interrupt_install() takes before the name. */
+		rt_hw_interrupt_install(IRQ_TCERRINT0, dma_tc0err_handler,
+					RT_NULL, "edma_tc0");
+		rt_hw_interrupt_umask(IRQ_TCERRINT0);
+		rt_hw_interrupt_install(IRQ_TCERRINT, dma_tc1err_handler,
+					RT_NULL, "edma_tc1");
+		rt_hw_interrupt_umask(IRQ_TCERRINT);
+	}
+
+	return 0;
+}
+
+

+ 308 - 0
bsp/dm365/platform/edma.h

@@ -0,0 +1,308 @@
+/*
+ *  TI DAVINCI dma definitions
+ *
+ *  Copyright (C) 2006-2009 Texas Instruments.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ * This EDMA3 programming framework exposes two basic kinds of resource:
+ *
+ *  Channel	Triggers transfers, usually from a hardware event but
+ *		also manually or by "chaining" from DMA completions.
+ *		Each channel is coupled to a Parameter RAM (PaRAM) slot.
+ *
+ *  Slot	Each PaRAM slot holds a DMA transfer descriptor (PaRAM
+ *		"set"), source and destination addresses, a link to a
+ *		next PaRAM slot (if any), options for the transfer, and
+ *		instructions for updating those addresses.  There are
+ *		more than twice as many slots as event channels.
+ *
+ * Each PaRAM set describes a sequence of transfers, either for one large
+ * buffer or for several discontiguous smaller buffers.  An EDMA transfer
+ * is driven only from a channel, which performs the transfers specified
+ * in its PaRAM slot until there are no more transfers.  When that last
+ * transfer completes, the "link" field may be used to reload the channel's
+ * PaRAM slot with a new transfer descriptor.
+ *
+ * The EDMA Channel Controller (CC) maps requests from channels into physical
+ * Transfer Controller (TC) requests when the channel triggers (by hardware
+ * or software events, or by chaining).  The two physical DMA channels provided
+ * by the TCs are thus shared by many logical channels.
+ *
+ * DaVinci hardware also has a "QDMA" mechanism which is not currently
+ * supported through this interface.  (DSP firmware uses it though.)
+ */
+
+#ifndef EDMA_H_
+#define EDMA_H_
+
+#include <rtthread.h>
+#include <dm36x.h>
+
+#ifdef RT_EDMA_DEBUG
+#define edma_dbg(fmt, ...)  rt_kprintf(fmt, ##__VA_ARGS__)
+#else
+#define edma_dbg(fmt, ...)
+#endif
+
+
+/* PaRAM slots are laid out like this */
+struct edmacc_param {
+	unsigned int opt;
+	unsigned int src;
+	unsigned int a_b_cnt;
+	unsigned int dst;
+	unsigned int src_dst_bidx;
+	unsigned int link_bcntrld;
+	unsigned int src_dst_cidx;
+	unsigned int ccnt;
+};
+
+#define CCINT0_INTERRUPT     16
+#define CCERRINT_INTERRUPT   17
+#define TCERRINT0_INTERRUPT   18
+#define TCERRINT1_INTERRUPT   19
+
+/* fields in edmacc_param.opt */
+#define SAM		BIT(0)
+#define DAM		BIT(1)
+#define SYNCDIM		BIT(2)
+#define STATIC		BIT(3)
+#define EDMA_FWID	(0x07 << 8)
+#define TCCMODE		BIT(11)
+#define EDMA_TCC(t)	((t) << 12)
+#define TCINTEN		BIT(20)
+#define ITCINTEN	BIT(21)
+#define TCCHEN		BIT(22)
+#define ITCCHEN		BIT(23)
+
+#define TRWORD (0x7<<2)
+#define PAENTRY (0x1ff<<5)
+
+/* DM365 specific EDMA3 Events Information */
+enum dm365_edma_ch {
+	DM365_DMA_TIMER3_TINT6,
+	DM365_DMA_TIMER3_TINT7,
+	DM365_DMA_MCBSP_TX = 2,
+	DM365_DMA_VCIF_TX = 2,
+	DM365_DMA_MCBSP_RX = 3,
+	DM365_DMA_VCIF_RX = 3,
+	DM365_DMA_VPSS_EVT1,
+	DM365_DMA_VPSS_EVT2,
+	DM365_DMA_VPSS_EVT3,
+	DM365_DMA_VPSS_EVT4,
+	DM365_DMA_TIMER2_TINT4,
+	DM365_DMA_TIMER2_TINT5,
+	DM365_DMA_SPI2XEVT,
+	DM365_DMA_SPI2REVT,
+	DM365_DMA_IMCOP_IMX0INT = 12,
+	DM365_DMA_KALEIDO_ARMINT = 12,
+	DM365_DMA_IMCOP_SEQINT,
+	DM365_DMA_SPI1XEVT,
+	DM365_DMA_SPI1REVT,
+	DM365_DMA_SPI0XEVT,
+	DM365_DMA_SPI0REVT,
+	DM365_DMA_URXEVT0 = 18,
+	DM365_DMA_SPI3XEVT = 18,
+	DM365_DMA_UTXEVT0 = 19,
+	DM365_DMA_SPI3REVT = 19,
+	DM365_DMA_URXEVT1,
+	DM365_DMA_UTXEVT1,
+	DM365_DMA_TIMER4_TINT8,
+	DM365_DMA_TIMER4_TINT9,
+	DM365_DMA_RTOINT,
+	DM365_DMA_GPIONT9,
+	DM365_DMA_MMC0RXEVT = 26,
+	DM365_DMA_MEMSTK_MSEVT = 26,
+	DM365_DMA_MMC0TXEVT,
+	DM365_DMA_I2C_ICREVT,
+	DM365_DMA_I2C_ICXEVT,
+	DM365_DMA_MMC1RXEVT,
+	DM365_DMA_MMC1TXEVT,
+	DM365_DMA_GPIOINT0,
+	DM365_DMA_GPIOINT1,
+	DM365_DMA_GPIOINT2,
+	DM365_DMA_GPIOINT3,
+	DM365_DMA_GPIOINT4,
+	DM365_DMA_GPIOINT5,
+	DM365_DMA_GPIOINT6,
+	DM365_DMA_GPIOINT7,
+	DM365_DMA_GPIOINT10 = 40,
+	DM365_DMA_EMAC_RXTHREESH = 40,
+	DM365_DMA_GPIOINT11 = 41,
+	DM365_DMA_EMAC_RXPULSE = 41,
+	DM365_DMA_GPIOINT12 = 42,
+	DM365_DMA_EMAC_TXPULSE = 42,
+	DM365_DMA_GPIOINT13 = 43,
+	DM365_DMA_EMAC_MISCPULSE = 43,
+	DM365_DMA_GPIOINT14 = 44,
+	DM365_DMA_SPI4XEVT = 44,
+	DM365_DMA_GPIOINT15 = 45,
+	DM365_DMA_SPI4REVT = 45,
+	DM365_DMA_ADC_ADINT,
+	DM365_DMA_GPIOINT8,
+	DM365_DMA_TIMER0_TINT0,
+	DM365_DMA_TIMER0_TINT1,
+	DM365_DMA_TIMER1_TINT2,
+	DM365_DMA_TIMER1_TINT3,
+	DM365_DMA_PWM0,
+	DM365_DMA_PWM1 = 53,
+	DM365_DMA_IMCOP_IMX1INT = 53,
+	DM365_DMA_PWM2 = 54,
+	DM365_DMA_IMCOP_NSFINT = 54,
+	DM365_DMA_PWM3 = 55,
+	DM365_DMA_KALEIDO6_CP_UNDEF = 55,
+	DM365_DMA_IMCOP_VLCDINT = 56,
+	DM365_DMA_KALEIDO5_CP_ECDCMP = 56,
+	DM365_DMA_IMCOP_BIMINT = 57,
+	DM365_DMA_KALEIDO8_CP_ME = 57,
+	DM365_DMA_IMCOP_DCTINT = 58,
+	DM365_DMA_KALEIDO1_CP_CALC = 58,
+	DM365_DMA_IMCOP_QIQINT = 59,
+	DM365_DMA_KALEIDO7_CP_IPE = 59,
+	DM365_DMA_IMCOP_BPSINT = 60,
+	DM365_DMA_KALEIDO2_CP_BS = 60,
+	DM365_DMA_IMCOP_VLCDERRINT = 61,
+	DM365_DMA_KALEIDO0_CP_LPF = 61,
+	DM365_DMA_IMCOP_RCNTINT = 62,
+	DM365_DMA_KALEIDO3_CP_MC = 62,
+	DM365_DMA_IMCOP_COPCINT = 63,
+	DM365_DMA_KALEIDO4_CP_ECDEND = 63,
+};
+/* end DM365 specific info */
+
+
+/*ch_status paramater of callback function possible values*/
+#define DMA_COMPLETE 1
+#define DMA_CC_ERROR 2
+#define DMA_TC1_ERROR 3
+#define DMA_TC2_ERROR 4
+
+enum address_mode {
+	INCR = 0,
+	FIFO = 1
+};
+
+enum fifo_width {
+	W8BIT = 0,
+	W16BIT = 1,
+	W32BIT = 2,
+	W64BIT = 3,
+	W128BIT = 4,
+	W256BIT = 5
+};
+
+enum dma_event_q {
+	EVENTQ_0 = 0,
+	EVENTQ_1 = 1,
+	EVENTQ_2 = 2,
+	EVENTQ_3 = 3,
+	EVENTQ_DEFAULT = -1
+};
+
+enum sync_dimension {
+	ASYNC = 0,
+	ABSYNC = 1
+};
+
+#define EDMA_CTLR_CHAN(ctlr, chan)	(((ctlr) << 16) | (chan))
+#define EDMA_CTLR(i)			((i) >> 16)
+#define EDMA_CHAN_SLOT(i)		((i) & 0xffff)
+
+#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
+#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
+#define EDMA_CONT_PARAMS_ANY		 1001
+#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
+#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+
+#define EDMA_MAX_CC               2
+
+/* alloc/free DMA channels and their dedicated parameter RAM slots */
+int edma_alloc_channel(int channel,
+	void (*callback)(unsigned channel, rt_uint16_t ch_status, void *data),
+	void *data, enum dma_event_q);
+void edma_free_channel(unsigned channel);
+
+/* alloc/free parameter RAM slots */
+int edma_alloc_slot(unsigned ctlr, int slot);
+void edma_free_slot(unsigned slot);
+
+/* alloc/free a set of contiguous parameter RAM slots */
+int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
+int edma_free_cont_slots(unsigned slot, int count);
+
+/* calls that operate on part of a parameter RAM slot */
+void edma_set_src(unsigned slot, rt_uint32_t src_port,
+				enum address_mode mode, enum fifo_width);
+void edma_set_dest(unsigned slot, rt_uint32_t dest_port,
+				 enum address_mode mode, enum fifo_width);
+void edma_get_position(unsigned slot, rt_uint32_t *src, rt_uint32_t *dst);
+void edma_set_src_index(unsigned slot, rt_int16_t src_bidx, rt_int16_t src_cidx);
+void edma_set_dest_index(unsigned slot, rt_int16_t dest_bidx, rt_int16_t dest_cidx);
+void edma_set_transfer_params(unsigned slot, rt_uint16_t acnt, rt_uint16_t bcnt, rt_uint16_t ccnt,
+		rt_uint16_t bcnt_rld, enum sync_dimension sync_mode);
+void edma_link(unsigned from, unsigned to);
+void edma_unlink(unsigned from);
+
+/* calls that operate on an entire parameter RAM slot */
+void edma_write_slot(unsigned slot, const struct edmacc_param *params);
+void edma_read_slot(unsigned slot, struct edmacc_param *params);
+
+/* channel control operations */
+int edma_start(unsigned channel);
+void edma_stop(unsigned channel);
+void edma_clean_channel(unsigned channel);
+void edma_clear_event(unsigned channel);
+void edma_pause(unsigned channel);
+void edma_resume(unsigned channel);
+
+
+struct edma_rsv_info {
+
+	const rt_int16_t	(*rsv_chans)[2];
+	const rt_int16_t	(*rsv_slots)[2];
+};
+
+/* platform_data for EDMA driver */
+struct edma_soc_info {
+
+	/* how many dma resources of each type */
+	unsigned	n_channel;
+	unsigned	n_region;
+	unsigned	n_slot;
+	unsigned	n_tc;
+	unsigned	n_cc;
+	enum dma_event_q	default_queue;
+
+	/* Resource reservation for other cores */
+	struct edma_rsv_info	*rsv;
+
+	const rt_int8_t	(*queue_tc_mapping)[2];
+	const rt_int8_t	(*queue_priority_mapping)[2];
+};
+
+int edma_init(struct edma_soc_info **info);
+
+
+#endif

+ 199 - 0
bsp/dm365/platform/findbit.S

@@ -0,0 +1,199 @@
+/*
+ *  linux/arch/arm/lib/findbit.S
+ *
+ *  Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16th March 2001 - John Ripley <jripley@sonicblue.com>
+ *   Fixed so that "size" is an exclusive not an inclusive quantity.
+ *   All users of these functions expect exclusive sizes, and may
+ *   also call with zero size.
+ * Reworked by rmk.
+ */
+//#include <rtthread.h>
+                //.text
+
+/*
+ * Purpose  : Find a 'zero' bit
+ * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
+ */
+@ Find the first clear (zero) bit in the bitmap at r0, searching r1 bits.
+@ Returns the bit index in r0, or r1 if every bit is set (or r1 == 0).
+.globl _find_first_zero_bit_le
+_find_first_zero_bit_le:
+		teq	r1, #0	
+		beq	3f
+		mov	r2, #0
+1:
+@ NOTE(review): the next three instructions load the same byte twice -- the
+@ scaled-offset ldrb is the ARM-mode form and the lsr+ldrb pair is the Thumb
+@ form of the upstream ARM()/THUMB() macros, both left active here. The
+@ second load overwrites r3 with the same value, so behaviour is correct
+@ but one load is wasted.
+ 		ldrb	r3, [r0, r2, lsr #3]
+ 		lsr	r3, r2, #3
+		ldrb	r3, [r0, r3]
+		eors	r3, r3, #0xff		@ invert bits
+		bne	.L_found		@ any now set - found zero bit
+		add	r2, r2, #8		@ next bit pointer
+2:		cmp	r2, r1			@ any more?
+		blo	1b
+3:		mov	r0, r1			@ no free bits
+		mov	pc, lr
+//ENDPROC(_find_first_zero_bit_le)
+
+/*
+ * Purpose  : Find next 'zero' bit
+ * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
+ */
+@ Find the next clear bit at or after bit offset r2 in the bitmap at r0,
+@ searching up to r1 bits. Falls back into _find_first_zero_bit_le's loop
+@ (labels 1b/2b/3b above) once the offset is byte-aligned.
+.globl _find_next_zero_bit_le
+_find_next_zero_bit_le:
+		teq	r1, #0
+		beq	3b
+		ands	ip, r2, #7
+		beq	1b			@ If new byte, goto old routine
+@ NOTE(review): duplicated ARM/Thumb load forms left active, as in
+@ _find_first_zero_bit_le above; harmless but redundant.
+ 		ldrb	r3, [r0, r2, lsr #3]
+ 		lsr	r3, r2, #3
+ 		ldrb	r3, [r0, r3]
+		eor	r3, r3, #0xff		@ now looking for a 1 bit
+		movs	r3, r3, lsr ip		@ shift off unused bits
+		bne	.L_found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
+//ENDPROC(_find_next_zero_bit_le)
+
+/*
+ * Purpose  : Find a 'one' bit
+ * Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
+ */
+@ Find the first set (one) bit in the bitmap at r0, searching r1 bits.
+@ Returns the bit index in r0, or r1 if no bit is set (or r1 == 0).
+.globl _find_first_bit_le
+_find_first_bit_le:
+		teq	r1, #0	
+		beq	3f
+		mov	r2, #0
+1:
+@ NOTE(review): duplicated ARM/Thumb load forms left active (see
+@ _find_first_zero_bit_le); the second load overwrites the first.
+ 		ldrb	r3, [r0, r2, lsr #3]
+ 		lsr	r3, r2, #3
+ 		ldrb	r3, [r0, r3]
+		movs	r3, r3
+		bne	.L_found		@ any now set - found zero bit
+		add	r2, r2, #8		@ next bit pointer
+2:		cmp	r2, r1			@ any more?
+		blo	1b
+3:		mov	r0, r1			@ no free bits
+		mov	pc, lr
+//ENDPROC(_find_first_bit_le)
+
+/*
+ * Purpose  : Find next 'one' bit
+ * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
+ */
+@ Find the next set bit at or after bit offset r2 in the bitmap at r0,
+@ searching up to r1 bits. Reuses _find_first_bit_le's loop labels above.
+.globl _find_next_bit_le
+_find_next_bit_le:
+		teq	r1, #0
+		beq	3b
+		ands	ip, r2, #7
+		beq	1b			@ If new byte, goto old routine
+@ NOTE(review): duplicated ARM/Thumb load forms left active; redundant.
+ 		ldrb	r3, [r0, r2, lsr #3]
+ 		lsr	r3, r2, #3
+ 		ldrb	r3, [r0, r3]
+		movs	r3, r3, lsr ip		@ shift off unused bits
+		bne	.L_found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
+//ENDPROC(_find_next_bit_le)
+
+#ifdef __ARMEB__
+
+ENTRY(_find_first_zero_bit_be)
+		teq	r1, #0
+		beq	3f
+		mov	r2, #0
+1:		eor	r3, r2, #0x18		@ big endian byte ordering
+ ARM(		ldrb	r3, [r0, r3, lsr #3]	)
+ THUMB(		lsr	r3, #3			)
+ THUMB(		ldrb	r3, [r0, r3]		)
+		eors	r3, r3, #0xff		@ invert bits
+		bne	.L_found		@ any now set - found zero bit
+		add	r2, r2, #8		@ next bit pointer
+2:		cmp	r2, r1			@ any more?
+		blo	1b
+3:		mov	r0, r1			@ no free bits
+		mov	pc, lr
+ENDPROC(_find_first_zero_bit_be)
+
+ENTRY(_find_next_zero_bit_be)
+		teq	r1, #0
+		beq	3b
+		ands	ip, r2, #7
+		beq	1b			@ If new byte, goto old routine
+		eor	r3, r2, #0x18		@ big endian byte ordering
+ ARM(		ldrb	r3, [r0, r3, lsr #3]	)
+ THUMB(		lsr	r3, #3			)
+ THUMB(		ldrb	r3, [r0, r3]		)
+		eor	r3, r3, #0xff		@ now looking for a 1 bit
+		movs	r3, r3, lsr ip		@ shift off unused bits
+		bne	.L_found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
+ENDPROC(_find_next_zero_bit_be)
+
+ENTRY(_find_first_bit_be)
+		teq	r1, #0
+		beq	3f
+		mov	r2, #0
+1:		eor	r3, r2, #0x18		@ big endian byte ordering
+ ARM(		ldrb	r3, [r0, r3, lsr #3]	)
+ THUMB(		lsr	r3, #3			)
+ THUMB(		ldrb	r3, [r0, r3]		)
+		movs	r3, r3
+		bne	.L_found		@ any now set - found zero bit
+		add	r2, r2, #8		@ next bit pointer
+2:		cmp	r2, r1			@ any more?
+		blo	1b
+3:		mov	r0, r1			@ no free bits
+		mov	pc, lr
+ENDPROC(_find_first_bit_be)
+
+ENTRY(_find_next_bit_be)
+		teq	r1, #0
+		beq	3b
+		ands	ip, r2, #7
+		beq	1b			@ If new byte, goto old routine
+		eor	r3, r2, #0x18		@ big endian byte ordering
+ ARM(		ldrb	r3, [r0, r3, lsr #3]	)
+ THUMB(		lsr	r3, #3			)
+ THUMB(		ldrb	r3, [r0, r3]		)
+		movs	r3, r3, lsr ip		@ shift off unused bits
+		bne	.L_found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
+ENDPROC(_find_next_bit_be)
+
+#endif
+
+/*
+ * One or more bits in the LSB of r3 are assumed to be set.
+ */
+.L_found:
+#if 1 //__LINUX_ARM_ARCH__ >= 5
+@ Isolate the lowest set bit with r3 & -r3, then clz gives 31-index,
+@ so rsb #31 recovers the bit position within the byte; add the byte's
+@ base bit offset (r2) to get the absolute index. ARMv5+ path (forced
+@ on by the "#if 1" above; clz requires ARMv5).
+		rsb	r0, r3, #0
+		and	r3, r3, r0
+		clz	r3, r3
+		rsb	r3, r3, #31
+		add	r0, r2, r3
+#else
+@ Pre-ARMv5 fallback: binary search for the lowest set bit (dead code
+@ while the #if above is hard-wired to 1).
+		tst	r3, #0x0f
+		addeq	r2, r2, #4
+		movne	r3, r3, lsl #4
+		tst	r3, #0x30
+		addeq	r2, r2, #2
+		movne	r3, r3, lsl #2
+		tst	r3, #0x40
+		addeq	r2, r2, #1
+		mov	r0, r2
+#endif
+		cmp	r1, r0			@ Clamp to maxbit
+		movlo	r0, r1
+		mov	pc, lr
+

+ 298 - 0
bsp/dm365/platform/interrupt.c

@@ -0,0 +1,298 @@
+/*
+ * File      : trap.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://openlab.rt-thread.com/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2010-11-13     weety      first version
+ */
+
+#include <rtthread.h>
+#include <rthw.h>
+#include "dm36x.h"
+
+#define MAX_HANDLERS	64
+
+extern rt_uint32_t rt_interrupt_nest;
+
+struct rt_irq_desc irq_desc[MAX_HANDLERS];
+
+/* exception and interrupt handler table */
+rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
+rt_uint32_t rt_thread_switch_interrupt_flag;
+
+#define IRQ_BIT(irq)		((irq) & 0x1f)
+
+#define FIQ_REG0_OFFSET		0x0000
+#define FIQ_REG1_OFFSET		0x0004
+#define IRQ_REG0_OFFSET		0x0008
+#define IRQ_REG1_OFFSET		0x000C
+#define IRQ_ENT_REG0_OFFSET	0x0018
+#define IRQ_ENT_REG1_OFFSET	0x001C
+#define IRQ_INCTL_REG_OFFSET	0x0020
+#define IRQ_EABASE_REG_OFFSET	0x0024
+#define IRQ_INTPRI0_REG_OFFSET	0x0030
+#define IRQ_INTPRI7_REG_OFFSET	0x004C
+
+/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
+static const rt_uint8_t dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
+	[IRQ_DM3XX_VPSSINT0]	= 2,
+	[IRQ_DM3XX_VPSSINT1]	= 6,
+	[IRQ_DM3XX_VPSSINT2]	= 6,
+	[IRQ_DM3XX_VPSSINT3]	= 6,
+	[IRQ_DM3XX_VPSSINT4]	= 6,
+	[IRQ_DM3XX_VPSSINT5]	= 6,
+	[IRQ_DM3XX_VPSSINT6]	= 6,
+	[IRQ_DM3XX_VPSSINT7]	= 7,
+	[IRQ_DM3XX_VPSSINT8]	= 6,
+	[IRQ_ASQINT]		= 6,
+	[IRQ_DM365_IMXINT0]	= 6,
+	[IRQ_DM3XX_IMCOPINT]	= 6,
+	[IRQ_USBINT]		= 4,
+	[IRQ_DM3XX_RTOINT]	= 4,
+	[IRQ_DM3XX_TINT5]	= 7,
+	[IRQ_DM3XX_TINT6]	= 7,
+	[IRQ_CCINT0]		= 5,	/* dma */
+	[IRQ_DM3XX_SPINT1_0]	= 5,	/* dma */
+	[IRQ_DM3XX_SPINT1_1]	= 5,	/* dma */
+	[IRQ_DM3XX_SPINT2_0]	= 5,	/* dma */
+	[IRQ_DM365_PSCINT]	= 7,
+	[IRQ_DM3XX_SPINT2_1]	= 7,
+	[IRQ_DM3XX_TINT7]	= 4,
+	[IRQ_DM3XX_SDIOINT0]	= 7,
+	[IRQ_DM365_MBXINT]	= 7,
+	[IRQ_DM365_MBRINT]	= 7,
+	[IRQ_DM3XX_MMCINT0]	= 7,
+	[IRQ_DM3XX_MMCINT1]	= 7,
+	[IRQ_DM3XX_PWMINT3]	= 7,
+	[IRQ_DM365_DDRINT]	= 7,
+	[IRQ_DM365_AEMIFINT]	= 7,
+	[IRQ_DM3XX_SDIOINT1]	= 4,
+	[IRQ_DM365_TINT0]	= 2,	/* clockevent */
+	[IRQ_DM365_TINT1]	= 2,	/* clocksource */
+	[IRQ_DM365_TINT2]	= 7,	/* DSP timer */
+	[IRQ_DM365_TINT3]	= 7,	/* system tick */
+	[IRQ_PWMINT0]		= 7,
+	[IRQ_PWMINT1]		= 7,
+	[IRQ_DM365_PWMINT2]	= 7,
+	[IRQ_DM365_IICINT]	= 3,
+	[IRQ_UARTINT0]		= 3,
+	[IRQ_UARTINT1]		= 3,
+	[IRQ_DM3XX_SPINT0_0]	= 3,
+	[IRQ_DM3XX_SPINT0_1]	= 3,
+	[IRQ_DM3XX_GPIO0]	= 3,
+	[IRQ_DM3XX_GPIO1]	= 7,
+	[IRQ_DM3XX_GPIO2]	= 4,
+	[IRQ_DM3XX_GPIO3]	= 4,
+	[IRQ_DM3XX_GPIO4]	= 7,
+	[IRQ_DM3XX_GPIO5]	= 7,
+	[IRQ_DM3XX_GPIO6]	= 7,
+	[IRQ_DM3XX_GPIO7]	= 7,
+	[IRQ_DM3XX_GPIO8]	= 7,
+	[IRQ_DM3XX_GPIO9]	= 7,
+	[IRQ_DM365_GPIO10]	= 7,
+	[IRQ_DM365_GPIO11]	= 7,
+	[IRQ_DM365_GPIO12]	= 7,
+	[IRQ_DM365_GPIO13]	= 7,
+	[IRQ_DM365_GPIO14]	= 7,
+	[IRQ_DM365_GPIO15]	= 7,
+	[IRQ_DM365_KEYINT]	= 7,
+	[IRQ_DM365_COMMTX]	= 7,
+	[IRQ_DM365_COMMRX]	= 7,
+	[IRQ_EMUINT]		= 7,
+};
+
+/* Read a 32-bit AINTC register at the given offset from its base. */
+static inline unsigned int davinci_irq_readl(int offset)
+{
+	return davinci_readl(DAVINCI_ARM_INTC_BASE + offset);
+}
+
+/* Write a 32-bit value to the AINTC register at the given offset. */
+static inline void davinci_irq_writel(unsigned long value, int offset)
+{
+	davinci_writel(value, DAVINCI_ARM_INTC_BASE + offset);
+}
+
+/**
+ * @addtogroup DM36X
+ */
+/*@{*/
+
+/**
+ * Default service routine installed on every vector at init time.
+ * It only reports the unexpected interrupt; param is unused.
+ *
+ * @param vector the interrupt number that fired
+ * @param param  unused (always RT_NULL for the default entry)
+ * @return RT_NULL
+ */
+rt_isr_handler_t rt_hw_interrupt_handle(int vector, void *param)
+{
+	/* fix typo in the diagnostic message: "occured" -> "occurred" */
+	rt_kprintf("Unhandled interrupt %d occurred!!!\n", vector);
+	return RT_NULL;
+}
+
+/**
+ * This function will initialize hardware interrupt
+ */
+void rt_hw_interrupt_init(void)
+{
+	int i;
+	register rt_uint32_t idx;
+	const rt_uint8_t *priority;
+	priority = dm365_default_priorities;
+
+	/* Clear all interrupt requests */
+	davinci_irq_writel(~0x0, FIQ_REG0_OFFSET);
+	davinci_irq_writel(~0x0, FIQ_REG1_OFFSET);
+	davinci_irq_writel(~0x0, IRQ_REG0_OFFSET);
+	davinci_irq_writel(~0x0, IRQ_REG1_OFFSET);
+
+	/* Disable all interrupts */
+	davinci_irq_writel(0x0, IRQ_ENT_REG0_OFFSET);
+	davinci_irq_writel(0x0, IRQ_ENT_REG1_OFFSET);
+
+	/* Interrupts disabled immediately, IRQ entry reflects all */
+	davinci_irq_writel(0x0, IRQ_INCTL_REG_OFFSET);
+
+	/* we don't use the hardware vector table, just its entry addresses */
+	davinci_irq_writel(0, IRQ_EABASE_REG_OFFSET);
+
+	/* Clear all interrupt requests */
+	/* NOTE(review): requests are cleared a second time after the control
+	 * registers are programmed -- presumably to drop anything latched
+	 * while reconfiguring; confirm against the AINTC TRM. */
+	davinci_irq_writel(~0x0, FIQ_REG0_OFFSET);
+	davinci_irq_writel(~0x0, FIQ_REG1_OFFSET);
+	davinci_irq_writel(~0x0, IRQ_REG0_OFFSET);
+	davinci_irq_writel(~0x0, IRQ_REG1_OFFSET);
+
+	/* Program the 8 INTPRI registers: each packs eight priorities,
+	 * one nibble per IRQ (only the low 3 bits of each are used),
+	 * consuming all 64 entries of dm365_default_priorities. */
+	for (i = IRQ_INTPRI0_REG_OFFSET; i <= IRQ_INTPRI7_REG_OFFSET; i += 4) {
+		unsigned	j;
+		rt_uint32_t	pri;
+
+		for (j = 0, pri = 0; j < 32; j += 4, priority++)
+			pri |= (*priority & 0x07) << j;
+		davinci_irq_writel(pri, i);
+	}
+
+	/* init exceptions table: every vector starts on the default
+	 * "unhandled interrupt" stub with no parameter */
+	for(idx=0; idx < MAX_HANDLERS; idx++)
+	{
+		
+		irq_desc[idx].handler = (rt_isr_handler_t)rt_hw_interrupt_handle;
+		irq_desc[idx].param = RT_NULL;
+	#ifdef RT_USING_INTERRUPT_INFO
+		rt_snprintf(irq_desc[idx].name, RT_NAME_MAX - 1, "default");
+		irq_desc[idx].counter = 0;
+	#endif
+	}
+
+	/* init interrupt nest, and context in thread sp */
+	rt_interrupt_nest = 0;
+	rt_interrupt_from_thread = 0;
+	rt_interrupt_to_thread = 0;
+	rt_thread_switch_interrupt_flag = 0;
+}
+
+/**
+ * This function will mask a interrupt.
+ * @param vector the interrupt number
+ */
+void rt_hw_interrupt_mask(int irq)
+{
+	int reg;
+	rt_uint32_t bit, val;
+
+	/* IRQs 0-31 live in ENT_REG0, 32-63 in ENT_REG1 */
+	bit = 1 << IRQ_BIT(irq);
+	reg = (irq > 31) ? IRQ_ENT_REG1_OFFSET : IRQ_ENT_REG0_OFFSET;
+
+	/* clear the enable bit for this line (read-modify-write) */
+	val = davinci_irq_readl(reg);
+	val &= ~bit;
+	davinci_irq_writel(val, reg);
+}
+
+/**
+ * This function will un-mask a interrupt.
+ * @param vector the interrupt number
+ */
+void rt_hw_interrupt_umask(int irq)
+{
+	int reg;
+	rt_uint32_t bit, val;
+
+	/* IRQs 0-31 live in ENT_REG0, 32-63 in ENT_REG1 */
+	bit = 1 << IRQ_BIT(irq);
+	reg = (irq > 31) ? IRQ_ENT_REG1_OFFSET : IRQ_ENT_REG0_OFFSET;
+
+	/* set the enable bit for this line (read-modify-write) */
+	val = davinci_irq_readl(reg);
+	val |= bit;
+	davinci_irq_writel(val, reg);
+}
+
+/**
+ * This function will install a interrupt service routine to a interrupt.
+ * @param vector the interrupt number
+ * @param handler the interrupt service routine to be installed
+ * @param param the interrupt service function parameter
+ * @param name the interrupt name
+ * @return old handler
+ */
+
+rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
+									void *param, char *name)
+{
+	rt_isr_handler_t old_handler = RT_NULL;
+
+	/* NOTE(review): only the upper bound is checked; a negative vector
+	 * would index out of bounds -- callers must pass valid IRQ numbers */
+	if(vector < MAX_HANDLERS)
+	{
+		old_handler = irq_desc[vector].handler;
+		/* a RT_NULL handler leaves the existing entry untouched
+		 * (the old handler is still returned) */
+		if (handler != RT_NULL)
+		{
+			irq_desc[vector].handler = (rt_isr_handler_t)handler;
+			irq_desc[vector].param = param;
+		#ifdef RT_USING_INTERRUPT_INFO
+			rt_snprintf(irq_desc[vector].name, RT_NAME_MAX - 1, "%s", name);
+			irq_desc[vector].counter = 0;
+		#endif
+		}
+	}
+
+	return old_handler;
+
+}
+
+#ifdef RT_USING_FINSH
+#ifdef RT_USING_INTERRUPT_INFO
+/**
+ * List every interrupt that has a real handler installed, printing its
+ * vector number, trigger count and name. Entries still carrying the
+ * "default" placeholder name are skipped.
+ */
+void list_irq(void)
+{
+	int irq;
+	
+	rt_kprintf("number\tcount\tname\n");
+	for (irq = 0; irq < MAX_HANDLERS; irq++)
+	{
+		if (rt_strncmp(irq_desc[irq].name, "default", sizeof("default")))
+		{
+			/* use %d: irq is an int (the old %02ld/%10ld specifiers
+			 * did not match the argument types) */
+			rt_kprintf("%02d: %10d  %s\n", irq, irq_desc[irq].counter, irq_desc[irq].name);
+		}
+	}
+}
+
+#include <finsh.h>
+FINSH_FUNCTION_EXPORT(list_irq, list system irq);
+
+#ifdef FINSH_USING_MSH
+/* msh command wrapper: forwards to list_irq(); arguments are ignored. */
+int cmd_list_irq(int argc, char** argv)
+{
+    list_irq();
+    return 0;
+}
+FINSH_FUNCTION_EXPORT_ALIAS(cmd_list_irq, __cmd_list_irq, list system irq.);
+
+#endif
+#endif
+#endif
+
+/*@}*/

+ 181 - 0
bsp/dm365/platform/irqs.h

@@ -0,0 +1,181 @@
+#ifndef __DM36X_IRQS_H__
+#define __DM36X_IRQS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Base address */
+#define DAVINCI_ARM_INTC_BASE 0x01C48000
+
+#define DAVINCI_N_AINTC_IRQ	64
+
+/* Interrupt lines */
+#define IRQ_VDINT0       0
+#define IRQ_VDINT1       1
+#define IRQ_VDINT2       2
+#define IRQ_HISTINT      3
+#define IRQ_H3AINT       4
+#define IRQ_PRVUINT      5
+#define IRQ_RSZINT       6
+#define IRQ_VFOCINT      7
+#define IRQ_VENCINT      8
+#define IRQ_ASQINT       9
+#define IRQ_IMXINT       10
+#define IRQ_VLCDINT      11
+#define IRQ_USBINT       12
+#define IRQ_EMACINT      13
+
+#define IRQ_CCINT0       16
+#define IRQ_CCERRINT     17
+#define IRQ_TCERRINT0    18
+#define IRQ_TCERRINT     19
+#define IRQ_PSCIN        20
+
+#define IRQ_IDE          22
+#define IRQ_HPIINT       23
+#define IRQ_MBXINT       24
+#define IRQ_MBRINT       25
+#define IRQ_MMCINT       26
+#define IRQ_SDIOINT      27
+#define IRQ_MSINT        28
+#define IRQ_DDRINT       29
+#define IRQ_AEMIFINT     30
+#define IRQ_VLQINT       31
+#define IRQ_TINT0_TINT12 32
+#define IRQ_TINT0_TINT34 33
+#define IRQ_TINT1_TINT12 34
+#define IRQ_TINT1_TINT34 35
+#define IRQ_PWMINT0      36
+#define IRQ_PWMINT1      37
+#define IRQ_PWMINT2      38
+#define IRQ_I2C          39
+#define IRQ_UARTINT0     40
+#define IRQ_UARTINT1     41
+#define IRQ_UARTINT2     42
+#define IRQ_SPINT0       43
+#define IRQ_SPINT1       44
+
+#define IRQ_DSP2ARM0     46
+#define IRQ_DSP2ARM1     47
+#define IRQ_GPIO0        48
+#define IRQ_GPIO1        49
+#define IRQ_GPIO2        50
+#define IRQ_GPIO3        51
+#define IRQ_GPIO4        52
+#define IRQ_GPIO5        53
+#define IRQ_GPIO6        54
+#define IRQ_GPIO7        55
+#define IRQ_GPIOBNK0     56
+#define IRQ_GPIOBNK1     57
+#define IRQ_GPIOBNK2     58
+#define IRQ_GPIOBNK3     59
+#define IRQ_GPIOBNK4     60
+#define IRQ_COMMTX       61
+#define IRQ_COMMRX       62
+#define IRQ_EMUINT       63
+
+/*
+ * Base Interrupts common across DM355 and DM365
+ */
+#define IRQ_DM3XX_VPSSINT0	0
+#define IRQ_DM3XX_VPSSINT1	1
+#define IRQ_DM3XX_VPSSINT2	2
+#define IRQ_DM3XX_VPSSINT3	3
+#define IRQ_DM3XX_VPSSINT4	4
+#define IRQ_DM3XX_VPSSINT5	5
+#define IRQ_DM3XX_VPSSINT6	6
+#define IRQ_DM3XX_VPSSINT7	7
+#define IRQ_DM3XX_VPSSINT8	8
+#define IRQ_DM3XX_IMCOPINT	11
+#define IRQ_DM3XX_RTOINT	13
+#define IRQ_DM3XX_TINT4		13
+#define IRQ_DM3XX_TINT2_TINT12	13
+#define IRQ_DM3XX_TINT5		14
+#define IRQ_DM3XX_TINT2_TINT34	14
+#define IRQ_DM3XX_TINT6		15
+#define IRQ_DM3XX_TINT3_TINT12	15
+#define IRQ_DM3XX_SPINT1_0	17
+#define IRQ_DM3XX_SPINT1_1	18
+#define IRQ_DM3XX_SPINT2_0	19
+#define IRQ_DM3XX_SPINT2_1	21
+#define IRQ_DM3XX_TINT7		22
+#define IRQ_DM3XX_TINT3_TINT34	22
+#define IRQ_DM3XX_SDIOINT0	23
+#define IRQ_DM3XX_MMCINT0	26
+#define IRQ_DM3XX_MSINT		26
+#define IRQ_DM3XX_MMCINT1	27
+#define IRQ_DM3XX_PWMINT3	28
+#define IRQ_DM3XX_SDIOINT1	31
+#define IRQ_DM3XX_SPINT0_0	42
+#define IRQ_DM3XX_SPINT0_1	43
+#define IRQ_DM3XX_GPIO0		44
+#define IRQ_DM3XX_GPIO1		45
+#define IRQ_DM3XX_GPIO2		46
+#define IRQ_DM3XX_GPIO3		47
+#define IRQ_DM3XX_GPIO4		48
+#define IRQ_DM3XX_GPIO5		49
+#define IRQ_DM3XX_GPIO6		50
+#define IRQ_DM3XX_GPIO7		51
+#define IRQ_DM3XX_GPIO8		52
+#define IRQ_DM3XX_GPIO9		53
+
+/* DaVinci DM365-specific Interrupts */
+#define IRQ_DM365_INSFINT	7
+#define IRQ_DM365_IMXINT1	8
+#define IRQ_DM365_IMXINT0	10
+#define IRQ_DM365_KLD_ARMINT	10
+#define IRQ_DM365_CCERRINT	17
+#define IRQ_DM365_TCERRINT0	18
+#define IRQ_DM365_SPINT2_0	19
+#define IRQ_DM365_PSCINT	20
+#define IRQ_DM365_TVINT		20
+#define IRQ_DM365_SPINT4_0	21
+#define IRQ_DM365_MBXINT	24
+#define IRQ_DM365_VCINT		24
+#define IRQ_DM365_MBRINT	25
+#define IRQ_DM365_TINT9		28
+#define IRQ_DM365_TINT4_TINT34	28
+#define IRQ_DM365_DDRINT	29
+#define IRQ_DM365_RTCINT	29
+#define IRQ_DM365_AEMIFINT	30
+#define IRQ_DM365_HPIINT	30
+#define IRQ_DM365_TINT0		32
+#define IRQ_DM365_TINT0_TINT12	32
+#define IRQ_DM365_TINT1		33
+#define IRQ_DM365_TINT0_TINT34	33
+#define IRQ_DM365_TINT2		34
+#define IRQ_DM365_TINT1_TINT12	34
+#define IRQ_DM365_TINT3		35
+#define IRQ_DM365_TINT1_TINT34	35
+#define IRQ_DM365_PWMINT2	38
+#define IRQ_DM365_TINT8		38
+#define IRQ_DM365_TINT4_TINT12	38
+#define IRQ_DM365_IICINT	39
+#define IRQ_DM365_SPINT3_0	43
+#define IRQ_DM365_EMAC_RXTHRESH	52
+#define IRQ_DM365_EMAC_RXPULSE	53
+#define IRQ_DM365_GPIO10	54
+#define IRQ_DM365_EMAC_TXPULSE	54
+#define IRQ_DM365_GPIO11	55
+#define IRQ_DM365_EMAC_MISCPULSE 55
+#define IRQ_DM365_GPIO12	56
+#define IRQ_DM365_PWRGIO0	56
+#define IRQ_DM365_GPIO13	57
+#define IRQ_DM365_PWRGIO1	57
+#define IRQ_DM365_GPIO14	58
+#define IRQ_DM365_PWRGIO2	58
+#define IRQ_DM365_GPIO15	59
+#define IRQ_DM365_ADCINT	59
+#define IRQ_DM365_KEYINT	60
+#define IRQ_DM365_COMMTX	61
+#define IRQ_DM365_TCERRINT2	61
+#define IRQ_DM365_COMMRX	62
+#define IRQ_DM365_TCERRINT3	62
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif

+ 48 - 0
bsp/dm365/platform/psc.c

@@ -0,0 +1,48 @@
+
+#include "dm36x.h"
+
+
+/* ------------------------------------------------------------------------ *
+ *  psc_change_state( id, state )                                           *
+ *      id    = Domain #ID                                                  *
+ *      state = ( ENABLE, DISABLE, SYNCRESET, RESET )                       *
+ *              (   =3  ,   =2   ,    =1    ,   =0  )                       *
+ * ------------------------------------------------------------------------ */
+void psc_change_state(int id, int state)
+{
+	rt_uint32_t mdstat, mdctl;
+
+	/* reject module ids beyond the last DM365 LPSC domain */
+	if (id > DAVINCI_DM365_LPSC_KALEIDO)
+		return;
+
+	/* per-module status/control registers are 4 bytes apart */
+	mdstat = PSC_MDSTAT_BASE + (id * 4);
+	mdctl = PSC_MDCTL_BASE + (id * 4);
+
+    /*
+     *  Step 0 - Ignore request if the state is already set as is
+     */
+    if ((readl(mdstat) & 0x1f) == state)
+        return;
+
+    /*
+     *  Step 1 - Wait for PTSTAT.GOSTAT to clear
+     *  NOTE(review): unbounded busy-wait; hangs if a previous power
+     *  transition never completes -- consider a timeout.
+     */
+    while (readl(PSC_PTSTAT) & 1) ;
+
+    /*
+     *  Step 2 - Set MDCTLx.NEXT to new state
+     */
+    writel(readl(mdctl) & (~0x1f), mdctl);
+    writel(readl(mdctl) | state, mdctl);
+
+    /*
+     *  Step 3 - Start power transition ( set PTCMD.GO to 1 )
+     */
+    writel(readl(PSC_PTCMD) | 1, PSC_PTCMD);
+
+    /*
+     *  Step 4 - Wait for PTSTAT.GOSTAT to clear
+     *  (also an unbounded busy-wait, see Step 1)
+     */
+    while (readl(PSC_PTSTAT) & 1) ;
+}
+

+ 86 - 0
bsp/dm365/platform/psc.h

@@ -0,0 +1,86 @@
+
+#ifndef __DM36X_PSC_H
+#define __DM36X_PSC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* PSC register offsets */
+#define EPCPR		0x070
+#define PTCMD		0x120
+#define PTSTAT		0x128
+#define PDSTAT		0x200
+#define PDCTL1		0x304
+#define MDSTAT(n)	(0x800 + (n) * 4)
+#define MDCTL(n)	(0xA00 + (n) * 4)
+
+/* Power and Sleep Controller (PSC) Domains */
+#define DAVINCI_GPSC_ARMDOMAIN		0
+#define DAVINCI_GPSC_DSPDOMAIN		1
+
+
+#define DAVINCI_DM365_LPSC_TPCC		0
+#define DAVINCI_DM365_LPSC_TPTC0	1
+#define DAVINCI_DM365_LPSC_TPTC1	2
+#define DAVINCI_DM365_LPSC_TPTC2	3
+#define DAVINCI_DM365_LPSC_TPTC3	4
+#define DAVINCI_DM365_LPSC_TIMER3	5
+#define DAVINCI_DM365_LPSC_SPI1		6
+#define DAVINCI_DM365_LPSC_MMC_SD1	7
+#define DAVINCI_DM365_LPSC_McBSP	8
+#define DAVINCI_DM365_LPSC_USB		9
+#define DAVINCI_DM365_LPSC_PWM3		10
+#define DAVINCI_DM365_LPSC_SPI2		11
+#define DAVINCI_DM365_LPSC_RTO		12
+#define DAVINCI_DM365_LPSC_DDR_EMIF	13
+#define DAVINCI_DM365_LPSC_AEMIF	14
+#define DAVINCI_DM365_LPSC_MMC_SD	15
+#define DAVINCI_DM365_LPSC_MMC_SD0	15
+#define DAVINCI_DM365_LPSC_MEMSTICK	16
+#define DAVINCI_DM365_LPSC_TIMER4	17
+#define DAVINCI_DM365_LPSC_I2C		18
+#define DAVINCI_DM365_LPSC_UART0	19
+#define DAVINCI_DM365_LPSC_UART1	20
+#define DAVINCI_DM365_LPSC_UHPI		21
+#define DAVINCI_DM365_LPSC_SPI0		22
+#define DAVINCI_DM365_LPSC_PWM0		23
+#define DAVINCI_DM365_LPSC_PWM1		24
+#define DAVINCI_DM365_LPSC_PWM2		25
+#define DAVINCI_DM365_LPSC_GPIO		26
+#define DAVINCI_DM365_LPSC_TIMER0	27
+#define DAVINCI_DM365_LPSC_TIMER1	28
+#define DAVINCI_DM365_LPSC_TIMER2	29
+#define DAVINCI_DM365_LPSC_SYSTEM_SUBSYS 30
+#define DAVINCI_DM365_LPSC_ARM		31
+#define DAVINCI_DM365_LPSC_SCR0		33
+#define DAVINCI_DM365_LPSC_SCR1		34
+#define DAVINCI_DM365_LPSC_EMU		35
+#define DAVINCI_DM365_LPSC_CHIPDFT	36
+#define DAVINCI_DM365_LPSC_PBIST	37
+#define DAVINCI_DM365_LPSC_SPI3		38
+#define DAVINCI_DM365_LPSC_SPI4		39
+#define DAVINCI_DM365_LPSC_CPGMAC	40
+#define DAVINCI_DM365_LPSC_RTC		41
+#define DAVINCI_DM365_LPSC_KEYSCAN	42
+#define DAVINCI_DM365_LPSC_ADCIF	43
+#define DAVINCI_DM365_LPSC_VOICE_CODEC	44
+#define DAVINCI_DM365_LPSC_DAC_CLKRES	45
+#define DAVINCI_DM365_LPSC_DAC_CLK	46
+#define DAVINCI_DM365_LPSC_VPSSMSTR	47
+#define DAVINCI_DM365_LPSC_IMCOP	50
+#define DAVINCI_DM365_LPSC_KALEIDO	51
+
+#define PSC_ENABLE     3
+#define PSC_DISABLE    2
+#define PSC_SYNCRESET  1
+#define PSC_RESET      0
+
+void psc_change_state(int id, int state);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif

+ 66 - 0
bsp/dm365/platform/reset.c

@@ -0,0 +1,66 @@
+/*
+ * File      : cpu.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Develop Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://openlab.rt-thread.com/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2006-03-13     Bernard      first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+#include "dm36x.h"
+
+/**
+ * @addtogroup DM36X
+ */
+/*@{*/
+
+/**
+ * reset cpu by dog's time-out
+ *
+ */
+void machine_reset()
+{
+	/* Trigger a full chip reset. reset_system() is provided elsewhere in the
+	 * BSP (presumably via the DM365 watchdog timeout -- TODO confirm). */
+	reset_system();
+}
+
+/**
+ *  shutdown CPU
+ *
+ */
+void machine_shutdown()
+{
+	/* Empty: this BSP provides no software power-off path; callers simply
+	 * return. */
+}
+
+#ifdef RT_USING_FINSH
+
+#include <finsh.h>
+FINSH_FUNCTION_EXPORT_ALIAS(rt_hw_cpu_reset, reset, restart the system);
+
+#ifdef FINSH_USING_MSH
+/* msh command wrapper: restart the system.
+ * NOTE(review): calls rt_hw_cpu_reset(), which is not defined in this file --
+ * presumably provided by the cpu port; verify it exists at link time. */
+int cmd_reset(int argc, char** argv)
+{
+	rt_hw_cpu_reset();
+	return 0;
+}
+
+/* msh command wrapper: shut the system down.
+ * NOTE(review): rt_hw_cpu_shutdown() is not defined in this file -- verify
+ * the cpu port provides it. */
+int cmd_shutdown(int argc, char** argv)
+{
+	rt_hw_cpu_shutdown();
+	return 0;
+}
+
+FINSH_FUNCTION_EXPORT_ALIAS(cmd_reset, __cmd_reset, restart the system.);
+FINSH_FUNCTION_EXPORT_ALIAS(cmd_shutdown, __cmd_shutdown, shutdown the system.);
+
+#endif
+#endif
+
+/*@}*/

+ 367 - 0
bsp/dm365/platform/start_gcc.S

@@ -0,0 +1,367 @@
+/*
+ * File      : start.S
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2010-11-13     weety      first version
+ */
+
+#define CONFIG_STACKSIZE    512
+#define S_FRAME_SIZE        68
+
+#define S_PC                64
+#define S_LR                60
+#define S_SP                56
+#define S_IP                52
+#define S_FP                48
+#define S_R10               44
+#define S_R9                40
+#define S_R8                36
+#define S_R7                32
+#define S_R6                28
+#define S_R5                24
+#define S_R4                20
+#define S_R3                16
+#define S_R2                12
+#define S_R1                8
+#define S_R0                4
+#define S_CPSR              0
+
+.equ    I_BIT,              0x80    @ when I bit is set, IRQ is disabled
+.equ    F_BIT,              0x40    @ when F bit is set, FIQ is disabled
+
+.equ 	USERMODE, 			0x10
+.equ 	FIQMODE,			0x11
+.equ 	IRQMODE,			0x12
+.equ 	SVCMODE,			0x13
+.equ 	ABORTMODE,			0x17
+.equ 	UNDEFMODE,			0x1b
+.equ 	MODEMASK,			0x1f
+.equ 	NOINT,				0xc0
+
+.equ 	RAM_BASE,			0x00000000	/*Start address of RAM		*/
+.equ 	ROM_BASE,			0x80000000	/*Start address of Flash	*/
+
+
+.equ	EINT_ENABLE0,			0x01c48018
+.equ	EINT_ENABLE1,			0x01c4801c
+
+/*
+ *************************************************************************
+ *
+ * Jump vector table
+ *
+ *************************************************************************
+ */
+
+.section .init, "ax"
+.code 32
+
+.globl _start
+_start:
+    b       reset
+    ldr     pc, _vector_undef
+    ldr     pc, _vector_swi
+    ldr     pc, _vector_pabt
+    ldr     pc, _vector_dabt
+    ldr     pc, _vector_resv
+    ldr     pc, _vector_irq
+    ldr     pc, _vector_fiq
+
+_vector_undef:  .word vector_undef
+_vector_swi:    .word vector_swi
+_vector_pabt:   .word vector_pabt
+_vector_dabt:   .word vector_dabt
+_vector_resv:   .word vector_resv
+_vector_irq:    .word vector_irq
+_vector_fiq:    .word vector_fiq
+
+.balignl    16,0xdeadbeef
+
+/*
+ *************************************************************************
+ *
+ * Startup Code (reset vector)
+ * relocate armboot to ram
+ * setup stack
+ * jump to second stage
+ *
+ *************************************************************************
+ */
+
+_TEXT_BASE:
+    .word    TEXT_BASE
+
+/*
+ * rtthread kernel start and end
+ * which are defined in linker script
+ */
+.globl _rtthread_start
+_rtthread_start:
+    .word _start
+
+.globl _rtthread_end
+_rtthread_end:
+    .word  _end
+
+/*
+ * rtthread bss start and end which are defined in linker script
+ */
+.globl _bss_start
+_bss_start:	
+    .word __bss_start
+
+.globl _bss_end
+_bss_end:
+    .word __bss_end
+
+/* IRQ stack memory (calculated at run-time)     */
+.globl IRQ_STACK_START
+IRQ_STACK_START:
+    .word _irq_stack_start + 1024
+
+.globl FIQ_STACK_START
+FIQ_STACK_START:
+    .word _fiq_stack_start + 1024
+
+.globl UNDEFINED_STACK_START
+UNDEFINED_STACK_START:
+    .word _undefined_stack_start + CONFIG_STACKSIZE
+
+.globl ABORT_STACK_START
+ABORT_STACK_START:
+    .word _abort_stack_start + CONFIG_STACKSIZE
+	
+.globl _STACK_START
+_STACK_START:
+    .word _svc_stack_start + 1024
+
+/* ----------------------------------entry------------------------------*/
+reset:
+
+    /* set the cpu to SVC32 mode    */
+    mrs     r0,cpsr
+    bic     r0,r0,#MODEMASK
+    orr     r0,r0,#SVCMODE
+    msr     cpsr,r0
+
+    /* mask all IRQs by clearing all bits in the INTMRs               */
+	mov	r1, $0
+	ldr	r0, =EINT_ENABLE0
+	str	r1, [r0]
+	ldr	r0, =EINT_ENABLE1
+	str	r1, [r0]
+#if 0
+    /* set interrupt vector        */
+    ldr     r0, _TEXT_BASE
+    mov	r1, #0x00
+    add	r2, r0, #0x40         /* size, 32bytes                   */
+
+copy_loop:
+    ldmia   r0!, {r3-r10}           /* copy from source address [r0]    */
+    stmia   r1!, {r3-r10}           /* copy to   target address [r1]    */
+    cmp     r0, r2                  /* until source end addreee [r2]    */
+    ble     copy_loop
+#endif
+
+    /* setup stack */
+    bl      stack_setup
+
+    /* clear .bss */
+    mov     r0,#0                   /* get a zero                       */
+    ldr     r1,=__bss_start         /* bss start                        */
+    ldr     r2,=__bss_end           /* bss end                          */
+
+bss_loop:
+    cmp     r1,r2                   /* check if data to clear           */
+    strlo   r0,[r1],#4              /* clear 4 bytes                    */
+    blo     bss_loop                /* loop until done                  */
+
+    /* call C++ constructors of global objects                          */
+    ldr     r0, =__ctors_start__
+    ldr     r1, =__ctors_end__
+
+ctor_loop:
+    cmp     r0, r1
+    beq     ctor_end
+    ldr     r2, [r0], #4
+    stmfd   sp!, {r0-r1}
+    mov     lr, pc
+    bx      r2
+    ldmfd   sp!, {r0-r1}
+    b       ctor_loop
+
+ctor_end:
+
+    /* start RT-Thread Kernel        */
+    ldr     pc, _rtthread_startup
+
+_rtthread_startup: 
+    .word rtthread_startup
+#if defined (__FLASH_BUILD__)
+_load_address: 
+    .word ROM_BASE + _TEXT_BASE
+#else
+_load_address: 
+    .word RAM_BASE + _TEXT_BASE
+#endif
+
+
+/*
+ *************************************************************************
+ *
+ * Interrupt handling
+ *
+ *************************************************************************
+ */
+
+.macro push_exp_reg
+    sub     sp, sp, #S_FRAME_SIZE   @/* Sizeof(struct rt_hw_stack)  */
+    stmib   sp, {r0 - r12}          @/* Calling r0-r12                  */
+    mov     r0, sp
+    mrs     r6, spsr                @/* Save CPSR                       */
+    str     lr, [r0, #S_PC]         @/* Push PC                         */
+    str     r6, [r0, #S_CPSR]       @/* Push CPSR                       */
+    @ switch to SVC mode with no interrupt
+    msr     cpsr_c, #I_BIT|F_BIT|SVCMODE
+    str     sp, [r0, #S_SP]         @/* Save calling SP                 */
+    str     lr, [r0, #S_LR]         @/* Save calling PC                 */
+.endm
+/* exception handlers    */
+    .align  5
+vector_undef:
+    push_exp_reg
+    bl     rt_hw_trap_udef
+
+    .align  5
+vector_swi:
+    push_exp_reg
+    bl     rt_hw_trap_swi
+
+    .align  5
+vector_pabt:
+    push_exp_reg
+    bl     rt_hw_trap_pabt
+
+    .align  5
+vector_dabt:
+    push_exp_reg
+    bl     rt_hw_trap_dabt
+
+    .align  5
+vector_resv:
+    push_exp_reg
+    bl     rt_hw_trap_resv
+
+.globl     rt_interrupt_enter
+.globl     rt_interrupt_leave
+.globl     rt_thread_switch_interrupt_flag
+.globl     rt_interrupt_from_thread
+.globl     rt_interrupt_to_thread
+vector_irq:
+    stmfd   sp!, {r0-r12,lr}
+
+    bl      rt_interrupt_enter
+    bl      rt_hw_trap_irq
+    bl      rt_interrupt_leave
+
+
+    @ if rt_thread_switch_interrupt_flag set, jump to
+    @ rt_hw_context_switch_interrupt_do and don't return
+    ldr     r0, =rt_thread_switch_interrupt_flag
+    ldr     r1, [r0]
+    cmp     r1, #1
+    beq     rt_hw_context_switch_interrupt_do
+
+    ldmfd   sp!, {r0-r12,lr}
+    subs    pc, lr, #4
+
+    .align  5
+vector_fiq:
+    stmfd   sp!,{r0-r7,lr}
+    bl      rt_hw_trap_fiq
+    ldmfd   sp!,{r0-r7,lr}
+    subs    pc,lr,#4
+
+rt_hw_context_switch_interrupt_do:
+    mov     r1,  #0         @ clear flag
+    str     r1,  [r0]
+
+    ldmfd   sp!, {r0-r12,lr}@ reload saved registers
+    stmfd   sp,  {r0-r2}    @ save r0-r2
+
+    mrs     r0,  spsr       @ get cpsr of interrupt thread
+
+    sub     r1,  sp, #4*3
+    sub     r2,  lr, #4     @ save old task's pc to r2
+
+    @ switch to SVC mode with no interrupt
+    msr     cpsr_c, #I_BIT|F_BIT|SVCMODE
+
+    stmfd   sp!, {r2}       @ push old task's pc
+    stmfd   sp!, {r3-r12,lr}@ push old task's lr,r12-r4
+    ldmfd   r1,  {r1-r3}    @ restore r0-r2 of the interrupt thread
+    stmfd   sp!, {r1-r3}    @ push old task's r0-r2
+    stmfd   sp!, {r0}       @ push old task's cpsr
+
+    ldr     r4,  =rt_interrupt_from_thread
+    ldr     r5,  [r4]
+    str     sp,  [r5]       @ store sp in preempted tasks's TCB
+
+    ldr     r6,  =rt_interrupt_to_thread
+    ldr     r6,  [r6]
+    ldr     sp,  [r6]       @ get new task's stack pointer
+
+    ldmfd   sp!, {r4}       @ pop new task's cpsr to spsr
+    msr     spsr_cxsf, r4
+
+    ldmfd   sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
+
+
+stack_setup:
+    mrs     r0, cpsr
+    bic     r0, r0, #MODEMASK
+    orr     r1, r0, #UNDEFMODE|NOINT
+    msr     cpsr_cxsf, r1           /* undef mode          */
+    ldr     sp, UNDEFINED_STACK_START
+
+    orr     r1,r0,#ABORTMODE|NOINT
+    msr     cpsr_cxsf,r1            /* abort mode          */
+    ldr     sp, ABORT_STACK_START
+
+    orr     r1,r0,#IRQMODE|NOINT
+    msr     cpsr_cxsf,r1            /* IRQ mode            */
+    ldr     sp, IRQ_STACK_START
+
+    orr     r1,r0,#FIQMODE|NOINT
+    msr     cpsr_cxsf,r1            /* FIQ mode            */
+    ldr     sp, FIQ_STACK_START
+
+    bic     r0,r0,#MODEMASK
+    orr     r1,r0,#SVCMODE|NOINT
+    msr     cpsr_cxsf,r1            /* SVC mode            */
+
+    ldr     sp, _STACK_START
+
+    /* USER mode is not initialized. */
+    bx      lr    /* The LR register may be not valid for the mode changes.*/
+
+/*@}*/
+
+

+ 29 - 0
bsp/dm365/platform/system_clock.c

@@ -0,0 +1,29 @@
+/*
+ * File      : clock.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://openlab.rt-thread.com/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2008-04-25     Yi.qiu       first version
+ */
+
+#include <rtthread.h>
+#include "dm36x.h"
+
+
+/**
+ * @brief System Clock Configuration
+ */
+void rt_hw_clock_init(void)
+{
+	/* Intentionally empty: per the comment below, the boot loader (u-boot)
+	 * has already programmed the PLLs and clock dividers before RT-Thread
+	 * starts, so nothing needs to be done here. */
+	//LOCKTIME = 0xFFFFFFFF;	//u-boot already init system clock
+	//rt_hw_set_mpll_clock(MPL_SDIV, MPL_PDIV, MPL_MIDV);
+	//rt_hw_set_upll_clock(UPL_SDIV, UPL_PDIV, UPL_MDIV);
+	//rt_hw_set_divider(HDIVN, PDIVN);
+}
+

+ 189 - 0
bsp/dm365/platform/trap.c

@@ -0,0 +1,189 @@
+/*
+ * File      : trap.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://openlab.rt-thread.com/license/LICENSE
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2010-11-13     weety      first version
+ */
+
+#include <rtthread.h>
+#include <rthw.h>
+
+#include "dm36x.h"
+
+/**
+ * @addtogroup DM36X
+ */
+/*@{*/
+
+extern struct rt_thread *rt_current_thread;
+#ifdef RT_USING_FINSH
+extern long list_thread(void);
+#endif
+
+/**
+ * this function will show registers of CPU
+ *
+ * @param regs the registers point
+ */
+
+void rt_hw_show_register (struct rt_hw_register *regs)
+{
+	/* Dump the register snapshot captured by the exception entry stub.
+	 * Fix: the banner was misspelled "Execption" in the original. */
+	rt_kprintf("Exception:\n");
+	rt_kprintf("r00:0x%08x r01:0x%08x r02:0x%08x r03:0x%08x\n", regs->r0, regs->r1, regs->r2, regs->r3);
+	rt_kprintf("r04:0x%08x r05:0x%08x r06:0x%08x r07:0x%08x\n", regs->r4, regs->r5, regs->r6, regs->r7);
+	rt_kprintf("r08:0x%08x r09:0x%08x r10:0x%08x\n", regs->r8, regs->r9, regs->r10);
+	rt_kprintf("fp :0x%08x ip :0x%08x\n", regs->fp, regs->ip);
+	rt_kprintf("sp :0x%08x lr :0x%08x pc :0x%08x\n", regs->sp, regs->lr, regs->pc);
+	rt_kprintf("cpsr:0x%08x\n", regs->cpsr);
+}
+
+/**
+ * When ARM7TDMI comes across an instruction which it cannot handle,
+ * it takes the undefined instruction trap.
+ *
+ * @param regs system registers
+ *
+ * @note never invoke this function in application
+ */
+void rt_hw_trap_udef(struct rt_hw_register *regs)
+{
+	rt_hw_show_register(regs);
+
+	rt_kprintf("undefined instruction\n");
+	rt_kprintf("thread - %s stack:\n", rt_current_thread->name);
+
+#ifdef RT_USING_FINSH
+	/* dump the thread list to aid post-mortem debugging */
+	list_thread();
+#endif
+	rt_hw_cpu_shutdown();	/* unrecoverable fault: halt the system */
+}
+
+/**
+ * The software interrupt instruction (SWI) is used for entering
+ * Supervisor mode, usually to request a particular supervisor
+ * function.
+ *
+ * @param regs system registers
+ *
+ * @note never invoke this function in application
+ */
+void rt_hw_trap_swi(struct rt_hw_register *regs)
+{
+	rt_hw_show_register(regs);
+
+	/* This port does not use SWI as a syscall mechanism, so any SWI that
+	 * reaches here is treated as a fatal error. */
+	rt_kprintf("software interrupt\n");
+	rt_hw_cpu_shutdown();
+}
+
+/**
+ * An abort indicates that the current memory access cannot be completed,
+ * which occurs during an instruction prefetch.
+ *
+ * @param regs system registers
+ *
+ * @note never invoke this function in application
+ */
+void rt_hw_trap_pabt(struct rt_hw_register *regs)
+{
+	rt_hw_show_register(regs);
+
+	rt_kprintf("prefetch abort\n");
+	rt_kprintf("thread - %s stack:\n", rt_current_thread->name);
+
+#ifdef RT_USING_FINSH
+	/* dump the thread list to aid post-mortem debugging */
+	list_thread();
+#endif
+	rt_hw_cpu_shutdown();	/* unrecoverable fault: halt the system */
+}
+
+/**
+ * An abort indicates that the current memory access cannot be completed,
+ * which occurs during a data access.
+ *
+ * @param regs system registers
+ *
+ * @note never invoke this function in application
+ */
+void rt_hw_trap_dabt(struct rt_hw_register *regs)
+{
+	rt_uint32_t fault_addr;
+	rt_uint32_t fault_status;
+
+	/* Read FAR (CP15 c6, Fault Address Register): the address whose access
+	 * faulted. Fix: MRC *writes* the target register, so the variable must
+	 * be an output operand ("=r"); the original listed it as an input,
+	 * printing an uninitialized value. */
+	asm volatile ("mrc p15, 0, %0, c6, c0, 0"
+			: "=r"(fault_addr)
+			:
+			: "cc");
+	rt_kprintf("unhandled access to 0x%08x\n", fault_addr);
+
+	/* Read DFSR (CP15 c5, Data Fault Status Register) -- same constraint
+	 * fix as above. */
+	asm volatile ("mrc p15, 0, %0, c5, c0, 0"
+			: "=r"(fault_status)
+			:
+			: "cc");
+	rt_kprintf("fault status 0x%08x\n", fault_status);
+
+	rt_hw_show_register(regs);
+
+	rt_kprintf("data abort\n");
+	rt_kprintf("thread - %s stack:\n", rt_current_thread->name);
+
+#ifdef RT_USING_FINSH
+	/* dump the thread list to aid post-mortem debugging */
+	list_thread();
+#endif
+	rt_hw_cpu_shutdown();	/* unrecoverable fault: halt the system */
+}
+
+/**
+ * Normally, system will never reach here
+ *
+ * @param regs system registers
+ *
+ * @note never invoke this function in application
+ */
+void rt_hw_trap_resv(struct rt_hw_register *regs)
+{
+	/* reserved exception vector -- should never be taken */
+	rt_kprintf("not used\n");
+	rt_hw_show_register(regs);
+	rt_hw_cpu_shutdown();
+}
+
+extern struct rt_irq_desc irq_desc[];
+
+
+/* Top-level IRQ dispatcher, called from the vector_irq stub: identify the
+ * active interrupt from the ARM INTC, acknowledge it, and run its handler. */
+void rt_hw_trap_irq()
+{
+	rt_isr_handler_t isr_func;
+	rt_uint32_t val, irq, mask;
+	void *param;
+
+	/* get irq number */
+	/* The difference of the two INTC registers (base+0x14 and base+0x24)
+	 * yields (irq + 1) * 4, hence ">> 2 then -1". NOTE(review): assumes the
+	 * INTC was configured accordingly -- confirm against interrupt.c. */
+	val = readl(DAVINCI_ARM_INTC_BASE+0x14) - readl(DAVINCI_ARM_INTC_BASE+0x24);
+	irq = (val >> 2) - 1;
+	/* clear pending register */
+	mask = 1 << (irq & 0x1f);
+	if (irq > 31)
+		writel(mask, DAVINCI_ARM_INTC_BASE+0x0c); //IRQ1
+	else
+		writel(mask, DAVINCI_ARM_INTC_BASE+0x08); //IRQ0
+	
+	/* get interrupt service routine */
+	isr_func = irq_desc[irq].handler;
+	param = irq_desc[irq].param;
+
+	/* turn to interrupt service routine */
+	/* NOTE(review): no NULL check on isr_func -- relies on every irq_desc[]
+	 * entry being initialized with a default handler; verify in interrupt.c */
+	isr_func(irq, param);
+	irq_desc[irq].counter++;
+}
+
+void rt_hw_trap_fiq()
+{
+	/* FIQ is not used by this port; just report that one occurred */
+	rt_kprintf("fast interrupt request\n");
+}
+
+/*@}*/

+ 271 - 0
bsp/dm365/rtconfig.h

@@ -0,0 +1,271 @@
+/* RT-Thread config file */
+#ifndef __RTTHREAD_CFG_H__
+#define __RTTHREAD_CFG_H__
+
+/* RT_NAME_MAX*/
+#define RT_NAME_MAX	32
+
+/* RT_ALIGN_SIZE*/
+#define RT_ALIGN_SIZE	4
+
+/* PRIORITY_MAX */
+#define RT_THREAD_PRIORITY_MAX	256
+
+/* Tick per Second */
+#define RT_TICK_PER_SECOND	100
+
+/* SECTION: RT_DEBUG */
+/* Thread Debug */
+#define RT_DEBUG
+#define SCHEDULER_DEBUG
+/* #define RT_THREAD_DEBUG */
+
+#define RT_USING_OVERFLOW_CHECK
+
+/* Using Hook */
+#define RT_USING_HOOK
+
+/* Using Software Timer */
+#define RT_USING_TIMER_SOFT
+#define RT_TIMER_THREAD_PRIO		8
+#define RT_TIMER_THREAD_STACK_SIZE	512
+#define RT_TIMER_TICK_PER_SECOND	10
+
+#define IDLE_THREAD_STACK_SIZE	1024
+
+
+/* SECTION: IPC */
+/* Using Semaphore */
+#define RT_USING_SEMAPHORE
+
+/* Using Mutex */
+#define RT_USING_MUTEX
+
+/* Using Event */
+#define RT_USING_EVENT
+
+/* Using MailBox */
+#define RT_USING_MAILBOX
+
+/* Using Message Queue */
+#define RT_USING_MESSAGEQUEUE
+
+/* SECTION: Memory Management */
+/* Using Memory Pool Management*/
+#define RT_USING_MEMPOOL
+
+/* Using Dynamic Heap Management */
+#define RT_USING_HEAP
+
+/* Using Small MM */
+/* #define RT_USING_SMALL_MEM */
+
+/* Using SLAB Allocator */
+#define RT_USING_SLAB
+
+/* SECTION: Device System */
+/* Using Device System */
+#define RT_USING_DEVICE
+#define RT_USING_DEVICE_IPC
+
+/* Using Module System */
+#define RT_USING_MODULE
+#define RT_USING_LIBDL
+
+/* Interrupt debug */
+#define RT_USING_INTERRUPT_INFO
+
+#define RT_USING_SERIAL
+/* SECTION: Console options */
+#define RT_USING_CONSOLE
+/* the buffer size of console */
+#define RT_CONSOLEBUF_SIZE	1024
+#define RT_CONSOLE_DEVICE_NAME "uart0"
+
+/* SECTION: finsh, a C-Express shell */
+/* Using FinSH as Shell*/
+#define RT_USING_FINSH
+/* Using symbol table */
+#define FINSH_USING_SYMTAB
+#define FINSH_USING_DESCRIPTION
+#define FINSH_THREAD_STACK_SIZE 4096
+#define FINSH_USING_HISTORY 1
+#define FINSH_USING_MSH
+
+/* SECTION: the runtime libc library */
+/* the runtime libc library */
+#define RT_USING_LIBC
+#define RT_USING_PTHREADS
+
+/* SECTION: C++ support */
+/* Using C++ support */
+/* #define RT_USING_CPLUSPLUS */
+
+/* SECTION: Device filesystem support */
+/* using DFS support */
+#define RT_USING_DFS
+#define RT_USING_DFS_ELMFAT
+/* use long file name feature 			*/
+#define RT_DFS_ELM_USE_LFN			2
+#define RT_DFS_ELM_REENTRANT
+/* define OEM code page */
+#define RT_DFS_ELM_CODE_PAGE	936
+/* Using OEM code page file */
+// #define RT_DFS_ELM_CODE_PAGE_FILE
+/* the max number of file length 		*/
+#define RT_DFS_ELM_MAX_LFN			128
+/* #define RT_USING_DFS_YAFFS2 */
+#define RT_USING_DFS_DEVFS
+
+#define RT_USING_DFS_NFS
+#define RT_NFS_HOST_EXPORT		"192.168.1.5:/"
+
+#define DFS_USING_WORKDIR
+
+/* the max number of mounted filesystem */
+#define DFS_FILESYSTEMS_MAX		4
+/* the max number of opened files 		*/
+#define DFS_FD_MAX					16
+/* the max number of cached sector 		*/
+#define DFS_CACHE_MAX_NUM   		4
+
+/* Enable freemodbus protocol stack*/
+/* #define RT_USING_MODBUS */
+
+/* USING CPU FFS */
+#define RT_USING_CPU_FFS
+
+/* MMU pte item size defined */
+#define RT_MMU_PTE_SIZE		4096
+
+#define RT_USING_GPIO
+
+#define RT_USING_I2C
+
+#define RT_USING_SPI
+
+#define RT_USING_SDIO
+#define RT_MMCSD_DBG
+
+/* SECTION: lwip, a lightweight TCP/IP protocol stack */
+/* Using lightweight TCP/IP protocol stack */
+#define RT_USING_LWIP
+
+/* Trace LwIP protocol */
+/* #define RT_LWIP_DEBUG */
+
+//#define RT_LWIP_USING_RT_MEM
+//#define RT_LWIP_REASSEMBLY_FRAG
+
+#define SO_REUSE 1
+
+#define RT_LWIP_DNS
+
+/* Enable ICMP protocol */
+#define RT_LWIP_ICMP
+
+/* Enable IGMP protocol */
+#define RT_LWIP_IGMP
+
+/* Enable UDP protocol */
+#define RT_LWIP_UDP
+
+/* Enable TCP protocol */
+#define RT_LWIP_TCP
+
+/* the number of simultaneously active TCP connections */
+#define RT_LWIP_TCP_PCB_NUM	32
+
+/* TCP sender buffer space */
+#define RT_LWIP_TCP_SND_BUF	1460*10
+
+/* TCP receive window. */
+#define RT_LWIP_TCP_WND	1460*8
+
+/* Enable SNMP protocol */
+/* #define RT_LWIP_SNMP */
+
+/* Using DHCP */
+/* #define RT_LWIP_DHCP */
+
+/* ip address of target */
+#define RT_LWIP_IPADDR0	192
+#define RT_LWIP_IPADDR1	168
+#define RT_LWIP_IPADDR2	1
+#define RT_LWIP_IPADDR3	30
+
+/* gateway address of target */
+#define RT_LWIP_GWADDR0	192
+#define RT_LWIP_GWADDR1	168
+#define RT_LWIP_GWADDR2	1
+#define RT_LWIP_GWADDR3	1
+
+/* mask address of target */
+#define RT_LWIP_MSKADDR0	255
+#define RT_LWIP_MSKADDR1	255
+#define RT_LWIP_MSKADDR2	255
+#define RT_LWIP_MSKADDR3	0
+
+/* the number of blocks for pbuf */
+#define RT_LWIP_PBUF_NUM	16
+
+/* the number of simultaneously queued TCP */
+#define RT_LWIP_TCP_SEG_NUM    40
+
+/* thread priority of tcpip thread */
+#define RT_LWIP_TCPTHREAD_PRIORITY	128
+
+/* mail box size of tcpip thread to wait for */
+#define RT_LWIP_TCPTHREAD_MBOX_SIZE	32
+
+/* thread stack size of tcpip thread */
+#define RT_LWIP_TCPTHREAD_STACKSIZE	4096
+
+/* thread priority of ethnetif thread */
+#define RT_LWIP_ETHTHREAD_PRIORITY	144
+
+/* mail box size of ethnetif thread to wait for */
+#define RT_LWIP_ETHTHREAD_MBOX_SIZE	32
+
+/* thread stack size of ethnetif thread */
+#define RT_LWIP_ETHTHREAD_STACKSIZE	1024
+
+/* SECTION: RTGUI support */
+/* using RTGUI support */
+/* #define RT_USING_RTGUI */
+
+/* name length of RTGUI object */
+//#define RTGUI_NAME_MAX		16
+/* support 16 weight font */
+//#define RTGUI_USING_FONT16
+/* support 16 weight font */
+//#define RTGUI_USING_FONT12
+/* support Chinese font */
+//#define RTGUI_USING_FONTHZ
+/* use DFS as file interface */
+//#define RTGUI_USING_DFS_FILERW
+/* use font file as Chinese font */
+/* #define RTGUI_USING_HZ_FILE */
+/* use Chinese bitmap font */
+//#define RTGUI_USING_HZ_BMP
+/* use small size in RTGUI */
+/* #define RTGUI_USING_SMALL_SIZE */
+/* use mouse cursor */
+/* #define RTGUI_USING_MOUSE_CURSOR */
+
+/* SECTION: FTK support */
+/* using FTK support */
+/* #define RT_USING_FTK */
+
+/*
+ * Note on FTK:
+ * 
+ * FTK depends :
+ * #define RT_USING_NEWLIB
+ * #define DFS_USING_WORKDIR
+ * 
+ * And the maximal length must great than 64
+ * #define RT_DFS_ELM_MAX_LFN	128
+ */
+
+#endif

+ 80 - 0
bsp/dm365/rtconfig.py

@@ -0,0 +1,80 @@
+# toolchains options
+ARCH     = 'arm'
+CPU      = 'dm36x'
+TextBase = '0x80000000'
+
+CROSS_TOOL 	= 'gcc'
+
+if  CROSS_TOOL == 'gcc':
+	PLATFORM 	= 'gcc'
+	#EXEC_PATH 	= '/opt/arm-2010q1/bin'
+	EXEC_PATH = r'D:\arm-2013.11\bin'
+elif CROSS_TOOL == 'keil':
+	PLATFORM 	= 'armcc'
+	EXEC_PATH 	= 'E:/Keil'
+BUILD = 'release'
+
+if PLATFORM == 'gcc':
+    # toolchains
+    PREFIX = 'arm-none-eabi-'
+    CC = PREFIX + 'gcc'
+    AS = PREFIX + 'gcc'
+    AR = PREFIX + 'ar'
+    LINK = PREFIX + 'gcc'
+    TARGET_EXT = 'axf'
+    SIZE = PREFIX + 'size'
+    OBJDUMP = PREFIX + 'objdump'
+    OBJCPY = PREFIX + 'objcopy'
+
+    DEVICE = ' -mcpu=arm926ej-s'
+    CFLAGS = DEVICE
+    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp' + ' -DTEXT_BASE=' + TextBase
+    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread_dm365.map,-cref,-u,_start -T dm365_ram.ld' + ' -Ttext ' + TextBase
+
+    CPATH = ''
+    LPATH = ''
+
+    if BUILD == 'debug':
+        CFLAGS += ' -O0 -gdwarf-2'
+        AFLAGS += ' -gdwarf-2'
+    else:
+        CFLAGS += ' -O2'
+
+    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
+
+elif PLATFORM == 'armcc':
+    # toolchains
+    CC = 'armcc'
+    AS = 'armasm'
+    AR = 'armar'
+    LINK = 'armlink'
+    TARGET_EXT = 'axf'
+
+    DEVICE = ' --device DARMSS9'
+    CFLAGS = DEVICE + ' --apcs=interwork --diag_suppress=870'
+    AFLAGS = DEVICE
+    LFLAGS = DEVICE + ' --strict --info sizes --info totals --info unused --info veneers --list rtthread-mini2440.map --ro-base 0x30000000 --entry Entry_Point --first Entry_Point'
+
+    CFLAGS += ' -I"' + EXEC_PATH + '/ARM/RV31/INC"'
+    LFLAGS += ' --libpath "' + EXEC_PATH + '/ARM/RV31/LIB"'
+
+    EXEC_PATH += '/arm/bin40/'
+
+    if BUILD == 'debug':
+        CFLAGS += ' -g -O0'
+        AFLAGS += ' -g'
+    else:
+        CFLAGS += ' -O2'
+
+    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
+
+elif PLATFORM == 'iar':
+    # toolchains
+    CC = 'armcc'
+    AS = 'armasm'
+    AR = 'armar'
+    LINK = 'armlink'
+
+    CFLAGS = ''
+    AFLAGS = ''
+    LFLAGS = ''

二进制
components/drivers/drivers.rar


+ 109 - 0
libcpu/arm/dm36x/context_gcc.S

@@ -0,0 +1,109 @@
+/*
+ * File      : context.S
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2011-01-13     weety
+ */
+
+/*!
+ * \addtogroup DM36X
+ */
+/*@{*/
+
+#define NOINT          0xc0
+
+
+/*
+ * rt_base_t rt_hw_interrupt_disable();
+ */
+.globl rt_hw_interrupt_disable
+rt_hw_interrupt_disable:
+    mrs r0, cpsr
+    orr r1, r0, #NOINT
+    msr cpsr_c, r1
+    bx  lr
+
+/*
+ * void rt_hw_interrupt_enable(rt_base_t level);
+ */
+.globl rt_hw_interrupt_enable
+rt_hw_interrupt_enable:
+    msr cpsr, r0
+    bx  lr
+
+/*
+ * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
+ * r0 --> from
+ * r1 --> to
+ */
+.globl rt_hw_context_switch
+rt_hw_context_switch:
+    stmfd   sp!, {lr}       @ push pc (lr should be pushed in place of PC)
+    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
+
+    mrs r4, cpsr
+    tst lr, #0x01
+    orrne r4, r4, #0x20     @ it's thumb code
+
+    stmfd sp!, {r4}         @ push cpsr
+
+    str sp, [r0]            @ store sp in preempted tasks TCB
+    ldr sp, [r1]            @ get new task stack pointer
+
+    ldmfd sp!, {r4}         @ pop new task cpsr to spsr
+    msr spsr_cxsf, r4
+_do_switch:
+    ldmfd sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr
+
+/*
+ * void rt_hw_context_switch_to(rt_uint32 to);
+ * r0 --> to
+ */
+.globl rt_hw_context_switch_to
+rt_hw_context_switch_to:
+    ldr sp, [r0]            @ get new task stack pointer
+
+    ldmfd sp!, {r4}         @ pop new task spsr
+    msr spsr_cxsf, r4
+
+    bic r4, r4, #0x20       @ must be ARM mode
+    msr cpsr_cxsf, r4
+    ldmfd sp!, {r0-r12, lr, pc}^   @ pop new task r0-r12, lr & pc
+
+/*
+ * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
+ */
+.globl rt_thread_switch_interrupt_flag
+.globl rt_interrupt_from_thread
+.globl rt_interrupt_to_thread
+.globl rt_hw_context_switch_interrupt
+rt_hw_context_switch_interrupt:
+    ldr r2, =rt_thread_switch_interrupt_flag
+    ldr r3, [r2]
+    cmp r3, #1
+    beq _reswitch
+    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
+    str r3, [r2]
+    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
+    str r0, [r2]
+_reswitch:
+    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
+    str r1, [r2]
+    bx  lr

+ 117 - 0
libcpu/arm/dm36x/context_rvds.S

@@ -0,0 +1,117 @@
+;/*
+; * File      : context_rvds.S
+; * This file is part of RT-Thread RTOS
+; * COPYRIGHT (C) 2006, RT-Thread Development Team
+; *
+; *  This program is free software; you can redistribute it and/or modify
+; *  it under the terms of the GNU General Public License as published by
+; *  the Free Software Foundation; either version 2 of the License, or
+; *  (at your option) any later version.
+; *
+; *  This program is distributed in the hope that it will be useful,
+; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; *  GNU General Public License for more details.
+; *
+; *  You should have received a copy of the GNU General Public License along
+; *  with this program; if not, write to the Free Software Foundation, Inc.,
+; *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+; *
+; * Change Logs:
+; * Date           Author       Notes
+; * 2011-08-14     weety    copy from mini2440
+; */
+
+NOINT	EQU		0xc0	; disable interrupt in psr
+
+	AREA |.text|, CODE, READONLY, ALIGN=2
+	ARM
+	REQUIRE8
+	PRESERVE8
+
+;/*
+; * rt_base_t rt_hw_interrupt_disable();
+; */
+rt_hw_interrupt_disable	PROC
+	EXPORT rt_hw_interrupt_disable
+	MRS r0, cpsr
+	ORR r1, r0, #NOINT
+	MSR cpsr_c, r1
+	BX	lr
+	ENDP
+
+;/*
+; * void rt_hw_interrupt_enable(rt_base_t level);
+; */
+rt_hw_interrupt_enable	PROC
+	EXPORT rt_hw_interrupt_enable
+	MSR cpsr_c, r0
+	BX	lr
+	ENDP
+
+;/*
+; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
+; * r0 --> from
+; * r1 --> to
+; */
+rt_hw_context_switch	PROC
+	EXPORT rt_hw_context_switch
+	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
+	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+
+	MRS		r4, cpsr
+	STMFD	sp!, {r4}			; push cpsr
+	MRS		r4, spsr
+	STMFD	sp!, {r4}			; push spsr
+
+	STR	sp, [r0]				; store sp in preempted tasks TCB
+	LDR	sp, [r1]				; get new task stack pointer
+
+	LDMFD	sp!, {r4}			; pop new task spsr
+	MSR	spsr_cxsf, r4
+	LDMFD	sp!, {r4}			; pop new task cpsr
+	MSR	spsr_cxsf, r4
+
+	LDMFD	sp!, {r0-r12, lr, pc}^	; pop new task r0-r12, lr & pc
+	ENDP
+
+;/*
+; * void rt_hw_context_switch_to(rt_uint32 to);
+; * r0 --> to
+; */
+rt_hw_context_switch_to	PROC
+	EXPORT rt_hw_context_switch_to
+	LDR	sp, [r0]				; get new task stack pointer
+
+	LDMFD	sp!, {r4}			; pop new task spsr
+	MSR	spsr_cxsf, r4
+	LDMFD	sp!, {r4}			; pop new task cpsr
+	MSR	cpsr_cxsf, r4
+
+	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
+	ENDP
+
+;/*
+; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
+; */
+	IMPORT rt_thread_switch_interrupt_flag
+	IMPORT rt_interrupt_from_thread
+	IMPORT rt_interrupt_to_thread
+
+rt_hw_context_switch_interrupt	PROC
+	EXPORT rt_hw_context_switch_interrupt
+	LDR r2, =rt_thread_switch_interrupt_flag
+	LDR r3, [r2]
+	CMP r3, #1
+	BEQ _reswitch
+	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
+	STR r3, [r2]
+	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
+	STR r0, [r2]
+_reswitch
+	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
+	STR r1, [r2]
+	BX	lr
+	ENDP
+
+	END

+ 246 - 0
libcpu/arm/dm36x/cpuport.c

@@ -0,0 +1,246 @@
+/*
+ * File      : cpuport.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Develop Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2011-01-13     weety      modified from mini2440
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+
+#define ICACHE_MASK	(rt_uint32_t)(1 << 12)
+#define DCACHE_MASK	(rt_uint32_t)(1 << 2)
+
+extern void machine_reset(void);
+extern void machine_shutdown(void);
+
+#ifdef __GNUC__
/* Read the CP15 c1 control register. Bit 12 is the I-cache enable and
   bit 2 the D-cache enable (see ICACHE_MASK / DCACHE_MASK above). */
+rt_inline rt_uint32_t cp15_rd(void)
+{
+	rt_uint32_t i;
+
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+	return i;
+}
+
/**
 * Set the given bit(s) in the CP15 c1 control register
 * (read-modify-write), used to enable the I-cache or D-cache.
 *
 * Fix: the asm template uses r0 as a scratch register, so "r0" must
 * appear in the clobber list — the original listed only "memory",
 * letting GCC assume r0 survived the asm and potentially allocate the
 * input operand or a live value there.
 */
+rt_inline void cache_enable(rt_uint32_t bit)
+{
+	__asm__ __volatile__(			\
+		"mrc  p15,0,r0,c1,c0,0\n\t"	\
+		"orr  r0,r0,%0\n\t"			\
+	   	"mcr  p15,0,r0,c1,c0,0"		\
+		:							\
+		:"r" (bit)					\
+		:"r0", "memory");
+}
+
/**
 * Clear the given bit(s) in the CP15 c1 control register
 * (read-modify-write), used to disable the I-cache or D-cache.
 *
 * Fix: r0 is scratched by the asm template, so it is now declared in
 * the clobber list (the original omitted it).
 */
+rt_inline void cache_disable(rt_uint32_t bit)
+{
+	__asm__ __volatile__(			\
+		"mrc  p15,0,r0,c1,c0,0\n\t"	\
+		"bic  r0,r0,%0\n\t"			\
+		"mcr  p15,0,r0,c1,c0,0"		\
+		:							\
+		:"r" (bit)					\
+		:"r0", "memory");
+}
+#endif
+
+#ifdef __CC_ARM
/* ARMCC (RVDS) build of the same three CP15 c1 helpers as the GCC
   versions above: read the control register, OR a bit in, BIC a bit
   out. The embedded assembler allocates registers itself, so no
   explicit clobbers are needed here. */
+rt_inline rt_uint32_t cp15_rd(void)
+{
+	rt_uint32_t i;
+
+	__asm
+	{
+		mrc p15, 0, i, c1, c0, 0
+	}
+
+	return i;
+}
+
+rt_inline void cache_enable(rt_uint32_t bit)
+{
+	rt_uint32_t value;
+
+	__asm
+	{
+		mrc p15, 0, value, c1, c0, 0
+		orr value, value, bit
+		mcr p15, 0, value, c1, c0, 0
+	}
+}
+
+rt_inline void cache_disable(rt_uint32_t bit)
+{
+	rt_uint32_t value;
+
+	__asm
+	{
+		mrc p15, 0, value, c1, c0, 0
+		bic value, value, bit
+		mcr p15, 0, value, c1, c0, 0
+	}
+}
+#endif
+
+/**
+ * enable I-Cache
+ *
+ */
+void rt_hw_cpu_icache_enable()
+{
+	cache_enable(ICACHE_MASK);
+}
+
+/**
+ * disable I-Cache
+ *
+ */
+void rt_hw_cpu_icache_disable()
+{
+	cache_disable(ICACHE_MASK);
+}
+
+/**
+ * return the status of I-Cache
+ *
+ */
/* Returns nonzero (ICACHE_MASK) when the I-cache is enabled, 0 otherwise. */
+rt_base_t rt_hw_cpu_icache_status()
+{
+	return (cp15_rd() & ICACHE_MASK);
+}
+
+/**
+ * enable D-Cache
+ *
+ */
+void rt_hw_cpu_dcache_enable()
+{
+	cache_enable(DCACHE_MASK);
+}
+
+/**
+ * disable D-Cache
+ *
+ */
+void rt_hw_cpu_dcache_disable()
+{
+	cache_disable(DCACHE_MASK);
+}
+
+/**
+ * return the status of D-Cache
+ *
+ */
/* Returns nonzero (DCACHE_MASK) when the D-cache is enabled, 0 otherwise. */
+rt_base_t rt_hw_cpu_dcache_status()
+{
+	return (cp15_rd() & DCACHE_MASK);
+}
+
+/**
+ * reset cpu by dog's time-out
+ *
+ */
/* machine_reset() is supplied by the platform code (see the extern
   declaration above; implemented in platform/reset.c of this port).
   The infinite loop is a fail-safe in case the reset does not take
   effect immediately. */
+void rt_hw_cpu_reset()
+{
+	
+	rt_kprintf("Restarting system...\n");
+	machine_reset();
+
+	while(1);	/* loop forever and wait for reset to happen */
+
+	/* NEVER REACHED */
+}
+
+/**
+ *  shutdown CPU
+ *
+ */
/* Disables interrupts and hands off to the platform's
   machine_shutdown(), which is expected not to return.
   NOTE(review): the trailing "while (level) RT_ASSERT(0)" appears to
   be a trap for the case where shutdown returns and interrupts had
   been enabled (level has the NOINT bits clear) — confirm intent; it
   is a no-op when level == 0. */
+void rt_hw_cpu_shutdown()
+{
+	rt_uint32_t level;
+	rt_kprintf("shutdown...\n");
+
+	level = rt_hw_interrupt_disable();
+	machine_shutdown();
+	while (level)
+	{
+		RT_ASSERT(0);
+	}
+}
+
+#ifdef RT_USING_CPU_FFS
+/**
+ * This function finds the first bit set (beginning with the least significant bit) 
+ * in value and return the index of that bit.
+ *
+ * Bits are numbered starting at 1 (the least significant bit).  A return value of 
+ * zero from any of these functions means that the argument was zero.
+ * 
+ * @return return the index of the first bit set. If value is 0, then this function 
+ * shall return 0.
+ */
/* All three variants use the same trick: (value & -value) isolates the
   lowest set bit, CLZ counts its leading zeros, and 32 - clz yields
   the 1-based bit index. */
+#if defined(__CC_ARM)
+int __rt_ffs(int value)
+{
+	register rt_uint32_t x;
+
+	if (value == 0)
+		return value;
+	
+	__asm
+	{
+		rsb x, value, #0
+		and x, x, value
+		clz x, x
+		rsb x, x, #32
+	}
+
+	return x;
+}
+#elif defined(__IAR_SYSTEMS_ICC__)
/* NOTE(review): this variant has no C return statement and relies on
   the RSB leaving the result in r0 — confirm IAR preserves this under
   optimization. */
+int __rt_ffs(int value)
+{
+	if (value == 0)
+		return value;
+
+	__ASM("RSB  r4, r0, #0");
+	__ASM("AND  r4, r4, r0");
+	__ASM("CLZ  r4, r4");
+	__ASM("RSB  r0, r4, #32");
+}
+#elif defined(__GNUC__)
/* Here `value` is reused to hold the CLZ count before the final
   32 - clz conversion. */
+int __rt_ffs(int value)
+{
+	if (value == 0)
+		return value;
+
+	value &= (-value);
+	asm ("clz %0, %1": "=r"(value) :"r"(value));
+
+	return (32 - value);
+}
+#endif
+
+#endif
+
+
+/*@}*/

+ 575 - 0
libcpu/arm/dm36x/mmu.c

@@ -0,0 +1,575 @@
+/*
+ * File      : mmu.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ */
+
+#include "mmu.h"
+
+#ifdef __CC_ARM
/* Program the translation table base register (CP15 c2) with `i`.
   Flushes all TLB entries first (c8,c7,0) and sets the domain access
   control register (c3) to 0x55555555 — every domain is "client", so
   accesses are checked against the page-table permission bits. */
+void mmu_setttbase(rt_uint32_t i)
+{
+	register rt_uint32_t value;
+
+   /* Invalidates all TLBs.Domain access is selected as
+    * client by configuring domain access register,
+    * in that case access controlled by permission value
+    * set by page table entry
+    */
+	value = 0;
+    __asm
+    {
+        mcr p15, 0, value, c8, c7, 0
+	}
+
+	value = 0x55555555;
+	__asm
+	{
+        mcr p15, 0, value, c3, c0, 0
+        mcr p15, 0, i, c2, c0, 0
+    }
+}
+
/* Write the domain access control register (CP15 c3) directly. */
+void mmu_set_domain(rt_uint32_t i)
+{
+    __asm
+    {
+        mcr p15,0, i, c3, c0,  0
+    }
+}
+
/* Turn the MMU on: set bit 0 (M) of the CP15 control register. */
+void mmu_enable()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        orr value, value, #0x01
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
/* Turn the MMU off: clear bit 0 (M) of the CP15 control register. */
+void mmu_disable()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        bic value, value, #0x01
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
/* The six helpers below toggle individual CP15 control-register bits:
   I-cache = bit 12 (0x1000), D-cache = bit 2 (0x04),
   alignment-fault checking = bit 1 (0x02). */
+void mmu_enable_icache()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        orr value, value, #0x1000
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
+void mmu_enable_dcache()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        orr value, value, #0x04
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
+void mmu_disable_icache()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        bic value, value, #0x1000
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
+void mmu_disable_dcache()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        bic value, value, #0x04
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
+void mmu_enable_alignfault()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        orr value, value, #0x02
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
+void mmu_disable_alignfault()
+{
+    register rt_uint32_t value;
+
+    __asm
+    {
+        mrc p15, 0, value, c1, c0, 0
+        bic value, value, #0x02
+        mcr p15, 0, value, c1, c0, 0
+    }
+}
+
/* Clean + invalidate one D-cache line selected by set/way index
   (CP15 c7,c14,2 per the ARMv5 cache-operation encodings). */
+void mmu_clean_invalidated_cache_index(int index)
+{
+    __asm
+    {
+        mcr p15, 0, index, c7, c14, 2
+    }
+}
+
/* Clean + invalidate D-cache lines by virtual address (c7,c14,1) over
   [buffer, buffer+size); the start is rounded down to a cache-line
   boundary first. */
+void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
+{
+    unsigned int ptr;
+
+    ptr = buffer & ~(CACHE_LINE_SIZE - 1);
+
+    while(ptr < buffer + size)
+    {
+    	__asm
+    	{
+    		MCR p15, 0, ptr, c7, c14, 1
+    	}
+        ptr += CACHE_LINE_SIZE;
+    }
+}
+
/* Clean (write back dirty data, keep lines valid) by virtual address
   (c7,c10,1) over [buffer, buffer+size). */
+void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
+{
+	unsigned int ptr;
+
+	ptr = buffer & ~(CACHE_LINE_SIZE - 1);
+
+	while (ptr < buffer + size)
+	{
+		__asm
+		{
+			MCR p15, 0, ptr, c7, c10, 1
+		}
+		ptr += CACHE_LINE_SIZE;
+	}
+}
+
/* Invalidate (discard without write-back, c7,c6,1) by virtual address;
   any dirty data in the range is lost — used before DMA fills a buffer. */
+void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
+{
+	unsigned int ptr;
+
+	ptr = buffer & ~(CACHE_LINE_SIZE - 1);
+
+	while (ptr < buffer + size)
+	{
+		__asm
+		{
+			MCR p15, 0, ptr, c7, c6, 1
+		}
+		ptr += CACHE_LINE_SIZE;
+	}
+}
+
/* Invalidate the entire unified TLB (c8,c7,0). */
+void mmu_invalidate_tlb()
+{
+    register rt_uint32_t value;
+
+    value = 0;
+    __asm
+    {
+        mcr p15, 0, value, c8, c7, 0
+    }
+}
+
/* Invalidate the entire I-cache (c7,c5,0). */
+void mmu_invalidate_icache()
+{
+    register rt_uint32_t value;
+
+    value = 0;
+
+    __asm
+    {
+        mcr p15, 0, value, c7, c5, 0
+    }
+}
+
+
/* Invalidate the entire D-cache without cleaning (c7,c6,0); dirty
   lines are discarded. */
+void mmu_invalidate_dcache_all()
+{
+    register rt_uint32_t value;
+
+    value = 0;
+
+    __asm
+    {
+        mcr p15, 0, value, c7, c6, 0
+    }
+}
+#elif defined(__GNUC__)
/* GCC builds of the CP15 helpers above; same semantics, GNU inline
   asm with explicit register operands. */
+void mmu_setttbase(register rt_uint32_t i)
+{
+	register rt_uint32_t value;
+
+   /* Invalidates all TLBs.Domain access is selected as
+    * client by configuring domain access register,
+    * in that case access controlled by permission value
+    * set by page table entry
+    */
+	value = 0;
+	asm ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));
+
+	value = 0x55555555;
+	asm ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));
+	asm ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
+}
+
/* Write the domain access control register (CP15 c3) directly. */
+void mmu_set_domain(register rt_uint32_t i)
+{
+	asm ("mcr p15,0, %0, c3, c0,  0": :"r" (i));
+}
+
/* Enable the MMU (bit 0). Additionally selects the high exception
   vector base (bit 13 -> vectors at 0xFFFF0000) and sets S=1/R=0
   (bits 8/9) for system protection — unlike the __CC_ARM variant,
   which only sets the M bit. */
+void mmu_enable()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i |= 0x1;
+	i |= (1 << 13); /* High exception vectors selected, address range = 0xFFFF0000-0xFFFF001C */
+	/* S R bit=1 0  for system protection */
+	i |= (1 << 8);
+	i &= ~(1 << 9);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
/* Disable the MMU: clear bit 0 of the control register. */
+void mmu_disable()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i &= ~0x1;
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
/* Control-register bit toggles (GCC build): I-cache = bit 12,
   D-cache = bit 2, alignment-fault checking = bit 1. */
+void mmu_enable_icache()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i |= (1 << 12);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
+void mmu_enable_dcache()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i |= (1 << 2);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
+void mmu_disable_icache()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i &= ~(1 << 12);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
+void mmu_disable_dcache()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i &= ~(1 << 2);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
+void mmu_enable_alignfault()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i |= (1 << 1);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
+void mmu_disable_alignfault()
+{
+	register rt_uint32_t i;
+
+	/* read control register */
+	asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+
+	i &= ~(1 << 1);
+
+	/* write back to control register */
+	asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+}
+
/* Cache/TLB maintenance, GCC build — same CP15 c7/c8 operations as the
   __CC_ARM variants above (clean = write back, invalidate = discard;
   ",1" forms operate on the line containing a virtual address,
   ",0" forms on the whole cache). */
+void mmu_clean_invalidated_cache_index(int index)
+{
+	asm ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
+}
+
/* Clean + invalidate D-cache lines by MVA over [buffer, buffer+size);
   start rounded down to a cache-line boundary. */
+void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
+{
+    unsigned int ptr;
+
+    ptr = buffer & ~(CACHE_LINE_SIZE - 1);
+
+    while(ptr < buffer + size)
+    {
+    	asm ("mcr p15, 0, %0, c7, c14, 1": :"r" (ptr));
+        ptr += CACHE_LINE_SIZE;
+    }
+}
+
+
/* Clean (write back, keep valid) D-cache lines by MVA. */
+void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
+{
+	unsigned int ptr;
+
+	ptr = buffer & ~(CACHE_LINE_SIZE - 1);
+
+	while (ptr < buffer + size)
+	{
+		asm ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
+		ptr += CACHE_LINE_SIZE;
+	}
+}
+
/* Invalidate (discard, no write-back) D-cache lines by MVA. */
+void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
+{
+	unsigned int ptr;
+
+	ptr = buffer & ~(CACHE_LINE_SIZE - 1);
+
+	while (ptr < buffer + size)
+	{
+		asm ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
+		ptr += CACHE_LINE_SIZE;
+	}
+}
+
/* Invalidate the entire unified TLB. */
+void mmu_invalidate_tlb()
+{
+	asm ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
+}
+
/* Invalidate the entire I-cache. */
+void mmu_invalidate_icache()
+{
+	asm ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
+}
+
/* Invalidate the entire D-cache; dirty lines are discarded. */
+void mmu_invalidate_dcache_all()
+{
+    asm ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));
+}
+#endif
+
+/* level1 page table */
/* 4096 word entries (one per 1MB section of the 4GB space) = 16KB;
   the 16KB alignment is required by the translation table base
   register. */
+static volatile unsigned int _pgd_table[4*1024] ALIGN(16*1024);
+/*
+ * level2 page table
+ * RT_MMU_PTE_SIZE must be 1024*n
+ */
/* Pool of coarse level-2 tables handed out 1KB at a time by
   build_pte_mem_desc().
   NOTE(review): build_pte_mem_desc() and rt_hw_mmu_init() treat
   RT_MMU_PTE_SIZE as a BYTE count (offset check / rt_memset), while
   this declares that many 32-bit WORDS — confirm the intended units. */
+static volatile unsigned int _pte_table[RT_MMU_PTE_SIZE] ALIGN(1*1024);
+
/* Fill level-1 section entries for one SECT_MAPPED region: one 1MB
   descriptor per megabyte from vaddr_start through vaddr_end (the end
   megabyte is included — note the `<=` loop bound), mapping to
   paddr_start with the descriptor bits from sect_attr. */
+void mmu_create_pgd(struct mem_desc *mdesc)
+{
+    volatile rt_uint32_t *pTT;
+    volatile int i, nSec;
+    pTT = (rt_uint32_t *)_pgd_table + (mdesc->vaddr_start >> 20);
+    nSec = (mdesc->vaddr_end >> 20) - (mdesc->vaddr_start >> 20);
+    for(i = 0; i <= nSec; i++)
+    {
+        *pTT = mdesc->sect_attr | (((mdesc->paddr_start >> 20) + i) << 20);
+        pTT++;
+    }
+}
+
/* Fill level-2 small-page entries for one PAGE_MAPPED region.
   mdesc->sect_attr carries the L2 table base address in bits 31:10
   (assigned by build_pte_mem_desc()) plus the level-1 coarse-table
   descriptor bits in the low 10 bits. Whenever a level-1 slot is
   first populated, a fresh 1KB L2 table is consumed from that base;
   already-populated slots reuse the L2 table recorded in the entry. */
+void mmu_create_pte(struct mem_desc *mdesc)
+{
+    volatile rt_uint32_t *pTT;
+    volatile rt_uint32_t *p_pteentry;
+    int i;
+    rt_uint32_t vaddr;
+    rt_uint32_t total_page = 0;
+    rt_uint32_t pte_offset = 0;
+    rt_uint32_t sect_attr = 0;
+
+    total_page = (mdesc->vaddr_end >> 12) - (mdesc->vaddr_start >> 12) + 1;
+    pte_offset = mdesc->sect_attr & 0xfffffc00;
+    sect_attr = mdesc->sect_attr & 0x3ff;
+    vaddr = mdesc->vaddr_start;
+
+    for(i = 0; i < total_page; i++)
+    {
+        pTT = (rt_uint32_t *)_pgd_table + (vaddr >> 20);
+        if (*pTT == 0) /* Level 1 page table item not used, now update pgd item */
+        {
+            *pTT = pte_offset | sect_attr;
+            p_pteentry = (rt_uint32_t *)pte_offset + 
+            ((vaddr & 0x000ff000) >> 12);
+            pte_offset += 1024;
+        }
+        else /* using old Level 1 page table item */
+        {
+            p_pteentry = (rt_uint32_t *)(*pTT & 0xfffffc00) + 
+            ((vaddr & 0x000ff000) >> 12);
+        }
+
+
+        *p_pteentry = mdesc->page_attr | (((mdesc->paddr_start >> 12) + i) << 12);
+        vaddr += 0x1000;
+    }
+}
+
/* Pre-assign a chunk of _pte_table to every PAGE_MAPPED descriptor:
   one 1KB L2 table per 1MB of virtual range (nsec megabytes -> nsec
   << 10 bytes). The chosen base+offset is OR-ed into sect_attr for
   mmu_create_pte() to consume.
   NOTE(review): the overflow check runs only AFTER sect_attr has been
   given the offset for the current descriptor, so a region that
   overruns the pool is caught one descriptor late — confirm. */
+static void build_pte_mem_desc(struct mem_desc *mdesc, rt_uint32_t size)
+{
+    rt_uint32_t pte_offset = 0;
+    rt_uint32_t nsec = 0;
+    /* set page table */
+    for (; size > 0; size--)
+    {
+        if (mdesc->mapped_mode == PAGE_MAPPED)
+        {
+            nsec = (RT_ALIGN(mdesc->vaddr_end, 0x100000) - RT_ALIGN_DOWN(mdesc->vaddr_start, 0x100000)) >> 20;
+            mdesc->sect_attr |= (((rt_uint32_t)_pte_table)& 0xfffffc00) + pte_offset;
+            pte_offset += nsec << 10;
+        }
+        if (pte_offset >= RT_MMU_PTE_SIZE)
+        {
+            rt_kprintf("PTE table size too little\n");
+            RT_ASSERT(0);
+        }
+
+        mdesc++;
+    }
+}
+
+#if 0
+void mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd, rt_uint32_t paddrStart, rt_uint32_t attr)
+{
+    volatile rt_uint32_t *pTT;
+    volatile int i,nSec;
+    pTT=(rt_uint32_t *)_page_table+(vaddrStart>>20);
+    nSec=(vaddrEnd>>20)-(vaddrStart>>20);
+    for(i=0;i<=nSec;i++)
+    {
+		*pTT = attr |(((paddrStart>>20)+i)<<20);
+		pTT++;
+    }
+}
+#endif
+
/* Build the page tables from the board's mem_desc array (size entries)
   and switch the MMU plus both caches on. Caches and MMU are disabled
   and the TLB flushed first so the table walk sees only the new
   tables; caches are invalidated again at the end.
   NOTE(review): the first rt_memset clears 16KB — correct for the
   4096-word _pgd_table — but the second clears only RT_MMU_PTE_SIZE
   *bytes* of _pte_table, which is declared with RT_MMU_PTE_SIZE word
   elements; see the unit question at the table declarations. */
+void rt_hw_mmu_init(struct mem_desc *mdesc, rt_uint32_t size)
+{
+	/* disable I/D cache */
+	mmu_disable_dcache();
+	mmu_disable_icache();
+	mmu_disable();
+	mmu_invalidate_tlb();
+
+	/* clear pgd and pte table */
+    rt_memset((void *)_pgd_table, 0, 16*1024);
+    rt_memset((void *)_pte_table, 0, RT_MMU_PTE_SIZE);
+    build_pte_mem_desc(mdesc, size);
+    /* set page table */
+    for (; size > 0; size--)
+    {
+        if (mdesc->mapped_mode == SECT_MAPPED)
+        {
+            mmu_create_pgd(mdesc);
+        }
+        else
+        {
+            mmu_create_pte(mdesc);
+        }
+
+        mdesc++;
+    }
+
+    /* set MMU table address */
+    mmu_setttbase((rt_uint32_t)_pgd_table);
+
+    /* enables MMU */
+    mmu_enable();
+
+    /* enable Instruction Cache */
+    mmu_enable_icache();
+
+    /* enable Data Cache */
+    mmu_enable_dcache();
+
+    mmu_invalidate_icache();
+    mmu_invalidate_dcache_all();
+}

+ 164 - 0
libcpu/arm/dm36x/mmu.h

@@ -0,0 +1,164 @@
+/*
+ * File      : mmu.h
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ */
+
+#ifndef __MMU_H__
+#define __MMU_H__
+
+#include <rtthread.h>
+
+#define CACHE_LINE_SIZE	32
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PGD)
+ *   - common
+ */
+#define PGD_TYPE_MASK       (3 << 0)
+#define PGD_TYPE_FAULT      (0 << 0)
+#define PGD_TYPE_TABLE      (1 << 0)
+#define PGD_TYPE_SECT       (2 << 0)
+#define PGD_BIT4            (1 << 4)
+#define PGD_DOMAIN(x)       ((x) << 5)
+#define PGD_PROTECTION      (1 << 9)    /* ARMv5 */
+/*
+ *   - section
+ */
+#define PGD_SECT_BUFFERABLE	(1 << 2)
+#define PGD_SECT_CACHEABLE  (1 << 3)
+#define PGD_SECT_XN         (1 << 4)    /* ARMv6 */
+#define PGD_SECT_AP0        (1 << 10)
+#define PGD_SECT_AP1        (1 << 11)
+#define PGD_SECT_TEX(x)     ((x) << 12) /* ARMv5 */
+#define PGD_SECT_APX        (1 << 15)   /* ARMv6 */
+#define PGD_SECT_S          (1 << 16)   /* ARMv6 */
+#define PGD_SECT_nG         (1 << 17)   /* ARMv6 */
+#define PGD_SECT_SUPER      (1 << 18)   /* ARMv6 */
+
+#define PGD_SECT_UNCACHED   (0)
+#define PGD_SECT_BUFFERED   (PGD_SECT_BUFFERABLE)
+#define PGD_SECT_WT         (PGD_SECT_CACHEABLE)
+#define PGD_SECT_WB         (PGD_SECT_CACHEABLE | PGD_SECT_BUFFERABLE)
+#define PGD_SECT_MINICACHE  (PGD_SECT_TEX(1) | PGD_SECT_CACHEABLE)
+#define PGD_SECT_WBWA       (PGD_SECT_TEX(1) | PGD_SECT_CACHEABLE | PGD_SECT_BUFFERABLE)
+#define PGD_SECT_NONSHARED_DEV  (PGD_SECT_TEX(2))
+
+
+/*
+ * + Level 2 descriptor (PTE)
+ *   - common
+ */
+#define PTE_TYPE_MASK       (3 << 0)
+#define PTE_TYPE_FAULT      (0 << 0)
+#define PTE_TYPE_LARGE      (1 << 0)
+#define PTE_TYPE_SMALL      (2 << 0)
+#define PTE_TYPE_EXT        (3 << 0)    /* ARMv5 */
+#define PTE_BUFFERABLE      (1 << 2)
+#define PTE_CACHEABLE       (1 << 3)
+
+/*
+ *   - extended small page/tiny page
+ */
+#define PTE_EXT_XN          (1 << 0)    /* ARMv6 */
+#define PTE_EXT_AP_MASK     (3 << 4)
+#define PTE_EXT_AP0         (1 << 4)
+#define PTE_EXT_AP1         (2 << 4)
+#define PTE_EXT_AP_UNO_SRO  (0 << 4)
+#define PTE_EXT_AP_UNO_SRW  (PTE_EXT_AP0)
+#define PTE_EXT_AP_URO_SRW  (PTE_EXT_AP1)
+#define PTE_EXT_AP_URW_SRW  (PTE_EXT_AP1|PTE_EXT_AP0)
+#define PTE_EXT_TEX(x)      ((x) << 6)  /* ARMv5 */
+#define PTE_EXT_APX         (1 << 9)    /* ARMv6 */
+#define PTE_EXT_SHARED      (1 << 10)   /* ARMv6 */
+#define PTE_EXT_NG          (1 << 11)   /* ARMv6 */
+
+/*
+ *   - small page
+ */
+#define PTE_SMALL_AP_MASK       (0xff << 4)
+#define PTE_SMALL_AP_UNO_SRO    (0x00 << 4)
+#define PTE_SMALL_AP_UNO_SRW    (0x55 << 4)
+#define PTE_SMALL_AP_URO_SRW    (0xaa << 4)
+#define PTE_SMALL_AP_URW_SRW    (0xff << 4)
+
+/*
+ *  sector table properties
+ */
+#define SECT_CB          (PGD_SECT_CACHEABLE|PGD_SECT_BUFFERABLE) //cache_on, write_back
+#define SECT_CNB         (PGD_SECT_CACHEABLE)                     //cache_on, write_through
+#define SECT_NCB         (PGD_SECT_BUFFERABLE)                    //cache_off,WR_BUF on
+#define SECT_NCNB        (0 << 2)                                 //cache_off,WR_BUF off
+
+#define SECT_AP_RW       (PGD_SECT_AP0|PGD_SECT_AP1)              //supervisor=RW, user=RW
+#define SECT_AP_RO       ((0 << 10)|(0 << 11))                    //supervisor=RO, user=NO Access(SR=10)
+
+#define SECT_RW_CB      (SECT_AP_RW|PGD_DOMAIN(0)|PGD_SECT_WB|PGD_TYPE_SECT|PGD_BIT4) /* Read/Write, cache, write back */
+#define SECT_RW_CNB     (SECT_AP_RW|PGD_DOMAIN(0)|PGD_SECT_WT|PGD_TYPE_SECT|PGD_BIT4) /* Read/Write, cache, write through */
+#define SECT_RW_NCNB    (SECT_AP_RW|PGD_DOMAIN(0)|PGD_TYPE_SECT|PGD_BIT4) /* Read/Write without cache and write buffer */
+#define SECT_RW_FAULT   (SECT_AP_RW|PGD_DOMAIN(1)|PGD_TYPE_SECT|PGD_BIT4) /* Read/Write without cache and write buffer */
+
+#define SECT_RO_CB      (SECT_AP_RO|PGD_DOMAIN(0)|PGD_SECT_WB|PGD_TYPE_SECT|PGD_BIT4) /* Read Only, cache, write back */
+#define SECT_RO_CNB     (SECT_AP_RO|PGD_DOMAIN(0)|PGD_SECT_WT|PGD_TYPE_SECT|PGD_BIT4) /* Read Only, cache, write through */
+#define SECT_RO_NCNB    (SECT_AP_RO|PGD_DOMAIN(0)|PGD_TYPE_SECT|PGD_BIT4) /* Read Only without cache and write buffer */
+#define SECT_RO_FAULT   (SECT_AP_RO|PGD_DOMAIN(1)|PGD_TYPE_SECT|PGD_BIT4) /* Read Only without cache and write buffer */
+
+#define SECT_TO_PAGE     (PGD_DOMAIN(0)|PGD_TYPE_TABLE|PGD_BIT4) /* Level 2 descriptor (PTE) entry properity */
+
+/*
+ * page table properties
+ */
+#define PAGE_CB          (PTE_BUFFERABLE|PTE_CACHEABLE)  //cache_on, write_back
+#define PAGE_CNB         (PTE_CACHEABLE)                 //cache_on, write_through
+#define PAGE_NCB         (PTE_BUFFERABLE)                //cache_off,WR_BUF on
+#define PAGE_NCNB        (0 << 2)                        //cache_off,WR_BUF off
+
+#define PAGE_AP_RW       PTE_SMALL_AP_URW_SRW             //supervisor=RW, user=RW
+#define PAGE_AP_RO       PTE_SMALL_AP_UNO_SRO             //supervisor=RO, user=NO Access(SR=10)
+
+#define PAGE_RW_CB      (PAGE_AP_RW|PAGE_CB|PTE_TYPE_SMALL) /* Read/Write, cache, write back */
+#define PAGE_RW_CNB     (PAGE_AP_RW|PAGE_CNB|PTE_TYPE_SMALL) /* Read/Write, cache, write through */
+#define PAGE_RW_NCNB    (PAGE_AP_RW|PTE_TYPE_SMALL) /* Read/Write without cache and write buffer */
+#define PAGE_RW_FAULT   (PAGE_AP_RW|PTE_TYPE_SMALL) /* Read/Write without cache and write buffer */
+
+
+#define PAGE_RO_CB      (PAGE_AP_RO|PAGE_CB|PTE_TYPE_SMALL) /* Read Only, cache, write back */
+#define PAGE_RO_CNB     (PAGE_AP_RO|PAGE_CNB|PTE_TYPE_SMALL) /* Read Only, cache, write through */
+#define PAGE_RO_NCNB    (PAGE_AP_RO|PTE_TYPE_SMALL) /* Read Only without cache and write buffer */
+#define PAGE_RO_FAULT   (PAGE_AP_RO|PTE_TYPE_SMALL) /* Read Only without cache and write buffer */
+
/* One memory region to be mapped by rt_hw_mmu_init(). */
+struct mem_desc {
+	rt_uint32_t vaddr_start; /* first virtual address of the region */
+	rt_uint32_t vaddr_end;   /* last virtual address (inclusive) */
+	rt_uint32_t paddr_start; /* physical address the region maps to */
+	rt_uint32_t sect_attr;   /* level-1 descriptor bits; for PAGE_MAPPED, build_pte_mem_desc() also ORs in the L2 table address (bits 31:10) */
+	rt_uint32_t page_attr;   /* level-2 small-page descriptor bits; used only when PAGE_MAPPED */
+	rt_uint32_t mapped_mode; /* SECT_MAPPED (1MB sections) or PAGE_MAPPED (4KB pages) */
+	#define     SECT_MAPPED  0
+	#define     PAGE_MAPPED  1
+};
+
+
+void rt_hw_mmu_init(struct mem_desc *mdesc, rt_uint32_t size);
+
+#endif
+

+ 79 - 0
libcpu/arm/dm36x/stack.c

@@ -0,0 +1,79 @@
+/*
+ * File      : stack.c
+ * This file is part of RT-Thread RTOS
+ * COPYRIGHT (C) 2006, RT-Thread Development Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2011-01-13     weety
+ */
+#include <rtthread.h>
+
+/*****************************/
+/* CPU Mode                  */
+/*****************************/
+#define USERMODE		0x10
+#define FIQMODE			0x11
+#define IRQMODE			0x12
+#define SVCMODE			0x13
+#define ABORTMODE		0x17
+#define UNDEFMODE		0x1b
+#define MODEMASK		0x1f
+#define NOINT			0xc0
+
+/**
+ * This function will initialize thread stack
+ *
+ * @param tentry the entry of thread
+ * @param parameter the parameter of entry 
+ * @param stack_addr the beginning stack address
+ * @param texit the function will be called when thread exit
+ *
+ * @return stack address
+ */
/* Frame built (top to bottom): pc(=tentry), lr(=texit), r12..r1,
   r0(=parameter), cpsr. The entry word is stored AT stack_addr itself,
   so stack_addr must point to the last usable word of the stack.
   Bit 0 of tentry selects Thumb state via the T bit (0x20) in the
   initial CPSR.
   NOTE(review): only ONE PSR word is pushed here, while
   rt_hw_context_switch_to in context_rvds.S pops TWO (spsr then
   cpsr) — confirm this layout matches the context-switch assembly the
   build links, otherwise the first dispatch is misaligned by a word. */
+rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
+	rt_uint8_t *stack_addr, void *texit)
+{
+	rt_uint32_t *stk;
+
+	stk 	 = (rt_uint32_t*)stack_addr;
+	*(stk) 	 = (rt_uint32_t)tentry;			/* entry point */
+	*(--stk) = (rt_uint32_t)texit;			/* lr */
+	*(--stk) = 0;							/* r12 */
+	*(--stk) = 0;							/* r11 */
+	*(--stk) = 0;							/* r10 */
+	*(--stk) = 0;							/* r9 */
+	*(--stk) = 0;							/* r8 */
+	*(--stk) = 0;							/* r7 */
+	*(--stk) = 0;							/* r6 */
+	*(--stk) = 0;							/* r5 */
+	*(--stk) = 0;							/* r4 */
+	*(--stk) = 0;							/* r3 */
+	*(--stk) = 0;							/* r2 */
+	*(--stk) = 0;							/* r1 */
+	*(--stk) = (rt_uint32_t)parameter;		/* r0 : argument */
+
+	/* cpsr */
+	if ((rt_uint32_t)tentry & 0x01)
+		*(--stk) = SVCMODE | 0x20;			/* thumb mode */
+	else
+		*(--stk) = SVCMODE;					/* arm mode   */
+
+	/* return task's current stack address */
+	return (rt_uint8_t *)stk;
+}