[U-Boot-Users] [PATCH 4/6 part 3] UCC lib support in QUICC Engine

Subject: [PATCH] UCC lib support in QUICC Engine
---
 drivers/sysdev/qe_lib/ucc/Makefile |   36 ++
 drivers/sysdev/qe_lib/ucc/list.h   |  728 ++++++++++++++++++++++++++++++++++++
 drivers/sysdev/qe_lib/ucc/ucc.c    |  332 ++++++++++++++++
 drivers/sysdev/qe_lib/ucc/ucc.h    |   98 +++++
 4 files changed, 1194 insertions(+), 0 deletions(-)
 create mode 100644 drivers/sysdev/qe_lib/ucc/Makefile
 create mode 100644 drivers/sysdev/qe_lib/ucc/list.h
 create mode 100644 drivers/sysdev/qe_lib/ucc/ucc.c
 create mode 100644 drivers/sysdev/qe_lib/ucc/ucc.h
289a211d534f06211b566ad164238cad3e76e8ca
diff --git a/drivers/sysdev/qe_lib/ucc/Makefile b/drivers/sysdev/qe_lib/ucc/Makefile
new file mode 100644
index 0000000..85de266
--- /dev/null
+++ b/drivers/sysdev/qe_lib/ucc/Makefile
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2004 Freescale Semiconductor, Inc.
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+
+include $(TOPDIR)/config.mk
+
+LIB	= libucc.a
+
+OBJS	:= ucc.o ucc_fast.o ucc_geth.o ucc_geth_phy.o
+
+CFLAGS += -I..
+
+$(LIB):	$(OBJS)
+	$(AR) crv $@ $(OBJS)
+
+clean:
+	rm -f $(OBJS)
+
+distclean:	clean
+	rm -f $(LIB) core *.bak .depend *.flc
+
+#########################################################################
+
+.depend:	Makefile $(OBJS:.o=.c)
+	$(CC) -M $(CPPFLAGS) $(CFLAGS) $(OBJS:.o=.c) > $@
+
+-include .depend
+#########################################################################
diff --git a/drivers/sysdev/qe_lib/ucc/list.h b/drivers/sysdev/qe_lib/ucc/list.h
new file mode 100644
index 0000000..9217cc6
--- /dev/null
+++ b/drivers/sysdev/qe_lib/ucc/list.h
@@ -0,0 +1,728 @@
+/*
+ * drivers/sysdev/qe_lib/ucc/list.h
+ * General List Operations from Linux Kernel (linux/include/linux/list.h)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+
+#ifdef __KERNEL__
+
+#include "common.h"
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+#define smp_wmb barrier
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+	(type *)( (char *)__mptr - offsetof(type,member) );})
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100)
+#define LIST_POISON2 ((void *) 0x00200200)
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+	struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+			      struct list_head *prev, struct list_head *next)
+{
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head->prev, head);
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add_rcu(struct list_head *new,
+				  struct list_head *prev,
+				  struct list_head *next)
+{
+	new->next = next;
+	new->prev = prev;
+	smp_wmb();
+	next->prev = new;
+	prev->next = new;
+}
+
+/**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+	__list_add_rcu(new, head, head->next);
+}
+
+/**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_tail_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_tail_rcu(struct list_head *new,
+				     struct list_head *head)
+{
+	__list_add_rcu(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head *prev, struct list_head *next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * Note: list_empty on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_del_rcu()
+ * or list_add_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_kernel()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_del_rcu(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->prev = LIST_POISON2;
+}
+
+/*
+ * list_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The old entry will be replaced with the new entry atomically.
+ */
+static inline void list_replace_rcu(struct list_head *old,
+				    struct list_head *new)
+{
+	new->next = old->next;
+	new->prev = old->prev;
+	smp_wmb();
+	new->next->prev = new;
+	new->prev->next = new;
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+	__list_del(list->prev, list->next);
+	list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+				  struct list_head *head)
+{
+	__list_del(list->prev, list->next);
+	list_add_tail(list, head);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+	return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). Eg. it cannot be used
+ * if another CPU could re-list_add() it.
+ *
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
+static inline void __list_splice(struct list_head *list, struct list_head *head)
+{
+	struct list_head *first = list->next;
+	struct list_head *last = list->prev;
+	struct list_head *at = head->next;
+
+	first->prev = head;
+	head->next = first;
+
+	last->next = at;
+	at->prev = last;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(struct list_head *list, struct list_head *head)
+{
+	if (!list_empty(list))
+		__list_splice(list, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+				    struct list_head *head)
+{
+	if (!list_empty(list)) {
+		__list_splice(list, head);
+		INIT_LIST_HEAD(list);
+	}
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+	container_of(ptr, type, member)
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
+	     pos = pos->next)
+
+/**
+ * __list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This variant differs from list_for_each() in that it's the
+ * simplest possible list iteration code, no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
+	     pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+	     pos = n, n = pos->next)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+	for (pos = list_entry((head)->next, typeof(*pos), member); \
+	     prefetch(pos->member.next), &pos->member != (head); \
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+	for (pos = list_entry((head)->prev, typeof(*pos), member); \
+	     prefetch(pos->member.prev), &pos->member != (head); \
+	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use as a start point in
+ *			list_for_each_entry_continue
+ * @pos: the type * to use as a start point
+ * @head: the head of the list
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_prepare_entry(pos, head, member) \
+	((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - iterate over list of given type
+ *			continuing after existing point
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_continue(pos, head, member) \
+	for (pos = list_entry(pos->member.next, typeof(*pos), member); \
+	     prefetch(pos->member.next), &pos->member != (head); \
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+	for (pos = list_entry((head)->next, typeof(*pos), member), \
+		n = list_entry(pos->member.next, typeof(*pos), member); \
+	     &pos->member != (head); \
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_rcu - iterate over an rcu-protected list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_rcu(pos, head) \
+	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
+	     pos = rcu_dereference(pos->next))
+
+#define __list_for_each_rcu(pos, head) \
+	for (pos = (head)->next; pos != (head); \
+	     pos = rcu_dereference(pos->next))
+
+/**
+ * list_for_each_safe_rcu - iterate over an rcu-protected list safe
+ *			against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_safe_rcu(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+	     pos = rcu_dereference(n), n = pos->next)
+
+/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_entry_rcu(pos, head, member) \
+	for (pos = list_entry((head)->next, typeof(*pos), member); \
+	     prefetch(pos->member.next), &pos->member != (head); \
+	     pos = rcu_dereference(list_entry(pos->member.next, \
+					      typeof(*pos), member)))
+
+/**
+ * list_for_each_continue_rcu - iterate over an rcu-protected list
+ *			continuing after existing point.
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_continue_rcu(pos, head) \
+	for ((pos) = (pos)->next; prefetch((pos)->next), (pos) != (head); \
+	     (pos) = rcu_dereference((pos)->next))
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+	return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+	return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+	struct hlist_node *next = n->next;
+	struct hlist_node **pprev = n->pprev;
+	*pprev = next;
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->next = LIST_POISON1;
+	n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+	if (n->pprev) {
+		__hlist_del(n);
+		INIT_HLIST_NODE(n);
+	}
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+/**
+ * hlist_add_head_rcu - adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+				      struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	n->pprev = &h->first;
+	smp_wmb();
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+				    struct hlist_node *next)
+{
+	n->pprev = next->pprev;
+	n->next = next;
+	next->pprev = &n->next;
+	*(n->pprev) = n;
+}
+
+static inline void hlist_add_after(struct hlist_node *n,
+				   struct hlist_node *next)
+{
+	next->next = n->next;
+	n->next = next;
+	next->pprev = &n->next;
+
+	if (next->next)
+		next->next->pprev = &next->next;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
+	     pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+	     pos = n)
+
+#define hlist_for_each_rcu(pos, head) \
+	for ((pos) = (head)->first; pos && ({ prefetch((pos)->next); 1; }); \
+	     (pos) = rcu_dereference((pos)->next))
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(tpos, pos, head, member) \
+	for (pos = (head)->first; \
+	     pos && ({ prefetch(pos->next); 1;}) && \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(tpos, pos, member) \
+	for (pos = (pos)->next; \
+	     pos && ({ prefetch(pos->next); 1;}) && \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(tpos, pos, member) \
+	for (; pos && ({ prefetch(pos->next); 1;}) && \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @n: another &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+	for (pos = (head)->first; \
+	     pos && ({ n = pos->next; 1; }) && \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = n)
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+	for (pos = (head)->first; \
+	     pos && ({ prefetch(pos->next); 1;}) && \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = rcu_dereference(pos->next))
+
+#else
+#warning "don't include kernel headers in userspace"
+#endif /* __KERNEL__ */
+#endif
diff --git a/drivers/sysdev/qe_lib/ucc/ucc.c b/drivers/sysdev/qe_lib/ucc/ucc.c
new file mode 100644
index 0000000..59b4495
--- /dev/null
+++ b/drivers/sysdev/qe_lib/ucc/ucc.c
@@ -0,0 +1,332 @@
+/*
+ * drivers/sysdev/qe_lib/ucc/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * (C) Copyright 2006 Freescale Semiconductor, Inc
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * History:
+ *  20060601 tanya jiang (tanya.jiang@freescale.com)
+ *           Code style fixed; move from cpu/mpc83xx to drivers/sysdev
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include "common.h"
+#include "asm/errno.h"
+#include "immap_qe.h"
+#include "qe.h"
+#include "asm/io.h"
+#include "ucc.h"
+
+int ucc_set_type(int ucc_num, struct ucc_common *regs,
+		 enum ucc_speed_type speed)
+{
+	u8 guemr = 0;
+
+	/* check if the UCC number is in range. */
+	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+		return -EINVAL;
+
+	guemr = regs->guemr;
+	guemr &= ~(UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX);
+	switch (speed) {
+	case UCC_SPEED_TYPE_SLOW:
+		guemr |= (UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
+		break;
+	case UCC_SPEED_TYPE_FAST:
+		guemr |= (UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX);
+		break;
+	default:
+		return -EINVAL;
+	}
+	regs->guemr = guemr;
+
+	return 0;
+}
+
+int ucc_init_guemr(struct ucc_common *regs)
+{
+	u8 guemr = 0;
+
+	if (!regs)
+		return -EINVAL;
+
+	/* Set bit 3 (which is reserved in the GUEMR register) to 1 */
+	guemr = UCC_GUEMR_SET_RESERVED3;
+
+	regs->guemr = guemr;
+
+	return 0;
+}
+
+static void get_cmxucr_reg(int ucc_num, volatile u32 **p_cmxucr, u8 *reg_num,
+			   u8 *shift)
+{
+	switch (ucc_num) {
+	case (0):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr1);
+		*reg_num = 1;
+		*shift = 16;
+		break;
+	case (2):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr1);
+		*reg_num = 1;
+		*shift = 0;
+		break;
+	case (4):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr2);
+		*reg_num = 2;
+		*shift = 16;
+		break;
+	case (6):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr2);
+		*reg_num = 2;
+		*shift = 0;
+		break;
+	case (1):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr3);
+		*reg_num = 3;
+		*shift = 16;
+		break;
+	case (3):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr3);
+		*reg_num = 3;
+		*shift = 0;
+		break;
+	case (5):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr4);
+		*reg_num = 4;
+		*shift = 16;
+		break;
+	case (7):
+		*p_cmxucr = &(qe_immr->qmx.cmxucr4);
+		*reg_num = 4;
+		*shift = 0;
+		break;
+	default:
+		break;
+	}
+}
+
+int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask)
+{
+	volatile u32 *p_cmxucr;
+	u8 reg_num;
+	u8 shift;
+
+	/* check if the UCC number is in range. */
+	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+		return -EINVAL;
+
+	get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+
+	if (set)
+		out_be32(p_cmxucr, in_be32(p_cmxucr) | (mask << shift));
+	else
+		out_be32(p_cmxucr, in_be32(p_cmxucr) & ~(mask << shift));
+
+	return 0;
+}
+
+int ucc_set_qe_mux_rxtx(int ucc_num, qe_clock_e clock, comm_dir_e mode)
+{
+	volatile u32 *p_cmxucr;
+	u8 reg_num;
+	u8 shift;
+	u32 clockBits;
+	u32 clockMask;
+	int source = -1;
+
+	/* check if the UCC number is in range. */
+	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+		return -EINVAL;
+
+	if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
+		printf("ucc_set_qe_mux_rxtx: bad comm mode type passed.");
+		return -EINVAL;
+	}
+
+	get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+
+	switch (reg_num) {
+	case (1):
+		switch (clock) {
+		case (QE_BRG1):
+			source = 1;
+			break;
+		case (QE_BRG2):
+			source = 2;
+			break;
+		case (QE_BRG7):
+			source = 3;
+			break;
+		case (QE_BRG8):
+			source = 4;
+			break;
+		case (QE_CLK9):
+			source = 5;
+			break;
+		case (QE_CLK10):
+			source = 6;
+			break;
+		case (QE_CLK11):
+			source = 7;
+			break;
+		case (QE_CLK12):
+			source = 8;
+			break;
+		case (QE_CLK15):
+			source = 9;
+			break;
+		case (QE_CLK16):
+			source = 10;
+			break;
+		default:
+			source = -1;
+			break;
+		}
+		break;
+	case (2):
+		switch (clock) {
+		case (QE_BRG5):
+			source = 1;
+			break;
+		case (QE_BRG6):
+			source = 2;
+			break;
+		case (QE_BRG7):
+			source = 3;
+			break;
+		case (QE_BRG8):
+			source = 4;
+			break;
+		case (QE_CLK13):
+			source = 5;
+			break;
+		case (QE_CLK14):
+			source = 6;
+			break;
+		case (QE_CLK19):
+			source = 7;
+			break;
+		case (QE_CLK20):
+			source = 8;
+			break;
+		case (QE_CLK15):
+			source = 9;
+			break;
+		case (QE_CLK16):
+			source = 10;
+			break;
+		default:
+			source = -1;
+			break;
+		}
+		break;
+	case (3):
+		switch (clock) {
+		case (QE_BRG9):
+			source = 1;
+			break;
+		case (QE_BRG10):
+			source = 2;
+			break;
+		case (QE_BRG15):
+			source = 3;
+			break;
+		case (QE_BRG16):
+			source = 4;
+			break;
+		case (QE_CLK3):
+			source = 5;
+			break;
+		case (QE_CLK4):
+			source = 6;
+			break;
+		case (QE_CLK17):
+			source = 7;
+			break;
+		case (QE_CLK18):
+			source = 8;
+			break;
+		case (QE_CLK7):
+			source = 9;
+			break;
+		case (QE_CLK8):
+			source = 10;
+			break;
+		case (QE_CLK16):
+			source = 11;
+			break;
+		default:
+			source = -1;
+			break;
+		}
+		break;
+	case (4):
+		switch (clock) {
+		case (QE_BRG13):
+			source = 1;
+			break;
+		case (QE_BRG14):
+			source = 2;
+			break;
+		case (QE_BRG15):
+			source = 3;
+			break;
+		case (QE_BRG16):
+			source = 4;
+			break;
+		case (QE_CLK5):
+			source = 5;
+			break;
+		case (QE_CLK6):
+			source = 6;
+			break;
+		case (QE_CLK21):
+			source = 7;
+			break;
+		case (QE_CLK22):
+			source = 8;
+			break;
+		case (QE_CLK7):
+			source = 9;
+			break;
+		case (QE_CLK8):
+			source = 10;
+			break;
+		case (QE_CLK16):
+			source = 11;
+			break;
+		default:
+			source = -1;
+			break;
+		}
+		break;
+	default:
+		source = -1;
+		break;
+	}
+
+	if (source == -1) {
+		printf("ucc_set_qe_mux_rxtx: Bad combination of clock and UCC.");
+		return -ENOENT;
+	}
+
+	clockBits = (u32) source;
+	clockMask = QE_CMXUCR_TX_CLK_SRC_MASK;
+	if (mode == COMM_DIR_RX) {
+		clockBits <<= 4;	/* Rx field is 4 bits to left of Tx field */
+		clockMask <<= 4;	/* Rx field is 4 bits to left of Tx field */
+	}
+	clockBits <<= shift;
+	clockMask <<= shift;
+
+	out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clockMask) | clockBits);
+
+	return 0;
+}
diff --git a/drivers/sysdev/qe_lib/ucc/ucc.h b/drivers/sysdev/qe_lib/ucc/ucc.h
new file mode 100644
index 0000000..5d92594
--- /dev/null
+++ b/drivers/sysdev/qe_lib/ucc/ucc.h
@@ -0,0 +1,98 @@
+/*
+ * drivers/sysdev/qe_lib/ucc/ucc.h
+ *
+ * Internal header file for UCC unit routines.
+ *
+ * (C) Copyright 2006 Freescale Semiconductor, Inc
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * History:
+ *  20060601 tanya jiang (tanya.jiang@freescale.com)
+ *           Code style fixed; move from cpu/mpc83xx to drivers/sysdev
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_H__
+#define __UCC_H__
+
+#include "immap_qe.h"
+#include "qe.h"
+
+#define STATISTICS
+
+#define UCC_MAX_NUM	8
+
+typedef enum _enet_interface_e {
+	ENET_10_MII,
+	ENET_10_RMII,
+	ENET_10_RGMII,
+	ENET_100_MII,
+	ENET_100_RMII,
+	ENET_100_RGMII,
+	ENET_1000_GMII,
+	ENET_1000_RGMII,
+	ENET_1000_TBI,
+	ENET_1000_RTBI
+} enet_interface_e;
+
+/* Slow or fast type for UCCs.
+*/
+typedef enum ucc_speed_type {
+	UCC_SPEED_TYPE_FAST, UCC_SPEED_TYPE_SLOW
+} ucc_speed_type_e;
+
+/* Initial UCCs Parameter RAM address relative to: MEM_MAP_BASE (IMMR).
+*/
+typedef enum ucc_pram_initial_offset {
+	UCC_PRAM_OFFSET_UCC1 = 0x8400,
+	UCC_PRAM_OFFSET_UCC2 = 0x8500,
+	UCC_PRAM_OFFSET_UCC3 = 0x8600,
+	UCC_PRAM_OFFSET_UCC4 = 0x9000,
+	UCC_PRAM_OFFSET_UCC5 = 0x8000,
+	UCC_PRAM_OFFSET_UCC6 = 0x8100,
+	UCC_PRAM_OFFSET_UCC7 = 0x8200,
+	UCC_PRAM_OFFSET_UCC8 = 0x8300
+} ucc_pram_initial_offset_e;
+
+/* ucc_set_type
+ * Sets UCC to slow or fast mode.
+ *
+ * ucc_num - (In) number of UCC (0-7).
+ * regs    - (In) pointer to registers base for the UCC.
+ * speed   - (In) slow or fast mode for UCC.
+ */
+int ucc_set_type(int ucc_num, struct ucc_common *regs,
+		 enum ucc_speed_type speed);
+
+/* ucc_init_guemr
+ * Init the Guemr register.
+ *
+ * regs - (In) pointer to registers base for the UCC.
+ */
+int ucc_init_guemr(struct ucc_common *regs);
+
+int ucc_set_qe_mux_rxtx(int ucc_num, qe_clock_e clock, comm_dir_e mode);
+
+int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask);
+
+/* QE MUX clock routing for UCC
+*/
+static inline int ucc_set_qe_mux_grant(int ucc_num, int set)
+{
+	return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT);
+}
+
+static inline int ucc_set_qe_mux_tsa(int ucc_num, int set)
+{
+	return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA);
+}
+
+static inline int ucc_set_qe_mux_bkpt(int ucc_num, int set)
+{
+	return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT);
+}
+
+#endif /* __UCC_H__ */
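
For reviewers who want to see how these helpers are meant to be called, here is a minimal board-side sketch (illustrative only, not part of the patch; the function name board_ucc1_setup and the CLK9/CLK10 routing are assumptions, a real board must use the clocks actually wired to its UCC):

/* Usage sketch (not in this patch): bring UCC1 up in fast mode
 * and route its Rx/Tx clocks through the QE mux.
 */
static int board_ucc1_setup(struct ucc_common *ucc1_regs)
{
	int ret;

	/* Program the reserved GUEMR bit as required before other setup */
	ret = ucc_init_guemr(ucc1_regs);
	if (ret)
		return ret;

	/* UCC numbering is zero based: UCC1 is ucc_num 0 */
	ret = ucc_set_type(0, ucc1_regs, UCC_SPEED_TYPE_FAST);
	if (ret)
		return ret;

	/* Route Rx and Tx clocks through the QE mux (CMXUCR1 for UCC1);
	 * CLK9/CLK10 are assumptions for this example. */
	ret = ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_RX);
	if (ret)
		return ret;

	return ucc_set_qe_mux_rxtx(0, QE_CLK10, COMM_DIR_TX);
}

The grant, TSA, and breakpoint bits can then be toggled through the ucc_set_qe_mux_grant(), ucc_set_qe_mux_tsa(), and ucc_set_qe_mux_bkpt() wrappers declared in ucc.h.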