diff -Naur mki-adapter26-old/arch/i386/Makefile mki-adapter26-new/arch/i386/Makefile --- mki-adapter26-old/arch/i386/Makefile 2004-08-23 16:41:41.000000000 -0400 +++ mki-adapter26-new/arch/i386/Makefile 2004-08-23 16:41:41.000000000 -0400 @@ -0,0 +1,2 @@ +# Added by mki-adapter26 patch: +drivers-$(CONFIG_MKI) += arch/i386/mki-adapter26/ diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/LICENSE mki-adapter26-new/arch/i386/mki-adapter26/LICENSE --- mki-adapter26-old/arch/i386/mki-adapter26/LICENSE 1969-12-31 19:00:00.000000000 -0500 +++ mki-adapter26-new/arch/i386/mki-adapter26/LICENSE 2003-10-31 19:40:45.000000000 -0500 @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. 
These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) 19yy  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) 19yy name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/Makefile mki-adapter26-new/arch/i386/mki-adapter26/Makefile
--- mki-adapter26-old/arch/i386/mki-adapter26/Makefile	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/Makefile	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright 2003 by NeTraverse, Inc.
+# This software is distributed under the terms of the GPL
+# which is supplied in the LICENSE file with this distribution
+##############################################################################
+# $Id: Makefile,v 1.2 2003/11/01 00:40:45 rwb Exp $
+
+ifeq ($(CONFIG_MKI),y)
+obj-m += mki-adapter.o
+mki-adapter-objs := mki-main.o mki26.o mkivnet.o timer.o
+endif
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/mki26.c mki-adapter26-new/arch/i386/mki-adapter26/mki26.c
--- mki-adapter26-old/arch/i386/mki-adapter26/mki26.c	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/mki26.c	2004-08-23 16:41:19.000000000 -0400
@@ -0,0 +1,1813 @@
+/*
+ ****************************************************************************
+ * Copyright 2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki26.c,v 1.11 2004/08/23 20:41:19 lreiter Exp $
+ ***************************************************************************
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/suspend.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+#include
+
+#include "mkifunc.h"
+#include "mki-adapter26.h"
+
+extern struct desc_struct idt_table[], gdt_table[];
+
+/* externals */
+extern asmlinkage int sys_ftruncate(int, unsigned long);
+extern asmlinkage long sys_munmap(unsigned long, size_t);
+extern asmlinkage long sys_mprotect(unsigned long, size_t, unsigned long);
+
+atomic_t mkia_context_rss = ATOMIC_INIT(0);
+
+/* Performance/debugging counters */
+unsigned int mkia_cnt_rpr_pagereserved = 0;
+unsigned int mkia_cnt_rpr_not_dirty_acc = 0;
+unsigned int mkia_cnt_rpr_dirty = 0;
+unsigned int mkia_cnt_rpr_accessed = 0;
+
+struct idt_gdt_desc_struct {
+	unsigned short limit;
+	struct desc_struct __attribute__((packed)) *table;
+	unsigned short pad;	/* Align for stack variables */
+};
+
+struct idt_gdt_desc_struct mkia_idt_ = {(256 * 8) - 1, 0};
+#define mkia_idt mkia_idt_.table
+
+
+static int
+mhia_void(void *parm)
+{
+	return -1;
+}
+
+int (*mhia_table[])(void *) = {
+	&mhia_void,	/* SWITCH_TO */
+	&mhia_void,	/* SWITCH_AWAY */
+	&mhia_void,	/* THREAD_EXIT */
+	&mhia_void,	/* RET_USER */
+	&mhia_void,	/* SIGNAL */
+	&mhia_void,	/* QUERY */
+	&mhia_void,	/* SWAP_PAGES */
+};
+
+/* The next one might as well go into bss */
+static struct desc_struct * mkia_idt_rw;
+
+/*
+ * Used in the implementation of the MKI functions
+ */
+void mkia_assert_failure(char *exprstr, char *filename, int linenum);
+
+#define MKIA_ASSERT(expression) do { \
+	if (unlikely(!(expression))) \
+		mkia_assert_failure(#expression, __FILE__, __LINE__); \
+} while (0)
+
+
+#define TSS_SIZE_32		104	/* TSS is 104 bytes long for 32 bit TSS */
+#define MKI_TSS_IOBITMAP_BITS	1024	/* This many bits in the TSS's iobitmap */
+#define MKI_TSS_FUDGE		8	/* Fudge factor to add to the tss limit */
+#define MKI_TSS_LIMIT(x)	(TSS_SIZE_32 + ((x) >> 3) + MKI_TSS_FUDGE)
+
+/*
+ * This is the address where the MKI expects the Linux kernel to live.
+ * If someone makes a kernel with PAGE_OFFSET at a different address,
+ * then we probably won't work, because the Windows address space
+ * is very tight as it is!
+ */ + +#if __PAGE_OFFSET < MKI_END_USER_ADDR + #error MKI will not work if __PAGE_OFFSET is not >= MKI_END_USER_ADDR +#endif + +void +mkia_init(void) +{ +} + +mkia_task_t * mkia_alloc_task_info(void); +void mkia_pgvfree(void *, unsigned long); +void mkia_alloc_tss(void *, int); + +/***************************************************************************/ + +#ifdef CONFIG_PREEMPT +#ifdef PREEMPT_DEBUG +unsigned long mkia_preempt_enable_counter = 0; +unsigned long mkia_preempt_enable_nosched_counter = 0; +unsigned long mkia_preempt_disable_counter = 0; +#define PREEMPT_COUNT(a) a++; +#else +#define PREEMPT_COUNT(a) +#endif +#endif + +void +mkia_preempt_disable(void) +{ +#ifdef CONFIG_PREEMPT + PREEMPT_COUNT(mkia_preempt_disable_counter); + preempt_disable(); +#endif +} + +void +mkia_preempt_enable(void) +{ +#ifdef CONFIG_PREEMPT + PREEMPT_COUNT(mkia_preempt_enable_counter); + preempt_enable(); +#endif +} + +void +mkia_preempt_enable_nosched(void) +{ +#ifdef CONFIG_PREEMPT + PREEMPT_COUNT(mkia_preempt_enable_nosched_counter); + preempt_enable_no_resched(); +#endif +} + +int +mkia_get_preempt_count(void) +{ +#ifdef CONFIG_PREEMPT + return preempt_count(); +#else + return 0; +#endif +} + + +/* + * mkia_get_task_info + * allocates the task_info struct if it's null in the current task + */ +static inline mkia_task_t * +mkia_get_task_info(struct task_struct *curr) +{ + mkia_task_t *mtip; + if ((mtip = curr->mki_task_info) == NULL) { + mtip = mkia_alloc_task_info(); + curr->mki_task_info = mtip; + } + return mtip; +} + +static inline void +mkia_free_ldt(mkia_task_t *mtip) +{ + if (mtip->mti_merge_ldtp != NULL) { + vfree(mtip->mti_merge_ldtp); + mtip->mti_merge_ldtp = NULL; + mtip->mti_save_ldtp = NULL; + } +} + +static inline void +mkia_free_tss(mkia_task_t *mtip) +{ + if (mtip->mti_merge_tssp != NULL) { + kfree(mtip->mti_merge_tssp); + mtip->mti_merge_tssp = NULL; + } +} + +static inline void +mkia_free_gdt(mkia_task_t *mtip) +{ + if (mtip->mti_merge_gdtp != NULL) { + free_page((unsigned long) mtip->mti_merge_gdtp); + mtip->mti_merge_gdtp = NULL; + } +} + +/* Called from mkia_remove_hook */ +static inline void +mkia_cleanup_idt(void) +{ + lock_kernel(); + if (mkia_idt_rw != NULL) { + if (boot_cpu_data.f00f_bug) { + mkia_pgvfree(mkia_idt_rw, 2*PAGE_SIZE); + } + else { + free_page((unsigned long) mkia_idt_rw); + } + mkia_idt_rw = NULL; + } + mkia_idt = NULL; + unlock_kernel(); +} + +/* + * mkia_load_linux_descriptors + * + * switch in the linux gdt, idt, ldt, and tr + */ +static inline void +mkia_load_linux_descriptors(void) +{ + int smp_id = smp_processor_id(); + + /* + * KERNEL gdt elements at the tail end because Linux has + * per-CPU GDTs in this version. 
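	 *
	 * For reference: the operand consumed by lgdt/lidt below is the
	 * 6-byte pseudo-descriptor mirrored by idt_gdt_desc_struct above,
	 * a 16-bit limit followed by a 32-bit base.  Loading
	 * cpu_gdt_descr[smp_id] hands this CPU its own GDT back, undoing
	 * any private GDT installed by mkia_load_mki_descriptors().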
+ */ + struct desc_struct *gdttable; + + asm volatile("lgdt %0": : "m" (cpu_gdt_descr[smp_id])); + asm volatile("lidt %0": : "m" (idt_descr)); + load_LDT_desc(); + + /* Clear busy bit prior to ltr */ + gdttable = (struct desc_struct *) (cpu_gdt_descr[smp_id].address); + gdttable[GDT_ENTRY_TSS].b &= 0xfffffdff; + load_TR_desc(); +} + +static inline +void +mkia_alloc_gdt(mkia_task_t *mtip) +{ + struct idt_gdt_desc_struct cur_desc; + struct desc_struct *pgdt_table; + unsigned short tbllimit; + unsigned long size; + + MKIA_ASSERT(mtip->mti_merge_gdtp == NULL); + asm volatile("sgdt %0": "=m" (cur_desc)); + tbllimit = cur_desc.limit; + size = PAGE_ALIGN(tbllimit + 1); + + if (size != PAGE_SIZE) { + printk("mkia_alloc_gdt: unexpected GDT size %lx\n", size); + return; + } + pgdt_table = (struct desc_struct *) __get_free_page(GFP_KERNEL); + if (pgdt_table == NULL) { + printk("mkia_alloc_gdt: allocation failed!\n"); + return; + } + + /* + * Get copy of the linux GDT + */ + memcpy(pgdt_table, cur_desc.table, size); + mtip->mti_merge_gdtp = pgdt_table; +} + +/* + * mkia_internal_set_gdt_entry + * + * This is only called within mkiXX.c to set up the descriptors for the + * newly allocated gdt during initialization, and is called from + * mkia_set_gdt_entry below + */ +static inline int +mkia_internal_set_gdt_entry(unsigned short sel, unsigned long *new_entry, mkia_task_t *mtip) +{ + struct idt_gdt_desc_struct cur_desc; + int i; + unsigned short tbllimit; + struct desc_struct *pgdt_table; + + asm volatile("sgdt %0": "=m" (cur_desc)); + tbllimit = cur_desc.limit; + if (sel < 1 || sel > tbllimit) { + return EINVAL; + } + pgdt_table = mtip->mti_merge_gdtp; + if (sel >= PAGE_SIZE) { + return EBUSY; + } + if (sel >= __KERNEL_CS) { + return EBUSY; + } + + /* selector -> index */ + i = sel >> 3; + pgdt_table[i] = *(struct desc_struct *) new_entry; + return 0; +} + +static inline +void +mkia_setup_ldt_descriptor(mkia_task_t *mtip, void * pldt_table, int limit) +{ + struct desc_struct ldtdesc; + mtip->mti_current_ldtp = pldt_table; + mtip->mti_current_ldt_limit = limit; + ldtdesc.a = (0xffff0000 & ((unsigned long) pldt_table << 16)) | + (0x0000ffff & (limit)); + ldtdesc.b = (0xff000000 & (unsigned long) pldt_table) | + (0x000f0000 & limit) | (0x00008200) | + (0x000000ff & ((unsigned long) pldt_table >> 16)); + if (mtip->mti_merge_gdtp == NULL) { + printk("mki error: gdtp not allocated and " + "mkia_setup_ldt_descriptor called\n"); + } else { + mkia_internal_set_gdt_entry(MKI_LDT_DESC, + (unsigned long *) &ldtdesc, mtip); + } +} + +static inline +void +mkia_alloc_ldt(mkia_task_t *mtip) +{ + struct desc_struct *pldt_table; + char *copydest; + mm_context_t *contextp; + void *orig_ldt; + unsigned short limit; + int orig_ldt_size; + int new_ldt_size; + + MKIA_ASSERT(mtip->mti_merge_ldtp == NULL); + mtip->mti_save_ldtp = NULL; + mtip->mti_save_ldt_size = 0; + + limit = (LDT_ENTRIES * LDT_ENTRY_SIZE) - 1; + new_ldt_size = PAGE_ALIGN(limit + 1); + if (!(pldt_table = vmalloc(new_ldt_size))) { + printk("mkia_alloc_ldt: vamlloc failed\n"); + return; + } + mtip->mti_merge_ldtp = pldt_table; + + contextp = ¤t->mm->context; + orig_ldt = contextp->ldt; + orig_ldt_size = (contextp->size) * LDT_ENTRY_SIZE; + + if (orig_ldt_size) { + memcpy(pldt_table, orig_ldt, orig_ldt_size); + } + if (orig_ldt_size < new_ldt_size) { + copydest = ((char *) pldt_table) + orig_ldt_size; + memset(copydest, 0, new_ldt_size - orig_ldt_size); + } + mkia_setup_ldt_descriptor(mtip, pldt_table, limit); +} + +/* + * mkia_load_mki_descriptors + * + * 
+ *	switch in the task's gdt, idt, ldt, and tr
+ */
+static inline void
+mkia_load_mki_descriptors(mkia_task_t *mtip)
+{
+	struct idt_gdt_desc_struct cur_desc;
+	unsigned int *gdttable;
+
+	MKIA_ASSERT(mtip->mti_flags & MKIF_DESC_ALLOCATED);
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	cur_desc.table = mtip->mti_merge_gdtp;
+	asm volatile("lgdt %0": : "m" (cur_desc));
+	asm volatile("lldt %%ax": : "a" (MKI_LDT_DESC));
+
+	/*
+	 * Point to second part of TSS descriptor and clear busy
+	 * bit prior to ltr
+	 */
+	gdttable = mtip->mti_merge_gdtp;
+	gdttable[(MKI_TSS_ENTRY << 1) + 1] &= (unsigned int) 0xfffffdff;
+	asm volatile("ltr %%ax": : "a" (MKI_TSS_DESC));
+	if (mkia_idt != NULL) {
+		asm volatile("lidt %0": : "m" (mkia_idt_.limit));
+	}
+}
+
+
+void
+mkia_alloc_descriptors(struct task_struct *curr)
+{
+	mkia_task_t *mtip = mkia_get_task_info(curr);
+
+	MKIA_ASSERT(!(mtip->mti_flags & MKIF_DESC_ALLOCATED));
+	mkia_alloc_gdt(mtip);
+	mkia_alloc_ldt(mtip);
+	mkia_alloc_tss(curr, MKI_TSS_IOBITMAP_BITS);
+	MKIA_ASSERT(mtip->mti_merge_gdtp != NULL);
+	MKIA_ASSERT(mtip->mti_merge_ldtp != NULL);
+	MKIA_ASSERT(mtip->mti_merge_tssp != NULL);
+	mtip->mti_flags |= MKIF_DESC_ALLOCATED;
+}
+
+/*
+ * mkia_cleanup_descriptors()
+ *	needs to be called for each MKI process that exits.
+ *
+ * This function is always called in task context so no locking is
+ * necessary.
+ */
+void
+mkia_cleanup_descriptors(mkia_task_t *mtip)
+{
+	MKIA_ASSERT(mtip->mti_flags & MKIF_DESC_ALLOCATED);
+	mkia_free_ldt(mtip);
+	mkia_free_tss(mtip);
+	mkia_free_gdt(mtip);
+	MKIA_ASSERT(mtip->mti_merge_gdtp == NULL);
+	MKIA_ASSERT(mtip->mti_merge_ldtp == NULL);
+	MKIA_ASSERT(mtip->mti_merge_tssp == NULL);
+	mtip->mti_flags &= ~MKIF_DESC_ALLOCATED;
+}
+
+/*
+ * mkia_free_task_info()
+ */
+void mkia_free_task_info(mkia_task_t *mtip)
+{
+	if (mtip->mti_merge_gdtp != NULL) {
+		printk("WARNING! freeing MKI task info struct without "
+			"first freeing gdt. Freeing gdt/ldt/tss\n");
+		mkia_load_linux_descriptors();
+		mkia_cleanup_descriptors(mtip);
+	}
+	kfree(mtip);
+}
+
+/*
+ * mhia_switch_to
+ */
+void
+mhia_switch_to(void *arg1_next, void *arg2_unused)
+{
+	struct task_struct *next = arg1_next;
+	mkia_task_t *mtip;
+
+
+	/*
+	 * If the next task is MARKED, switch in our descriptor tables
+	 * and call the SWITCH_TO hook.  Don't use mkia_get_task_info(),
+	 * because it allocates the task info if it is NULL.
+	 */
+	if (unlikely(((mtip = next->mki_task_info) != NULL) &&
+			(mtip->mti_flags & MKIF_MARKED))) {
+		/* switch in private gdt, idt, ldt and tr */
+		mkia_load_mki_descriptors(mtip);
+		mtip->mti_flags |= MKIF_IN_SWITCH;
+		(void) (*mhia_table[SWITCH_TO])(mtip->mti_vm86p);
+		mtip->mti_flags &= ~MKIF_IN_SWITCH;
+		next->thread.fs = mtip->mti_fs;
+		next->thread.gs = mtip->mti_gs;
+	}
+}
+
+/*
+ * mhia_switch_away
+ */
+void
+mhia_switch_away(void *arg1_prev, void *arg2_unused)
+{
+	struct task_struct *prev = arg1_prev;
+	mkia_task_t *mtip;
+
+
+	/*
+	 * Don't use mkia_get_task_info(), because it allocates the task
+	 * info if it is NULL, and we don't need _every_ task having an
+	 * mtip!
+	 */
+	if (unlikely(((mtip = prev->mki_task_info) != NULL) &&
+			(mtip->mti_flags & MKIF_MARKED))) {
+		int zero = 0;
+		asm volatile("movl %%fs,%0":"=m" (*(int *)&mtip->mti_fs));
+		asm volatile("movl %%gs,%0":"=m" (*(int *)&mtip->mti_gs));
+		asm volatile("movl %0, %%fs": : "m" (zero));
+		asm volatile("movl %0, %%gs": : "m" (zero));
+
+		mkia_load_linux_descriptors();
+		mtip->mti_flags |= MKIF_IN_SWITCH;
+		(void) (*mhia_table[SWITCH_AWAY])(mtip->mti_vm86p);
+		mtip->mti_flags &= ~MKIF_IN_SWITCH;
+	}
+}
+
+void
+mhia_exit(void *arg1_unused, void *arg2_unused)
+{
+	/* Call the EXIT hook for MARKED tasks */
+	mkia_task_t *mtip;
+	struct task_struct *curr;
+	curr = current;
+
+	/*
+	 * Don't use mkia_get_task_info(), because it allocates the
+	 * task info if it is NULL
+	 */
+	if (unlikely((mtip = curr->mki_task_info) != NULL)) {
+		if (likely(mtip->mti_flags & MKIF_MARKED)) {
+			(void) (*mhia_table[THREAD_EXIT])(mtip->mti_vm86p);
+			/* put everything back */
+			mkia_load_linux_descriptors();
+			if (mtip->mti_flags & MKIF_DESC_ALLOCATED)
+				mkia_cleanup_descriptors(mtip);
+		}
+		mkia_free_task_info(mtip);
+		curr->mki_task_info = NULL;
+	}
+}
+
+/*
+ * mhia_ret_user
+ *
+ * This routine gets called just before the kernel is going to make
+ * a kernel mode to user mode transition.  On 2.6, this only gets
+ * called for "marked" tasks (or about to be marked tasks), because
+ * only marked tasks have TIF_MKI_RETUSER set.
+ */
+void
+mhia_ret_user(void *arg1_regs, void *arg2_unused)
+{
+	unsigned long *r0ptr = arg1_regs;
+	struct task_struct *curr = current;
+	mkia_task_t *mtip;
+
+	if (unlikely((mtip = curr->mki_task_info) == NULL))
+		return;
+
+	if (unlikely(!(mtip->mti_flags & MKIF_MARKED)))
+		return;
+
+	while (curr->flags & PF_FREEZE) {
+#ifdef PF_IOTHREAD
+		/* Call with PF_IOTHREAD to flush signals */
+		refrigerator(PF_IOTHREAD);
+#else
+		/* Call with PF_NOFREEZE to flush signals */
+		refrigerator(PF_NOFREEZE);
+#endif /* PF_IOTHREAD */
+	}
+	if (signal_pending(current)) {
+		/*
+		 * We catch signals here so that the lower layer does
+		 * not try to do the Linux DOSEMU vm86 handling or any
+		 * other kind of usermode signal handling.  With Merge,
+		 * a signal in the VM86 process is always a reason to
+		 * exit.
+		 */
+		__asm__ __volatile__("sti\n");
+		do_exit(1);
+		/* NORETURN */
+	}
+	if (mtip->mti_event_pending) {
+		mtip->mti_event_pending = 0;
+		(void) (*mhia_table[RET_USER])(r0ptr);
+	}
+	MKIA_ASSERT(mkia_get_preempt_count() == 0);
+}
+
+
+/*
+ * mhia_swap
+ */
+void
+mhia_swap(void *arg1_priority, void *arg2_gfp_mask)
+{
+	int priority;
+	int gfp_mask;
+	int hard_flag;
+
+	priority = (int) arg1_priority;
+	gfp_mask = (int) arg2_gfp_mask;
+
+	/*
+	 * A "HARD" swap means get rid of all mappings rather than
+	 * just aging them.
+ */ + hard_flag = (priority < 6); + (void) (*mhia_table[SWAP_PAGES])((void *) hard_flag); +} + +/* mkia_pgvfree assumes PAGE_SIZE <= size <= 4M */ +/* mkia_pgvfree assumes size is multiple of PAGE_SIZE */ +void +mkia_pgvfree(void * addr, unsigned long size) +{ + pgd_t * dir; + pmd_t * pmd; + pte_t * pte; + unsigned long end; + unsigned long address = (unsigned long) addr; + + MKIA_ASSERT((size >= PAGE_SIZE) && (size < 0x400000) && + ((size & 0xfff) == 0)); + + dir = pgd_offset_k(address); + pmd = pmd_offset(dir, address); + pte = pte_offset_kernel(pmd, address); + address &= ~PGDIR_MASK; + end = address + size; + if (end > PGDIR_SIZE) { + end = PGDIR_SIZE; + } + + /* skip first page and just clear pte of others */ + + pte++; + address += PAGE_SIZE; + size -= PAGE_SIZE; + while (address < end) { + pte_clear(pte); + address += PAGE_SIZE; + size -= PAGE_SIZE; + pte++; + } + if (size) { + dir++; + pmd = pmd_offset(dir, address); + pte = pte_offset_kernel(pmd, address); + while (size) { + pte_clear(pte); + size -= PAGE_SIZE; + pte++; + } + } + vfree(addr); +} + +/* + * void + * mkia_post_event(void *cookie) + * + * Set's an event pending. NOTE this routine may be called OUT of + * context and, in fact, out of an interrupt. We can get away without + * a BUS-LOCK prefix because we do an assigment rather than a Read- + * Modify-Write. + */ +void +mkia_post_event(void *cookie) +{ + struct task_struct *t; + mkia_task_t *mtip; + unsigned long flags; + + flags = 0; + t = (struct task_struct *) cookie; + + mtip = (mkia_task_t *)(t->mki_task_info); + mtip->mti_event_pending = 1; + +#ifdef CONFIG_SMP + /* + * If the task is running on a different CPU, force a reschedule + * on that CPU. This will force that task into the kernel if it + * is not already there, and on the way back to user mode, + * mti_event_pending will get checked. Only do this if we are not + * being called from a SWITCH_TO or SWITCH_AWAY hook. Otherwise + * we can get a deadlock, since interrupts could be disabled on + * this cpu and we could block on the run_queue lock or the reschedule. 
+ */ + if (!(mtip->mti_flags & MKIF_IN_SWITCH) && (num_online_cpus() > 1)) { + mki_kick_if_running(t); + } +#endif /* CONFIG_SMP */ + +} +EXPORT_SYMBOL(mkia_post_event); + +int mkia_set_gdt_entry(unsigned short sel, unsigned long *new_entry) +{ + int ret; + mkia_task_t *mtip; + + mtip = mkia_get_task_info(current); + if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) { + printk("mki warning: allocating descriptors in " + "mkia_set_gdt_entry"); + mkia_alloc_descriptors(current); + } + + /* + * If this is the first time we are setting a descriptor, be sure + * the old copies of the linux descriptors are cleared out and + * that current->mm->context.segments is correct + */ + if (!(mtip->mti_flags & MKIF_GDT_SELECTOR_ADDED)) { + /* selectors 0-12 */ + mtip->mti_flags |= MKIF_GDT_SELECTOR_ADDED; + memset(mtip->mti_merge_gdtp, 0, MKI_CLEAR_GDT_AMOUNT); + } + if (sel >= MKI_TSS_DESC) { + return EBUSY; + } + if (!(ret = mkia_internal_set_gdt_entry(sel, new_entry, mtip))) { + /* Everything went ok, so switch in our private descriptors */ + mkia_load_mki_descriptors(mtip); + } + return ret; +} +EXPORT_SYMBOL(mkia_set_gdt_entry); + +int mkia_set_ldt_entry(unsigned short sel, unsigned long *new_entry) +{ + struct desc_struct *pldte; + mkia_task_t *mtip; + struct task_struct *curr = current; + + mtip = mkia_get_task_info(curr); + MKIA_ASSERT(mtip->mti_merge_ldtp); + + pldte = (struct desc_struct *) + ((char *) mtip->mti_current_ldtp + (sel & ~(0x7))); + + *pldte = *(struct desc_struct *) new_entry; + return 0; +} +EXPORT_SYMBOL(mkia_set_ldt_entry); + +int mkia_check_vm86(void) +{ + return ((mkia_get_task_info(current))->mti_flags & MKIF_MARKED) + ? 1 : 0; +} +EXPORT_SYMBOL(mkia_check_vm86); + +static inline +int +mkia_alloc_idt(void) +{ + /* + * No private IDT yet. Make private copy of IDT. For F00F bug + * systems allocate two pages. Make a ro version, which points + * to the rw. + */ + if (boot_cpu_data.f00f_bug) { + pte_t * pte; + pte_t * pte_rw; + unsigned long page; + + mkia_idt_rw = vmalloc(2*PAGE_SIZE); + page = (unsigned long) mkia_idt_rw; + pte_rw = pte_offset_kernel(pmd_offset(pgd_offset(&init_mm, + page), page), page); + page += PAGE_SIZE; + mkia_idt = (struct desc_struct *) page; + pte = pte_offset_kernel(pmd_offset(pgd_offset(&init_mm, + page), page), page); + __free_page(pte_page(*pte)); + *pte = *pte_rw; + pte_modify(*pte, PAGE_KERNEL_RO); + flush_tlb_all(); + } + else { + mkia_idt = mkia_idt_rw = (struct desc_struct *) + __get_free_page(GFP_KERNEL); + } + if (mkia_idt == NULL) { + return 0; + } + memcpy(mkia_idt_rw, idt_table, PAGE_SIZE); + return 1; +} + +void mkia_set_idt_entry(unsigned short vect_num, unsigned long *new_entry, unsigned long *prev_entry) +{ + if (mkia_idt == NULL) { + printk("mkia_set_idt_entry: FAILED - no private IDT\n"); + return; + } + lock_kernel(); + *(struct desc_struct *) prev_entry = mkia_idt[vect_num]; + mkia_idt_rw[vect_num] = *(struct desc_struct *) new_entry; + unlock_kernel(); +} +EXPORT_SYMBOL(mkia_set_idt_entry); + +void mkia_set_idt_dpl(void) +{ + int i; + struct desc_struct *p; + + /* + * Go make all IDT descriptors DPL 0. Note that Merge + * has special case code to enable Linux's "int 0x80" + * system calls. 
+ */ + lock_kernel(); + p = mkia_idt_rw; + for (i = 0; i < 256; i++, p++) { + if (p->a | p->b) { + p->b &= ~(3 << 13); + } + } + unlock_kernel(); +} +EXPORT_SYMBOL(mkia_set_idt_dpl); + +void * +mkia_getparm(int request, void *parm) +{ + extern int mkia_disable_kmalloc; + mkia_task_t *mtip; + switch (request) { + case PARM_POST_COOKIE: + *(void **)parm = current; + break; + case PARM_FRAME_BASE: + /* + * This really is just the very bottom of the + * stack. To turn this into frame ptr, Merge + * has to subtract off its idea of the frame size. + */ + *(void **)parm = ((char *)current_thread_info()) + + THREAD_SIZE; + break; + case PARM_TASK_MASK: + /* + * Since 2.6 requires 5.5.x of merge, just return 0 + * 5.5.x will use PARM_TASK_MASK_V2. + */ + *(unsigned long *)parm = 0; + break; + case PARM_TSSP: + /* This should not be called in 2.4, + mkia_adjust_esp0 is used instead */ + *(void **) parm = NULL; + break; + case PARM_PRIVATE: + *(void **) parm = NULL; + break; + case PARM_LDTP: + if ((mtip = current->mki_task_info) == NULL) + *(void **) parm = NULL; + else + *(void **) parm = mtip->mti_current_ldtp; + break; + case PARM_RUID: + *(uid_t *) parm = current->uid; + break; + + /* ========================================================= */ + /* Begin Version 2 MKI calls */ + + case PARM_TASK_MASK_V2: + *(unsigned long *)parm = ~(THREAD_SIZE - 1); + break; + + case PARM_MKI_VERSION: + *(unsigned int *)parm = 6; + break; + + case PARM_NUM_CPUS: + *(unsigned int *)parm = num_online_cpus(); + break; + + case PARM_MAX_NUMPROCS: + *(unsigned int *)parm = PID_MAX_LIMIT; + break; + + case PARM_PREEMPT_ENABLE: +#ifdef CONFIG_PREEMPT + *(void **) parm = mkia_preempt_enable; +#else + *(int *) parm = 0; +#endif + break; + + case PARM_PREEMPT_DISABLE: +#ifdef CONFIG_PREEMPT + *(void **) parm = mkia_preempt_disable; +#else + *(int *) parm = 0; +#endif + break; + + case PARM_PREEMPT_COUNT: + mkia_disable_kmalloc = 0; +#ifdef CONFIG_PREEMPT + *(void **) parm = mkia_get_preempt_count; +#else + *(int *) parm = 0; +#endif + break; + + case PARM_HZ: + *(int *) parm = HZ; + break; + + default: + printk("mkia_getparm: no support for this request %d\n", + request); + *(int *) parm = 0; /* for sanity */ + break; + } + return 0; +} +EXPORT_SYMBOL(mkia_getparm); + +int +mkia_mem_lock_unlock(unsigned long start, unsigned long end, int lock) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct * vma; + unsigned long curaddr; + unsigned long curend; + unsigned long pages; + + tsk = current; + mm = tsk->mm; + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (in_atomic() || !mm) + return -EBUSY; + + down_write(&mm->mmap_sem); + + /* Do page alignment */ + start &= PAGE_MASK; + end = (end + ~PAGE_MASK) & PAGE_MASK; + + curaddr = start; + while (curaddr < end) { + vma = find_vma(mm, curaddr); + if (!vma) { + printk("up_and_fail: no vma found! " + "(mm=0x%p curaddr=0x%lx)\n", mm, curaddr); + goto up_and_fail; + } + if (vma->vm_start != curaddr) { + goto up_and_fail; + } + if (vma->vm_end > end) { + goto up_and_fail; + } + curaddr = vma->vm_end; + } + + /* + * OK. Now that we've done that, we can go ahead and lock (or unlock) + * the pages. If we are locking we just have to set VM_LOCKED and + * then call make_pages_present to make sure the ptes get filled in. + * VM_LOCKED will prevent swap_out_vma() from stealing the pages. + * For unlock, we just have to clear VM_LOCKED. 
+ */ + curaddr = start; + while (curaddr < end) { + vma = find_vma(mm, curaddr); + curend = vma->vm_end; + pages = (curend - curaddr) >> PAGE_SHIFT; + if (lock) { + MKIA_ASSERT((vma->vm_flags & VM_LOCKED) == 0); + vma->vm_flags |= VM_LOCKED; + mm->locked_vm += pages; + make_pages_present(curaddr, curend); + } else { + MKIA_ASSERT(vma->vm_flags & VM_LOCKED); + vma->vm_flags &= ~VM_LOCKED; + MKIA_ASSERT(mm->locked_vm >= pages); + mm->locked_vm -= pages; + } + curaddr = curend; + } + up_write(&mm->mmap_sem); + return 0; + +up_and_fail: + up_write(&mm->mmap_sem); + printk("_mki_mem_lock_unlock: bad params: " + "start %lx, end %lx, lock %x\n", start, end, lock); + return -EINVAL; +} + +int +mkia_set_private_ldt(void *ldtp, size_t limit) +{ + unsigned long ldtaddr; + unsigned long endaddr; + struct task_struct *curr; + mm_context_t *contextp; + mkia_task_t *mtip; + + curr=current; + mtip = mkia_get_task_info(curr); + MKIA_ASSERT((mtip->mti_flags & MKIF_MARKED) != 0); + if (curr->mm == NULL) + return EINVAL; + + contextp = &curr->mm->context; + if (! (mtip->mti_flags & MKIF_SETLDT_DONE)) { + mtip->mti_save_ldtp = mtip->mti_current_ldtp; + mtip->mti_save_ldt_size = mtip->mti_current_ldt_limit + 1; + mtip->mti_flags |= MKIF_SETLDT_DONE; + if (ldtp == NULL) + return 0; + } + if (ldtp == NULL) { + /* NULL means restore the saved original LDT */ + ldtp = mtip->mti_save_ldtp; + limit = (mtip->mti_save_ldt_size * LDT_ENTRY_SIZE) - 1; + } + + /* Unlock previous LDT */ + ldtaddr = (unsigned long) mtip->mti_current_ldtp; + if (ldtaddr < MKI_END_USER_ADDR) { + endaddr = ldtaddr + + mtip->mti_current_ldt_limit + 1; + mkia_mem_lock_unlock(ldtaddr, endaddr, 0 /* Unlock */); + } + + /* Lock the new LDT */ + ldtaddr = (unsigned long) ldtp; + if (ldtaddr < MKI_END_USER_ADDR) { + endaddr = ldtaddr + limit + 1; + mkia_mem_lock_unlock(ldtaddr, endaddr, 1 /* Lock */); + } + + mkia_setup_ldt_descriptor(mtip, ldtp, limit); + asm volatile("lldt %%ax": : "a" (MKI_LDT_DESC)); + return 0; +} +EXPORT_SYMBOL(mkia_set_private_ldt); + +void mkia_pgfault_get_state(int *pfault_ok, void *fcstate_arg) +{ + mki_fault_catch_t *fcstate = fcstate_arg; + unsigned long faulting_eip; + + /* + * Context check. Make sure that we are not in the middle of + * servicing an interrupt. If so, then the fault catch information + * is not valid. + */ + if (in_interrupt()) { + *pfault_ok = 0; + return; + } + + /* + * Save the old state and clear the current. + */ + *pfault_ok = 1; + faulting_eip = (unsigned long) + fcstate->mkifc_os_dependent[FC_FAULT_EIP]; + + fcstate->mkifc_catching_user_fault = + (search_exception_tables(faulting_eip) ? 1 : 0); + + fcstate->mkifc_os_dependent[FC_SAVE_FS] = (int) (get_fs()).seg; + set_fs(KERNEL_DS); +} +EXPORT_SYMBOL(mkia_pgfault_get_state); + +void mkia_pgfault_restore_state(void *fcstate_arg) +{ + mki_fault_catch_t *fcstate = fcstate_arg; + set_fs(MAKE_MM_SEG(fcstate->mkifc_os_dependent[FC_SAVE_FS])); + fcstate->mkifc_os_dependent[FC_SAVE_FS] = 0; +} +EXPORT_SYMBOL(mkia_pgfault_restore_state); + +/* + * void * + * mkia_alloc_priv_tss(void) + */ +void * +mkia_alloc_priv_tss(void) +{ + struct tss_struct *t; + mkia_task_t *mtip; + + mtip = mkia_get_task_info(current); + if (mtip->mti_vm86p == NULL) { + printk("mki-adapter: current task has no vm86p on " + "entry to mki_alloc_priv_tss\n"); + return NULL; + } + + MKIA_ASSERT(mtip->mti_flags & MKIF_DESC_ALLOCATED); + + /* Just in case, if we have not done so already, alloc the tss + * and make sure the iobitmap is properly initialized. 
+ */ + if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) { + printk("mki-adapter: warning! allocating descriptors " + "from alloc_priv_tss\n"); + mkia_alloc_descriptors(current); + } + t = mtip->mti_merge_tssp; + + return (void *) t; +} +EXPORT_SYMBOL(mkia_alloc_priv_tss); + +void mkia_set_vm86p(void *vm86p) +{ + struct task_struct *curr=current; + (mkia_get_task_info(curr))->mti_vm86p = vm86p; +} +EXPORT_SYMBOL(mkia_set_vm86p); + +void * +mkia_get_vm86p(void) +{ + return (mkia_get_task_info(current))->mti_vm86p; +} +EXPORT_SYMBOL(mkia_get_vm86p); + +void mkia_mark_vm86(void) +{ + mkia_task_t *mtip; + mtip = mkia_get_task_info(current); + mtip->mti_flags |= MKIF_MARKED; + set_thread_flag(TIF_MKI_RETUSER); + + /* Ok, now switch in the mki descriptors */ + if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) { + printk("mki-adapter: warning! allocating descriptors " + "from mki_mark_vm86\n"); + mkia_alloc_descriptors(current); + } + mkia_load_mki_descriptors(mtip); +} +EXPORT_SYMBOL(mkia_mark_vm86); + +void mkia_yield(void) +{ + /* + * Modules are supposed to just call yield() instead of + * sys_sched_yield(). + */ + yield(); +} +EXPORT_SYMBOL(mkia_yield); + +void mkia_clear_vm86(void) +{ + mkia_task_t *mtip; + + mtip = mkia_get_task_info(current); + + /* + * Make sure we release our descriptors and go back to the + * linux gdt, idt, tss, and ldt + */ + mkia_load_linux_descriptors(); + mkia_cleanup_descriptors(mtip); + + mtip->mti_flags &= ~MKIF_MARKED; + clear_thread_flag(TIF_MKI_RETUSER); + set_thread_flag(TIF_IRET); +} +EXPORT_SYMBOL(mkia_clear_vm86); + +int +mkia_ftruncate_k(int fd, off_t length) +{ + return (int) sys_ftruncate(fd, (unsigned long) length); +} +EXPORT_SYMBOL(mkia_ftruncate_k); + +/* + * mkia_alloc_task_info() + */ +mkia_task_t * +mkia_alloc_task_info(void) +{ + mkia_task_t *mtip; + + mtip = (mkia_task_t *) kmalloc(sizeof(mkia_task_t), GFP_KERNEL); + if (mtip) + memset(mtip, 0, sizeof(mkia_task_t)); + return mtip; +} + +void +mkia_alloc_tss(void *cookie, int iob_size) +{ + struct desc_struct tssdesc; + unsigned short tbllimit; + int size; + struct tss_struct *ptss_table; + struct tss_struct *oldtss; + mkia_task_t *mtip = mkia_get_task_info((struct task_struct *) cookie); + + MKIA_ASSERT(mtip->mti_merge_tssp == NULL); + if ((iob_size < 0) || + (iob_size > 0x0ffff)) { + printk("mkia_alloc_tss: iob_size of 0x%x ignored\n", iob_size); + return; + } + oldtss = init_tss + smp_processor_id(); + tbllimit = MKI_TSS_LIMIT(iob_size); + size = PAGE_ALIGN(tbllimit + 1); + ptss_table = kmalloc(size,GFP_KERNEL); + MKIA_ASSERT(ptss_table != NULL); + + /* Make sure that the iobitmap is properly initialized */ + memcpy(ptss_table, oldtss, size); + ptss_table->ldt = (unsigned short) MKI_LDT_ENTRY; + ptss_table->io_bitmap_base = IO_BITMAP_OFFSET; + memset(ptss_table->io_bitmap, 0xff, (iob_size >> 3) + 1); + mtip->mti_merge_tssp = ptss_table; + tssdesc.a = (0xffff0000 & ((unsigned long) ptss_table << 16)) | + (0x0000ffff & (tbllimit)); + tssdesc.b = (0xff000000 & (unsigned long) ptss_table) | + (0x000f0000 & tbllimit) | (0x00008900) | + (0x000000ff & ((unsigned long) ptss_table >> 16)); + + if (mtip->mti_merge_gdtp == NULL) { + printk("mki error: gdtp not allocated and " + "mkia_alloc_tss called\n"); + } else { + mkia_internal_set_gdt_entry(MKI_TSS_DESC, + (unsigned long *) &tssdesc, mtip); + } +} + +#if defined(CONFIG_KDB) +#include +int mkia_assert_debugger = 1; +#else +int mkia_assert_debugger = 0; +#endif + +/* + * void + * mkia_enter_debugger(int reason, int error, struct pt_regs *regs) + */ +void 
+mkia_enter_debugger(int reason, int error, void *regs_arg)
+{
+#ifdef CONFIG_KDB
+	struct pt_regs *regs = regs_arg;
+
+	/*
+	 * Enter the debugger that is currently in use, if any.
+	 */
+	switch (reason) {
+	case 1:		/* Called with error and regs values */
+		kdb(KDB_REASON_DEBUG, error, regs);
+		break;
+	default:
+		/*FALLSTHRU*/
+	case 0:		/* Called from an "assert" or some other place that
+			 * does not have an error code or regs associated.
+			 */
+		if (in_interrupt()) {
+			kdb(KDB_REASON_CALL, 0, 0);
+		} else {
+			KDB_ENTER();
+		}
+		break;
+	}
+#else
+	(void) printk("mkia_enter_debugger: no debugger available\n");
+#endif
+}
+EXPORT_SYMBOL(mkia_enter_debugger);
+
+void
+mkia_assert_failure(char *exprstr, char *filename, int linenum)
+{
+	/*
+	 * Note that we make this a separate function so that
+	 * we can put a breakpoint here to trace the problems.
+	 */
+	mkia_preempt_disable();
+	(void) printk(KERN_ERR
+		"MKI Assertion \"%s\" failed: file \"%s\", line %d\n",
+		exprstr, filename, linenum);
+	if (mkia_assert_debugger) {
+		mkia_enter_debugger(0, 0, 0);
+	} else {
+		panic("MKI Assertion \"%s\" failed: file \"%s\", line %d\n",
+			exprstr, filename, linenum);
+	}
+	mkia_preempt_enable_nosched();
+}
+
+mkia_64bit_t
+mkia_virt_to_phys(void *kaddr)
+{
+	unsigned long vaddr;
+	pte_t *pte;
+	pte_t entry;
+
+	if (kaddr < high_memory)
+		return virt_to_phys(kaddr);
+
+	vaddr = (unsigned long) kaddr;
+	pte = pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), vaddr), vaddr);
+	if (pte == NULL)
+		return (mkia_64bit_t) -1;
+
+	entry = *pte;
+	if (! pte_present(entry))
+		return (mkia_64bit_t) -1;
+
+	return (mkia_64bit_t) (pte_val(entry) & PAGE_MASK);
+}
+EXPORT_SYMBOL(mkia_virt_to_phys);
+
+mkia_64bit_t *
+mkia_get_pagedir(void)
+{
+	struct mm_struct *mm;
+
+	mm = current->mm;
+	return (mkia_64bit_t *) mm->pgd;
+}
+EXPORT_SYMBOL(mkia_get_pagedir);
+
+unsigned long
+mkia_mmap_k(unsigned long addr, size_t len, int prot, int flags,
+	int filedes, off_t offset, int *errnop)
+{
+	struct file * file = NULL;
+	int error = -EFAULT;
+
+	if ((prot & PROT_USER) == 0) {
+		printk("mmap_k: Non-USER mapping requested\n");
+	}
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		error = -EBADF;
+		file = fget(filedes);
+		if (!file)
+			goto out;
+	}
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap(file, addr, len, prot, flags, offset);
+	if (file)
+		fput(file);
+	up_write(&current->mm->mmap_sem);
+
+out:
+	if ((error < 0) && (error > -4096)) {
+		*errnop = error;
+		error = -1;
+	} else {
+		*errnop = 0;
+	}
+	return ((unsigned long) error);
+}
+EXPORT_SYMBOL(mkia_mmap_k);
+
+int
+mkia_munmap_k(unsigned long addr, size_t len)
+{
+	return sys_munmap(addr, len);
+}
+EXPORT_SYMBOL(mkia_munmap_k);
+
+unsigned long
+mkia_mprotect_k(unsigned long addr, size_t len, int prot)
+{
+	return sys_mprotect(addr, len, prot);
+}
+EXPORT_SYMBOL(mkia_mprotect_k);
+
+void *
+mkia_get_file_cookie(int filedes)
+{
+	struct file *file;
+
+	file = fget(filedes);
+	return file;
+}
+EXPORT_SYMBOL(mkia_get_file_cookie);
+
+void mkia_put_file_cookie(void *farg)
+{
+	struct file *file;
+
+	if ((file = (struct file *) farg) != NULL)
+		fput(file);
+}
+EXPORT_SYMBOL(mkia_put_file_cookie);
+
+mkia_64bit_t mkia_add_page_ref(unsigned long vaddr)
+{
+	struct vm_area_struct *vma;
+	mkia_64bit_t retval;
+	struct mm_struct *mm;
+	int page_pres;
+	int tries;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep;
+	pte_t entry;
+
+	vaddr &= ~(PAGE_SIZE - 1);
+	mm = current->mm;
+	retval = 0;
+
+	down_write(&mm->mmap_sem);
+	vma = find_vma(mm, vaddr);
+	if
(!vma) { + printk("mkia_add_page_ref: find_vma failed for %lx\n", vaddr); + goto mki_add_page_ref_done; + } + + entry = __pte(0); + page_pres = 0; + ptep = NULL; + for (tries = 0; tries < 2; tries++) { + spin_lock(&mm->page_table_lock); + pgd = pgd_offset(vma->vm_mm, vaddr); + pmd = pmd_alloc(mm, pgd, vaddr); + if (pmd && ((ptep = pte_alloc_map(mm, pmd, vaddr)) != 0)) { + entry = *ptep; + page_pres = (pte_present(entry) && pte_write(entry)); + if (page_pres) { + /* break with the page_table_lock held! */ + break; + } + } + spin_unlock(&mm->page_table_lock); + if (ptep) { + pte_unmap(ptep); + ptep = NULL; + } + make_pages_present(vaddr, vaddr + PAGE_SIZE); + } + if (! page_pres) { + printk("mkia_add_page_ref: couldn't make %lx present\n", vaddr); + goto mki_add_page_ref_done; + } + MKIA_ASSERT(pfn_valid(pte_pfn(entry))); + get_page(pte_page(entry)); + spin_unlock(&mm->page_table_lock); + if (ptep) { + pte_unmap(ptep); + ptep = NULL; + } + atomic_inc(&mkia_context_rss); + retval = pte_val(entry); + +mki_add_page_ref_done: + up_write(&mm->mmap_sem); + return retval; +} +EXPORT_SYMBOL(mkia_add_page_ref); + +int mkia_remove_page_ref(unsigned long vaddr, mkia_64bit_t physpte, + void *file_cookie, off_t offset) +{ + struct page *pageinfop; + unsigned long physaddr; + struct file *file; + int retcode; + pte_t pte; + + retcode = 0; + physaddr = physpte & PAGE_MASK; + offset &= PAGE_MASK; + file = (struct file *) file_cookie; + MKIA_ASSERT(file != NULL); + if (file == NULL) { + /* Just in case: handle it gracefully */ + goto rem_page_ref_done; + } + + pte = *((pte_t *)&physpte); + if (!pfn_valid(pte_pfn(pte))) + goto rem_page_ref_done; + + pageinfop = pte_page(pte); + if (PageReserved(pageinfop)) { + mkia_cnt_rpr_pagereserved++; + goto rem_page_ref_free; + } + + MKIA_ASSERT(pageinfop->mapping->host != NULL); + MKIA_ASSERT(file->f_dentry->d_inode == pageinfop->mapping->host); + MKIA_ASSERT(pageinfop->index == (unsigned long)(offset >> PAGE_SHIFT)); + + if ((physpte & (_PAGE_DIRTY | _PAGE_ACCESSED)) == 0) { + /* + * If this is a clean mapping, i.e. no mod or acc bits, + * then we can just decrement the page use counts and + * be done with it! + */ + mkia_cnt_rpr_not_dirty_acc++; + } + if (physpte & _PAGE_ACCESSED) { + mkia_cnt_rpr_accessed++; + flush_tlb_page(find_vma(current->mm,vaddr), vaddr); + } + if (physpte & _PAGE_DIRTY) { + mkia_cnt_rpr_dirty++; + set_page_dirty(pageinfop); + } + +rem_page_ref_free: + MKIA_ASSERT(page_count(pageinfop) >= 2); + put_page(pageinfop); + atomic_dec(&mkia_context_rss); + MKIA_ASSERT(atomic_read(&mkia_context_rss) >= 0); + +rem_page_ref_done: + return retcode; +} +EXPORT_SYMBOL(mkia_remove_page_ref); + +/* + * int + * mki_uaddr_mapped(unsigned long address) + * + * returns: + * 0 if the address is not mapped + * 1 if the address is mapped + */ +int +mkia_uaddr_mapped(unsigned long address) +{ + struct mm_struct *mm; + struct vm_area_struct * vma; + + mm = current->mm; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + up_read(&mm->mmap_sem); + + return (vma != NULL); +} + +/* + * int + * mki_upageflt(unsigned long address, int error_code) + * + * returns: + * 0 if the pagefault could not be resolved + * 1 if the pagefault could be resolved + */ +int +mkia_upageflt(unsigned long address, int error_code) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct * vma; + int write; + + tsk = current; + mm = tsk->mm; + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
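	 * (down_read(&mm->mmap_sem) and handle_mm_fault() below may
	 * sleep, which is illegal in atomic context, and kernel threads
	 * have no mm to resolve the fault against.)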
+	 */
+	if (in_atomic() || !mm)
+		goto return_fail;
+
+	down_read(&mm->mmap_sem);
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto up_and_fail;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto up_and_fail;
+
+	/*
+	 * The do_page_fault() code handles stack growth here but
+	 * we don't do this because we don't have a standard process
+	 * whose stack we can grow.  The emulator has a fixed sized
+	 * stack and it's not our job to grow the Windows stacks.
+	 */
+
+good_area:
+	write = 0;
+	switch (error_code & 3) {
+	default:	/* 3: write, present */
+		/* fall through */
+	case 2:		/* write, not present */
+		if (!(vma->vm_flags & VM_WRITE))
+			goto up_and_fail;
+		write++;
+		break;
+	case 1:		/* read, present */
+		goto up_and_fail;
+	case 0:		/* read, not present */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto up_and_fail;
+	}
+
+	switch (handle_mm_fault(mm, vma, address, write)) {
+	case VM_FAULT_MINOR:
+		tsk->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		tsk->maj_flt++;
+		break;
+	default:
+		goto up_and_fail;
+	}
+
+	up_read(&mm->mmap_sem);
+	return 1;
+
+up_and_fail:
+	up_read(&mm->mmap_sem);
+	return 0;
+
+return_fail:
+	return 0;
+}
+EXPORT_SYMBOL(mkia_upageflt);
+
+void
+mkia_adjust_esp0(int numlongs)
+{
+	struct tss_struct *tss;
+	struct task_struct *curr=current;
+	mkia_task_t *mtip;
+
+	tss = init_tss + get_cpu();
+	tss->esp0 -= (numlongs * sizeof(unsigned long));
+	curr->thread.esp0 = tss->esp0;
+	put_cpu();
+
+	set_thread_flag(TIF_MKI_RETUSER);
+
+	mtip = mkia_get_task_info(curr);
+
+	/* This should always be the path where we do the
+	 * mkia_alloc_descriptors()
+	 */
+	if (likely(!(mtip->mti_flags & MKIF_DESC_ALLOCATED))) {
+		mkia_alloc_descriptors(curr);
+	}
+}
+EXPORT_SYMBOL(mkia_adjust_esp0);
+
+int
+mkia_get_current_task_index(void)
+{
+	return current->pid;
+}
+
+/*
+ * void mkia_wake_up()
+ *
+ * Synchronization variable wakeup.
+ */
+void
+mkia_wake_up(void *wqp)
+{
+	wake_up((wait_queue_head_t *) wqp);
+}
+EXPORT_SYMBOL(mkia_wake_up);
+
+/*
+ * int mkia_call_svwait()
+ *
+ * Synchronization variable wait.
+ *
+ * This must be implemented on the mki side of the world because
+ * write_lock_irqsave() compiles differently if __SMP__ is defined.
+ * The trick here (and why we can't just use sleep_on() or
+ * interruptible_sleep_on()) is that we need to give up the lock
+ * AFTER we have set the task state and added ourselves to the wait
+ * queue.
+ *
+ * This function is called indirectly (its address is obtained via
+ * mkia_getparm()).  This is so that a version 2 win4lin can still run
+ * with a version 1 mki, even though version 1 mki does not support
+ * a mkia_sv_wait() function.
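+ *
+ * Illustrative caller sketch only (not part of the interface itself;
+ * "svlock" and "condition" are hypothetical):
+ *
+ *	mkia_preempt_disable();
+ *	(acquire svlock)
+ *	while (!condition) {
+ *		mkia_call_svwait(wq, &svlock, 1);  (drops svlock/preemption)
+ *		mkia_preempt_disable();
+ *		(re-acquire svlock)
+ *	}
+ *	(release svlock)
+ *	mkia_preempt_enable_nosched();
+ *
+ * Because the task state is set and the task is queued before *lockp
+ * is cleared, a mkia_wake_up() issued under svlock cannot be lost.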
+ */
+int
+mkia_call_svwait(void *wq, volatile int *lockp, int interruptible)
+{
+	struct task_struct *taskp;
+	wait_queue_t wait;
+	unsigned long wqflags;
+	int retval;
+
+#ifdef CONFIG_PREEMPT
+	MKIA_ASSERT(mkia_get_preempt_count() != 0);
+#endif
+	taskp = current;
+	init_waitqueue_entry(&wait, taskp);
+	if (interruptible) {
+		taskp->state = TASK_INTERRUPTIBLE;
+	} else {
+		taskp->state = TASK_UNINTERRUPTIBLE;
+	}
+
+	/* SLEEP_ON_HEAD start */
+	spin_lock_irqsave(&((wait_queue_head_t *)wq)->lock, wqflags);
+	__add_wait_queue((wait_queue_head_t *)wq, &wait);
+	spin_unlock(&((wait_queue_head_t *) wq)->lock);
+	/* SLEEP_ON_HEAD end */
+
+	*lockp = 0;		/* Unlock */
+	mkia_preempt_enable_nosched();
+	schedule();
+
+	/* SLEEP_ON_TAIL start */
+	spin_lock_irq(&((wait_queue_head_t *) wq)->lock);
+	__remove_wait_queue((wait_queue_head_t *) wq, &wait);
+	spin_unlock_irqrestore(&((wait_queue_head_t *) wq)->lock, wqflags);
+	/* SLEEP_ON_TAIL end */
+
+	taskp->state = TASK_RUNNING;
+
+	while (taskp->flags & PF_FREEZE) {
+#ifdef PF_IOTHREAD
+		/* Call with PF_IOTHREAD to flush signals */
+		refrigerator(PF_IOTHREAD);
+#else
+		/* Call with PF_NOFREEZE to flush signals */
+		refrigerator(PF_NOFREEZE);
+#endif /* PF_IOTHREAD */
+	}
+
+	if (interruptible) {
+		retval = (signal_pending(taskp)) ? 1 : 0;
+	} else {
+		retval = 0;
+	}
+	return retval;
+}
+EXPORT_SYMBOL(mkia_call_svwait);
+
+int
+mkia_install_hook(int id, int (*hook_fn)(void *))
+{
+	if ((id >= 0) && (id < NUM_HOOKS)) {
+		mhia_table[id] = hook_fn;
+		if (id == SWITCH_TO) {
+			/*
+			 * If we are installing the SWITCH_TO hook, then
+			 * merge is loading so make sure we have a
+			 * private IDT
+			 */
+			if (! mkia_alloc_idt())
+				return -1;
+		}
+		return 0;
+	}
+	return -1;
+}
+EXPORT_SYMBOL(mkia_install_hook);
+
+void
+mkia_remove_hook(int id)
+{
+	/*
+	 * For now all the dummy hooks return the same value.
+	 * If we ever add hooks where mhia_void() is not appropriate
+	 * we need to change the code below to a switch() {} statement
+	 */
+	MKIA_ASSERT(mhia_table != NULL);
+	if ((id >= 0) && (id < NUM_HOOKS)) {
+		mhia_table[id] = mhia_void;
+		if (id == SWITCH_TO) {
+			/*
+			 * If we are removing the SWITCH_TO hook, then
+			 * merge is unloading so clean up the IDT as well.
+			 */
+			mkia_cleanup_idt();
+		}
+	}
+}
+EXPORT_SYMBOL(mkia_remove_hook);
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/mki-adapter26.h mki-adapter26-new/arch/i386/mki-adapter26/mki-adapter26.h
--- mki-adapter26-old/arch/i386/mki-adapter26/mki-adapter26.h	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/mki-adapter26.h	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,168 @@
+/*
+ ****************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki-adapter26.h,v 1.2 2003/11/01 00:40:45 rwb Exp $
+ ***************************************************************************
+ */
+#ifndef MKI_ADAPTER_H
+#define MKI_ADAPTER_H
+
+#ifndef PROT_USER
+#define PROT_USER	0x8	/* page is accessible from user mode */
+#endif
+
+/**************/
+/* structures */
+/**************/
+struct mergevma {
+	unsigned long	offset;
+	unsigned long	vm_start;
+	unsigned long	vm_end;
+	unsigned long	nbytes;
+	unsigned long	npages;
+	unsigned long	page_size;
+	/* ...
*/ +}; + +struct pt_regs; + +/**************/ +/* functions */ +/**************/ +extern int mkia_process_owns_fpu(void *vm86p); + +extern void * mkia_kmalloc(int size, int flags); +extern void * mkia_get_free_pages(int foo, int flags); +/* values for flags */ +#define MKIA_SLEEP 0x0001 +#define MKIA_NOSLEEP 0x0002 +#define MKIA_DMA 0x0004 +#define MKIA_ZERO 0x0100 + +extern void mkia_kfree(void *pointer); +extern void mkia_free_pages(unsigned long addr, int size); + +extern void * mkia_vmalloc(int size); +extern void mkia_vfree(void *pointer); + +extern int mkia_strncmp(char *s1, char *s2, int len); +extern int mkia_strcmp(char *s1, char *s2); +extern void * mkia_strncpy(char *s1, char *s2, int len); +extern void * mkia_strcpy(char *s1, char *s2); +extern void mkia_memset(void *mem, int val, int len); +extern void mkia_memcpy(void *mem, void *mem2, int len); + + +extern int mkia_register_chrdev(int maj, char * name, void *fops); +extern void mkia_unregister_chrdev(int major, char * name); + +extern int mkia_get_fops_size(void); +extern void * + mkia_alloc_file_ops(void *read, void *write, void *readdir, + void *poll, void *ioctl, void *mmap, void *open, + void *release); +extern void mkia_free_file_ops(void *fops); + +extern void * mkia_mrgioctl_unlock_kernel(void); +extern void mkia_mrgioctl_lock_kernel(void *cookie); +extern int mkia_remap_page_range(unsigned long user_addr, + unsigned long phys_addr, + unsigned long size, + void *vma); +extern unsigned long mkia_get_vma_offset(void *vmap); +extern unsigned long mkia_get_vma_page_prot(void *vmap); +extern unsigned long mkia_get_vma_vm_start(void *vmap); +extern unsigned long mkia_get_vma_vm_end(void *vmap); +extern int mkia_get_inode_minor(void *inode); +extern int mkia_get_file_minor(void *file); + +extern int mkia_install_hook(int id, int (*hook_fn)(void *)); +extern void * mkia_alloc_waitqueuep(void); +extern void mkia_free_waitqueuep(void *wqp); +extern void * mkia_current(void); +extern int mkia_signal_pending(void *foo); +extern void * mkia_get_current_task(void); +extern int mkia_get_current_pid(void); +extern int mkia_get_current_task_index(void); +extern int mkia_call_svwait(void *wqp, volatile int *lockp, + int interruptible); +extern void mkia_wake_up(void *wqp); +extern void mkia_poll_wait(void *file, void *wqp, + void *wait); +extern void mkia_poll_wake(void *wqp); +extern void mkia_adjust_esp0(int numlongs); +extern void mkia_mod_inc_use_count(void * module); +extern void mkia_mod_dec_use_count(void * module); +extern int mkia_request_irq(int irq, + void (*handler)(int, void *, struct pt_regs *), + unsigned long foo, char *bar, void *baz); +extern void mkia_free_irq(int irq, void *baz); +extern void mkia_kill_proc(int procref, int sig, int flag); +extern void mkia_file_set_private_data(void * filp, void *data); +extern void * mkia_file_get_private_data(void * filp); +extern void * mkia_file_get_f_pos_addr(void * filp); +extern long mkia_file_get_f_mode(void * filp); +extern int mkia_is_file_nonblock(void *file); +extern int mkia_copy_to_user(void *dest, void *src, int len); +extern int mkia_copy_from_user(void *dest, void *src, int len); +extern int mkia_get_user(void *data, int len); +extern int mkia_put_user(unsigned long flags, void *data, int len); + +extern int mkia_init_vnetint(void); +extern void mkia_cleanup_vnetint(void); +extern int mkia_populate_mrgvma(struct mergevma *mrgvma, void * vmap); + +extern int timeout(void (*timeout_func)(void *), + void *timeout_arg, long ticks); +extern void untimeout(int id); + 
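+/*
+ * Illustrative use of the unix-style timer interface above (a sketch
+ * only; my_callback and my_arg are hypothetical):
+ *
+ *	int id = timeout(my_callback, my_arg, HZ);	(fire in ~1 second)
+ *	...
+ *	untimeout(id);	(safe no-op if id is 0, i.e. the setup failed)
+ */
+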
+extern void mkia_post_event(void *cookie);
+extern int mkia_set_gdt_entry(unsigned short sel, unsigned long *new_entry);
+extern int mkia_set_ldt_entry(unsigned short sel, unsigned long *new_entry);
+extern int mkia_check_vm86(void);
+extern void * mkia_get_vm86p(void);
+extern void mkia_set_vm86p(void *vm86p);
+extern void mkia_mark_vm86(void);
+extern void mkia_clear_vm86(void);
+extern void mkia_set_idt_entry(unsigned short vect_num,
+			unsigned long *new_entry, unsigned long *prev_entry);
+extern void mkia_set_idt_dpl(void);
+extern void mkia_enter_debugger(int reason, int error, void *regs);
+extern void * mkia_getparm(int request, void *parm);
+extern int mkia_set_private_ldt(void *ldtp, size_t limit);
+extern void mkia_pgfault_get_state(int *pfault_ok, void *fcstate);
+extern void mkia_pgfault_restore_state(void *fcstate);
+extern void mkia_yield(void);
+extern void mkia_remove_hook(int id);
+
+extern mkia_64bit_t mkia_virt_to_phys(void *kaddr);
+extern mkia_64bit_t * mkia_get_pagedir(void);
+extern int mkia_remove_page_ref(unsigned long vaddr, mkia_64bit_t physpte,
+			void *file_cookie, off_t offset);
+extern mkia_64bit_t mkia_add_page_ref(unsigned long vaddr);
+
+extern void * mkia_get_file_cookie(int filedes);
+extern void mkia_put_file_cookie(void *farg);
+extern int mkia_upageflt(unsigned long address, int error_code);
+extern void * mkia_alloc_priv_tss(void);
+extern unsigned long mkia_mmap_k(unsigned long, size_t,
+			int, int, int, off_t, int *);
+extern unsigned long mkia_mprotect_k(unsigned long, size_t, int);
+extern int mkia_munmap_k(unsigned long, size_t);
+extern int mkia_ftruncate_k(int, off_t);
+extern void mhia_switch(void *, void *);
+extern void mhia_switch_to(void *, void *);
+extern void mhia_switch_away(void *, void *);
+extern void mhia_ret_user(void *, void *);
+extern void mhia_exit(void *, void *);
+extern void mhia_swap(void *, void *);
+
+extern int printk(const char * fmt, ...);
+extern void panic(const char * fmt, ...);
+
+#endif /* MKI_ADAPTER_H */
+
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/mkifunc.h mki-adapter26-new/arch/i386/mki-adapter26/mkifunc.h
--- mki-adapter26-old/arch/i386/mki-adapter26/mkifunc.h	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/mkifunc.h	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,131 @@
+/*
+ ****************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mkifunc.h,v 1.2 2003/11/01 00:40:45 rwb Exp $
+ ***************************************************************************
+ * Function declarations and constants of the Linux specific Merge/Kernel
+ * Interface (mki).
+ */
+
+#ifndef MKIFUNC_H
+#define MKIFUNC_H
+
+/* take care of some types if not defined. */
+#ifndef _LINUX_TYPES_H
+#include <linux/types.h>
+#endif
+
+typedef unsigned long long mkia_64bit_t;
+
+#define MKI_END_USER_ADDR	0xC0000000
+
+/* mki_getparm() parameter identifiers */
+#define PARM_POST_COOKIE	0	/* pointer to current LWP (lwp_t) */
+#define PARM_FRAME_BASE		1	/* stack frame base */
+#define PARM_CPU_TYPE		2	/* pointer to CPU type */
+#define PARM_PRIVATE		3	/* pointer to the location of a scratch
+					   memory pointer */
+#define PARM_GDTP		4	/* pointer to current GDT for this LWP */
+#define PARM_LDTP		5	/* pointer to current LDT for this LWP */
+#define PARM_IDTP		6	/* pointer to current IDT for this LWP */
+#define PARM_TSSP		7	/* pointer to current TSS for this LWP */
+#define PARM_RUID		8	/* real UID for this process */
+#define PARM_TASK_MASK		9	/* Old V1 request to get Task Mask */
+/* ===== End of MKI Version 1 supported calls ====== */
+
+#define PARM_TASK_MASK_V2	10	/* New V2 request to get the Task Mask, i.e. */
+					/* the value for esp to get to task struct */
+#define PARM_MKI_VERSION	11	/* MKI version number */
+#define PARM_NUM_CPUS		12	/* Number of CPUs present */
+#define PARM_MAX_NUMPROCS	13	/* Maximum number of tasks */
+#define PARM_CURPROC_INDEX	14	/* Index of current task */
+#define PARM_SVWAIT_FUNC	15	/* Address of _mki_sv_wait() routine */
+#define PARM_SVWAKEUP_FUNC	16	/* Address of _mki_sv_wakeup() routine */
+#define PARM_POLLWAKE_FUNC	17	/* Address of _mki_poll_wake() routine */
+#define PARM_PREEMPT_ENABLE	18	/* preemption enable function */
+#define PARM_PREEMPT_DISABLE	19	/* preemption disable function */
+#define PARM_PREEMPT_COUNT	20	/* preemption count function */
+#define PARM_HZ			21	/* get HZ value */
+
+/*
+ * merge function offset into the hook functions table.
+ */
+#define SWITCH_AWAY	0
+#define SWITCH_TO	1
+#define THREAD_EXIT	2
+#define RET_USER	3
+#define SIGNAL		4
+#define QUERY		5
+#define SWAP_PAGES	6
+#define NUM_HOOKS	7
+
+/*
+ * Index values for the os dependent portion of mki_fault_catch_t
+ */
+#define MKI_FC_SIZE	6
+
+#define FC_FAULT_EIP	0
+#define FC_SAVE_FS	1
+
+typedef struct {
+	int mkifc_catching_user_fault;		/* Boolean */
+	int mkifc_os_dependent[MKI_FC_SIZE];	/* OS dependent state */
+} mki_fault_catch_t;
+
+/* MKI version of vaddr_t */
+typedef unsigned long mkiul_t;
+
+/* MCV: MKI Caller Version */
+#define MCV_MAGIC		(0xfabc0000)
+#define MCV_MAGIC_MASK		(0xffff0000)
+#define MCV_VERSION_MASK	(0x0000ffff)
+#define MCV_MAKE_VER(ver)	(((ver) & MCV_VERSION_MASK) | MCV_MAGIC)
+
+/*
+ * mki_enable_context_cookies data type
+ * supports fast merge context switch
+ * see platinum MKI interface doc for details.
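+ * The caller describes the context region with mkiecc_addr/mkiecc_len
+ * and receives an opaque handle back in mkiecc_cookie.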
+ */
+typedef struct mki_ecc {
+	mkiul_t	mkiecc_addr;	/* context base addr */
+	size_t	mkiecc_len;	/* context size */
+	void *	mkiecc_cookie;	/* outarg */
+} mki_ecc_t;
+
+#define MKI_TSS_ENTRY	(GDT_ENTRY_KERNEL_BASE - 2)
+#define MKI_TSS_DESC	(MKI_TSS_ENTRY << 3)
+#define MKI_LDT_ENTRY	(GDT_ENTRY_KERNEL_BASE - 1)
+#define MKI_LDT_DESC	(MKI_LDT_ENTRY << 3)
+#define MKI_CLEAR_GDT_AMOUNT	MKI_TSS_DESC
+
+/* MKI info structure */
+struct mki_task_info {
+	void	*mti_vm86p;
+	void	*mti_merge_gdtp;
+	void	*mti_save_ldtp;
+	int	mti_save_ldt_size;
+	void	*mti_merge_ldtp;
+	void	*mti_merge_tssp;
+	void	*mti_current_ldtp;
+	size_t	mti_current_ldt_limit;
+	size_t	mti_current_tss_limit;
+	unsigned char	mti_flags;
+	unsigned char	mti_event_pending;
+	int	mti_fs;
+	int	mti_gs;
+};
+
+typedef struct mki_task_info mkia_task_t;
+
+/* defines for the mki_task_flags field */
+#define MKIF_MARKED			0x01
+#define MKIF_SETLDT_DONE		0x02
+#define MKIF_DESC_ALLOCATED		0x04
+#define MKIF_GDT_SELECTOR_ADDED		0x08
+#define MKIF_TASK_CLEARED		0x10
+#define MKIF_IN_SWITCH			0x20
+
+#endif /* MKIFUNC_H */
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/mki-main.c mki-adapter26-new/arch/i386/mki-adapter26/mki-main.c
--- mki-adapter26-old/arch/i386/mki-adapter26/mki-main.c	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/mki-main.c	2004-08-23 16:41:19.000000000 -0400
@@ -0,0 +1,717 @@
+/*
+ ****************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki-main.c,v 1.10 2004/08/23 20:41:19 lreiter Exp $
+ ***************************************************************************
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "mkifunc.h"
+#include "mki-adapter26.h"
+
+static const char mkia_version[] = "1.3.7";
+
+#ifndef MODULE
+#error "mki-adapter must be built as a module"
+#endif
+
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NeTraverse");
+MODULE_DESCRIPTION("NeTraverse MKI Adapter");
+#endif
+
+int
+mkia_process_owns_fpu(void *vm86p)
+{
+	struct thread_info *thread = current_thread_info();
+	return ((thread->status & TS_USEDFPU) != 0);
+}
+EXPORT_SYMBOL(mkia_process_owns_fpu);
+
+int
+mkia_register_chrdev(int maj,
+		char *name,
+		void *fops)
+{
+	return register_chrdev(maj, name, (struct file_operations *) fops);
+}
+EXPORT_SYMBOL(mkia_register_chrdev);
+
+void
+mkia_unregister_chrdev(int major, char * name)
+{
+	unregister_chrdev(major, name);
+}
+EXPORT_SYMBOL(mkia_unregister_chrdev);
+
+/*
+ * populate a mergevma struct from a vmarea
+ */
+int
+mkia_populate_mrgvma(struct mergevma *mrgvma, void * vmap)
+{
+	mrgvma->offset =
+		(((struct vm_area_struct *) vmap)->vm_pgoff << PAGE_SHIFT);
+
+	mrgvma->vm_start = ((struct vm_area_struct *) vmap)->vm_start;
+	mrgvma->vm_end = ((struct vm_area_struct *) vmap)->vm_end;
+	mrgvma->nbytes =
+		(((mrgvma->vm_end - mrgvma->vm_start) +
+			PAGE_SIZE - 1) & PAGE_MASK);
+	mrgvma->npages = mrgvma->nbytes >> PAGE_SHIFT;
+	mrgvma->page_size = PAGE_SIZE;
+	return 0;
+}
+EXPORT_SYMBOL(mkia_populate_mrgvma);
+
+int
+mkia_remap_page_range(unsigned long user_addr,
+		unsigned long phys_addr,
+		unsigned long size,
+		void *vma)
+{
+	return remap_page_range(
+		(struct vm_area_struct *) vma,
+		user_addr,
+		phys_addr,
+		size,
+		((struct vm_area_struct *) vma)->vm_page_prot);
+}
+EXPORT_SYMBOL(mkia_remap_page_range);
+
+void *
+mkia_alloc_file_ops(void *read,
+		void *write,
+		void *readdir,
+		void *poll,
+		void *ioctl,
+		void *mmap,
+		void *open,
+		void *release)
+{
+	struct file_operations *ret=kmalloc(sizeof(struct file_operations),
+						GFP_ATOMIC);
+	if (!ret)
+		return ret;
+
+	memset(ret, 0, sizeof(struct file_operations));
+
+	ret->read = read;
+	ret->write = write;
+	ret->readdir = readdir;
+	ret->poll = poll;
+	ret->ioctl = ioctl;
+	ret->mmap = mmap;
+	ret->open = open;
+	ret->release = release;
+	return ret;
+}
+EXPORT_SYMBOL(mkia_alloc_file_ops);
+
+void
+mkia_free_file_ops(void * fops)
+{
+	kfree(fops);
+}
+EXPORT_SYMBOL(mkia_free_file_ops);
+
+int
+mkia_get_inode_minor(void *inode)
+{
+	return MINOR(((struct inode *) inode)->i_rdev);
+}
+EXPORT_SYMBOL(mkia_get_inode_minor);
+
+int
+mkia_get_file_minor(void *file)
+{
+	return MINOR(((struct file *)file)->f_dentry->d_inode->i_rdev);
+}
+EXPORT_SYMBOL(mkia_get_file_minor);
+
+void *
+mkia_get_free_pages(int pages, int flags)
+{
+	int fpflags=0;
+	unsigned long ret;
+	if (flags & MKIA_NOSLEEP) fpflags |= GFP_ATOMIC;
+	if (flags & MKIA_SLEEP) fpflags |= GFP_KERNEL;
+	if (flags & MKIA_DMA) fpflags |= GFP_DMA;
+	/* "pages" is a power-of-two order, as with __get_free_pages() */
+	ret = __get_free_pages(fpflags, pages);
+	if (ret && (flags & MKIA_ZERO))
+		memset((void *) ret, 0, PAGE_SIZE << pages);
+	return (void *) ret;
+}
+EXPORT_SYMBOL(mkia_get_free_pages);
+
+void
+mkia_free_pages(unsigned long addr, int size)
+{
+	free_pages(addr, size);
+}
+EXPORT_SYMBOL(mkia_free_pages);
+
+#ifdef CONFIG_PREEMPT
+/*
+ * This is set to 0 on the first call to mkia_getparm if the drivers
+ * know about preempt
+ */
+int mkia_disable_kmalloc = 1;
+#else
+int mkia_disable_kmalloc = 0;
+#endif
+
+void *
+mkia_kmalloc(int size, int flags)
+{
+	int fpflags;
+	unsigned long ret;
+
+	fpflags=0;
+	if (mkia_disable_kmalloc) {
+		printk("mki-adapter: trying to run a non-preemption "
+			"capable Win4Lin on a preemptible kernel\n");
+		return NULL;
+	}
+
+	if (flags & MKIA_NOSLEEP) fpflags |= GFP_ATOMIC;
+	if (flags & MKIA_SLEEP) fpflags |= GFP_KERNEL;
+	if (flags & MKIA_DMA) fpflags |= GFP_DMA;
+	ret = (unsigned long) kmalloc(size, fpflags);
+	if (ret && (flags & MKIA_ZERO))
+		memset((void *) ret, 0, size);
+	return (void *) ret;
+}
+EXPORT_SYMBOL(mkia_kmalloc);
+
+int
+mkia_signal_pending(void *foo)
+{
+	return signal_pending((struct task_struct *) foo);
+}
+
+void *
+mkia_current()
+{
+	return current;
+}
+
+void
+mkia_mod_inc_use_count(void * module)
+{
+	(void) try_module_get(module);
+}
+EXPORT_SYMBOL(mkia_mod_inc_use_count);
+
+void
+mkia_mod_dec_use_count(void * module)
+{
+	(void) module_put(module);
+}
+EXPORT_SYMBOL(mkia_mod_dec_use_count);
+
+int
+mkia_get_current_pid()
+{
+	return current->pid;
+}
+EXPORT_SYMBOL(mkia_get_current_pid);
+
+void *
+mkia_get_current_task()
+{
+	return current;
+}
+EXPORT_SYMBOL(mkia_get_current_task);
+
+void
+mkia_kill_proc(int procref, int sig, int foo)
+{
+	kill_proc(procref, sig, foo);
+}
+EXPORT_SYMBOL(mkia_kill_proc);
+
+void
+mkia_file_set_private_data(void * filp, void *data)
+{
+	((struct file *) filp)->private_data = data;
+}
+EXPORT_SYMBOL(mkia_file_set_private_data);
+
+void *
+mkia_file_get_private_data(void * filp)
+{
+	return ((struct file *) filp)->private_data;
+}
+EXPORT_SYMBOL(mkia_file_get_private_data);
+
+void *
+mkia_file_get_f_pos_addr(void * filp)
+{
+	return &(((struct
file *) filp)->f_pos); +} +EXPORT_SYMBOL(mkia_file_get_f_pos_addr); + +long +mkia_file_get_f_mode(void * filp) +{ + return ((struct file *) filp)->f_mode; +} +EXPORT_SYMBOL(mkia_file_get_f_mode); + +int +mkia_is_file_nonblock(void *file) +{ + return ((((struct file *)file)->f_flags & O_NONBLOCK) != 0); +} +EXPORT_SYMBOL(mkia_is_file_nonblock); + +int +mkia_copy_to_user(void *dest, void *src, int len) +{ + return copy_to_user(dest, src, len); +} +EXPORT_SYMBOL(mkia_copy_to_user); + +int +mkia_copy_from_user(void *dest, void *src, int len) +{ + return copy_from_user(dest, src, len); +} +EXPORT_SYMBOL(mkia_copy_from_user); + +int +mkia_request_irq(int irq, + void (*handler)(int, void *, struct pt_regs *), + unsigned long foo, char *bar, void *baz) +{ + return request_irq(irq, (void *)handler, foo, bar, baz); +} +EXPORT_SYMBOL(mkia_request_irq); + +void +mkia_free_irq(int irq, void *baz) +{ + free_irq(irq, baz); +} +EXPORT_SYMBOL(mkia_free_irq); + +void +mkia_kfree(void *pointer) +{ + kfree(pointer); +} +EXPORT_SYMBOL(mkia_kfree); + +void * +mkia_vmalloc(int size) +{ + return vmalloc(size); +} +EXPORT_SYMBOL(mkia_vmalloc); + +void +mkia_vfree(void *pointer) +{ + vfree(pointer); +} +EXPORT_SYMBOL(mkia_vfree); + + +int +mkia_get_user(void * ptr, int len) +{ + if (len == sizeof(unsigned char)) { + unsigned char flags; + if (get_user(flags, (unsigned char *) ptr)) + return -1; + else + return flags; + } + if (len == sizeof(unsigned short)) { + unsigned short flags; + if (get_user(flags, (unsigned short *) ptr)) + return -1; + else + return flags; + } + if (len == sizeof(unsigned long)) { + unsigned long flags; + if (get_user(flags, (unsigned long *) ptr)) + return -1; + else + return flags; + } + return 0; +} +EXPORT_SYMBOL(mkia_get_user); + +int +mkia_put_user(unsigned long flags, void * ptr, int len) +{ + if (len == 4) return put_user((unsigned long) flags, + (unsigned long *) ptr); + if (len == 2) return put_user((unsigned short) flags, + (unsigned short *) ptr); + if (len == 1) return put_user((unsigned char) flags, + (unsigned char *) ptr); + return 0; +} +EXPORT_SYMBOL(mkia_put_user); + +int +mkia_strncmp(char *s1, char *s2, int len) +{ + return strncmp(s1,s2,len); +} + +int +mkia_strcmp(char *s1, char *s2) +{ + return strcmp(s1,s2); +} + +void * +mkia_strncpy(char *s1, char *s2, int len) +{ + return strncpy(s1,s2,len); +} +EXPORT_SYMBOL(mkia_strncpy); + +void * +mkia_strcpy(char *s1, char *s2) +{ + return strcpy(s1,s2); +} + +void +mkia_memset(void *mem, int val, int len) +{ + memset(mem, val, len); +} +EXPORT_SYMBOL(mkia_memset); + +void +mkia_memcpy(void *mem, void *mem2, int len) +{ + memcpy(mem, mem2, len); +} +EXPORT_SYMBOL(mkia_memcpy); + +void * +mkia_alloc_waitqueuep(void) +{ + void *wq = NULL; + + wq = kmalloc(sizeof(wait_queue_head_t), GFP_ATOMIC); + if (!wq) + return NULL; + memset(wq, 0, sizeof(wait_queue_head_t)); + init_waitqueue_head((wait_queue_head_t *) wq); + return wq; +} +EXPORT_SYMBOL(mkia_alloc_waitqueuep); + +void +mkia_free_waitqueuep(void *waitqp) +{ + kfree(waitqp); +} +EXPORT_SYMBOL(mkia_free_waitqueuep); + +rwlock_t _mkia_waitqueue_lock = RW_LOCK_UNLOCKED; + +void +mkia_sleep_on(void *wqp) +{ + sleep_on((wait_queue_head_t *) wqp); +} + +void +mkia_interruptible_sleep_on(void *wqp) +{ + interruptible_sleep_on((wait_queue_head_t *) wqp); +} + + +void +mkia_poll_wait(void *file, void *wqp, void *wait) +{ + poll_wait((struct file *) file, (wait_queue_head_t *) wqp, + (poll_table *) wait); +} +EXPORT_SYMBOL(mkia_poll_wait); + +void +mkia_poll_wake(void *wqp) +{ + 
wake_up_interruptible((wait_queue_head_t *) wqp);
+}
+EXPORT_SYMBOL(mkia_poll_wake);
+
+/*
+ * Routine to unlock the kernel in mrgioctl for some kernel versions since
+ * we use finer locking than Linux does and we need the kernel lock to
+ * be free.
+ */
+void *
+mkia_mrgioctl_unlock_kernel(void)
+{
+#ifdef CONFIG_SMP
+	if (current->lock_depth > 0) {
+		unlock_kernel();
+		return (void *) 1;
+	}
+#endif
+	return NULL;
+}
+EXPORT_SYMBOL(mkia_mrgioctl_unlock_kernel);
+
+/*
+ * Routine to re-lock the kernel in mrgioctl for 2.4 after the ioctl so
+ * that Linux is happy
+ */
+void
+mkia_mrgioctl_lock_kernel(void * cookie)
+{
+#ifdef CONFIG_SMP
+	if (cookie)
+		lock_kernel();
+#endif
+}
+EXPORT_SYMBOL(mkia_mrgioctl_lock_kernel);
+
+void
+install_mhi_hooks(void)
+{
+	mhi_hooks[MKI_HOOK_RET_USER] = &mhia_ret_user;
+	mhi_hooks[MKI_HOOK_SWITCH_TO] = &mhia_switch_to;
+	mhi_hooks[MKI_HOOK_SWITCH_AWAY] = &mhia_switch_away;
+	mhi_hooks[MKI_HOOK_EXIT] = &mhia_exit;
+	mhi_hooks[MKI_HOOK_SWAP] = &mhia_swap;
+}
+
+void
+remove_mhi_hooks(void)
+{
+	mhi_hooks[MKI_HOOK_RET_USER] = &mhi_void_hook;
+	mhi_hooks[MKI_HOOK_SWITCH_TO] = &mhi_void_hook;
+	mhi_hooks[MKI_HOOK_SWITCH_AWAY] = &mhi_void_hook;
+	mhi_hooks[MKI_HOOK_EXIT] = &mhi_void_hook;
+	mhi_hooks[MKI_HOOK_SWAP] = &mhi_void_hook;
+}
+
+#ifdef CONFIG_PROC_FS
+
+char *mki_adapter_name = "mki-adapter";
+
+struct mkia_proc_string_info {
+	char	mpsi_name[32];
+	struct proc_dir_entry *mpsi_proc_entry;
+	char	mpsi_string[120];
+	int	mpsi_strlen;
+} mkia_proc[] = {
+	{ "mod-struct-info", 0, "", 0 },
+#define MKIA_PROC_MOD_STRUCT_INFO	0
+	{ "mod-version-info", 0, "", 0 },
+#define MKIA_PROC_MOD_VERSION_INFO	1
+	{ "version", 0, "", 0 },
+#define MKIA_PROC_VERSION		2
+};
+
+#define NUM_MKIA_PROC	(sizeof(mkia_proc) / sizeof(mkia_proc[0]))
+
+int mkia_proc_string_read(struct file *file, char __user *buf,
+		size_t size, loff_t *ppos)
+{
+	struct mkia_proc_string_info *mpsip;
+	int count;
+	int index;
+
+	mpsip = (struct mkia_proc_string_info *) (file->private_data);
+
+	/* NOTE: removed ppos test to make things run on 2.6.8 kernels */
+
+	index = (int) *ppos;
+	if (index > mpsip->mpsi_strlen)
+		return 0;
+
+	count = mpsip->mpsi_strlen - index;
+	if (count > (int) size)
+		count = (int) size;
+
+	if (copy_to_user(buf, &(mpsip->mpsi_string[index]), count))
+		return -EFAULT;
+
+	*ppos += count;
+	return count;
+}
+
+int
+mkia_proc_struct_info_open(struct inode *inode, struct file *file)
+{
+	struct mkia_proc_string_info *msip;
+	msip = &mkia_proc[MKIA_PROC_MOD_STRUCT_INFO];
+	file->private_data = (void *) msip;
+	return 0;
+}
+
+static struct file_operations mkia_struct_info_fops = {
+	.owner = THIS_MODULE,
+	.open = mkia_proc_struct_info_open,
+	.read = mkia_proc_string_read,
+};
+
+int
+mkia_proc_version_info_open(struct inode *inode, struct file *file)
+{
+	struct mkia_proc_string_info *msip;
+	msip = &mkia_proc[MKIA_PROC_MOD_VERSION_INFO];
+	file->private_data = (void *) msip;
+	return 0;
+}
+
+static struct file_operations mkia_version_info_fops = {
+	.owner = THIS_MODULE,
+	.open = mkia_proc_version_info_open,
+	.read = mkia_proc_string_read,
+};
+
+int
+mkia_proc_version_open(struct inode *inode, struct file *file)
+{
+	struct mkia_proc_string_info *msip;
+	msip = &mkia_proc[MKIA_PROC_VERSION];
+	file->private_data = (void *) msip;
+	return 0;
+}
+
+static struct file_operations mkia_version_fops = {
+	.owner = THIS_MODULE,
+	.open = mkia_proc_version_open,
+	.read = mkia_proc_string_read,
+};
+
+struct proc_dir_entry *mkia_proc_root = NULL;
+
+#endif /* CONFIG_PROC_FS */
+
+void
cleanup_module(void); + +int +init_module(void) +{ + install_mhi_hooks(); + +#ifdef CONFIG_PROC_FS +{ + struct mkia_proc_string_info *mpsip; + int i; + + mkia_proc_root = create_proc_entry(mki_adapter_name, + S_IFDIR | 0555, NULL); + + if (mkia_proc_root == NULL) + goto init_module_fail; + + for (i = 0, mpsip = mkia_proc; i < NUM_MKIA_PROC; i++, mpsip++) { + mpsip->mpsi_proc_entry = create_proc_entry(mpsip->mpsi_name, + S_IFREG | 0444, mkia_proc_root); + if (mpsip->mpsi_proc_entry == NULL) + goto init_module_fail; + switch (i) { + case MKIA_PROC_MOD_STRUCT_INFO: + snprintf(mpsip->mpsi_string, sizeof(mpsip->mpsi_string), + "4 0x%x 0x%x 0x%x 0x%x\n", sizeof(struct module), + (int) &(((struct module *) 0)->name[0]), + (int) &(((struct module *) 0)->init), +#ifdef CONFIG_MODULE_UNLOAD + (int) &(((struct module *) 0)->exit) +#else + -1 +#endif + ); + mpsip->mpsi_proc_entry->proc_fops = &mkia_struct_info_fops; + break; + + case MKIA_PROC_MOD_VERSION_INFO: + snprintf(mpsip->mpsi_string, sizeof(mpsip->mpsi_string), + "%s\n", VERMAGIC_STRING); + mpsip->mpsi_string[sizeof(mpsip->mpsi_string) - 1] = 0; + mpsip->mpsi_proc_entry->proc_fops = &mkia_version_info_fops; + break; + + case MKIA_PROC_VERSION: + snprintf(mpsip->mpsi_string, sizeof(mpsip->mpsi_string), + "%s\n", mkia_version); + mpsip->mpsi_string[sizeof(mpsip->mpsi_string) - 1] = 0; + mpsip->mpsi_proc_entry->proc_fops = &mkia_version_fops; + break; + + default: + printk("Win4Lin: init: unknown procinfo index\n"); + goto init_module_fail; + } + mpsip->mpsi_strlen = strlen(mpsip->mpsi_string); + } +} +#endif /* CONFIG_PROC_FS */ + + return 0; + + +init_module_fail: + printk("mki-adapter: init_module failed\n"); + cleanup_module(); + return -EINVAL; +} + +void +cleanup_module(void) +{ +#ifdef CONFIG_PROC_FS +{ + struct mkia_proc_string_info *mpsip; + int i; + + for (i = 0, mpsip = mkia_proc; i < NUM_MKIA_PROC; i++, mpsip++) { + if (mpsip->mpsi_proc_entry == NULL) + continue; + remove_proc_entry(mpsip->mpsi_name, mkia_proc_root); + mpsip->mpsi_proc_entry = NULL; + } + if (mkia_proc_root) { + remove_proc_entry(mki_adapter_name, NULL); + mkia_proc_root = 0; + } +} +#endif /* CONFIG_PROC_FS */ + remove_mhi_hooks(); +} diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/mkivnet.c mki-adapter26-new/arch/i386/mki-adapter26/mkivnet.c --- mki-adapter26-old/arch/i386/mki-adapter26/mkivnet.c 1969-12-31 19:00:00.000000000 -0500 +++ mki-adapter26-new/arch/i386/mki-adapter26/mkivnet.c 2004-06-19 01:47:55.000000000 -0400 @@ -0,0 +1,991 @@ +/* + **************************************************************************** + * Copyright 2001-2003 by NeTraverse, Inc. + * This software is distributed under the terms of the GPL + * which is supplied in the LICENSE file with this distribution + *************************************************************************** + * $Id: mkivnet.c,v 1.6 2004/06/19 05:47:55 lreiter Exp $ + *************************************************************************** + * This is part of the mki module that handles all of the linux specific + * structures and function calls for virtual network (vnet). 
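+ * Inbound traffic is intercepted by taking over a protocol's
+ * packet_type handler, and outbound traffic by taking over the
+ * device's hard_start_xmit entry point; the saved "real" handlers
+ * are restored when the interceptor is removed.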
+ *************************************************************************** + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vneteth.h" +#include "vnetint-pub.h" +#include "mkifunc.h" + +//#define DBGVNET(str1) printk str1 +#define DBGVNET(str1) + +spinlock_t vnet_inter_lock = SPIN_LOCK_UNLOCKED; +#define DECLARELOCKSAVE(flags) unsigned long flags=0 +#define LOCKINTERCEPTOR(flags) spin_lock_irqsave(&vnet_inter_lock, flags) +#define UNLOCKINTERCEPTOR(flags) spin_unlock_irqrestore(&vnet_inter_lock, flags) + +static struct sk_buff *mkia_checksum_help(struct sk_buff *skb) +{ + unsigned int csum; + int offset = skb->h.raw - skb->data; + + if (offset > (int)skb->len) + BUG(); + csum = skb_checksum(skb, offset, skb->len-offset, 0); + + offset = skb->tail - skb->h.raw; + if (offset <= 0) + BUG(); + if (skb->csum + 2 > offset) + BUG(); + + *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; + return skb; +} + +int +mkia_ntohs(int protocol) +{ + return ntohs(protocol); +} + + +int *mkia_vni_vnetdebug = 0; +void (*mkia_vni_logfunc)(char *, ...) = 0; + +extern unsigned short eth_type_trans(struct sk_buff *skb, struct net_device *dev); +int mkia_vnetint_incoming(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt); +int mkia_vnetint_outgoing(struct sk_buff *skb, struct net_device *dev); + +struct notifier_block mkia_nb; +int mkia_interceptor_count; +struct vnetint_ifdev *mkia_vnetint_ifdev_base; +struct vnetint_proto *mkia_vnetint_proto_base; +struct vnetint_pcb *mkia_vnetint_inter_base; + +/***************************/ +/* VNET interceptor code */ +/***************************/ + +/*********************************/ +/* dummy incoming packet handler */ +int +mkia_vnetint_dummy(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt) +{ + kfree_skb(skb); + return 0; +} + +struct vnetint_proto * +mkia_vnetint_find_proto(int type) +{ + struct vnetint_proto *vp; + + for (vp = mkia_vnetint_proto_base; vp != NULL; vp = vp->next) + if (vp->ptype->type == type) + break; + + return vp; +} + + +/* Add a protocol to the list. 
*/ +struct vnetint_proto * +mkia_vnetint_add_proto(int protocol) +{ + int type = ntohs(protocol); + struct packet_type *pt, *node; + struct vnetint_proto *vp; + DECLARELOCKSAVE(flags); + + LOCKINTERCEPTOR(flags); + if ( (vp = mkia_vnetint_find_proto(type)) ) { + vp->count++; + UNLOCKINTERCEPTOR(flags); + return vp; + } + UNLOCKINTERCEPTOR(flags); + + if (!(vp = kmalloc(sizeof(struct vnetint_proto), GFP_KERNEL))) { + return NULL; + } + vp->real_recv = NULL; + + if (!(pt = kmalloc(sizeof(struct packet_type), GFP_KERNEL))) { + kfree(vp); + return NULL; + } + + pt->type = type; + pt->func = mkia_vnetint_dummy; + pt->dev = NULL; + + dev_add_pack(pt); + + LOCKINTERCEPTOR(flags); + for (node = container_of(pt->list.next, struct packet_type, list); + ((node != NULL) && (node != pt)); + node = container_of(node->list.next, struct packet_type, list)) { + if (node->type == type && node->dev == NULL) { + vp->ptype = node; + vp->real_recv = node->func; + node->func = mkia_vnetint_incoming; + DBGVNET(("vnetint_add_proto: found the real protocol " + "handler\n")); + break; + } + } + if ((node == NULL) || (node == pt)) { + pt->func = mkia_vnetint_incoming; + vp->ptype = pt; + } + vp->protocol = protocol; + vp->next = mkia_vnetint_proto_base; + vp->count = 1; + mkia_vnetint_proto_base = vp; + UNLOCKINTERCEPTOR(flags); + + /* Remove the dummy we added, if it was extra */ + if ((node != NULL) && (node != pt)) { + dev_remove_pack(pt); + kfree(pt); + } + + return vp; +} + +/* Remove a protocol from the list. */ +void +mkia_vnetint_rmv_proto(int protocol) +{ + struct vnetint_proto *vp, **pnode; + struct packet_type *pt, *node; + struct packet_type mypt; + DECLARELOCKSAVE(flags); + + DBGVNET(("vnetint_rmv_proto: protocol type = 0x%0x\n", protocol)); + + LOCKINTERCEPTOR(flags); + for (pnode = &mkia_vnetint_proto_base; (*pnode) != NULL; + pnode = &((*pnode)->next)) { + vp = (*pnode); + if (vp->protocol != protocol) + continue; + + vp->count--; + if (vp->count > 0) { + UNLOCKINTERCEPTOR(flags); + return; + } + *pnode = (*pnode)->next; + UNLOCKINTERCEPTOR(flags); + if (vp->real_recv != NULL) { + /* Check that the protocol is still registered */ + pt = &mypt; + pt->type = htons(protocol); + pt->func = mkia_vnetint_dummy; + pt->dev = NULL; + dev_add_pack(pt); + for (node = container_of(pt->list.next, + struct packet_type, list); + ((node != NULL) && (node != pt)); + node = container_of(node->list.next, + struct packet_type, list)) { + + if ((node == vp->ptype) && + (node->func == mkia_vnetint_incoming)) { + node->func = vp->real_recv; + DBGVNET(("vnetint_rmv_proto: restored " + "the real protocol handler\n")); + } + } + dev_remove_pack(pt); + } else if (vp->ptype) { + dev_remove_pack(vp->ptype); + kfree(vp->ptype); + } + kfree(vp); + return; + } + UNLOCKINTERCEPTOR(flags); + + DBGVNET(("vnetint_rmv_proto: protocol 0x%x not found.\n", protocol)); +} + +/* Find device from list - call with lock set */ +struct vnetint_ifdev * +mkia_vnetint_find_dev(char *ifname) +{ + struct vnetint_ifdev *vd; + + for (vd = mkia_vnetint_ifdev_base; vd != NULL; vd = vd->next) { + DBGVNET(("vnetint_find_dev: comparing %s with %s\n", vd->ifname, ifname)); + if (strcmp(vd->ifname, ifname) == 0) + break; + } + + return vd; +} + +/* FastFind device from list - call with lock set */ +struct vnetint_ifdev * +mkia_vnetint_fastfind_dev(struct net_device *dev) +{ + struct vnetint_ifdev *vd; + + for (vd = mkia_vnetint_ifdev_base; vd != NULL; vd = vd->next) + if (vd->dev == dev) + break; + + return vd; +} + +/* Add a device to the list. 
*/ +struct vnetint_ifdev * +mkia_vnetint_add_dev(char *ifname) +{ + struct vnetint_ifdev *vd; + struct net_device *dev = dev_get_by_name(ifname); + DECLARELOCKSAVE(flags); + + DBGVNET(("vnetint_add_dev: device = %s\n", ifname)); + + if (!dev) { + DBGVNET(("vnetint_add_dev: no such device %s\n", ifname)); + return NULL; + } + + LOCKINTERCEPTOR(flags); + if ( (vd = mkia_vnetint_find_dev(ifname)) ) { + vd->count++; + UNLOCKINTERCEPTOR(flags); + dev_put(dev); + return vd; + } + UNLOCKINTERCEPTOR(flags); + + if (!(vd = kmalloc(sizeof(struct vnetint_ifdev), GFP_KERNEL))) { + dev_put(dev); + return NULL; + } + + LOCKINTERCEPTOR(flags); + vd->dev = dev; + strncpy(vd->ifname, ifname, MAXIFNAMELEN); + vd->real_xmit = dev->hard_start_xmit; + dev->hard_start_xmit = mkia_vnetint_outgoing; + vd->next = mkia_vnetint_ifdev_base; + vd->count = 1; + mkia_vnetint_ifdev_base = vd; + UNLOCKINTERCEPTOR(flags); + + return vd; +} + + +/* Remove a device from the list. */ +void +mkia_vnetint_rmv_dev(char *ifname) +{ + + struct vnetint_ifdev *vd, **pnode; + struct net_device *ndp; + DECLARELOCKSAVE(flags); + + DBGVNET(("vnetint_rmv_dev: device = %s\n", ifname)); + + LOCKINTERCEPTOR(flags); + for (pnode = &mkia_vnetint_ifdev_base; (*pnode) != NULL; + pnode = &((*pnode)->next)) { + vd = (*pnode); + if (strcmp(vd->ifname, ifname) == 0) { + vd->count--; + if (vd->count > 0) { + UNLOCKINTERCEPTOR(flags); + return; + } + ndp = (struct net_device *) (vd->dev); + if ((ndp != NULL) && + (ndp->hard_start_xmit == mkia_vnetint_outgoing)) { + ndp->hard_start_xmit = vd->real_xmit; + } + *pnode = (*pnode)->next; + UNLOCKINTERCEPTOR(flags); + if (vd->dev != NULL) { + dev_put(vd->dev); + } + kfree(vd); + return; + } + } + UNLOCKINTERCEPTOR(flags); + + DBGVNET(("vnetint_rmv_dev: dev %s not found.\n", ifname)); +} + + + +/* Called when network device changes state */ +int +mkia_vnetint_dev_notifier(struct notifier_block *self, + unsigned long status, + void *ptr) +{ + + struct net_device *dev = (struct net_device *)ptr; + + struct vnetint_ifdev *vd; + DECLARELOCKSAVE(flags); + + DBGVNET(("vnetint_dev_notifier: Device %s changed state\n", dev->name)); + + LOCKINTERCEPTOR(flags); + switch (status) { + case NETDEV_CHANGE: + DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGE\n")); + break; + case NETDEV_UP: + DBGVNET(("vnetint_dev_notifier: NETDEV_UP\n")); + vd = mkia_vnetint_fastfind_dev(dev); + if (vd) { + vd->real_xmit = dev->hard_start_xmit; + dev->hard_start_xmit = mkia_vnetint_outgoing; + } + else { + DBGVNET(("vnetint_dev_notifier: " + "dev %s not found\n", dev->name)); + } + break; + case NETDEV_DOWN: + DBGVNET(("vnetint_dev_notifier: NETDEV_DOWN\n")); + vd = mkia_vnetint_fastfind_dev(dev); + if (vd) { + DBGVNET(("vnetint_dev_notifier: " + "found device\n")); + if (dev->hard_start_xmit == + mkia_vnetint_outgoing) { + DBGVNET(("vnetint_dev_notifier: " + "restore real_xmit\n")); + dev->hard_start_xmit = vd->real_xmit; + } + vd->real_xmit = NULL; + } + else { + DBGVNET(("vnetint_dev_notifier: " + "dev %s not found\n", dev->name)); + } + break; + case NETDEV_CHANGEMTU: + DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGEMTU\n")); + break; + case NETDEV_CHANGEADDR: + DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGEADDR\n")); + break; + case NETDEV_CHANGENAME: + DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGENAME\n")); + break; + case NETDEV_REGISTER: + DBGVNET(("vnetint_dev_notifier: NETDEV_REGISTER\n")); + vd = mkia_vnetint_find_dev(dev->name); + if (vd) { + vd->dev = dev_get_by_name(dev->name); + } + break; + case NETDEV_UNREGISTER: + 
DBGVNET(("vnetint_dev_notifier: NETDEV_UNREGISTER\n"));
+		vd = mkia_vnetint_find_dev(dev->name);
+		if (vd) {
+			vd->dev = NULL;
+			if (dev->hard_start_xmit ==
+					mkia_vnetint_outgoing) {
+				dev->hard_start_xmit = vd->real_xmit;
+			}
+			vd->real_xmit = NULL;
+			dev_put(dev);
+		}
+		else {
+			DBGVNET(("vnetint_dev_notifier: "
+				"dev %s not found\n", dev->name));
+		}
+		break;
+	default:
+		DBGVNET(("vnetint_dev_notifier: "
+			"Unknown device status\n"));
+		break;
+	}
+	UNLOCKINTERCEPTOR(flags);
+
+	DBGVNET(("vnetint_dev_notifier: done\n"));
+
+	return 0;
+}
+
+/* Find interceptor on list. - call with lock set */
+struct vnetint_pcb *
+mkia_vnetint_find_inter(int protocol, char *ifname)
+{
+	struct vnetint_pcb *vi;
+
+	for (vi = mkia_vnetint_inter_base; vi != NULL; vi = vi->next) {
+		if ((strncmp(vi->ifdev->ifname, ifname, MAXIFNAMELEN) == 0) &&
+		    (vi->proto->protocol == protocol)) {
+			break;
+		}
+	}
+
+	return vi;
+}
+
+/* FastFind interceptor on list. - call with lock set */
+static inline struct vnetint_pcb *
+mkia_vnetint_fastfind_inter(int type, struct net_device *dev)
+{
+	struct vnetint_pcb *vi;
+
+	for (vi = mkia_vnetint_inter_base; vi != NULL; vi = vi->next) {
+		if ((vi->ifdev->dev == dev) &&
+		    (vi->proto->ptype->type == type)) {
+			break;
+		}
+	}
+
+	return vi;
+}
+
+/* Add an interceptor to the list */
+int
+mkia_vnetint_add_interceptor(struct vnetint_filter *filter)
+{
+	struct vnetint_pcb *vi;
+	DECLARELOCKSAVE(flags);
+
+	DBGVNET(("vnetint_add_interceptor: device = %8s "
+		"protocol type = 0x%0x filter_func = 0x%p\n", filter->ifname,
+		filter->protocol, filter->intercept_func));
+
+	if (filter->intercept_func == NULL) {
+		return -EINVAL;
+	}
+
+	if (mkia_interceptor_count == MAX_INTERCEPTORS) {
+		return -ENOENT;
+	}
+
+	if ((vi = mkia_vnetint_find_inter(filter->protocol, filter->ifname))) {
+		/*
+		 * An interceptor already exists for this dev/proto pair.
+		 * Let the caller have the interceptor reference but return
+		 * EEXIST to warn that this interceptor was already defined.
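+		 * Either way the usage count has been bumped, so the
+		 * caller owns a reference that must eventually be dropped
+		 * with mkia_vnetint_rmv_interceptor().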
+		 */
+		LOCKINTERCEPTOR(flags);
+		vi->count++;	/* Increment the interceptor usage count */
+		filter->handle = vi;
+		UNLOCKINTERCEPTOR(flags);
+		return -EEXIST;
+	}
+
+	if (!(vi = kmalloc(sizeof(struct vnetint_pcb), GFP_KERNEL))) {
+		return -ENOMEM;
+	}
+
+	vi->ifdev = mkia_vnetint_add_dev(filter->ifname);
+	if (vi->ifdev == NULL) {
+		kfree(vi);
+		return -ENXIO;
+	}
+
+	vi->proto = mkia_vnetint_add_proto(filter->protocol);
+	if (vi->proto == NULL) {
+		mkia_vnetint_rmv_dev(filter->ifname);
+		kfree(vi);
+		return -ENXIO;
+	}
+
+	vi->filter_func = filter->intercept_func;
+	memset(&(vi->status), 0, sizeof(struct vnetint_status));
+	vi->status.flags = 0;
+	memcpy(&(vi->status.PhysMacAddr),
+		((struct net_device *) vi->ifdev->dev)->dev_addr, ETH_ALEN);
+	vi->status.flags |= VNETINTF_MACSET;
+	LOCKINTERCEPTOR(flags);
+	vi->next = mkia_vnetint_inter_base;
+	vi->count = 1;
+	mkia_vnetint_inter_base = vi;
+	mkia_interceptor_count++;
+	filter->handle = vi;
+	UNLOCKINTERCEPTOR(flags);
+
+	return(0);
+}
+
+/* Remove an interceptor from the list */
+void
+mkia_vnetint_rmv_interceptor(struct vnetint_filter *filter)
+{
+	char *ifname;
+	int protocol;
+	struct vnetint_pcb *vi, **pnode;
+	DECLARELOCKSAVE(flags);
+
+	if (filter == NULL) {
+		DBGVNET(("vnetint_rmv_interceptor: remove all interceptors\n"));
+		LOCKINTERCEPTOR(flags);
+		while ((vi = mkia_vnetint_inter_base) != NULL) {
+			/* Unlink the head element before releasing it */
+			mkia_vnetint_inter_base = vi->next;
+			ifname = vi->ifdev->ifname;
+			protocol = vi->proto->protocol;
+			DBGVNET(("vnetint_rmv_interceptor: device = %s, "
+				"protocol type = 0x%0x\n", ifname, protocol));
+			mkia_interceptor_count--;
+			UNLOCKINTERCEPTOR(flags);
+			mkia_vnetint_rmv_dev(ifname);
+			mkia_vnetint_rmv_proto(protocol);
+			kfree(vi);
+			LOCKINTERCEPTOR(flags);
+		}
+		UNLOCKINTERCEPTOR(flags);
+		return;
+	}
+
+	vi = filter->handle;
+	if (vi) {
+		DBGVNET(("vnetint_rmv_interceptor: device = %s, "
+			"protocol type = 0x%0x\n", filter->ifname,
+			filter->protocol));
+		LOCKINTERCEPTOR(flags);
+		for (pnode = &mkia_vnetint_inter_base; (*pnode) != NULL;
+				pnode = &((*pnode)->next)) {
+			if ((*pnode) == vi) {
+				vi->count--;
+				if ( vi->count > 0 ) {
+					UNLOCKINTERCEPTOR(flags);
+					return;
+				}
+				*pnode = (*pnode)->next;
+				mkia_interceptor_count--;
+				UNLOCKINTERCEPTOR(flags);
+				mkia_vnetint_rmv_dev(filter->ifname);
+				mkia_vnetint_rmv_proto(filter->protocol);
+				kfree(vi);
+				return;
+			}
+		}
+		UNLOCKINTERCEPTOR(flags);
+	}
+
+	DBGVNET(("vnetint_rmv_interceptor: no interceptor found\n"));
+
+	return;
+}
+
+/* Incoming Packet Handler */
+int
+mkia_vnetint_incoming(struct sk_buff *skb, struct net_device *dev,
+		struct packet_type *pt)
+{
+	struct vnetint_proto *vp;
+	struct vnetint_pcb *vi;
+	filter_func_t filter_func = 0;
+	recv_func_t real_recv = 0;
+	int consumed;
+	DECLARELOCKSAVE(flags);
+
+	DBGVNET(("vnetint_incoming: got packet from dev %s for "
+		"protocol 0x%04x\n", dev->name, ntohs(pt->type)));
+
+	LOCKINTERCEPTOR(flags);
+	vp = mkia_vnetint_find_proto(pt->type);
+	if (vp == NULL) {
+		/*
+		 * This should never happen but just to play safe ...
+		 * Drop the packet
+		 */
+		printk("vnetint_incoming: protocol 0x%04x not registered!!!\n",
+			ntohs(pt->type));
+		goto incoming_free_pkt;
+	}
+	real_recv = vp->real_recv;
+	if ((skb->pkt_type & (PACKET_HOST | PACKET_OTHERHOST))
+			== (PACKET_HOST | PACKET_OTHERHOST)) {
+		DBGVNET(("vnetint_incoming: ignore pkt from filter "
+			"- send it on through\n"));
+		skb->pkt_type &= ~PACKET_OTHERHOST;
+		goto pass_to_original_handler;
+	}
+
+	vi = mkia_vnetint_fastfind_inter(pt->type, dev);
+	if (vi == NULL) {
+		DBGVNET(("vnetint_incoming: no interceptor for "
+			"dev %s, protocol 0x%x\n", dev->name, ntohs(pt->type)));
+		goto pass_to_original_handler;
+	}
+
+	if (! VNETINT_ACT(vi)) {
+		DBGVNET(("vnetint_incoming: interceptor not active\n"));
+		goto pass_to_original_handler;
+	}
+
+	vi->status.NumPktsUp++;
+	filter_func = vi->filter_func;
+	if (filter_func == NULL) {
+		DBGVNET(("vnetint_incoming: missing filter_func\n"));
+		goto pass_to_original_handler;
+	}
+
+	UNLOCKINTERCEPTOR(flags);
+
+	consumed = filter_func(skb->mac.raw,
+			skb->len + dev->hard_header_len, skb);
+
+	LOCKINTERCEPTOR(flags);
+
+	if (consumed) {
+		vi->status.NumPktsConsumed++;
+		goto incoming_free_pkt;
+	}
+
+	/*
+	 * Fall through and send the packet to the original handler since
+	 * the filter did not fully consume it.
+	 */
+pass_to_original_handler:
+	/* Pass packet to original protocol handler */
+	if (!real_recv) {
+		DBGVNET(("vnetint_incoming: no protocol - "
+			"dropped packet\n"));
+		goto incoming_free_pkt;
+	}
+	UNLOCKINTERCEPTOR(flags);
+	return (*real_recv)(skb, dev, pt);
+
+incoming_free_pkt:
+	UNLOCKINTERCEPTOR(flags);
+	kfree_skb(skb);
+	return 0;
+}
+
+/* Outgoing Packet Handler */
+int
+mkia_vnetint_outgoing(struct sk_buff *skb, struct net_device *dev)
+{
+	struct vnetint_ifdev *vd;
+	struct vnetint_pcb *vi;
+	xmit_func_t real_xmit = 0;
+	ETH_HDR_T *eth;
+	unsigned short protocol;
+	int consumed;
+	DECLARELOCKSAVE(flags);
+
+	/*
+	 * We can't rely on skb->protocol to hold a valid protocol
+	 * type so go look in the ethernet header.
+	 */
+	eth = (ETH_HDR_T *)skb->data;
+	protocol = eth->type;
+
+	DBGVNET(("vnetint_outgoing: got packet for dev %s from protocol "
+		"0x%04x\n", dev->name, ntohs(protocol)));
+
+	LOCKINTERCEPTOR(flags);
+
+	vd = mkia_vnetint_fastfind_dev(dev);
+	if (vd == NULL) {
+		/* This should never happen but just to play safe ... */
+		/* Drop the packet */
+		printk("vnetint_outgoing: device %p not registered!!!\n", dev);
+		goto outgoing_free_pkt;
+	}
+	real_xmit = vd->real_xmit;
+
+	if ((skb->pkt_type & (PACKET_OUTGOING | PACKET_OTHERHOST))
+			== (PACKET_OUTGOING | PACKET_OTHERHOST)) {
+		DBGVNET(("vnetint_outgoing: ignore pkt from filter "
+			"- send it on through\n"));
+		skb->pkt_type &= ~PACKET_OTHERHOST;
+		goto pass_to_device;
+	}
+	vi = mkia_vnetint_fastfind_inter(protocol, dev);
+	if (vi == NULL) {
+		DBGVNET(("vnetint_outgoing: no interceptor for dev %s, "
+			"protocol 0x%04x\n", dev->name, ntohs(protocol)));
+		goto pass_to_device;
+	}
+	if (! VNETINT_ACT(vi)) {
+		DBGVNET(("vnetint_outgoing: interceptor not active\n"));
+		goto pass_to_device;
+	}
+
+	vi->status.NumPktsDown++;
+	if (vi->filter_func) {
+		/* mac.raw is not set by protocol driver */
+		skb->mac.raw = skb->data;
+
+		UNLOCKINTERCEPTOR(flags);
+		consumed = vi->filter_func(skb->data, skb->len, skb);
+		LOCKINTERCEPTOR(flags);
+		if (consumed) {
+			DBGVNET(("vnetint_outgoing: filter consumed "
+				"packet\n"));
+			vi->status.NumPktsConsumed++;
+			goto outgoing_free_pkt;
+		}
+	}
+
+pass_to_device:
+	DBGVNET(("vnetint_outgoing: give packet to real NIC xmitfunc\n"));
+	/* Pass packet to the network device */
+	if (!real_xmit) {
+		printk("vnetint_outgoing: bad device - dropped packet\n");
+		goto outgoing_free_pkt;
+	}
+	UNLOCKINTERCEPTOR(flags);
+	return (*real_xmit)(skb, dev);
+
+outgoing_free_pkt:
+	UNLOCKINTERCEPTOR(flags);
+	kfree_skb(skb);
+	return 0;
+}
+
+/* Convert Ethernet address to printable (loggable) representation. */
+char *
+mkia_ether_sprintf(ETH_ADDR_T *addr)
+{
+	register int i;
+	static char etherbuf[18];
+	register char *cp = etherbuf;
+	register unsigned char *ap = (unsigned char *)addr;
+	static char digits[] = "0123456789abcdef";
+
+	for (i = 0; i < 6; i++) {
+		if (*ap >= 16)
+			*cp++ = digits[*ap >> 4];
+		*cp++ = digits[*ap++ & 0xf];
+		*cp++ = ':';
+	}
+	*--cp = 0;
+	return (etherbuf);
+}
+
+/* Interceptor process packet */
+void
+vnetint_ProcessPacket(char *pPacket,
+		unsigned size,
+		int direction,
+		void *handle)
+{
+	struct sk_buff *skb = dev_alloc_skb(size + 2);
+	struct vnetint_pcb *vi = (struct vnetint_pcb *)handle;
+	struct net_device *dev = vi->ifdev->dev;
+	DECLARELOCKSAVE(flags);
+
+	DBGVNET(("vnetint_ProcessPacket: direction 0x%x:\n",direction));
+
+	if (dev == NULL || skb == NULL) {
+		/* No can do - drop packet (and don't leak the skb) */
+		if (skb)
+			kfree_skb(skb);
+		return;
+	}
+
+	skb->dev = dev;
+	skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+	memcpy(skb_put(skb, size), pPacket, size);
+
+	/*
+	 * It seems that net/core/dev.c expects this field to be
+	 * initialized, so we will force it in the same way the kernel
+	 * does if it's not initialized.
+	 *
+	 * This prevents the "protocol buggy" messages that were showing
+	 * up in the syslog.
+	 */
+	if (skb->nh.raw < skb->data || skb->nh.raw > skb->tail) {
+		skb->nh.raw = skb->data;
+	}
+
+	LOCKINTERCEPTOR(flags);
+	vi->status.NumPktsFromVnet++;
+	if (VNETINT_ACT(vi)) {
+		UNLOCKINTERCEPTOR(flags);
+		/*
+		 * NOTE: in the following code, use of PACKET_OTHERHOST
+		 * is nonsensical.  This fact is being used to mark the
+		 * packet as having originated in the VNET so that the
+		 * packet can be recognized and ignored by the interceptor
+		 * incoming and outgoing packet handlers.
+ */ + if (direction == DOWNSTREAM) { + /* Pass packet to the network device outbound queue */ + DBGVNET(("vnetint_ProcessPacket: sending packet down " + "to net device\n")); + skb->pkt_type = (PACKET_OUTGOING | PACKET_OTHERHOST); + + dev_queue_xmit(skb); + } + else { + /* Pass packet to the network device inbound queue */ + DBGVNET(("vnetint_ProcessPacket: sending packet up " + "to protocol\n")); + skb->protocol = eth_type_trans(skb, dev); + skb->pkt_type = (PACKET_HOST | PACKET_OTHERHOST); + netif_rx(skb); + } + DBGVNET(("vnetint_ProcessPacket done\n")); + return; + } + else { + DBGVNET(("vnetint_ProcessPacket: Dropped packet - " + "interceptor down\n")); + } + UNLOCKINTERCEPTOR(flags); + + kfree_skb(skb); +} +EXPORT_SYMBOL(vnetint_ProcessPacket); + +/* Interceptor control status */ +void +vnetint_CtrlStatus(unsigned MsgType, struct vnetint_filter *filter) +{ + + struct vnetint_pcb *vi = filter->handle; + struct net_device *ndp; + DECLARELOCKSAVE(flags); + + DBGVNET(("vnetint_CtrlStatus: MsgType=%x, filter=0x%p\n", + MsgType, filter)); + LOCKINTERCEPTOR(flags); + switch (MsgType) { + case INTERCEPTOR_GOACTIVE: + DBGVNET(("vnetint_CtrlStatus: GOACTIVE\n")); + vi->status.flags |= VNETINTF_ACTIVE; + break; + case INTERCEPTOR_GOPASSIVE: + DBGVNET(("vnetint_CtrlStatus: GOPASSIVE\n")); + vi->status.flags &= ~VNETINTF_ACTIVE; + break; + case INTERCEPTOR_GETSTATS: + DBGVNET(("vnetint_CtrlStatus: GETSTATS\n")); + if (vi->ifdev->dev) { + ndp = ((struct net_device *) vi->ifdev->dev); + memcpy(&(vi->status.PhysMacAddr), + ndp->dev_addr, ETH_ALEN); + } + else { + memset(&(vi->status.PhysMacAddr), 0, ETH_ALEN); + } + memcpy(&(filter->status), &(vi->status), + sizeof(struct vnetint_status)); + break; + case INTERCEPTOR_SETFILTER: + DBGVNET(("vnetint_CtrlStatus: SETFILTER ")); + UNLOCKINTERCEPTOR(flags); + if (filter->intercept_func) { + DBGVNET(("- add interceptor\n")); + mkia_vnetint_add_interceptor(filter); + } + else { + DBGVNET(("- remove interceptor\n")); + mkia_vnetint_rmv_interceptor(filter); + } + return; + default: + break; + } + UNLOCKINTERCEPTOR(flags); + return; +} +EXPORT_SYMBOL(vnetint_CtrlStatus); + +void +vnetint_setdebug(int *debug_mask_ptr, void (*debug_log_func)(char *, ...)) +{ + mkia_vni_vnetdebug = debug_mask_ptr; + mkia_vni_logfunc = debug_log_func; +} +EXPORT_SYMBOL(vnetint_setdebug); + +/* Interceptor copy packet*/ +unsigned +vnetint_CopyPacket(void *cookie, + unsigned offset, + char *dest, + unsigned maxpktsz) +{ + struct sk_buff *skb = cookie; + unsigned data_len = skb->tail - skb->mac.raw; + + if (skb->ip_summed == CHECKSUM_HW) { + skb = mkia_checksum_help(skb); + if (skb == NULL) { + printk("mki-adapter: skb_checksum_help failed\n"); + return 0; + } + } + + DBGVNET(("vnetint_CopyPacket: cookie = %p, offset = %d, dest = %p, " + "maxpktsz = %d\n", cookie, offset, dest, maxpktsz)); + DBGVNET(("vnetint_CopyPacket: nr_frags %d, frag_list %p\n", + skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->frag_list)); + + data_len -= offset; + if (data_len > maxpktsz) + data_len = maxpktsz; + memcpy(dest, skb->mac.raw + offset, data_len); + return data_len; +} +EXPORT_SYMBOL(vnetint_CopyPacket); + +int +mkia_init_vnetint(void) +{ + mkia_vnetint_ifdev_base = NULL; + mkia_vnetint_proto_base = NULL; + mkia_vnetint_inter_base = NULL; + mkia_interceptor_count = 0; + + mkia_nb.notifier_call = mkia_vnetint_dev_notifier; + mkia_nb.next = NULL; + mkia_nb.priority = 0; + + register_netdevice_notifier(&mkia_nb); + + return 0; +} +EXPORT_SYMBOL(mkia_init_vnetint); + +void +mkia_cleanup_vnetint(void) +{ + 
unregister_netdevice_notifier(&mkia_nb);
+	mkia_vnetint_rmv_interceptor(NULL);
+}
+EXPORT_SYMBOL(mkia_cleanup_vnetint);
+
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/README mki-adapter26-new/arch/i386/mki-adapter26/README
--- mki-adapter26-old/arch/i386/mki-adapter26/README	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/README	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,12 @@
+ ***************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ****************************************************************************
+ * $Id: README,v 1.2 2003/11/01 00:40:45 rwb Exp $
+ ****************************************************************************
+
+This kernel module attempts to isolate all of the functions and structures
+that NeTraverse utilizes in its binary kernel modules.
+
+
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/timer.c mki-adapter26-new/arch/i386/mki-adapter26/timer.c
--- mki-adapter26-old/arch/i386/mki-adapter26/timer.c	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/timer.c	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,136 @@
+/*
+ ****************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: timer.c,v 1.2 2003/11/01 00:40:45 rwb Exp $
+ ***************************************************************************
+ *
+ * Provide unix-like timeout() and untimeout() interfaces.
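+ *
+ * timeout() arms a one-shot callback "ticks" jiffies in the future and
+ * returns a non-zero id; untimeout(id) cancels it if it has not fired.
+ * A returned id of 0 means the timer could not be allocated.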
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "mkifunc.h"
+
+struct linux_unix_timer {
+	struct linux_unix_timer *lut_next;
+	struct linux_unix_timer *lut_prev;
+	void (*lut_unix_timeout_func)(void *);
+	int lut_timeoutid;
+	void *lut_unix_timeout_arg;
+	struct timer_list lut_linux_timer;
+};
+
+struct linux_unix_timer lut_list_head = { &lut_list_head, &lut_list_head };
+int lut_unique_id = 1;
+
+spinlock_t mkia_lut_lock = SPIN_LOCK_UNLOCKED;
+#define DECLARELOCKSAVE(flags)	unsigned long flags=0
+#define LOCKTIMER(flags)	spin_lock_irqsave(&mkia_lut_lock, flags)
+#define UNLOCKTIMER(flags)	spin_unlock_irqrestore(&mkia_lut_lock, flags)
+
+void
+do_mki_timeout(unsigned long timer_arg)
+{
+	struct linux_unix_timer *target_lutp;
+	struct linux_unix_timer *lutp;
+	void (*timeout_func)(void *);
+	void *timeout_arg;
+	DECLARELOCKSAVE(flags);
+
+	target_lutp = (struct linux_unix_timer *) timer_arg;
+	timeout_func = target_lutp->lut_unix_timeout_func;
+	timeout_arg = target_lutp->lut_unix_timeout_arg;
+
+	/* See if the element is still on the active list */
+	LOCKTIMER(flags);
+	for (lutp = lut_list_head.lut_next; lutp != &lut_list_head;
+	     lutp = lutp->lut_next) {
+		if (lutp == target_lutp)
+			break;
+	}
+	if (lutp != &lut_list_head) {
+		lutp->lut_next->lut_prev = lutp->lut_prev;
+		lutp->lut_prev->lut_next = lutp->lut_next;
+		UNLOCKTIMER(flags);
+
+		del_timer(&lutp->lut_linux_timer);
+		kfree(lutp);
+		(*timeout_func)(timeout_arg);
+	} else {
+		UNLOCKTIMER(flags);
+		/* Entry already gone from the list; fire the callback anyway */
+		printk("Merge: mki-adapter: not doing timeout func\n");
+		printk("Merge: mki-adapter: func %p, arg %p\n",
+			timeout_func, timeout_arg);
+		(*timeout_func)(timeout_arg);
+	}
+}
+
+int
+timeout(void (*timeout_func)(void *), void *timeout_arg, long ticks)
+{
+	struct linux_unix_timer *lutp;
+	DECLARELOCKSAVE(flags);
+
+	lutp = kmalloc(sizeof(*lutp), GFP_ATOMIC);
+	if (lutp == NULL) {
+		printk("Merge: timeout: kmalloc failed\n");
+		return 0;
+	}
+	memset(lutp, 0, sizeof(*lutp));
+	init_timer(&lutp->lut_linux_timer);
+	lutp->lut_linux_timer.expires = jiffies + ticks;
+	lutp->lut_linux_timer.data = (unsigned long) lutp;
+	lutp->lut_linux_timer.function = do_mki_timeout;
+	lutp->lut_unix_timeout_func = timeout_func;
+	lutp->lut_unix_timeout_arg = timeout_arg;
+
+	LOCKTIMER(flags);
+	lutp->lut_timeoutid = lut_unique_id++;
+	lutp->lut_next = &lut_list_head;
+	lutp->lut_prev = lut_list_head.lut_prev;
+	lut_list_head.lut_prev->lut_next = lutp;
+	lut_list_head.lut_prev = lutp;
+	UNLOCKTIMER(flags);
+
+	add_timer(&lutp->lut_linux_timer);
+
+	return (lutp->lut_timeoutid);
+}
+EXPORT_SYMBOL(timeout);
+
+void
+untimeout(int id)
+{
+	struct linux_unix_timer *lutp;
+	DECLARELOCKSAVE(flags);
+
+	if (id == 0)
+		return;
+
+	LOCKTIMER(flags);
+	for (lutp = lut_list_head.lut_next; lutp != &lut_list_head;
+	     lutp = lutp->lut_next) {
+		if (lutp->lut_timeoutid == id) {
+			lutp->lut_next->lut_prev = lutp->lut_prev;
+			lutp->lut_prev->lut_next = lutp->lut_next;
+			break;
+		}
+	}
+	UNLOCKTIMER(flags);
+
+	if (lutp != &lut_list_head) {
+		del_timer(&lutp->lut_linux_timer);
+		memset(lutp, 0, sizeof(*lutp));
+		kfree(lutp);
+	}
+}
+EXPORT_SYMBOL(untimeout);
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/vneteth.h mki-adapter26-new/arch/i386/mki-adapter26/vneteth.h
--- mki-adapter26-old/arch/i386/mki-adapter26/vneteth.h	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/vneteth.h	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,36 @@
+/*
+ ****************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: vneteth.h,v 1.2 2003/11/01 00:40:45 rwb Exp $
+ ***************************************************************************
+ * Ethernet address structure for ease of
+ * access to individual components.
+ *
+ */
+#pragma pack(1)
+typedef union {
+	struct {
+		u_char b0, b1, b2, b3, b4, b5;
+	} eab;
+	struct {
+		u_short w0, w1, w2;
+	} eaw;
+	struct {
+		u_long ls4;
+		u_short ms2;
+	} eal;
+} ETH_ADDR_T;
+
+/*
+ * Ethernet Header
+ */
+typedef struct {
+	ETH_ADDR_T dest;
+	ETH_ADDR_T src;
+	u_short type;
+} ETH_HDR_T;
+#pragma pack()
+
diff -Naur mki-adapter26-old/arch/i386/mki-adapter26/vnetint-pub.h mki-adapter26-new/arch/i386/mki-adapter26/vnetint-pub.h
--- mki-adapter26-old/arch/i386/mki-adapter26/vnetint-pub.h	1969-12-31 19:00:00.000000000 -0500
+++ mki-adapter26-new/arch/i386/mki-adapter26/vnetint-pub.h	2003-10-31 19:40:45.000000000 -0500
@@ -0,0 +1,106 @@
+/*
+ ****************************************************************************
+ * Copyright 2001-2003 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: vnetint-pub.h,v 1.2 2003/11/01 00:40:45 rwb Exp $
+ ***************************************************************************
+ */
+
+/* Interceptor status */
+struct vnetint_status {
+	int flags;
+	unsigned NumPktsConsumed;
+	unsigned NumPktsFromVnet;
+	unsigned NumPktsUp;
+	unsigned NumPktsDown;
+	ETH_ADDR_T PhysMacAddr;
+};
+
+#define MAX_INTERCEPTORS 16
+#define MAXIFNAMELEN 8
+/* Interceptor state flags */
+#define VNETINTF_ACTIVE		0x2	/* interceptor is active */
+#define VNETINTF_MACSET		0x10	/* we got a mac address */
+#define VNETINT_ACT(s) \
+	(((s)->status.flags & VNETINTF_ACTIVE))
+
+typedef int (*xmit_func_t)(struct sk_buff *skb, struct net_device *dev);
+typedef int (*recv_func_t)(struct sk_buff *skb, struct net_device *dev,
+			struct packet_type *pt);
+typedef int (*filter_func_t)(unsigned char *ucp, unsigned int len,
+			struct sk_buff *skb);
+
+/* Interceptor device */
+struct vnetint_ifdev {
+	char ifname[MAXIFNAMELEN];
+	int count;
+	void *dev;
+	xmit_func_t real_xmit;
+	struct vnetint_ifdev *next;
+};
+
+/* Interceptor protocol */
+struct vnetint_proto {
+	int protocol;
+	int count;
+	struct packet_type *ptype;
+	recv_func_t real_recv;
+	struct vnetint_proto *next;
+};
+
+/* Interceptor device/protocol pairs */
+struct vnetint_pcb {
+	int count;
+	struct vnetint_status status;
+	struct vnetint_ifdev *ifdev;
+	struct vnetint_proto *proto;
+	filter_func_t filter_func;
+	struct vnetint_pcb *next;
+};
+
+/* Interceptor filter */
+struct vnetint_filter {
+	struct vnetint_pcb *handle;
+	struct vnetint_status status;
+	char ifname[MAXIFNAMELEN];
+	int protocol;
+	filter_func_t intercept_func;
+	void *criteria;
+};
+
+/* vnetint_CtrlStatus function codes */
+#define INTERCEPTOR_GOACTIVE	1
+#define INTERCEPTOR_GOPASSIVE	2
+#define INTERCEPTOR_GETSTATS	3
+#define INTERCEPTOR_NUMBER	4
+#define INTERCEPTOR_SETFILTER	5
+/* vnetint_ProcessPacket defines */
+#define UPSTREAM	1
+#define DOWNSTREAM	0
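+
+/*
+ * Usage sketch (hypothetical consumer module, not part of this patch):
+ * fill in a vnetint_filter and hand it to vnetint_CtrlStatus() with
+ * INTERCEPTOR_SETFILTER.  A non-NULL intercept_func registers the
+ * interceptor; calling again with intercept_func == NULL removes it.
+ * The filter return convention (0 = let the packet pass) and the
+ * ETH_P_ALL protocol value are assumptions, not something this
+ * header defines.
+ *
+ *	static int my_intercept(unsigned char *ucp, unsigned int len,
+ *				struct sk_buff *skb)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	struct vnetint_filter f;
+ *
+ *	memset(&f, 0, sizeof(f));
+ *	strncpy(f.ifname, "eth0", MAXIFNAMELEN);
+ *	f.protocol = ETH_P_ALL;
+ *	f.intercept_func = my_intercept;
+ *	vnetint_CtrlStatus(INTERCEPTOR_SETFILTER, &f);
+ */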