diff -u --recursive --new-file v2.3.31/linux/CREDITS linux/CREDITS --- v2.3.31/linux/CREDITS Tue Dec 7 09:32:38 1999 +++ linux/CREDITS Mon Dec 13 14:11:32 1999 @@ -1605,7 +1605,7 @@ S: Australia N: Greg Page -E: greg@caldera.com +E: gpage@sovereign.org D: IPX development and support N: David Parsons @@ -1821,11 +1821,14 @@ S: 23743 Groemitz S: Germany -N: Paul Russell -E: Paul.Russell@rustcorp.com.au +N: Paul `Rusty' Russell +E: rusty@linuxcare.com W: http://www.rustcorp.com D: Ruggedly handsome. -D: Developed Generic IP Firewalling Chains with Michael Neuling. +D: netfilter, ipchains with Michael Neuling. +S: 301/222 City Walk +S: Canberra ACT 2601 +S: Australia N: Thomas Sailer E: sailer@ife.ee.ethz.ch diff -u --recursive --new-file v2.3.31/linux/Documentation/Changes linux/Documentation/Changes --- v2.3.31/linux/Documentation/Changes Tue Dec 7 09:32:38 1999 +++ linux/Documentation/Changes Mon Dec 13 18:13:30 1999 @@ -57,7 +57,8 @@ - Loadlin 1.6a - Sh-utils 1.16 ; basename --v - Autofs 3.1.1 ; automount --version -- NFS 2.2beta40 ; showmount --version +- NFS (client) 2.2beta40 ; showmount --version +- nfs-utils (server) 0.1.4 - Bash 1.14.7 ; bash -version - Ncpfs 2.2.0 ; ncpmount -v - Pcmcia-cs 3.1.2 ; cardmgr -V @@ -646,9 +647,8 @@ ftp://ftp.mathematik.th-darmstadt.de/pub/linux/okir/dontuse/nfs-server-2.2beta40.tar.gz ftp://linux.nrao.edu/mirrors/fb0429.mathematik.th-darmstadt.de/pub/linux/okir/dontuse/nfs-server-2.2beta40.tar.gz -The kernel-level 12/04/98 release: -ftp://ftp.yggdrasil.com/private/hjl/knfsd-981204.tar.gz -ftp://ftp.kernel.org/pub/linux/devel/gcc/knfsd-981204.tar.gz +The kernel-level nfs-utils-0.1.4 release: +ftp://nfs.sourceforge.net/pub/nfs/nfs-utils-0.1.4.tar.gz Net-tools ========= diff -u --recursive --new-file v2.3.31/linux/Documentation/Configure.help linux/Documentation/Configure.help --- v2.3.31/linux/Documentation/Configure.help Wed Dec 8 14:11:24 1999 +++ linux/Documentation/Configure.help Mon Dec 13 18:13:30 1999 @@ -1477,10 +1477,6 @@ 
Chances are that you should say Y here if you compile a kernel which will run as a router and N for regular hosts. If unsure, say N. -Network packet filtering debugging -CONFIG_NETFILTER_DEBUG - Say Y to make sure packets aren't leaking. - SYN flood protection CONFIG_SYN_COOKIES Normal TCP/IP networking is open to an attack known as "SYN @@ -1496,7 +1492,7 @@ is no need for the legitimate users to change their TCP/IP software; SYN cookies work transparently to them. For technical information about SYN cookies, check out - ftp://koobera.math.uic.edu/pub/docs/syncookies-archive . + ftp://koobera.math.uic.edu/syncookies.html . If you are SYN flooded, the source address reported by the kernel is likely to have been forged by the attacker; it is only reported as @@ -4085,6 +4081,16 @@ Documentation/scsi.txt. The module will be called sg.o. If unsure, say N. +Debug new queueing code for SCSI +CONFIG_SCSI_DEBUG_QUEUES + This option turns on a lot of additional consistency checking for the new + queueing code. This will adversely affect performance, but it is likely + that bugs will be caught sooner if this is turned on. This will typically + cause the kernel to panic if an error is detected, but it would have probably + crashed if the panic weren't there. Comments/questions/problems to + linux-scsi mailing list please. See http://www.andante.org/scsi_queue.html + for more uptodate information. + Probe all LUNs on each SCSI device CONFIG_SCSI_MULTI_LUN If you have a SCSI device that supports more than one LUN (Logical @@ -8311,6 +8317,10 @@ MSDOS floppies. You will need a program called umssync in order to make use of umsdos; read Documentation/filesystems/umsdos.txt. + To get utilities for initializing/checking UMSDOS filesystem, or + latest patches and/or information, visit UMSDOS homepage at + http://www.voyager.hr/~mnalis/umsdos/ . + This option enlarges your kernel by about 28 KB and it only works if you said Y to both "fat fs support" and "msdos fs support" above. 
If you want to compile this as a module ( = code which can be inserted @@ -8389,33 +8399,34 @@ Most people say N here. -NFS server support (EXPERIMENTAL) +NFS server support CONFIG_NFSD - If you want your Linux box to act as a NFS *server*, so that other + If you want your Linux box to act as an NFS *server*, so that other computers on your local network which support NFS can access certain directories on your box transparently, you have two options: you can use the self-contained user space program nfsd, in which case you - should say N here, or you can say Y and use this new experimental - kernel based NFS server. The advantage of the kernel based solution - is that it is faster; it might not be completely stable yet, though. + should say N here, or you can say Y and use the kernel based NFS + server. The advantage of the kernel based solution is that it is + faster. In either case, you will need support software; the respective locations are given in the file Documentation/Changes in the NFS section. Please read the NFS-HOWTO, available from - http://metalab.unc.edu/mdw/linux.html#howto . + http://www.linuxdoc.org/HOWTO/NFS-HOWTO.html . + The NFS server is also available as a module ( = code which can be inserted in and removed from the running kernel whenever you want). The module is called nfsd.o. If you want to compile it as a module, say M here and read Documentation/modules.txt. If unsure, say N. -Emulate SUN NFS server -CONFIG_NFSD_SUN - If you would like for the server to allow clients to access - directories that are mount points on the local filesystem (this is - how nfsd behaves on Sun systems), say Y here. If unsure, say N. +Provide NFSv3 server support (EXPERIMENTAL) +CONFIG_NFSD_V3 + If you would like to include the NFSv3 server was well as the NFSv2 + server, say Y here. File locking, via the NLMv4 protocol, is not + supported yet. If unsure, say N. 
OS/2 HPFS filesystem support CONFIG_HPFS_FS diff -u --recursive --new-file v2.3.31/linux/Documentation/ide.txt linux/Documentation/ide.txt --- v2.3.31/linux/Documentation/ide.txt Sun Nov 7 16:37:33 1999 +++ linux/Documentation/ide.txt Wed Dec 8 15:17:55 1999 @@ -276,6 +276,8 @@ port. Should be used only as a last resort. "hdx=swapdata" : when the drive is a disk, byte swap all data + "hdxlun=xx" : set the drive last logical unit + "idebus=xx" : inform IDE driver of VESA/PCI bus speed in MHz, where "xx" is between 20 and 66 inclusive, used when tuning chipset PIO modes. diff -u --recursive --new-file v2.3.31/linux/MAINTAINERS linux/MAINTAINERS --- v2.3.31/linux/MAINTAINERS Wed Dec 8 14:11:24 1999 +++ linux/MAINTAINERS Sat Dec 11 07:39:10 1999 @@ -883,8 +883,10 @@ S: Maintained TLAN NETWORK DRIVER +P: Torben Mathiasen +M: torben.mathiasen@compaq.com L: tlan@vuser.vu.union.edu -S: Orphan +S: Maintained TOKEN-RING NETWORK DRIVER P: Paul Norton @@ -917,8 +919,9 @@ UMSDOS FILESYSTEM P: Matija Nalis -M: mnalis@jagor.srce.hr +M: Matija Nalis L: linux-kernel@vger.rutgers.edu +W: http://www.voyager.hr/~mnalis/umsdos/ S: Maintained UNIFORM CDROM DRIVER diff -u --recursive --new-file v2.3.31/linux/Makefile linux/Makefile --- v2.3.31/linux/Makefile Wed Dec 8 14:11:24 1999 +++ linux/Makefile Mon Dec 13 14:12:28 1999 @@ -1,6 +1,6 @@ VERSION = 2 PATCHLEVEL = 3 -SUBLEVEL = 31 +SUBLEVEL = 32 EXTRAVERSION = ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/) @@ -121,6 +121,10 @@ DRIVERS += drivers/char/drm/drm.o endif +ifeq ($(CONFIG_AGP),y) +DRIVERS += drivers/char/agp/agp.o +endif + ifdef CONFIG_NUBUS DRIVERS := $(DRIVERS) drivers/nubus/nubus.a endif @@ -202,7 +206,7 @@ endif ifdef CONFIG_VT -DRIVERS := $(DRIVERS) drivers/video/video.a +DRIVERS := $(DRIVERS) drivers/video/video.o endif ifeq ($(CONFIG_PARIDE),y) diff -u --recursive --new-file v2.3.31/linux/arch/arm/def-configs/brutus linux/arch/arm/def-configs/brutus --- 
v2.3.31/linux/arch/arm/def-configs/brutus Fri Oct 22 13:21:43 1999 +++ linux/arch/arm/def-configs/brutus Sun Dec 12 10:18:43 1999 @@ -203,9 +203,6 @@ # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/arm/kernel/bios32.c linux/arch/arm/kernel/bios32.c --- v2.3.31/linux/arch/arm/kernel/bios32.c Tue Dec 7 09:32:40 1999 +++ linux/arch/arm/kernel/bios32.c Mon Dec 13 16:26:27 1999 @@ -47,6 +47,7 @@ * - (0x48) enable all memory requests from ISA to be channeled to PCI * - (0x42) disable ping-pong (as per errata) * - (0x40) enable PCI packet retry + * - (0x44) Route INTA to IRQ11 * - (0x83) don't use CPU park enable, park on last master, disable GAT bit * - (0x80) default rotating priorities * - (0x81) rotate bank 4 @@ -62,6 +63,7 @@ pci_write_config_byte(dev, 0x48, 0xff); pci_write_config_byte(dev, 0x42, 0x00); pci_write_config_byte(dev, 0x40, 0x22); + pci_write_config_word(dev, 0x44, 0xb000); pci_write_config_byte(dev, 0x83, 0x02); pci_write_config_byte(dev, 0x80, 0xe0); pci_write_config_byte(dev, 0x81, 0x01); diff -u --recursive --new-file v2.3.31/linux/arch/arm/kernel/dec21285.c linux/arch/arm/kernel/dec21285.c --- v2.3.31/linux/arch/arm/kernel/dec21285.c Mon Nov 1 13:56:26 1999 +++ linux/arch/arm/kernel/dec21285.c Mon Dec 13 16:26:27 1999 @@ -205,6 +205,7 @@ void __init dc21285_init(void) { + static struct resource csrmem, csrio; unsigned int mem_size; unsigned long cntl; @@ -217,6 +218,15 @@ *CSR_PCIADDR_EXTN = 0; #ifdef CONFIG_HOST_FOOTBRIDGE + + csrio.flags = IORESOURCE_IO; + csrmem.flags = IORESOURCE_MEM; + + allocate_resource(&ioport_resource, &csrio, 128, + 0xff00, 0xffff, 128, NULL, NULL); + allocate_resource(&iomem_resource, &csrmem, 128, + 0xf4000000, 0xf8000000, 128, NULL, NULL); + /* * 
Map our SDRAM at a known address in PCI space, just in case * the firmware had other ideas. Using a nonzero base is @@ -224,8 +234,8 @@ * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards). */ *CSR_PCICACHELINESIZE = 0x00002008; - *CSR_PCICSRBASE = 0; - *CSR_PCICSRIOBASE = 0; + *CSR_PCICSRBASE = csrmem.start; + *CSR_PCICSRIOBASE = csrio.start; *CSR_PCISDRAMBASE = virt_to_bus((void *)PAGE_OFFSET); *CSR_PCIROMBASE = 0; *CSR_PCICMD = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | diff -u --recursive --new-file v2.3.31/linux/arch/arm/kernel/hw-footbridge.c linux/arch/arm/kernel/hw-footbridge.c --- v2.3.31/linux/arch/arm/kernel/hw-footbridge.c Fri Oct 22 13:21:44 1999 +++ linux/arch/arm/kernel/hw-footbridge.c Mon Dec 13 16:26:27 1999 @@ -678,6 +678,7 @@ */ if (machine_is_netwinder()) { unsigned long flags; + extern int isapnp_disable; wb977_init(); cpld_init(); @@ -686,6 +687,15 @@ spin_lock_irqsave(&gpio_lock, flags); gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS); spin_unlock_irqrestore(&gpio_lock, flags); + +#ifdef CONFIG_ISAPNP + /* + * We must not use the kernels ISAPnP code + * on the NetWinder - it will reset the settings + * for the WaveArtist chip and render it inoperable. 
+ */ + isapnp_disable = 1; +#endif } #endif #ifdef CONFIG_CATS diff -u --recursive --new-file v2.3.31/linux/arch/arm/kernel/ioport.c linux/arch/arm/kernel/ioport.c --- v2.3.31/linux/arch/arm/kernel/ioport.c Tue Dec 7 09:32:40 1999 +++ linux/arch/arm/kernel/ioport.c Mon Dec 13 16:26:27 1999 @@ -15,13 +15,6 @@ #include #include -unsigned long -resource_fixup(struct pci_dev * dev, struct resource * res, - unsigned long start, unsigned long size) -{ - return start; -} - #ifdef CONFIG_CPU_32 asmlinkage int sys_iopl(unsigned long turn_on) { diff -u --recursive --new-file v2.3.31/linux/arch/arm/kernel/setup.c linux/arch/arm/kernel/setup.c --- v2.3.31/linux/arch/arm/kernel/setup.c Tue Dec 7 09:32:40 1999 +++ linux/arch/arm/kernel/setup.c Mon Dec 13 16:26:27 1999 @@ -249,67 +249,132 @@ #endif } +#define O_PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define P_PFN_DOWN(x) O_PFN_DOWN((x) - PHYS_OFFSET) +#define V_PFN_DOWN(x) O_PFN_DOWN(__pa(x)) + +#define O_PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) +#define P_PFN_UP(x) O_PFN_UP((x) - PHYS_OFFSET) +#define V_PFN_UP(x) O_PFN_UP(__pa(x)) + +#define PFN_SIZE(x) ((x) >> PAGE_SHIFT) +#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \ + (((unsigned long)(s)) & PAGE_MASK)) + +#define free_bootmem(s,sz) free_bootmem((s)< (meminfo.end + PHYS_OFFSET)) { + printk ("initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx) - disabling initrd\n", + __pa(initrd_end), meminfo.end + PHYS_OFFSET); + initrd_start = 0; + initrd_end = 0; + } + } +#endif + + for (bank = 0; bank < meminfo.nr_banks; bank ++) { + unsigned int start, end; + + if (meminfo.bank[bank].size == 0) + continue; + + start = O_PFN_UP(meminfo.bank[bank].start); + end = O_PFN_DOWN(meminfo.bank[bank].size + + meminfo.bank[bank].start); + + if (end < start_pfn) + continue; + + if (start < start_pfn) + start = start_pfn; + + if (end <= start) + continue; + + if (end - start >= bootmap_pages) { + bootmap_pfn = start; + break; + } + } + + if (bootmap_pfn == 0) + BUG(); + + 
return bootmap_pfn; +} + /* - * Work out our memory regions. Note that "pfn" is the physical page number - * relative to the first physical page, not the physical address itself. + * Initialise the bootmem allocator. */ static void __init setup_bootmem(void) { - unsigned int end_pfn, bootmem_end; - int bank; + unsigned int end_pfn, start_pfn, bootmap_pages, bootmap_pfn; + unsigned int i; /* - * Calculate the end of memory. + * Calculate the physical address of the top of memory. */ - for (bank = 0; bank < meminfo.nr_banks; bank++) { - if (meminfo.bank[bank].size) { - unsigned long end; + meminfo.end = 0; + for (i = 0; i < meminfo.nr_banks; i++) { + unsigned long end; - end = meminfo.bank[bank].start + - meminfo.bank[bank].size; + if (meminfo.bank[i].size != 0) { + end = meminfo.bank[i].start + meminfo.bank[i].size; if (meminfo.end < end) meminfo.end = end; } } - bootmem_end = __pa(PAGE_ALIGN((unsigned long)&_end)); - end_pfn = meminfo.end >> PAGE_SHIFT; + start_pfn = O_PFN_UP(PHYS_OFFSET); + end_pfn = O_PFN_DOWN(meminfo.end); + bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); + bootmap_pfn = find_bootmap_pfn(bootmap_pages); /* * Initialise the boot-time allocator */ - bootmem_end += init_bootmem(bootmem_end >> PAGE_SHIFT, end_pfn, PHYS_OFFSET); + init_bootmem_start(bootmap_pfn, start_pfn, end_pfn); /* * Register all available RAM with the bootmem allocator. - * The address is relative to the start of physical memory. 
*/ - for (bank = 0; bank < meminfo.nr_banks; bank ++) - free_bootmem(meminfo.bank[bank].start, meminfo.bank[bank].size); + for (i = 0; i < meminfo.nr_banks; i++) + if (meminfo.bank[i].size) + free_bootmem(O_PFN_UP(meminfo.bank[i].start), + PFN_SIZE(meminfo.bank[i].size)); /* - * reserve the following regions: - * physical page 0 - it contains the exception vectors - * kernel and the bootmem structure - * swapper page directory (if any) - * initrd (if any) + * Register the reserved regions with bootmem */ - reserve_bootmem(0, PAGE_SIZE); + reserve_bootmem(bootmap_pfn, bootmap_pages); + reserve_bootmem(V_PFN_DOWN(&_stext), PFN_RANGE(&_stext, &_end)); + #ifdef CONFIG_CPU_32 - reserve_bootmem(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(void *)); + /* + * Reserve the page tables. These are already in use. + */ + reserve_bootmem(V_PFN_DOWN(swapper_pg_dir), + PFN_SIZE(PTRS_PER_PGD * sizeof(void *))); #endif - reserve_bootmem(__pa(&_stext), bootmem_end - __pa(&_stext)); #ifdef CONFIG_BLK_DEV_INITRD - if (__pa(initrd_end) > (end_pfn << PAGE_SHIFT)) { - printk ("initrd extends beyond end of memory " - "(0x%08lx > 0x%08x) - disabling initrd\n", - __pa(initrd_end), end_pfn << PAGE_SHIFT); - initrd_start = 0; - } - if (initrd_start) - reserve_bootmem(__pa(initrd_start), - initrd_end - initrd_start); + reserve_bootmem(O_PFN_DOWN(initrd_start), + PFN_RANGE(initrd_start, initrd_end)); #endif } @@ -332,7 +397,7 @@ virt_start = __phys_to_virt(meminfo.bank[i].start); virt_end = virt_start + meminfo.bank[i].size - 1; - res = alloc_bootmem(sizeof(*res)); + res = alloc_bootmem_low(sizeof(*res)); res->name = "System RAM"; res->start = __virt_to_bus(virt_start); res->end = __virt_to_bus(virt_end); @@ -400,7 +465,7 @@ } for (i = 0; i < 4; i++) { - meminfo.bank[i].start = i << 26; + meminfo.bank[i].start = PHYS_OFFSET + (i << 26); meminfo.bank[i].size = params->u1.s.pages_in_bank[i] * params->u1.s.page_size; @@ -627,7 +692,7 @@ if (meminfo.nr_banks == 0) { meminfo.nr_banks = 1; - 
meminfo.bank[0].start = 0; + meminfo.bank[0].start = PHYS_OFFSET; if (params) meminfo.bank[0].size = params->u1.s.nr_pages << PAGE_SHIFT; else diff -u --recursive --new-file v2.3.31/linux/arch/arm/kernel/traps.c linux/arch/arm/kernel/traps.c --- v2.3.31/linux/arch/arm/kernel/traps.c Tue Dec 7 09:32:40 1999 +++ linux/arch/arm/kernel/traps.c Mon Dec 13 16:26:27 1999 @@ -133,7 +133,7 @@ printk ("pc not in code space\n"); } -spinlock_t die_lock; +spinlock_t die_lock = SPIN_LOCK_UNLOCKED; /* * This function is protected against re-entrancy. diff -u --recursive --new-file v2.3.31/linux/arch/arm/mm/init.c linux/arch/arm/mm/init.c --- v2.3.31/linux/arch/arm/mm/init.c Tue Dec 7 09:32:40 1999 +++ linux/arch/arm/mm/init.c Mon Dec 13 16:26:27 1999 @@ -249,7 +249,7 @@ initpages = &__init_end - &__init_begin; max_mapnr = max_low_pfn; - high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); + high_memory = (void *)__va(PHYS_OFFSET + max_low_pfn * PAGE_SIZE); /* * We may have non-contiguous memory. Setup the PageSkip stuff, @@ -273,12 +273,12 @@ printk(" %ldMB", meminfo.bank[i].size >> 20); } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); + printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); printk("Memory: %luKB available (%dK code, %dK data, %dK init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), codepages >> 10, datapages >> 10, initpages >> 10); - if (PAGE_SIZE >= 16384 && max_mapnr <= 128) { + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get diff -u --recursive --new-file v2.3.31/linux/arch/arm/mm/mm-armv.c linux/arch/arm/mm/mm-armv.c --- v2.3.31/linux/arch/arm/mm/mm-armv.c Tue Dec 7 09:32:40 1999 +++ linux/arch/arm/mm/mm-armv.c Mon Dec 13 16:26:27 1999 @@ -35,6 +35,7 @@ cr_alignment &= ~4; cr_no_alignment &= ~4; set_cr(cr_alignment); + return 1; } static int __init nowrite_setup(char *__unused) @@ -42,6 +43,7 @@ cr_alignment &= ~(8|4); cr_no_alignment &= 
~(8|4); set_cr(cr_alignment); + return 1; } __setup("nocache", nocache_setup); @@ -307,7 +309,7 @@ if (meminfo.bank[i].size) { unsigned int end; - end = (meminfo.bank[i].start + + end = (meminfo.bank[i].start - PHYS_OFFSET + meminfo.bank[i].size) >> PAGE_SHIFT; if (end > zone_size[0]) zone_size[0] = end; @@ -323,7 +325,7 @@ /* * Setup the above mappings */ - init_map[0].physical = PHYS_OFFSET; + init_map[0].physical = virt_to_phys(alloc_bootmem_low_pages(PAGE_SIZE)); init_map[5].physical = FLUSH_BASE_PHYS; init_map[5].virtual = FLUSH_BASE; #ifdef FLUSH_BASE_MINICACHE @@ -333,8 +335,9 @@ #endif for (i = 0; i < meminfo.nr_banks; i++) { - init_map[i+1].physical = PHYS_OFFSET + meminfo.bank[i].start; - init_map[i+1].virtual = PAGE_OFFSET + meminfo.bank[i].start; + init_map[i+1].physical = meminfo.bank[i].start; + init_map[i+1].virtual = meminfo.bank[i].start + + PAGE_OFFSET - PHYS_OFFSET; init_map[i+1].length = meminfo.bank[i].size; } @@ -378,11 +381,13 @@ struct page *pg = NULL; unsigned int i; +#define PFN(x) (((x) - PHYS_OFFSET) >> PAGE_SHIFT) + for (i = 0; i < meminfo.nr_banks; i++) { if (meminfo.bank[i].size == 0) continue; - start_pfn = meminfo.bank[i].start >> PAGE_SHIFT; + start_pfn = PFN(meminfo.bank[i].start); /* * subtle here - if we have a full bank, then @@ -393,8 +398,8 @@ set_bit(PG_skip, &pg->flags); pg->next_hash = mem_map + start_pfn; - start_pfn = PAGE_ALIGN(__pa(pg + 1)); - end_pfn = __pa(pg->next_hash) & PAGE_MASK; + start_pfn = PFN(PAGE_ALIGN(__pa(pg + 1))); + end_pfn = PFN(__pa(pg->next_hash) & PAGE_MASK); if (end_pfn != start_pfn) free_bootmem(start_pfn, end_pfn - start_pfn); @@ -402,8 +407,7 @@ pg = NULL; } - end_pfn = (meminfo.bank[i].start + - meminfo.bank[i].size) >> PAGE_SHIFT; + end_pfn = PFN(meminfo.bank[i].start + meminfo.bank[i].size); if (end_pfn != meminfo.end >> PAGE_SHIFT) pg = mem_map + end_pfn; diff -u --recursive --new-file v2.3.31/linux/arch/i386/defconfig linux/arch/i386/defconfig --- v2.3.31/linux/arch/i386/defconfig Wed Dec 8 
14:11:25 1999 +++ linux/arch/i386/defconfig Tue Dec 14 00:54:24 1999 @@ -170,6 +170,7 @@ # # Some SCSI devices (e.g. CD jukebox) support multiple LUNs # +CONFIG_SCSI_DEBUG_QUEUES=y CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y # CONFIG_SCSI_LOGGING is not set @@ -306,6 +307,7 @@ # CONFIG_PCMCIA_NMCLAN is not set # CONFIG_PCMCIA_SMC91C92 is not set # CONFIG_PCMCIA_XIRC2PS is not set +# CONFIG_AIRONET4500_CS is not set # CONFIG_PCMCIA_3C575 is not set # CONFIG_PCMCIA_TULIP is not set # CONFIG_PCMCIA_EPIC100 is not set @@ -354,6 +356,11 @@ CONFIG_PSMOUSE=y # CONFIG_82C710_MOUSE is not set # CONFIG_PC110_PAD is not set + +# +# Joysticks +# +# CONFIG_JOYSTICK is not set # CONFIG_QIC02_TAPE is not set # @@ -367,11 +374,6 @@ # Video For Linux # # CONFIG_VIDEO_DEV is not set - -# -# Joystick support -# -# CONFIG_JOYSTICK is not set # CONFIG_DTLK is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set @@ -380,6 +382,9 @@ # Ftape, the floppy tape device driver # # CONFIG_FTAPE is not set +CONFIG_DRM=y +CONFIG_DRM_TDFX=y +# CONFIG_DRM_GAMMA is not set # # PCMCIA character device support @@ -409,7 +414,6 @@ # CONFIG_VFAT_FS is not set CONFIG_ISO9660_FS=y # CONFIG_JOLIET is not set -# CONFIG_UDF_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_NTFS_FS is not set # CONFIG_HPFS_FS is not set @@ -418,6 +422,7 @@ # CONFIG_ROMFS_FS is not set CONFIG_EXT2_FS=y # CONFIG_SYSV_FS is not set +# CONFIG_UDF_FS is not set # CONFIG_UFS_FS is not set # @@ -426,7 +431,6 @@ # CONFIG_CODA_FS is not set CONFIG_NFS_FS=y CONFIG_NFSD=y -# CONFIG_NFSD_SUN is not set CONFIG_SUNRPC=y CONFIG_LOCKD=y # CONFIG_SMB_FS is not set @@ -437,9 +441,6 @@ # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/i386/kernel/apm.c 
linux/arch/i386/kernel/apm.c --- v2.3.31/linux/arch/i386/kernel/apm.c Tue Nov 23 22:42:20 1999 +++ linux/arch/i386/kernel/apm.c Sat Dec 11 07:42:20 1999 @@ -380,7 +380,7 @@ __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" + "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "; cld\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -413,7 +413,7 @@ __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" + "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry)"; cld\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" diff -u --recursive --new-file v2.3.31/linux/arch/i386/kernel/pci-pc.c linux/arch/i386/kernel/pci-pc.c --- v2.3.31/linux/arch/i386/kernel/pci-pc.c Tue Dec 7 09:32:40 1999 +++ linux/arch/i386/kernel/pci-pc.c Sat Dec 11 07:42:20 1999 @@ -342,7 +342,7 @@ unsigned long flags; __save_flags(flags); __cli(); - __asm__("lcall (%%edi)" + __asm__("lcall (%%edi); cld" : "=a" (return_code), "=b" (address), "=c" (length), @@ -383,7 +383,7 @@ __save_flags(flags); __cli(); __asm__( - "lcall (%%edi)\n\t" + "lcall (%%edi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -427,7 +427,7 @@ unsigned short bx; unsigned short ret; - __asm__("lcall (%%edi)\n\t" + __asm__("lcall (%%edi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -448,7 +448,7 @@ unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -466,7 +466,7 @@ unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -484,7 +484,7 @@ unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -502,7 +502,7 @@ 
unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -520,7 +520,7 @@ unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -538,7 +538,7 @@ unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -702,7 +702,7 @@ __asm__("push %%es\n\t" "push %%ds\n\t" "pop %%es\n\t" - "lcall (%%esi)\n\t" + "lcall (%%esi); cld\n\t" "pop %%es\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" diff -u --recursive --new-file v2.3.31/linux/arch/i386/kernel/process.c linux/arch/i386/kernel/process.c --- v2.3.31/linux/arch/i386/kernel/process.c Tue Dec 7 09:32:40 1999 +++ linux/arch/i386/kernel/process.c Sat Dec 11 07:42:20 1999 @@ -462,7 +462,7 @@ struct pt_regs * childregs; childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1; - *childregs = *regs; + struct_cpy(childregs, regs); childregs->eax = 0; childregs->esp = esp; @@ -475,7 +475,7 @@ savesegment(gs,p->thread.gs); unlazy_fpu(current); - p->thread.i387 = current->thread.i387; + struct_cpy(&p->thread.i387, ¤t->thread.i387); return 0; } diff -u --recursive --new-file v2.3.31/linux/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c --- v2.3.31/linux/arch/i386/kernel/traps.c Tue Dec 7 09:32:41 1999 +++ linux/arch/i386/kernel/traps.c Mon Dec 13 14:51:09 1999 @@ -388,7 +388,7 @@ alert_counter[cpu]++; if (alert_counter[cpu] == 5*HZ) { spin_lock(&nmi_print_lock); - spin_unlock(&console_lock); // we are in trouble anyway + console_lock.lock = 0; // we are in trouble anyway printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu); show_registers(regs); printk("console shuts up ...\n"); diff -u --recursive --new-file 
v2.3.31/linux/arch/ppc/configs/common_defconfig linux/arch/ppc/configs/common_defconfig --- v2.3.31/linux/arch/ppc/configs/common_defconfig Tue Dec 7 09:32:41 1999 +++ linux/arch/ppc/configs/common_defconfig Sun Dec 12 10:18:43 1999 @@ -529,9 +529,6 @@ # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/ppc/configs/gemini_defconfig linux/arch/ppc/configs/gemini_defconfig --- v2.3.31/linux/arch/ppc/configs/gemini_defconfig Tue Dec 7 09:32:41 1999 +++ linux/arch/ppc/configs/gemini_defconfig Sun Dec 12 10:18:43 1999 @@ -389,9 +389,6 @@ # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/ppc/configs/oak_defconfig linux/arch/ppc/configs/oak_defconfig --- v2.3.31/linux/arch/ppc/configs/oak_defconfig Tue Dec 7 09:32:41 1999 +++ linux/arch/ppc/configs/oak_defconfig Sun Dec 12 10:18:43 1999 @@ -276,9 +276,6 @@ # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/ppc/configs/walnut_defconfig linux/arch/ppc/configs/walnut_defconfig --- v2.3.31/linux/arch/ppc/configs/walnut_defconfig Tue Dec 7 09:32:41 1999 +++ linux/arch/ppc/configs/walnut_defconfig Sun Dec 12 10:18:43 1999 @@ -276,9 +276,6 @@ # 
CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/ppc/defconfig linux/arch/ppc/defconfig --- v2.3.31/linux/arch/ppc/defconfig Wed Dec 8 14:11:25 1999 +++ linux/arch/ppc/defconfig Sun Dec 12 10:18:43 1999 @@ -529,9 +529,6 @@ # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/sh/defconfig linux/arch/sh/defconfig --- v2.3.31/linux/arch/sh/defconfig Sun Nov 7 16:37:34 1999 +++ linux/arch/sh/defconfig Sun Dec 12 10:18:43 1999 @@ -82,9 +82,6 @@ # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff -u --recursive --new-file v2.3.31/linux/arch/sparc64/defconfig linux/arch/sparc64/defconfig --- v2.3.31/linux/arch/sparc64/defconfig Wed Dec 8 14:11:25 1999 +++ linux/arch/sparc64/defconfig Sun Dec 12 10:18:43 1999 @@ -331,9 +331,6 @@ # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set CONFIG_SUN_PARTITION=y CONFIG_NLS=y diff -u --recursive --new-file v2.3.31/linux/arch/sparc64/kernel/sys_sparc32.c linux/arch/sparc64/kernel/sys_sparc32.c --- v2.3.31/linux/arch/sparc64/kernel/sys_sparc32.c Tue Aug 31 17:29:13 1999 
+++ linux/arch/sparc64/kernel/sys_sparc32.c Thu Dec 9 13:25:46 1999 @@ -2988,11 +2988,8 @@ if (mod->next == NULL) return -EINVAL; - if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING) - if (put_user(0, ret)) - return -EFAULT; - else - return 0; + if (!MOD_CAN_QUERY(mod)) + return put_user(0, ret); space = 0; for (i = 0; i < mod->ndeps; ++i) { @@ -3008,10 +3005,7 @@ space += len; } - if (put_user(i, ret)) - return -EFAULT; - else - return 0; + return put_user(i, ret); calc_space_needed: space += len; @@ -3032,7 +3026,7 @@ if (mod->next == NULL) return -EINVAL; - if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING) + if (!MOD_CAN_QUERY(mod)) if (put_user(0, ret)) return -EFAULT; else @@ -3076,7 +3070,7 @@ char *strings; unsigned *vals; - if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING) + if (!MOD_CAN_QUERY(mod)) if (put_user(0, ret)) return -EFAULT; else diff -u --recursive --new-file v2.3.31/linux/drivers/acorn/char/Makefile linux/drivers/acorn/char/Makefile --- v2.3.31/linux/drivers/acorn/char/Makefile Thu Jun 17 01:11:35 1999 +++ linux/drivers/acorn/char/Makefile Mon Dec 13 16:26:27 1999 @@ -9,40 +9,40 @@ # parent makes.. 
# -L_TARGET := acorn-char.a +O_TARGET := acorn-char.o M_OBJS := -L_OBJS := +O_OBJS := -L_OBJS_arc := keyb_arc.o -L_OBJS_a5k := keyb_arc.o -L_OBJS_rpc := keyb_ps2.o +O_OBJS_arc := keyb_arc.o +O_OBJS_a5k := keyb_arc.o +O_OBJS_rpc := keyb_ps2.o ifeq ($(MACHINE),rpc) - ifeq ($(CONFIG_MOUSE),y) - LX_OBJS += mouse_rpc.o + ifeq ($(CONFIG_BUSMOUSE),y) + OX_OBJS += mouse_rpc.o else - ifeq ($(CONFIG_MOUSE),m) + ifeq ($(CONFIG_BUSMOUSE),m) MX_OBJS += mouse_rpc.o endif endif endif ifeq ($(CONFIG_ATOMWIDE_SERIAL),y) - L_OBJS += serial-atomwide.o + O_OBJS += serial-atomwide.o else ifeq ($(CONFIG_ATOMWIDE_SERIAL),m) - M_OBJS += serial-atomwide.o + O_OBJS += serial-atomwide.o endif endif ifeq ($(CONFIG_DUALSP_SERIAL),y) - L_OBJS += serial-dualsp.o + O_OBJS += serial-dualsp.o else ifeq ($(CONFIG_DUALSP_SERIAL),m) M_OBJS += serial-dualsp.o endif endif -L_OBJS += $(L_OBJS_$(MACHINE)) +O_OBJS += $(O_OBJS_$(MACHINE)) include $(TOPDIR)/Rules.make diff -u --recursive --new-file v2.3.31/linux/drivers/acorn/char/mouse_rpc.c linux/drivers/acorn/char/mouse_rpc.c --- v2.3.31/linux/drivers/acorn/char/mouse_rpc.c Mon Aug 2 10:19:52 1999 +++ linux/drivers/acorn/char/mouse_rpc.c Mon Dec 13 16:26:27 1999 @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -48,8 +49,7 @@ 6, "arcmouse", NULL, NULL, 7 }; -int -mouse_rpc_init(void) +static int __init mouse_rpc_init(void) { mousedev = register_busmouse(&rpcmouse); @@ -69,19 +69,13 @@ return mousedev >= 0 ? 
0 : -ENODEV; } -#ifdef MODULE -int -init_module(void) -{ - return mouse_rpc_init(); -} - -int -cleanup_module(void) +static void __exit mouse_rpc_exit(void) { if (mousedev >= 0) { unregister_busmouse(mousedev); free_irq(IRQ_VSYNCPULSE, &mousedev); } } -#endif + +module_init(mouse_rpc_init); +module_exit(mouse_rpc_exit); diff -u --recursive --new-file v2.3.31/linux/drivers/acorn/scsi/fas216.c linux/drivers/acorn/scsi/fas216.c --- v2.3.31/linux/drivers/acorn/scsi/fas216.c Mon Aug 2 10:19:52 1999 +++ linux/drivers/acorn/scsi/fas216.c Mon Dec 13 16:26:27 1999 @@ -1247,7 +1247,6 @@ printk("%s%02X", i & 31 ? " " : "\n ", message[i]); printk("\n"); -reject_message: /* * Something strange seems to be happening here - * I can't use SETATN since the chip gives me an @@ -1822,6 +1821,7 @@ case READ_CAPACITY: case TEST_UNIT_READY: case MODE_SENSE: + case REQUEST_SENSE: break; default: diff -u --recursive --new-file v2.3.31/linux/drivers/acorn/scsi/powertec.c linux/drivers/acorn/scsi/powertec.c --- v2.3.31/linux/drivers/acorn/scsi/powertec.c Thu Nov 11 20:11:32 1999 +++ linux/drivers/acorn/scsi/powertec.c Mon Dec 13 16:26:27 1999 @@ -256,6 +256,9 @@ host->dma_channel = ecs[count]->dma; info = (PowerTecScsi_Info *)host->hostdata; + if (host->dma_channel != NO_DMA) + set_dma_speed(host->dma_channel, 180); + info->control.term_port = host->io_port + POWERTEC_TERM_CONTROL; info->control.terms = term[count] ? 
POWERTEC_TERM_ENABLE : 0; powertecscsi_terminator_ctl(host, info->control.terms); @@ -268,7 +271,7 @@ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = POWERTEC_ASYNC_PERIOD; info->info.ifcfg.sync_max_depth = POWERTEC_SYNC_DEPTH; - info->info.ifcfg.cntl3 = /*CNTL3_BS8 |*/ CNTL3_FASTSCSI | CNTL3_FASTCLK; + info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.dma.setup = powertecscsi_dma_setup; diff -u --recursive --new-file v2.3.31/linux/drivers/ap1000/ap.c linux/drivers/ap1000/ap.c --- v2.3.31/linux/drivers/ap1000/ap.c Sat May 15 15:05:35 1999 +++ linux/drivers/ap1000/ap.c Sun Dec 12 22:58:00 1999 @@ -53,7 +53,7 @@ MOD_DEC_USE_COUNT; } -static void ap_request(void) +static void ap_request(request_queue_t * q) { struct cap_request creq; unsigned int minor; @@ -160,7 +160,7 @@ #endif end_request(1); request_count--; - ap_request(); + ap_request(NULL); } @@ -271,7 +271,7 @@ return -1; } printk("ap_init: register dev %d\n", MAJOR_NR); - blk_dev[MAJOR_NR].request_fn = &ap_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &ap_request); for (i=0;inext; free_irq(APOPT0_IRQ, NULL); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); } #endif /* MODULE */ diff -u --recursive --new-file v2.3.31/linux/drivers/block/DAC960.c linux/drivers/block/DAC960.c --- v2.3.31/linux/drivers/block/DAC960.c Tue Dec 7 09:32:42 1999 +++ linux/drivers/block/DAC960.c Sun Dec 12 23:02:23 1999 @@ -1026,7 +1026,7 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) { - static void (*RequestFunctions[DAC960_MaxControllers])(void) = + static void (*RequestFunctions[DAC960_MaxControllers])(request_queue_t *) = { DAC960_RequestFunction0, DAC960_RequestFunction1, DAC960_RequestFunction2, DAC960_RequestFunction3, DAC960_RequestFunction4, DAC960_RequestFunction5, @@ -1046,8 +1046,8 @@ /* Initialize the I/O Request Function. 
*/ - blk_dev[MajorNumber].request_fn = - RequestFunctions[Controller->ControllerNumber]; + blk_init_queue(BLK_DEFAULT_QUEUE(MajorNumber), + RequestFunctions[Controller->ControllerNumber]); /* Initialize the Disk Partitions array, Partition Sizes array, Block Sizes array, Max Sectors per Request array, and Max Segments per Request array. @@ -1113,7 +1113,7 @@ /* Remove the I/O Request Function. */ - blk_dev[MajorNumber].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MajorNumber)); /* Remove the Disk Partitions array, Partition Sizes array, Block Sizes array, Max Sectors per Request array, and Max Segments per Request array. @@ -1272,7 +1272,7 @@ boolean WaitForCommand) { IO_Request_T **RequestQueuePointer = - &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].current_request; + &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.current_request; IO_Request_T *Request; DAC960_Command_T *Command; char *RequestBuffer; @@ -1375,7 +1375,7 @@ DAC960_RequestFunction0 is the I/O Request Function for DAC960 Controller 0. */ -static void DAC960_RequestFunction0(void) +static void DAC960_RequestFunction0(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[0]; ProcessorFlags_T ProcessorFlags; @@ -1398,7 +1398,7 @@ DAC960_RequestFunction1 is the I/O Request Function for DAC960 Controller 1. */ -static void DAC960_RequestFunction1(void) +static void DAC960_RequestFunction1(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[1]; ProcessorFlags_T ProcessorFlags; @@ -1421,7 +1421,7 @@ DAC960_RequestFunction2 is the I/O Request Function for DAC960 Controller 2. */ -static void DAC960_RequestFunction2(void) +static void DAC960_RequestFunction2(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[2]; ProcessorFlags_T ProcessorFlags; @@ -1444,7 +1444,7 @@ DAC960_RequestFunction3 is the I/O Request Function for DAC960 Controller 3. 
*/ -static void DAC960_RequestFunction3(void) +static void DAC960_RequestFunction3(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[3]; ProcessorFlags_T ProcessorFlags; @@ -1467,7 +1467,7 @@ DAC960_RequestFunction4 is the I/O Request Function for DAC960 Controller 4. */ -static void DAC960_RequestFunction4(void) +static void DAC960_RequestFunction4(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[4]; ProcessorFlags_T ProcessorFlags; @@ -1490,7 +1490,7 @@ DAC960_RequestFunction5 is the I/O Request Function for DAC960 Controller 5. */ -static void DAC960_RequestFunction5(void) +static void DAC960_RequestFunction5(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[5]; ProcessorFlags_T ProcessorFlags; @@ -1513,7 +1513,7 @@ DAC960_RequestFunction6 is the I/O Request Function for DAC960 Controller 6. */ -static void DAC960_RequestFunction6(void) +static void DAC960_RequestFunction6(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[6]; ProcessorFlags_T ProcessorFlags; @@ -1536,7 +1536,7 @@ DAC960_RequestFunction7 is the I/O Request Function for DAC960 Controller 7. 
*/ -static void DAC960_RequestFunction7(void) +static void DAC960_RequestFunction7(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[7]; ProcessorFlags_T ProcessorFlags; diff -u --recursive --new-file v2.3.31/linux/drivers/block/DAC960.h linux/drivers/block/DAC960.h --- v2.3.31/linux/drivers/block/DAC960.h Tue Nov 23 22:42:20 1999 +++ linux/drivers/block/DAC960.h Sun Dec 12 23:02:23 1999 @@ -2208,14 +2208,14 @@ static void DAC960_FinalizeController(DAC960_Controller_T *); static int DAC960_Finalize(NotifierBlock_T *, unsigned long, void *); -static void DAC960_RequestFunction0(void); -static void DAC960_RequestFunction1(void); -static void DAC960_RequestFunction2(void); -static void DAC960_RequestFunction3(void); -static void DAC960_RequestFunction4(void); -static void DAC960_RequestFunction5(void); -static void DAC960_RequestFunction6(void); -static void DAC960_RequestFunction7(void); +static void DAC960_RequestFunction0(request_queue_t *); +static void DAC960_RequestFunction1(request_queue_t *); +static void DAC960_RequestFunction2(request_queue_t *); +static void DAC960_RequestFunction3(request_queue_t *); +static void DAC960_RequestFunction4(request_queue_t *); +static void DAC960_RequestFunction5(request_queue_t *); +static void DAC960_RequestFunction6(request_queue_t *); +static void DAC960_RequestFunction7(request_queue_t *); static void DAC960_InterruptHandler(int, void *, Registers_T *); static void DAC960_QueueMonitoringCommand(DAC960_Command_T *); static void DAC960_MonitoringTimerFunction(unsigned long); diff -u --recursive --new-file v2.3.31/linux/drivers/block/acsi.c linux/drivers/block/acsi.c --- v2.3.31/linux/drivers/block/acsi.c Mon Aug 9 12:32:28 1999 +++ linux/drivers/block/acsi.c Sun Dec 12 23:00:35 1999 @@ -360,7 +360,7 @@ static void copy_to_acsibuffer( void ); static void copy_from_acsibuffer( void ); static void do_end_requests( void ); -static void do_acsi_request( void ); +static void do_acsi_request( 
request_queue_t * ); static void redo_acsi_request( void ); static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg ); @@ -938,7 +938,7 @@ * ***********************************************************************/ -static void do_acsi_request( void ) +static void do_acsi_request( request_queue_t * q ) { stdma_lock( acsi_interrupt, NULL ); @@ -1808,7 +1808,7 @@ phys_acsi_buffer = virt_to_phys( acsi_buffer ); STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ acsi_gendisk.next = gendisk_head; gendisk_head = &acsi_gendisk; @@ -1838,7 +1838,7 @@ struct gendisk ** gdp; del_timer( &acsi_timer ); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); atari_stram_free( acsi_buffer ); if (unregister_blkdev( MAJOR_NR, "ad" ) != 0) diff -u --recursive --new-file v2.3.31/linux/drivers/block/amiflop.c linux/drivers/block/amiflop.c --- v2.3.31/linux/drivers/block/amiflop.c Thu Nov 11 20:11:32 1999 +++ linux/drivers/block/amiflop.c Sun Dec 12 23:00:35 1999 @@ -1484,7 +1484,7 @@ goto repeat; } -static void do_fd_request(void) +static void do_fd_request(request_queue_t * q) { redo_fd_request(); } @@ -1869,7 +1869,7 @@ post_write_timer.data = 0; post_write_timer.function = post_write; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = floppy_blocksizes; blk_size[MAJOR_NR] = floppy_sizes; @@ -1911,7 +1911,7 @@ amiga_chip_free(raw_buf); blk_size[MAJOR_NR] = NULL; blksize_size[MAJOR_NR] = NULL; - blk_dev[MAJOR_NR].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); unregister_blkdev(MAJOR_NR, "fd"); } #endif diff -u --recursive --new-file v2.3.31/linux/drivers/block/ataflop.c linux/drivers/block/ataflop.c --- 
v2.3.31/linux/drivers/block/ataflop.c Mon Aug 9 12:32:28 1999 +++ linux/drivers/block/ataflop.c Sun Dec 12 23:00:35 1999 @@ -1529,7 +1529,7 @@ } -void do_fd_request(void) +void do_fd_request(request_queue_t * q) { unsigned long flags; @@ -2051,7 +2051,7 @@ blk_size[MAJOR_NR] = floppy_sizes; blksize_size[MAJOR_NR] = floppy_blocksizes; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n", DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E', @@ -2103,7 +2103,7 @@ { unregister_blkdev(MAJOR_NR, "fd"); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); timer_active &= ~(1 << FLOPPY_TIMER); timer_table[FLOPPY_TIMER].fn = 0; atari_stram_free( DMABuffer ); diff -u --recursive --new-file v2.3.31/linux/drivers/block/cpqarray.c linux/drivers/block/cpqarray.c --- v2.3.31/linux/drivers/block/cpqarray.c Tue Dec 7 09:32:42 1999 +++ linux/drivers/block/cpqarray.c Sun Dec 12 23:02:23 1999 @@ -140,14 +140,14 @@ */ #define DO_IDA_REQUEST(x) { do_ida_request(x); } -static void do_ida_request0(void) DO_IDA_REQUEST(0); -static void do_ida_request1(void) DO_IDA_REQUEST(1); -static void do_ida_request2(void) DO_IDA_REQUEST(2); -static void do_ida_request3(void) DO_IDA_REQUEST(3); -static void do_ida_request4(void) DO_IDA_REQUEST(4); -static void do_ida_request5(void) DO_IDA_REQUEST(5); -static void do_ida_request6(void) DO_IDA_REQUEST(6); -static void do_ida_request7(void) DO_IDA_REQUEST(7); +static void do_ida_request0(request_queue_t * q) DO_IDA_REQUEST(0); +static void do_ida_request1(request_queue_t * q) DO_IDA_REQUEST(1); +static void do_ida_request2(request_queue_t * q) DO_IDA_REQUEST(2); +static void do_ida_request3(request_queue_t * q) DO_IDA_REQUEST(3); +static void do_ida_request4(request_queue_t * q) DO_IDA_REQUEST(4); +static void do_ida_request5(request_queue_t * q) DO_IDA_REQUEST(5); +static void 
do_ida_request6(request_queue_t * q) DO_IDA_REQUEST(6); +static void do_ida_request7(request_queue_t * q) DO_IDA_REQUEST(7); static void start_io(ctlr_info_t *h); @@ -379,7 +379,7 @@ */ void __init cpqarray_init(void) { - void (*request_fns[MAX_CTLR])(void) = { + void (*request_fns[MAX_CTLR])(request_queue_t *) = { do_ida_request0, do_ida_request1, do_ida_request2, do_ida_request3, do_ida_request4, do_ida_request5, @@ -480,7 +480,9 @@ ida_gendisk[i].sizes = ida_sizes + (i*256); /* ida_gendisk[i].nr_real is handled by getgeometry */ - blk_dev[MAJOR_NR+i].request_fn = request_fns[i]; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i), request_fns[i]); + blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR + i), 0); + blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256); hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256); read_ahead[MAJOR_NR+i] = READ_AHEAD; @@ -894,10 +896,13 @@ cmdlist_t *c; int seg, sect; char *lastdataend; + request_queue_t * q; struct buffer_head *bh; struct request *creq; - creq = blk_dev[MAJOR_NR+ctlr].current_request; + q = &blk_dev[MAJOR_NR+ctlr].request_queue; + + creq = q->current_request; if (creq == NULL || creq->rq_status == RQ_INACTIVE) goto doreq_done; @@ -974,7 +979,7 @@ } else { DBGPX( printk("Done with %p, queueing %p\n", creq, creq->next); ); creq->rq_status = RQ_INACTIVE; - blk_dev[MAJOR_NR+ctlr].current_request = creq->next; + q->current_request = creq->next; wake_up(&wait_for_request); } diff -u --recursive --new-file v2.3.31/linux/drivers/block/floppy.c linux/drivers/block/floppy.c --- v2.3.31/linux/drivers/block/floppy.c Tue Nov 23 22:42:20 1999 +++ linux/drivers/block/floppy.c Sun Dec 12 23:00:34 1999 @@ -2930,7 +2930,7 @@ schedule_bh( (void *)(void *) redo_fd_request); } -static void do_fd_request(void) +static void do_fd_request(request_queue_t * q) { if(usage_count == 0) { printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT); @@ -4130,7 +4130,7 @@ blk_size[MAJOR_NR] = floppy_sizes; blksize_size[MAJOR_NR] = 
floppy_blocksizes; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT); config_types(); @@ -4159,7 +4159,7 @@ fdc = 0; /* reset fdc in case of unexpected interrupt */ if (floppy_grab_irq_and_dma()){ del_timer(&fd_timeout); - blk_dev[MAJOR_NR].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); unregister_blkdev(MAJOR_NR,"fd"); del_timer(&fd_timeout); return -EBUSY; @@ -4225,7 +4225,7 @@ schedule(); if (usage_count) floppy_release_irq_and_dma(); - blk_dev[MAJOR_NR].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); unregister_blkdev(MAJOR_NR,"fd"); } return have_no_fdc; @@ -4447,7 +4447,7 @@ unregister_blkdev(MAJOR_NR, "fd"); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); /* eject disk, if any */ dummy = fd_eject(0); } diff -u --recursive --new-file v2.3.31/linux/drivers/block/hd.c linux/drivers/block/hd.c --- v2.3.31/linux/drivers/block/hd.c Mon Oct 4 15:49:29 1999 +++ linux/drivers/block/hd.c Sun Dec 12 23:00:35 1999 @@ -585,7 +585,7 @@ panic("unknown hd-command"); } -static void do_hd_request (void) +static void do_hd_request (request_queue_t * q) { disable_irq(HD_IRQ); hd_request(); @@ -813,7 +813,7 @@ printk("hd: unable to get major %d for hard disk\n",MAJOR_NR); return -1; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ hd_gendisk.next = gendisk_head; gendisk_head = &hd_gendisk; diff -u --recursive --new-file v2.3.31/linux/drivers/block/icside.c linux/drivers/block/icside.c --- v2.3.31/linux/drivers/block/icside.c Sun Nov 7 16:37:34 1999 +++ linux/drivers/block/icside.c Mon Dec 13 16:26:27 1999 @@ -210,24 +210,11 @@ /* * SG-DMA support. * - * Similar to the BM-DMA, but we use the RiscPCs IOMD - * DMA controllers. 
There is only one DMA controller - * per card, which means that only one drive can be - * accessed at one time. NOTE! We do not inforce that - * here, but we rely on the main IDE driver spotting - * that both interfaces use the same IRQ, which should - * guarantee this. - * - * We are limited by the drives IOR/IOW pulse time. - * The closest that we can get to the requirements is - * a type C cycle for both mode 1 and mode 2. However, - * this does give a burst of 8MB/s. - * - * This has been tested with a couple of Conner - * Peripherals 1080MB CFS1081A drives, one on each - * interface, which deliver about 2MB/s each. I - * believe that this is limited by the lack of - * on-board drive cache. + * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers. + * There is only one DMA controller per card, which means that only + * one drive can be accessed at one time. NOTE! We do not enforce that + * here, but we rely on the main IDE driver spotting that both + * interfaces use the same IRQ, which should guarantee this. */ #define TABLE_SIZE 2048 @@ -286,27 +273,43 @@ } static int -icside_config_drive(ide_drive_t *drive, int mode) +icside_config_if(ide_drive_t *drive, int xfer_mode) { - int speed, err; + int func = ide_dma_off; - if (mode == 2) { - speed = XFER_MW_DMA_2; + switch (xfer_mode) { + case XFER_MW_DMA_2: + /* + * The cycle time is limited to 250ns by the r/w + * pulse width (90ns), however we should still + * have a maximum burst transfer rate of 8MB/s. 
+ */ drive->drive_data = 250; - } else { - speed = XFER_MW_DMA_1; + break; + + case XFER_MW_DMA_1: drive->drive_data = 250; - } + break; - err = ide_config_drive_speed(drive, (byte) speed); + case XFER_MW_DMA_0: + drive->drive_data = 480; + break; - if (err == 0) { - drive->id->dma_mword &= 0x00ff; - drive->id->dma_mword |= 256 << mode; - } else + default: drive->drive_data = 0; + break; + } - return err; + if (drive->drive_data && + ide_config_drive_speed(drive, (byte) xfer_mode) == 0) + func = ide_dma_on; + else + drive->drive_data = 480; + + printk("%s: %s selected (peak %dMB/s)\n", drive->name, + ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data); + + return func; } static int @@ -315,34 +318,51 @@ struct hd_driveid *id = drive->id; ide_hwif_t *hwif = HWIF(drive); int autodma = hwif->autodma; + int xfer_mode = XFER_PIO_2; + int func = ide_dma_off_quietly; - if (id && (id->capability & 1) && autodma) { - int dma_mode = 0; - - /* Consult the list of known "bad" drives */ - if (ide_dmaproc(ide_dma_bad_drive, drive)) - return hwif->dmaproc(ide_dma_off, drive); + if (!id || !(id->capability & 1) || !autodma) + goto out; - /* Enable DMA on any drive that has - * UltraDMA (mode 0/1/2) enabled - */ - if (id->field_valid & 4 && id->dma_ultra & 7) - dma_mode = 2; - - /* Enable DMA on any drive that has mode1 - * or mode2 multiword DMA enabled - */ - if (id->field_valid & 2 && id->dma_mword & 6) - dma_mode = id->dma_mword & 4 ? 
2 : 1; + /* + * Consult the list of known "bad" drives + */ + if (ide_dmaproc(ide_dma_bad_drive, drive)) { + func = ide_dma_off; + goto out; + } - /* Consult the list of known "good" drives */ - if (ide_dmaproc(ide_dma_good_drive, drive)) - dma_mode = 1; + /* + * Enable DMA on any drive that has multiword DMA + */ + if (id->field_valid & 2) { + if (id->dma_mword & 4) { + xfer_mode = XFER_MW_DMA_2; + func = ide_dma_on; + } else if (id->dma_mword & 2) { + xfer_mode = XFER_MW_DMA_1; + func = ide_dma_on; + } else if (id->dma_mword & 1) { + xfer_mode = XFER_MW_DMA_0; + func = ide_dma_on; + } + goto out; + } - if (dma_mode && icside_config_drive(drive, dma_mode) == 0) - return hwif->dmaproc(ide_dma_on, drive); + /* + * Consult the list of known "good" drives + */ + if (ide_dmaproc(ide_dma_good_drive, drive)) { + if (id->eide_dma_time > 150) + goto out; + xfer_mode = XFER_MW_DMA_1; + func = ide_dma_on; } - return hwif->dmaproc(ide_dma_off_quietly, drive); + +out: + func = icside_config_if(drive, xfer_mode); + + return hwif->dmaproc(func, drive); } static int diff -u --recursive --new-file v2.3.31/linux/drivers/block/ide-cd.c linux/drivers/block/ide-cd.c --- v2.3.31/linux/drivers/block/ide-cd.c Tue Dec 7 09:32:43 1999 +++ linux/drivers/block/ide-cd.c Mon Dec 13 14:08:39 1999 @@ -313,8 +313,7 @@ static -void cdrom_analyze_sense_data (ide_drive_t *drive, - struct atapi_request_sense *reqbuf, +void cdrom_analyze_sense_data (ide_drive_t *drive, struct request_sense *reqbuf, struct packet_command *failed_command) { if (reqbuf->sense_key == NOT_READY || @@ -431,27 +430,21 @@ * In the case of NOT_READY, if SKSV is set the drive can * give us nice ETA readings. 
*/ - if (reqbuf->sense_key == NOT_READY && - (reqbuf->sense_key_specific[0] & 0x80)) { - int progress = (reqbuf->sense_key_specific[1] << 8 | - reqbuf->sense_key_specific[2]) * 100; + if (reqbuf->sense_key == NOT_READY && (reqbuf->sks[0] & 0x80)) { + int progress = (reqbuf->sks[1] << 8 | reqbuf->sks[2]) * 100; printk(" Command is %02d%% complete\n", progress / 0xffff); } if (reqbuf->sense_key == ILLEGAL_REQUEST && - (reqbuf->sense_key_specific[0] & 0x80) != 0) { + (reqbuf->sks[0] & 0x80) != 0) { printk (" Error in %s byte %d", - (reqbuf->sense_key_specific[0] & 0x40) != 0 - ? "command packet" - : "command data", - (reqbuf->sense_key_specific[1] << 8) + - reqbuf->sense_key_specific[2]); - - if ((reqbuf->sense_key_specific[0] & 0x40) != 0) { - printk (" bit %d", - reqbuf->sense_key_specific[0] & 0x07); - } + (reqbuf->sks[0] & 0x40) != 0 ? + "command packet" : "command data", + (reqbuf->sks[1] << 8) + reqbuf->sks[2]); + + if ((reqbuf->sks[0] & 0x40) != 0) + printk (" bit %d", reqbuf->sks[0] & 0x07); printk ("\n"); } @@ -476,39 +469,25 @@ static void cdrom_queue_request_sense (ide_drive_t *drive, struct semaphore *sem, - struct atapi_request_sense *reqbuf, struct packet_command *failed_command) { struct cdrom_info *info = drive->driver_data; struct request *rq; struct packet_command *pc; - int len; - - /* If the request didn't explicitly specify where - to put the sense data, use the statically allocated structure. */ - if (reqbuf == NULL) - reqbuf = &info->sense_data; /* Make up a new request to retrieve sense information. */ - pc = &info->request_sense_pc; - memset (pc, 0, sizeof (*pc)); - - /* The request_sense structure has an odd number of (16-bit) words, - which won't work well with 32-bit transfers. However, we don't care - about the last two bytes, so just truncate the structure down - to an even length. 
*/ - len = sizeof (*reqbuf) / 4; - len *= 4; + memset(pc, 0, sizeof (*pc)); pc->c[0] = GPCMD_REQUEST_SENSE; - pc->c[4] = (unsigned char) len; - pc->buffer = (char *)reqbuf; - pc->buflen = len; - pc->sense_data = (struct atapi_request_sense *)failed_command; - /* stuff the sense request in front of our current request */ + /* just get the first 18 bytes of the sense info, there might not + * be more available */ + pc->c[4] = pc->buflen = 18; + pc->buffer = (char *)&info->sense_data; + pc->sense_data = (struct request_sense *)failed_command; + /* stuff the sense request in front of our current request */ rq = &info->request_sense_request; ide_init_drive_cmd (rq); rq->cmd = REQUEST_SENSE_COMMAND; @@ -526,7 +505,7 @@ struct packet_command *pc = (struct packet_command *) rq->buffer; cdrom_analyze_sense_data (drive, - (struct atapi_request_sense *) (pc->buffer - pc->c[4]), + (struct request_sense *) (pc->buffer - pc->c[4]), (struct packet_command *) pc->sense_data); } if (rq->cmd == READ && !rq->current_nr_sectors) @@ -609,8 +588,7 @@ cdrom_end_request (1, drive); if ((stat & ERR_STAT) != 0) - cdrom_queue_request_sense (drive, sem, - pc->sense_data, pc); + cdrom_queue_request_sense(drive, sem, pc); } else { /* Handle errors from READ requests. */ @@ -649,8 +627,7 @@ /* If we got a CHECK_CONDITION status, queue a request sense command. */ if ((stat & ERR_STAT) != 0) - cdrom_queue_request_sense (drive, - NULL, NULL, NULL); + cdrom_queue_request_sense(drive, NULL, NULL); } } @@ -1200,9 +1177,7 @@ */ /* Forward declarations. */ -static int -cdrom_lockdoor (ide_drive_t *drive, int lockflag, - struct atapi_request_sense *reqbuf); +static int cdrom_lockdoor(ide_drive_t *drive, int lockflag); /* Interrupt routine for packet command completion. 
*/ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive) @@ -1210,8 +1185,11 @@ int ireason, len, stat, thislen; struct request *rq = HWGROUP(drive)->rq; struct packet_command *pc = (struct packet_command *)rq->buffer; + struct cdrom_info *info = drive->driver_data; ide_startstop_t startstop; + pc->sense_data = &info->sense_data; + /* Check for errors. */ if (cdrom_decode_status (&startstop, drive, 0, &stat)) return startstop; @@ -1339,18 +1317,11 @@ } static -int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc) +int cdrom_queue_packet_command(ide_drive_t *drive, struct packet_command *pc) { - struct atapi_request_sense my_reqbuf; int retries = 10; struct request req; - /* If our caller has not provided a place to stick any sense data, - use our own area. */ - if (pc->sense_data == NULL) - pc->sense_data = &my_reqbuf; - pc->sense_data->sense_key = 0; - /* Start of retry loop. */ do { ide_init_drive_cmd (&req); @@ -1365,7 +1336,7 @@ /* The request failed. Retry if it was due to a unit attention status (usually means media was changed). */ - struct atapi_request_sense *reqbuf = pc->sense_data; + struct request_sense *reqbuf = pc->sense_data; if (reqbuf->sense_key == UNIT_ATTENTION) cdrom_saw_media_change (drive); @@ -1386,25 +1357,24 @@ } while (pc->stat != 0 && retries >= 0); /* Return an error if the command failed. */ - if (pc->stat != 0) + if (pc->stat) return -EIO; - else { - /* The command succeeded. If it was anything other than - a request sense, eject, or door lock command, - and we think that the door is presently unlocked, lock it - again. (The door was probably unlocked via an explicit - CDROMEJECT ioctl.) 
*/ - if (CDROM_STATE_FLAGS (drive)->door_locked == 0 && - (pc->c[0] != GPCMD_TEST_UNIT_READY && - pc->c[0] != GPCMD_REQUEST_SENSE && - pc->c[0] != GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL && - pc->c[0] != GPCMD_START_STOP_UNIT && - pc->c[0] != GPCMD_MODE_SENSE_10 && - pc->c[0] != GPCMD_MODE_SELECT_10)) { - (void) cdrom_lockdoor (drive, 1, NULL); - } - return 0; + + /* The command succeeded. If it was anything other than + a request sense, eject, or door lock command, + and we think that the door is presently unlocked, lock it + again. (The door was probably unlocked via an explicit + CDROMEJECT ioctl.) */ + if (CDROM_STATE_FLAGS (drive)->door_locked == 0 && + (pc->c[0] != GPCMD_TEST_UNIT_READY && + pc->c[0] != GPCMD_REQUEST_SENSE && + pc->c[0] != GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL && + pc->c[0] != GPCMD_START_STOP_UNIT && + pc->c[0] != GPCMD_MODE_SENSE_10 && + pc->c[0] != GPCMD_MODE_SELECT_10)) { + (void) cdrom_lockdoor (drive, 1); } + return 0; } /**************************************************************************** @@ -1463,7 +1433,7 @@ * Ioctl handling. * * Routines which queue packet commands take as a final argument a pointer - * to an atapi_request_sense struct. If execution of the command results + * to a request_sense struct. If execution of the command results * in an error with a CHECK CONDITION status, this structure will be filled * with the results of the subsequent request sense command. The pointer * can also be NULL, in which case no sense information is returned. @@ -1512,18 +1482,14 @@ return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; } - -static int -cdrom_check_status (ide_drive_t *drive, - struct atapi_request_sense *reqbuf) +static int cdrom_check_status (ide_drive_t *drive) { struct packet_command pc; struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; - memset (&pc, 0, sizeof (pc)); + memset(&pc, 0, sizeof(pc)); - pc.sense_data = reqbuf; pc.c[0] = GPCMD_TEST_UNIT_READY; #if ! 
STANDARD_ATAPI @@ -1533,39 +1499,35 @@ pc.c[7] = cdi->sanyo_slot % 3; #endif /* not STANDARD_ATAPI */ - return cdrom_queue_packet_command (drive, &pc); + return cdrom_queue_packet_command(drive, &pc); } /* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ static int -cdrom_lockdoor (ide_drive_t *drive, int lockflag, - struct atapi_request_sense *reqbuf) +cdrom_lockdoor(ide_drive_t *drive, int lockflag) { - struct atapi_request_sense my_reqbuf; - int stat; + struct request_sense *sense; struct packet_command pc; - - if (reqbuf == NULL) - reqbuf = &my_reqbuf; + int stat; /* If the drive cannot lock the door, just pretend. */ if (CDROM_CONFIG_FLAGS (drive)->no_doorlock) stat = 0; else { - memset (&pc, 0, sizeof (pc)); - pc.sense_data = reqbuf; - + memset(&pc, 0, sizeof(pc)); pc.c[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; pc.c[4] = (lockflag != 0); stat = cdrom_queue_packet_command (drive, &pc); } + sense = pc.sense_data; + /* If we got an illegal field error, the drive probably cannot lock the door. */ if (stat != 0 && - reqbuf->sense_key == ILLEGAL_REQUEST && - (reqbuf->asc == 0x24 || reqbuf->asc == 0x20)) { + sense->sense_key == ILLEGAL_REQUEST && + (sense->asc == 0x24 || sense->asc == 0x20)) { printk ("%s: door locking not supported\n", drive->name); CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1; @@ -1573,7 +1535,7 @@ } /* no medium, that's alright. */ - if (stat != 0 && reqbuf->sense_key == NOT_READY && reqbuf->asc == 0x3a) + if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a) stat = 0; if (stat == 0) @@ -1585,30 +1547,25 @@ /* Eject the disk if EJECTFLAG is 0. If EJECTFLAG is 1, try to reload the disk. 
*/ -static int -cdrom_eject (ide_drive_t *drive, int ejectflag, - struct atapi_request_sense *reqbuf) +static int cdrom_eject(ide_drive_t *drive, int ejectflag) { struct packet_command pc; - if (CDROM_CONFIG_FLAGS (drive)->no_eject && !ejectflag) + if (CDROM_CONFIG_FLAGS(drive)->no_eject && !ejectflag) return -EDRIVE_CANT_DO_THIS; /* reload fails on some drives, if the tray is locked */ - if (CDROM_STATE_FLAGS (drive)->door_locked && ejectflag) + if (CDROM_STATE_FLAGS(drive)->door_locked && ejectflag) return 0; - memset (&pc, 0, sizeof (pc)); - pc.sense_data = reqbuf; + memset(&pc, 0, sizeof (pc)); pc.c[0] = GPCMD_START_STOP_UNIT; pc.c[4] = 0x02 + (ejectflag != 0); return cdrom_queue_packet_command (drive, &pc); } -static int -cdrom_read_capacity (ide_drive_t *drive, unsigned *capacity, - struct atapi_request_sense *reqbuf) +static int cdrom_read_capacity(ide_drive_t *drive, unsigned *capacity) { struct { __u32 lba; @@ -1618,30 +1575,25 @@ int stat; struct packet_command pc; - memset (&pc, 0, sizeof (pc)); - pc.sense_data = reqbuf; + memset(&pc, 0, sizeof (pc)); pc.c[0] = GPCMD_READ_CDVD_CAPACITY; pc.buffer = (char *)&capbuf; - pc.buflen = sizeof (capbuf); + pc.buflen = sizeof(capbuf); - stat = cdrom_queue_packet_command (drive, &pc); + stat = cdrom_queue_packet_command(drive, &pc); if (stat == 0) *capacity = be32_to_cpu(capbuf.lba); return stat; } - -static int -cdrom_read_tocentry (ide_drive_t *drive, int trackno, int msf_flag, - int format, char *buf, int buflen, - struct atapi_request_sense *reqbuf) +static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, + int format, char *buf, int buflen) { struct packet_command pc; - memset (&pc, 0, sizeof (pc)); - pc.sense_data = reqbuf; + memset(&pc, 0, sizeof(pc)); pc.buffer = buf; pc.buflen = buflen; @@ -1650,14 +1602,16 @@ pc.c[7] = (buflen >> 8); pc.c[8] = (buflen & 0xff); pc.c[9] = (format << 6); - if (msf_flag) pc.c[1] = 2; + + if (msf_flag) + pc.c[1] = 2; + return cdrom_queue_packet_command 
(drive, &pc); } /* Try to read the entire TOC for the disk into our internal buffer. */ -static int -cdrom_read_toc (ide_drive_t *drive, struct atapi_request_sense *reqbuf) +static int cdrom_read_toc (ide_drive_t *drive) { int stat, ntracks, i; struct cdrom_info *info = drive->driver_data; @@ -1682,13 +1636,13 @@ /* Check to see if the existing data is still valid. If it is, just return. */ if (CDROM_STATE_FLAGS (drive)->toc_valid) - (void) cdrom_check_status (drive, NULL); + (void) cdrom_check_status(drive); if (CDROM_STATE_FLAGS (drive)->toc_valid) return 0; /* First read just the header, so we know how long the TOC is. */ stat = cdrom_read_tocentry (drive, 0, 1, 0, (char *)&toc->hdr, - sizeof (struct atapi_toc_header), reqbuf); + sizeof (struct atapi_toc_header)); if (stat) return stat; #if ! STANDARD_ATAPI @@ -1706,7 +1660,7 @@ stat = cdrom_read_tocentry (drive, toc->hdr.first_track, 1, 0, (char *)&toc->hdr, sizeof (struct atapi_toc_header) + (ntracks + 1) * - sizeof (struct atapi_toc_entry), reqbuf); + sizeof (struct atapi_toc_entry)); if (stat && toc->hdr.first_track > 1) { /* Cds with CDI tracks only don't have any TOC entries, @@ -1723,8 +1677,7 @@ 0, (char *)&toc->hdr, sizeof (struct atapi_toc_header) + (ntracks+1) * - sizeof (struct atapi_toc_entry), - reqbuf); + sizeof (struct atapi_toc_entry)); if (stat) { return stat; } @@ -1769,8 +1722,7 @@ if (toc->hdr.first_track != CDROM_LEADOUT) { /* Read the multisession information. 
*/ stat = cdrom_read_tocentry (drive, 0, 1, 1, - (char *)&ms_tmp, sizeof (ms_tmp), - reqbuf); + (char *)&ms_tmp, sizeof (ms_tmp)); if (stat) return stat; } else { ms_tmp.ent.addr.msf.minute = 0; @@ -1796,7 +1748,7 @@ (long *)&toc->capacity); if (stat) #endif - stat = cdrom_read_capacity (drive, &toc->capacity, reqbuf); + stat = cdrom_read_capacity (drive, &toc->capacity); if (stat) toc->capacity = 0x1fffff; /* for general /dev/cdrom like mounting, one big disc */ @@ -1829,17 +1781,14 @@ } -static int -cdrom_read_subchannel (ide_drive_t *drive, int format, - char *buf, int buflen, - struct atapi_request_sense *reqbuf) +static int cdrom_read_subchannel(ide_drive_t *drive, int format, char *buf, + int buflen) { struct packet_command pc; - memset (&pc, 0, sizeof (pc)); - pc.sense_data = reqbuf; + memset(&pc, 0, sizeof(pc)); - pc.buffer = buf; + pc.buffer = buf; pc.buflen = buflen; pc.c[0] = GPCMD_READ_SUBCHANNEL; pc.c[1] = 2; /* MSF addressing */ @@ -1847,23 +1796,20 @@ pc.c[3] = format; pc.c[7] = (buflen >> 8); pc.c[8] = (buflen & 0xff); - return cdrom_queue_packet_command (drive, &pc); + return cdrom_queue_packet_command(drive, &pc); } /* ATAPI cdrom drives are free to select the speed you request or any slower rate :-( Requesting too fast a speed will _not_ produce an error. 
*/ -static int -cdrom_select_speed (ide_drive_t *drive, int speed, - struct atapi_request_sense *reqbuf) +static int cdrom_select_speed (ide_drive_t *drive, int speed) { struct packet_command pc; - memset (&pc, 0, sizeof (pc)); - pc.sense_data = reqbuf; + memset(&pc, 0, sizeof(pc)); if (speed == 0) - speed = 0xffff; /* set to max */ + speed = 0xffff; /* set to max */ else - speed *= 177; /* Nx to kbytes/s */ + speed *= 177; /* Nx to kbytes/s */ pc.c[0] = GPCMD_SET_SPEED; /* Read Drive speed in kbytes/second MSB */ @@ -1882,10 +1828,8 @@ } -static -int cdrom_get_toc_entry (ide_drive_t *drive, int track, - struct atapi_toc_entry **ent, - struct atapi_request_sense *reqbuf) +static int cdrom_get_toc_entry(ide_drive_t *drive, int track, + struct atapi_toc_entry **ent) { struct cdrom_info *info = drive->driver_data; struct atapi_toc *toc = info->toc; @@ -1923,7 +1867,13 @@ memcpy(pc.c, cgc->cmd, CDROM_PACKET_SIZE); pc.buffer = cgc->buffer; pc.buflen = cgc->buflen; - return cgc->stat = cdrom_queue_packet_command(drive, &pc); + cgc->stat = cdrom_queue_packet_command(drive, &pc); + + /* There was an error, assign sense. */ + if (cgc->stat) + cgc->sense = pc.sense_data; + + return cgc->stat; } static @@ -1987,7 +1937,7 @@ struct atapi_toc *toc; /* Make sure our saved TOC is valid. 
*/ - stat = cdrom_read_toc (drive, NULL); + stat = cdrom_read_toc(drive); if (stat) return stat; toc = info->toc; @@ -2002,8 +1952,7 @@ struct cdrom_tocentry *tocentry = (struct cdrom_tocentry*) arg; struct atapi_toc_entry *toce; - stat = cdrom_get_toc_entry (drive, tocentry->cdte_track, &toce, - NULL); + stat = cdrom_get_toc_entry (drive, tocentry->cdte_track, &toce); if (stat) return stat; tocentry->cdte_ctrl = toce->control; @@ -2040,21 +1989,20 @@ int ide_cdrom_tray_move (struct cdrom_device_info *cdi, int position) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; - struct atapi_request_sense rq; if (position) { - int stat = cdrom_lockdoor (drive, 0, &rq); + int stat = cdrom_lockdoor (drive, 0); if (stat) return stat; } - return cdrom_eject (drive, !position, NULL); + return cdrom_eject(drive, !position); } static int ide_cdrom_lock_door (struct cdrom_device_info *cdi, int lock) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; - return cdrom_lockdoor (drive, lock, NULL); + return cdrom_lockdoor (drive, lock); } static @@ -2062,14 +2010,13 @@ { int stat, attempts = 3; ide_drive_t *drive = (ide_drive_t*) cdi->handle; - struct atapi_request_sense reqbuf; struct cdrom_generic_command cgc; struct { char pad[8]; struct atapi_capabilities_page cap; } buf; - stat=cdrom_select_speed (drive, speed, &reqbuf); - if (stat<0) + + if ((stat = cdrom_select_speed (drive, speed)) < 0) return stat; init_cdrom_command(&cgc, &buf, sizeof(buf)); @@ -2100,19 +2047,19 @@ int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; + struct cdrom_info *info = drive->driver_data; if (slot_nr == CDSL_CURRENT) { - - struct atapi_request_sense sense; - int stat = cdrom_check_status (drive, &sense); - if (stat == 0 || sense.sense_key == UNIT_ATTENTION) + struct request_sense *sense = &info->sense_data; + int stat = cdrom_check_status(drive); + if (stat == 0 || sense->sense_key == UNIT_ATTENTION) return CDS_DISC_OK; - if 
(sense.sense_key == NOT_READY && sense.asc == 0x04 && - sense.ascq == 0x04) + if (sense->sense_key == NOT_READY && sense->asc == 0x04 && + sense->ascq == 0x04) return CDS_DISC_OK; - if (sense.sense_key == NOT_READY) { + if (sense->sense_key == NOT_READY) { /* ATAPI doesn't have anything that can help us decide whether the drive is really emtpy or the tray is just open. irk. */ @@ -2148,10 +2095,9 @@ char mcnbuf[24]; ide_drive_t *drive = (ide_drive_t*) cdi->handle; - stat = cdrom_read_subchannel (drive, 2, /* get MCN */ - mcnbuf, sizeof (mcnbuf), - NULL); - if (stat) return stat; +/* get MCN */ + if ((stat = cdrom_read_subchannel(drive, 2, mcnbuf, sizeof (mcnbuf)))) + return stat; memcpy (mcn_info->medium_catalog_number, mcnbuf+9, sizeof (mcn_info->medium_catalog_number)-1); @@ -2174,7 +2120,7 @@ ide_drive_t *drive = (ide_drive_t*) cdi->handle; if (slot_nr == CDSL_CURRENT) { - (void) cdrom_check_status (drive, NULL); + (void) cdrom_check_status(drive); CDROM_STATE_FLAGS (drive)->media_changed = 0; return CDROM_STATE_FLAGS (drive)->media_changed; } else { diff -u --recursive --new-file v2.3.31/linux/drivers/block/ide-cd.h linux/drivers/block/ide-cd.h --- v2.3.31/linux/drivers/block/ide-cd.h Fri Oct 22 13:21:47 1999 +++ linux/drivers/block/ide-cd.h Tue Dec 14 00:55:05 1999 @@ -7,6 +7,7 @@ * Copyright (C) 1998, 1999 Jens Axboe */ +#include #include /* Turn this on to have the driver print out the meanings of the @@ -95,47 +96,14 @@ __u8 reserved : 4; byte current_speed; /* Current speed of the drive */ }; -#define CDROM_STATE_FLAGS(drive) (&(((struct cdrom_info *)(drive->driver_data))->state_flags)) - -struct atapi_request_sense { -#if defined(__BIG_ENDIAN_BITFIELD) - unsigned char valid : 1; - unsigned char error_code : 7; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - unsigned char error_code : 7; - unsigned char valid : 1; -#else -#error "Please fix " -#endif - byte reserved1; -#if defined(__BIG_ENDIAN_BITFIELD) - unsigned char reserved3 : 2; - unsigned char ili : 1; - 
unsigned char reserved2 : 1; - unsigned char sense_key : 4; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - unsigned char sense_key : 4; - unsigned char reserved2 : 1; - unsigned char ili : 1; - unsigned char reserved3 : 2; -#else -#error "Please fix " -#endif - byte info[4]; - byte sense_len; - byte command_info[4]; - byte asc; - byte ascq; - byte fru; - byte sense_key_specific[3]; -}; +#define CDROM_STATE_FLAGS(drive) (&(((struct cdrom_info *)(drive->driver_data))->state_flags)) struct packet_command { char *buffer; int buflen; int stat; - struct atapi_request_sense *sense_data; + struct request_sense *sense_data; unsigned char c[12]; }; @@ -502,7 +470,7 @@ /* The result of the last successful request sense command on this device. */ - struct atapi_request_sense sense_data; + struct request_sense sense_data; struct request request_sense_request; struct packet_command request_sense_pc; diff -u --recursive --new-file v2.3.31/linux/drivers/block/ide-disk.c linux/drivers/block/ide-disk.c --- v2.3.31/linux/drivers/block/ide-disk.c Thu Nov 18 20:25:37 1999 +++ linux/drivers/block/ide-disk.c Sun Dec 12 22:55:54 1999 @@ -242,7 +242,10 @@ rq->sector += nsect; #endif if ((rq->nr_sectors -= nsect) <= 0) + { + spin_unlock_irqrestore(&io_request_lock, flags); break; + } if ((rq->current_nr_sectors -= nsect) == 0) { if ((rq->bh = rq->bh->b_reqnext) != NULL) { rq->current_nr_sectors = rq->bh->b_size>>9; diff -u --recursive --new-file v2.3.31/linux/drivers/block/ide-probe.c linux/drivers/block/ide-probe.c --- v2.3.31/linux/drivers/block/ide-probe.c Thu Nov 18 20:25:37 1999 +++ linux/drivers/block/ide-probe.c Sun Dec 12 22:58:00 1999 @@ -56,7 +56,8 @@ ide_input_data(drive, id, SECTOR_WORDS); /* read 512 bytes of id info */ ide__sti(); /* local CPU only */ ide_fix_driveid(id); - + if (!drive->forced_lun) + drive->last_lun = id->word126 & 0x7; #if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA) /* * EATA SCSI controllers do a hardware ATA 
emulation: @@ -703,7 +704,8 @@ static int hwif_init (ide_hwif_t *hwif) { - void (*rfn)(void); + ide_drive_t *drive; + void (*rfn)(request_queue_t *); if (!hwif->present) return 0; @@ -785,10 +787,23 @@ init_gendisk(hwif); blk_dev[hwif->major].data = hwif; - blk_dev[hwif->major].request_fn = rfn; blk_dev[hwif->major].queue = ide_get_queue; read_ahead[hwif->major] = 8; /* (4kB) */ hwif->present = 1; /* success */ + + /* + * FIXME(eric) - This needs to be tested. I *think* that this + * is correct. Also, I believe that there is no longer any + * reason to have multiple functions (do_ide[0-7]_request) + * functions - the queuedata field could be used to indicate + * the correct hardware group - either this, or we could add + * a new field to request_queue_t to hold this information. + */ + drive = &hwif->drives[0]; + blk_init_queue(&drive->queue, rfn); + + drive = &hwif->drives[1]; + blk_init_queue(&drive->queue, rfn); #if (DEBUG_SPINLOCK > 0) { diff -u --recursive --new-file v2.3.31/linux/drivers/block/ide.c linux/drivers/block/ide.c --- v2.3.31/linux/drivers/block/ide.c Thu Nov 18 20:25:37 1999 +++ linux/drivers/block/ide.c Sun Dec 12 22:58:00 1999 @@ -493,8 +493,8 @@ if (!end_that_request_first(rq, uptodate, hwgroup->drive->name)) { add_blkdev_randomness(MAJOR(rq->rq_dev)); - hwgroup->drive->queue = rq->next; - blk_dev[MAJOR(rq->rq_dev)].current_request = NULL; + hwgroup->drive->queue.current_request = rq->next; + blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL; hwgroup->rq = NULL; end_that_request_last(rq); } @@ -755,8 +755,8 @@ } } spin_lock_irqsave(&io_request_lock, flags); - drive->queue = rq->next; - blk_dev[MAJOR(rq->rq_dev)].current_request = NULL; + drive->queue.current_request = rq->next; + blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL; HWGROUP(drive)->rq = NULL; rq->rq_status = RQ_INACTIVE; spin_unlock_irqrestore(&io_request_lock, flags); @@ -1059,7 +1059,7 @@ { ide_startstop_t startstop; unsigned long block, blockend; - 
struct request *rq = drive->queue; + struct request *rq = drive->queue.current_request; unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS; ide_hwif_t *hwif = HWIF(drive); @@ -1142,13 +1142,13 @@ best = NULL; drive = hwgroup->drive; do { - if (drive->queue && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) { + if (drive->queue.current_request && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) { if (!best || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep))) || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive)))) { struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major]; - if (bdev->current_request != &bdev->plug) + if( !bdev->request_queue.plugged ) best = drive; } } @@ -1228,8 +1228,8 @@ drive = hwgroup->drive; do { bdev = &blk_dev[HWIF(drive)->major]; - if (bdev->current_request != &bdev->plug) /* FIXME: this will do for now */ - bdev->current_request = NULL; /* (broken since patch-2.1.15) */ + if( !bdev->request_queue.plugged ) + bdev->request_queue.current_request = NULL; /* (broken since patch-2.1.15) */ if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep))) sleep = drive->sleep; } while ((drive = drive->next) != hwgroup->drive); @@ -1267,9 +1267,9 @@ drive->service_start = jiffies; bdev = &blk_dev[hwif->major]; - if (bdev->current_request == &bdev->plug) /* FIXME: paranoia */ + if( bdev->request_queue.plugged ) /* FIXME: paranoia */ printk("%s: Huh? nuking plugged queue\n", drive->name); - bdev->current_request = hwgroup->rq = drive->queue; + bdev->request_queue.current_request = hwgroup->rq = drive->queue.current_request; spin_unlock(&io_request_lock); if (!hwif->serialized) /* play it safe with buggy hardware */ ide__sti(); @@ -1283,76 +1283,76 @@ /* * ide_get_queue() returns the queue which corresponds to a given device. 
*/ -struct request **ide_get_queue (kdev_t dev) +request_queue_t *ide_get_queue (kdev_t dev) { ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data; return &hwif->drives[DEVICE_NR(dev) & 1].queue; } -void do_ide0_request (void) +void do_ide0_request (request_queue_t *q) { ide_do_request (ide_hwifs[0].hwgroup); } #if MAX_HWIFS > 1 -void do_ide1_request (void) +void do_ide1_request (request_queue_t *q) { ide_do_request (ide_hwifs[1].hwgroup); } #endif /* MAX_HWIFS > 1 */ #if MAX_HWIFS > 2 -void do_ide2_request (void) +void do_ide2_request (request_queue_t *q) { ide_do_request (ide_hwifs[2].hwgroup); } #endif /* MAX_HWIFS > 2 */ #if MAX_HWIFS > 3 -void do_ide3_request (void) +void do_ide3_request (request_queue_t *q) { ide_do_request (ide_hwifs[3].hwgroup); } #endif /* MAX_HWIFS > 3 */ #if MAX_HWIFS > 4 -void do_ide4_request (void) +void do_ide4_request (request_queue_t *q) { ide_do_request (ide_hwifs[4].hwgroup); } #endif /* MAX_HWIFS > 4 */ #if MAX_HWIFS > 5 -void do_ide5_request (void) +void do_ide5_request (request_queue_t *q) { ide_do_request (ide_hwifs[5].hwgroup); } #endif /* MAX_HWIFS > 5 */ #if MAX_HWIFS > 6 -void do_ide6_request (void) +void do_ide6_request (request_queue_t *q) { ide_do_request (ide_hwifs[6].hwgroup); } #endif /* MAX_HWIFS > 6 */ #if MAX_HWIFS > 7 -void do_ide7_request (void) +void do_ide7_request (request_queue_t *q) { ide_do_request (ide_hwifs[7].hwgroup); } #endif /* MAX_HWIFS > 7 */ #if MAX_HWIFS > 8 -void do_ide8_request (void) +void do_ide8_request (request_queue_t *q) { ide_do_request (ide_hwifs[8].hwgroup); } #endif /* MAX_HWIFS > 8 */ #if MAX_HWIFS > 9 -void do_ide9_request (void) +void do_ide9_request (request_queue_t *q) { ide_do_request (ide_hwifs[9].hwgroup); } @@ -1576,10 +1576,12 @@ hwgroup->handler = NULL; del_timer(&hwgroup->timer); spin_unlock(&io_request_lock); + if (drive->unmask) ide__sti(); /* local CPU only */ startstop = handler(drive); /* service this interrupt, may set handler for next interrupt */ 
spin_lock_irq(&io_request_lock); + /* * Note that handler() may have set things up for another * interrupt to occur soon, but it cannot happen until @@ -1683,10 +1685,10 @@ if (action == ide_wait) rq->sem = &sem; spin_lock_irqsave(&io_request_lock, flags); - cur_rq = drive->queue; + cur_rq = drive->queue.current_request; if (cur_rq == NULL || action == ide_preempt) { rq->next = cur_rq; - drive->queue = rq; + drive->queue.current_request = rq; if (action == ide_preempt) hwgroup->rq = NULL; } else { @@ -1993,7 +1995,7 @@ kfree(blksize_size[hwif->major]); kfree(max_sectors[hwif->major]); kfree(max_readahead[hwif->major]); - blk_dev[hwif->major].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(hwif->major)); blk_dev[hwif->major].data = NULL; blk_dev[hwif->major].queue = NULL; blksize_size[hwif->major] = NULL; @@ -2684,6 +2686,7 @@ * * "hdx=swapdata" : when the drive is a disk, byte swap all data * "hdx=bswap" : same as above.......... + * "hdxlun=xx" : set the drive last logical unit. * "hdx=flash" : allows for more than one ata_flash disk to be * registered. In most cases, only one device * will be present. @@ -2787,6 +2790,19 @@ drive = &hwif->drives[unit]; if (strncmp(s + 4, "ide-", 4) == 0) { strncpy(drive->driver_req, s + 4, 9); + goto done; + } + /* + * Look for last lun option: "hdxlun=" + */ + if (s[3] == 'l' && s[4] == 'u' && s[5] == 'n') { + if (match_parm(&s[6], NULL, vals, 1) != 1) + goto bad_option; + if (vals[0] >= 0 && vals[0] <= 7) { + drive->last_lun = vals[0]; + drive->forced_lun = 1; + } else + printk(" -- BAD LAST LUN! Expected value from 0 to 7"); goto done; } switch (match_parm(&s[3], hd_words, vals, 3)) { diff -u --recursive --new-file v2.3.31/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c --- v2.3.31/linux/drivers/block/ll_rw_blk.c Thu Nov 11 20:11:33 1999 +++ linux/drivers/block/ll_rw_blk.c Sun Dec 12 22:55:54 1999 @@ -142,14 +142,49 @@ * NOTE: the device-specific queue() functions * have to be atomic! 
*/ -static inline struct request **get_queue(kdev_t dev) +static inline request_queue_t *get_queue(kdev_t dev) { int major = MAJOR(dev); struct blk_dev_struct *bdev = blk_dev + major; if (bdev->queue) return bdev->queue(dev); - return &blk_dev[major].current_request; + return &blk_dev[major].request_queue; +} + +void blk_cleanup_queue(request_queue_t * q) +{ + memset(q, 0, sizeof(*q)); +} + +void blk_queue_headactive(request_queue_t * q, int active) +{ + q->head_active = active; +} + +void blk_queue_pluggable(request_queue_t * q, int use_plug) +{ + q->use_plug = use_plug; +} + +void blk_init_queue(request_queue_t * q, request_fn_proc * rfn) +{ + q->request_fn = rfn; + q->current_request = NULL; + q->merge_fn = NULL; + q->merge_requests_fn = NULL; + q->plug_tq.sync = 0; + q->plug_tq.routine = &unplug_device; + q->plug_tq.data = q; + q->plugged = 0; + /* + * These booleans describe the queue properties. We set the + * default (and most common) values here. Other drivers can + * use the appropriate functions to alter the queue properties. + * as appropriate. + */ + q->use_plug = 1; + q->head_active = 1; } /* @@ -157,22 +192,18 @@ */ void unplug_device(void * data) { - struct blk_dev_struct * dev = (struct blk_dev_struct *) data; - int queue_new_request=0; + request_queue_t * q = (request_queue_t *) data; unsigned long flags; spin_lock_irqsave(&io_request_lock,flags); - if (dev->current_request == &dev->plug) { - struct request * next = dev->plug.next; - dev->current_request = next; - if (next || dev->queue) { - dev->plug.next = NULL; - queue_new_request = 1; + if( q->plugged ) + { + q->plugged = 0; + if( q->current_request != NULL ) + { + (q->request_fn)(q); } } - if (queue_new_request) - (dev->request_fn)(); - spin_unlock_irqrestore(&io_request_lock,flags); } @@ -184,12 +215,13 @@ * This is called with interrupts off and no requests on the queue. 
* (and with the request spinlock aquired) */ -static inline void plug_device(struct blk_dev_struct * dev) +static inline void plug_device(request_queue_t * q) { - if (dev->current_request) + if (q->current_request) return; - dev->current_request = &dev->plug; - queue_task(&dev->plug_tq, &tq_disk); + + q->plugged = 1; + queue_task(&q->plug_tq, &tq_disk); } /* @@ -221,6 +253,7 @@ prev_found = req; req->rq_status = RQ_ACTIVE; req->rq_dev = dev; + req->special = NULL; return req; } @@ -335,12 +368,11 @@ * which is important for drive_stat_acct() above. */ -void add_request(struct blk_dev_struct * dev, struct request * req) +static void add_request(request_queue_t * q, struct request * req) { int major = MAJOR(req->rq_dev); - struct request * tmp, **current_request; + struct request * tmp; unsigned long flags; - int queue_new_request = 0; drive_stat_acct(req, req->nr_sectors, 1); req->next = NULL; @@ -349,12 +381,9 @@ * We use the goto to reduce locking complexity */ spin_lock_irqsave(&io_request_lock,flags); - current_request = get_queue(req->rq_dev); - if (!(tmp = *current_request)) { - *current_request = req; - if (dev->current_request != &dev->plug) - queue_new_request = 1; + if (!(tmp = q->current_request)) { + q->current_request = req; goto out; } for ( ; tmp->next ; tmp = tmp->next) { @@ -372,26 +401,34 @@ req->next = tmp->next; tmp->next = req; -/* for SCSI devices, call request_fn unconditionally */ - if (scsi_blk_major(major)) - queue_new_request = 1; - if (major >= COMPAQ_SMART2_MAJOR+0 && - major <= COMPAQ_SMART2_MAJOR+7) - queue_new_request = 1; + /* + * FIXME(eric) I don't understand why there is a need for this + * special case code. It clearly doesn't fit any more with + * the new queueing architecture, and it got added in 2.3.10. + * I am leaving this in here until I hear back from the COMPAQ + * people. 
+ */ + if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7) + { + (q->request_fn)(q); + } + if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7) - queue_new_request = 1; + { + (q->request_fn)(q); + } + out: - if (queue_new_request) - (dev->request_fn)(); spin_unlock_irqrestore(&io_request_lock,flags); } /* * Has to be called with the request spinlock aquired */ -static inline void attempt_merge (struct request *req, - int max_sectors, - int max_segments) +static inline void attempt_merge (request_queue_t * q, + struct request *req, + int max_sectors, + int max_segments) { struct request *next = req->next; int total_segments; @@ -407,16 +444,37 @@ total_segments--; if (total_segments > max_segments) return; + + if( q->merge_requests_fn != NULL ) + { + /* + * If we are not allowed to merge these requests, then + * return. If we are allowed to merge, then the count + * will have been updated to the appropriate number, + * and we shouldn't do it here too. + */ + if( !(q->merge_requests_fn)(q, req, next) ) + { + return; + } + } + else + { + req->nr_segments = total_segments; + } + req->bhtail->b_reqnext = next->bh; req->bhtail = next->bhtail; req->nr_sectors += next->nr_sectors; - req->nr_segments = total_segments; next->rq_status = RQ_INACTIVE; req->next = next->next; wake_up (&wait_for_request); } -void make_request(int major,int rw, struct buffer_head * bh) +static void __make_request(request_queue_t * q, + int major, + int rw, + struct buffer_head * bh) { unsigned int sector, count; struct request * req; @@ -519,13 +577,20 @@ * not to schedule or do something nonatomic */ spin_lock_irqsave(&io_request_lock,flags); - req = *get_queue(bh->b_rdev); + req = q->current_request; if (!req) { /* MD and loop can't handle plugging without deadlocking */ if (major != MD_MAJOR && major != LOOP_MAJOR && - major != DDV_MAJOR && major != NBD_MAJOR) - plug_device(blk_dev + major); /* is atomic */ + major != DDV_MAJOR && major != NBD_MAJOR + && q->use_plug) + 
plug_device(q); /* is atomic */ } else switch (major) { + /* + * FIXME(eric) - this entire switch statement is going away + * soon, and we will instead key off of q->head_active to decide + * whether the top request in the queue is active on the device + * or not. + */ case IDE0_MAJOR: /* same as HD_MAJOR */ case IDE1_MAJOR: case FLOPPY_MAJOR: @@ -548,7 +613,7 @@ * All other drivers need to jump over the first entry, as that * entry may be busy being processed and we thus can't change it. */ - if (req == blk_dev[major].current_request) + if (req == q->current_request) req = req->next; if (!req) break; @@ -592,25 +657,71 @@ continue; /* Can we add it to the end of this request? */ if (req->sector + req->nr_sectors == sector) { - if (req->bhtail->b_data + req->bhtail->b_size - != bh->b_data) { - if (req->nr_segments < max_segments) - req->nr_segments++; - else continue; + /* + * The merge_fn is a more advanced way + * of accomplishing the same task. Instead + * of applying a fixed limit of some sort + * we instead define a function which can + * determine whether or not it is safe to + * merge the request or not. + */ + if( q->merge_fn == NULL ) + { + if (req->bhtail->b_data + req->bhtail->b_size + != bh->b_data) { + if (req->nr_segments < max_segments) + req->nr_segments++; + else continue; + } + } + else + { + /* + * See if this queue has rules that + * may suggest that we shouldn't merge + * this + */ + if( !(q->merge_fn)(q, req, bh) ) + { + continue; + } } req->bhtail->b_reqnext = bh; req->bhtail = bh; req->nr_sectors += count; drive_stat_acct(req, count, 0); /* Can we now merge this req with the next? */ - attempt_merge(req, max_sectors, max_segments); + attempt_merge(q, req, max_sectors, max_segments); /* or to the beginning? 
*/ } else if (req->sector - count == sector) { - if (bh->b_data + bh->b_size - != req->bh->b_data) { - if (req->nr_segments < max_segments) - req->nr_segments++; - else continue; + /* + * The merge_fn is a more advanced way + * of accomplishing the same task. Instead + * of applying a fixed limit of some sort + * we instead define a function which can + * determine whether or not it is safe to + * merge the request or not. + */ + if( q->merge_fn == NULL ) + { + if (bh->b_data + bh->b_size + != req->bh->b_data) { + if (req->nr_segments < max_segments) + req->nr_segments++; + else continue; + } + } + else + { + /* + * See if this queue has rules that + * may suggest that we shouldn't merge + * this + */ + if( !(q->merge_fn)(q, req, bh) ) + { + continue; + } } bh->b_reqnext = req->bh; req->bh = bh; @@ -645,20 +756,37 @@ req->errors = 0; req->sector = sector; req->nr_sectors = count; - req->nr_segments = 1; req->current_nr_sectors = count; + req->nr_segments = 1; /* Always 1 for a new request. */ req->buffer = bh->b_data; req->sem = NULL; req->bh = bh; req->bhtail = bh; req->next = NULL; - add_request(major+blk_dev,req); + add_request(q, req); return; end_io: bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state)); } +void make_request(int major,int rw, struct buffer_head * bh) +{ + request_queue_t * q; + unsigned long flags; + + q = get_queue(bh->b_dev); + + __make_request(q, major, rw, bh); + + spin_lock_irqsave(&io_request_lock,flags); + if( !q->plugged ) + (q->request_fn)(q); + spin_unlock_irqrestore(&io_request_lock,flags); +} + + + /* This function can be used to request a number of buffers from a block device. 
Currently the only restriction is that all buffers must belong to the same device */ @@ -667,13 +795,13 @@ { unsigned int major; int correct_size; - struct blk_dev_struct * dev; + request_queue_t * q; + unsigned long flags; int i; - dev = NULL; - if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV) - dev = blk_dev + major; - if (!dev || !dev->request_fn) { + + major = MAJOR(bh[0]->b_dev); + if (!(q = get_queue(bh[0]->b_dev))) { printk(KERN_ERR "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n", kdevname(bh[0]->b_dev), bh[0]->b_blocknr); @@ -726,8 +854,15 @@ continue; } #endif - make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]); + __make_request(q, MAJOR(bh[i]->b_rdev), rw, bh[i]); + } + + spin_lock_irqsave(&io_request_lock,flags); + if( !q->plugged ) + { + (q->request_fn)(q); } + spin_unlock_irqrestore(&io_request_lock,flags); return; sorry: @@ -801,15 +936,8 @@ struct blk_dev_struct *dev; for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) { - dev->request_fn = NULL; dev->queue = NULL; - dev->current_request = NULL; - dev->plug.rq_status = RQ_INACTIVE; - dev->plug.cmd = -1; - dev->plug.next = NULL; - dev->plug_tq.sync = 0; - dev->plug_tq.routine = &unplug_device; - dev->plug_tq.data = dev; + blk_init_queue(&dev->request_queue, NULL); } req = all_requests + NR_REQUEST; @@ -924,3 +1052,6 @@ EXPORT_SYMBOL(io_request_lock); EXPORT_SYMBOL(end_that_request_first); EXPORT_SYMBOL(end_that_request_last); +EXPORT_SYMBOL(blk_init_queue); +EXPORT_SYMBOL(blk_cleanup_queue); +EXPORT_SYMBOL(blk_queue_headactive); diff -u --recursive --new-file v2.3.31/linux/drivers/block/loop.c linux/drivers/block/loop.c --- v2.3.31/linux/drivers/block/loop.c Tue Dec 7 09:32:43 1999 +++ linux/drivers/block/loop.c Sun Dec 12 23:00:35 1999 @@ -164,7 +164,7 @@ loop_sizes[lo->lo_number] = size; } -static void do_lo_request(void) +static void do_lo_request(request_queue_t * q) { int real_block, block, offset, len, blksize, size; char *dest_addr; @@ -754,7 +754,7 @@ return -ENOMEM; } - 
blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); for (i=0; i < max_loop; i++) { memset(&loop_dev[i], 0, sizeof(struct loop_device)); loop_dev[i].lo_number = i; diff -u --recursive --new-file v2.3.31/linux/drivers/block/md.c linux/drivers/block/md.c --- v2.3.31/linux/drivers/block/md.c Tue Dec 7 09:32:43 1999 +++ linux/drivers/block/md.c Sun Dec 12 23:00:35 1999 @@ -761,7 +761,7 @@ } } -static void do_md_request (void) +static void do_md_request (request_queue_t * q) { printk ("Got md request, not good..."); return; @@ -1274,8 +1274,7 @@ return (-1); } - blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST; - blk_dev[MD_MAJOR].current_request=NULL; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MD_MAJOR]=INT_MAX; memset(md_dev, 0, MAX_MD_DEV * sizeof (struct md_dev)); md_gendisk.next=gendisk_head; diff -u --recursive --new-file v2.3.31/linux/drivers/block/nbd.c linux/drivers/block/nbd.c --- v2.3.31/linux/drivers/block/nbd.c Thu Nov 11 20:11:33 1999 +++ linux/drivers/block/nbd.c Sun Dec 12 23:00:35 1999 @@ -290,7 +290,7 @@ #undef FAIL #define FAIL( s ) { printk( KERN_ERR "NBD, minor %d: " s "\n", dev ); goto error_out; } -static void do_nbd_request(void) +static void do_nbd_request(request_queue_t * q) { struct request *req; int dev; @@ -488,7 +488,7 @@ #endif blksize_size[MAJOR_NR] = nbd_blksizes; blk_size[MAJOR_NR] = nbd_sizes; - blk_dev[MAJOR_NR].request_fn = do_nbd_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request); for (i = 0; i < MAX_NBD; i++) { nbd_dev[i].refcnt = 0; nbd_dev[i].file = NULL; diff -u --recursive --new-file v2.3.31/linux/drivers/block/paride/pcd.c linux/drivers/block/paride/pcd.c --- v2.3.31/linux/drivers/block/paride/pcd.c Fri Oct 22 13:21:47 1999 +++ linux/drivers/block/paride/pcd.c Sun Dec 12 23:00:35 1999 @@ -220,7 +220,7 @@ static int pcd_detect(void); static void pcd_probe_capabilities(void); static void do_pcd_read_drq(void); -static void 
do_pcd_request(void); +static void do_pcd_request(request_queue_t * q); static void do_pcd_read(void); static int pcd_blocksizes[PCD_UNITS]; @@ -343,7 +343,7 @@ for (unit=0;unitsector, ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects); end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(q); } } /* main strategy routine */ @@ -598,11 +598,11 @@ if (ps2esdi_out_cmd_blk(cmd_blk)) { printk("%s: Controller failed\n", DEVICE_NAME); if ((++CURRENT->errors) < MAX_RETRIES) - return do_ps2esdi_request(); + return do_ps2esdi_request(NULL); else { end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } } /* check for failure to put out the command block */ @@ -901,11 +901,11 @@ outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN); outb(CTRL_ENABLE_INTR, ESDI_CONTROL); if ((++CURRENT->errors) < MAX_RETRIES) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); else { end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } break; } @@ -947,11 +947,11 @@ outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN); outb(CTRL_ENABLE_INTR, ESDI_CONTROL); if ((++CURRENT->errors) < MAX_RETRIES) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); else { end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } break; @@ -961,7 +961,7 @@ outb(CTRL_ENABLE_INTR, ESDI_CONTROL); end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); break; case INT_CMD_FORMAT: @@ -993,11 +993,11 @@ if (CURRENT->nr_sectors -= CURRENT->current_nr_sectors) { CURRENT->buffer += CURRENT->current_nr_sectors * SECT_SIZE; CURRENT->sector += CURRENT->current_nr_sectors; - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } else { end_request(SUCCES); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } } diff -u --recursive --new-file v2.3.31/linux/drivers/block/rd.c linux/drivers/block/rd.c --- v2.3.31/linux/drivers/block/rd.c Tue Nov 23 22:42:20 1999 +++ linux/drivers/block/rd.c Sun 
Dec 12 23:02:23 1999 @@ -181,7 +181,7 @@ * allocated size, we must get rid of it... * */ -static void rd_request(void) +static void rd_request(request_queue_t * q) { unsigned int minor; unsigned long offset, len; @@ -350,7 +350,7 @@ invalidate_buffers(MKDEV(MAJOR_NR, i)); unregister_blkdev( MAJOR_NR, "ramdisk" ); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); } /* This is the registration and initialization section of the RAM disk driver */ @@ -371,7 +371,7 @@ return -EIO; } - blk_dev[MAJOR_NR].request_fn = &rd_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &rd_request); for (i = 0; i < NUM_RAMDISKS; i++) { /* rd_size is given in kB */ diff -u --recursive --new-file v2.3.31/linux/drivers/block/swim3.c linux/drivers/block/swim3.c --- v2.3.31/linux/drivers/block/swim3.c Tue May 11 23:36:27 1999 +++ linux/drivers/block/swim3.c Sun Dec 12 23:02:23 1999 @@ -219,7 +219,7 @@ static void swim3_select(struct floppy_state *fs, int sel); static void swim3_action(struct floppy_state *fs, int action); static int swim3_readbit(struct floppy_state *fs, int bit); -static void do_fd_request(void); +static void do_fd_request(request_queue_t * q); static void start_request(struct floppy_state *fs); static void set_timeout(struct floppy_state *fs, int nticks, void (*proc)(unsigned long)); @@ -290,7 +290,7 @@ return (stat & DATA) == 0; } -static void do_fd_request(void) +static void do_fd_request(request_queue_t * q) { int i; for(i=0;i 2.1.0 Werner Zimmermann, Nov 29, 97 + + November 1999 -- Make kernel-parameter implementation work with 2.3.x + Removed init_module & cleanup_module in favor of + module_init & module_exit. 
+ Torben Mathiasen */ #include @@ -351,11 +356,10 @@ static int aztGetToc(int multi); /* Kernel Interface Functions */ -void aztcd_setup(char *str, int *ints); static int check_aztcd_media_change(kdev_t full_dev); static int aztcd_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg); static void azt_transfer(void); -static void do_aztcd_request(void); +static void do_aztcd_request(request_queue_t *); static void azt_invalidate_buffers(void); int aztcd_open(struct inode *ip, struct file *fp); @@ -365,11 +369,8 @@ static int aztcd_release(struct inode * inode, struct file * file); #endif -int aztcd_init(void); -#ifdef MODULE - int init_module(void); - void cleanup_module(void); -#endif MODULE +int aztcd_init(void); + static struct file_operations azt_fops = { NULL, /* lseek - default */ block_read, /* read - general block-dev read */ @@ -1084,17 +1085,25 @@ Kernel Interface Functions ########################################################################## */ -#ifdef AZT_KERNEL_PRIOR_2_1 -void aztcd_setup(char *str, int *ints) -#else -void __init aztcd_setup(char *str, int *ints) -#endif -{ if (ints[0] > 0) - azt_port = ints[1]; - if (ints[0] > 1) - azt_cont = ints[2]; + +#ifndef MODULE +static int __init aztcd_setup(char *str) +{ + int ints[4]; + + (void)get_options(str, ARRAY_SIZE(ints), ints); + + if (ints[0] > 0) + azt_port = ints[1]; + if (ints[1] > 1) + azt_cont = ints[2]; + return 1; } +__setup("aztcd=", aztcd_setup); + +#endif /* !MODULE */ + /* * Checking if the media has been changed */ @@ -1478,7 +1487,7 @@ } } -static void do_aztcd_request(void) +static void do_aztcd_request(request_queue_t * q) { #ifdef AZT_TEST printk(" do_aztcd_request(%ld+%ld) Time:%li\n", CURRENT -> sector, CURRENT -> nr_sectors,jiffies); @@ -1614,11 +1623,7 @@ * Test for presence of drive and initialize it. Called at boot time. 
*/ -#ifdef AZT_KERNEL_PRIOR_2_1 -int aztcd_init(void) -#else int __init aztcd_init(void) -#endif { long int count, max_count; unsigned char result[50]; int st; @@ -1798,7 +1803,7 @@ MAJOR_NR); return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); #ifndef AZT_KERNEL_PRIOR_2_1 blksize_size[MAJOR_NR] = aztcd_blocksizes; #endif @@ -1815,14 +1820,7 @@ return (0); } -#ifdef MODULE - -int init_module(void) -{ - return aztcd_init(); -} - -void cleanup_module(void) +void __exit aztcd_exit(void) { if ((unregister_blkdev(MAJOR_NR, "aztcd") == -EINVAL)) { printk("What's that: can't unregister aztcd\n"); @@ -1836,8 +1834,11 @@ release_region(azt_port,4); /*proprietary interface*/ printk(KERN_INFO "aztcd module released.\n"); } -#endif MODULE +#ifdef MODULE +module_init(aztcd_init); +#endif +module_exit(aztcd_exit); /*########################################################################## Aztcd State Machine: Controls Drive Operating State @@ -2283,5 +2284,3 @@ static int azt_bcd2bin(unsigned char bcd) { return (bcd >> 4) * 10 + (bcd & 0xF); } - - diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/cdrom.c linux/drivers/cdrom/cdrom.c --- v2.3.31/linux/drivers/cdrom/cdrom.c Tue Dec 7 09:32:43 1999 +++ linux/drivers/cdrom/cdrom.c Mon Dec 13 14:08:40 1999 @@ -186,11 +186,15 @@ -- Added setup of write mode for packet writing. -- Fixed CDDA ripping with cdda2wav - accept much larger requests of number of frames and split the reads in blocks of 8. + + 3.05 Dec 13, 1999 - Jens Axboe + -- Added support for changing the region of DVD drives. + -- Added sense data to generic command. 
-------------------------------------------------------------------------*/ -#define REVISION "Revision: 3.05" -#define VERSION "Id: cdrom.c 3.05 1999/10/24" +#define REVISION "Revision: 3.06" +#define VERSION "Id: cdrom.c 3.06 1999/12/13" /* I use an error-log mask to give fine grain control over the type of messages dumped to the system logs. The available masks include: */ @@ -1909,7 +1913,7 @@ cgc.cmd[5] = entry.cdte_addr.msf.frame; entry.cdte_track = ti.cdti_trk1; - if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) + if (cdo->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) return -EINVAL; cgc.cmd[6] = entry.cdte_addr.msf.minute; @@ -2053,6 +2057,7 @@ case CDROM_SEND_PACKET: { __u8 *userbuf, copy = 0; + struct request_sense *sense; if (!CDROM_CAN(CDC_GENERIC_PACKET)) return -ENOSYS; cdinfo(CD_DO_IOCTL, "entering CDROM_SEND_PACKET\n"); @@ -2060,6 +2065,7 @@ copy = !!cgc.buflen; userbuf = cgc.buffer; cgc.buffer = NULL; + sense = cgc.sense; if (userbuf != NULL && copy) { /* usually commands just copy data one way, i.e. * we send a buffer to the drive and the command @@ -2090,6 +2096,10 @@ ret = cdo->generic_packet(cdi, &cgc); if (copy && !ret) __copy_to_user(userbuf, cgc.buffer, cgc.buflen); + /* copy back sense data */ + if (ret && sense != NULL) + if (copy_to_user(sense, cgc.sense, sizeof(struct request_sense))) + ret = -EFAULT; kfree(cgc.buffer); return ret; } diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/cdu31a.c linux/drivers/cdrom/cdu31a.c --- v2.3.31/linux/drivers/cdrom/cdu31a.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/cdu31a.c Mon Dec 13 14:08:40 1999 @@ -142,6 +142,11 @@ * . Work begun on fixing driver to * work under 2.1.X. Added temporary extra printks * which seem to slow it down enough to work. + * + * 9 November 1999 -- Make kernel-parameter implementation work with 2.3.x + * Removed init_module & cleanup_module in favor of + * module_init & module_exit. 
+ * Torben Mathiasen */ #include @@ -1641,7 +1646,7 @@ * data access on a CD is done sequentially, this saves a lot of operations. */ static void -do_cdu31a_request(void) +do_cdu31a_request(request_queue_t * q) { int block; int nblock; @@ -3317,11 +3322,15 @@ #ifndef MODULE /* * Set up base I/O and interrupts, called from main.c. + */ -void __init -cdu31a_setup(char *strings, - int *ints) + +static int __init cdu31a_setup(char *strings) { + int ints[4]; + + (void)get_options(strings, ARRAY_SIZE(ints), ints); + if (ints[0] > 0) { cdu31a_port = ints[1]; @@ -3341,7 +3350,12 @@ printk("CDU31A: Unknown interface type: %s\n", strings); } } + + return 1; } + +__setup("cdu31a=", cdu31a_setup); + #endif static int cdu31a_block_size; @@ -3497,7 +3511,7 @@ is_a_cdu31a = strcmp("CD-ROM CDU31A", drive_config.product_id) == 0; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = CDU31A_READAHEAD; cdu31a_block_size = 1024; /* 1kB default block size */ /* use 'mount -o block=2048' */ @@ -3539,16 +3553,9 @@ return -EIO; } -#ifdef MODULE -int -init_module(void) -{ - return cdu31a_init(); -} - -void -cleanup_module(void) +void __exit +cdu31a_exit(void) { if (unregister_cdrom(&scd_info)) { @@ -3567,4 +3574,9 @@ release_region(cdu31a_port,4); printk(KERN_INFO "cdu31a module released.\n"); } -#endif MODULE + +#ifdef MODULE +module_init(cdu31a_init); +#endif +module_exit(cdu31a_exit); + diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/cm206.c linux/drivers/cdrom/cm206.c --- v2.3.31/linux/drivers/cdrom/cm206.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/cm206.c Mon Dec 13 14:08:40 1999 @@ -151,6 +151,11 @@ 24 jan 1998 Removed the cm206_disc_status() function, as it was now dead code. The Uniform CDROM driver now provides this functionality. + +9 Nov. 1999 Make kernel-parameter implementation work with 2.3.x + Removed init_module & cleanup_module in favor of + module_init & module_exit. 
+ Torben Mathiasen * * Parts of the code are based upon lmscd.c written by Kai Petzke, * sbpcd.c written by Eberhard Moenkeberg, and mcd.c by Martin @@ -209,6 +214,8 @@ static int cm206_base = CM206_BASE; static int cm206_irq = CM206_IRQ; +static int cm206[2] = {0,0}; /* for compatible `insmod' parameter passing */ + MODULE_PARM(cm206_base, "i"); /* base */ MODULE_PARM(cm206_irq, "i"); /* irq */ MODULE_PARM(cm206, "1-2i"); /* base,irq or irq,base */ @@ -801,7 +808,7 @@ /* This is not a very smart implementation. We could optimize for consecutive block numbers. I'm not convinced this would really bring down the processor load. */ -static void do_cm206_request(void) +static void do_cm206_request(request_queue_t * q) { long int i, cd_sec_no; int quarter, error; @@ -1394,7 +1401,7 @@ cleanup(3); return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = cm206_blocksizes; read_ahead[MAJOR_NR] = 16; /* reads ahead what? */ init_bh(CM206_BH, cm206_bh); @@ -1411,7 +1418,6 @@ #ifdef MODULE -static int cm206[2] = {0,0}; /* for compatible `insmod' parameter passing */ void __init parse_options(void) { @@ -1428,7 +1434,7 @@ } } -int init_module(void) +int __cm206_init(void) { parse_options(); #if !defined(AUTO_PROBE_MODULE) @@ -1437,19 +1443,26 @@ return cm206_init(); } -void cleanup_module(void) +void __exit cm206_exit(void) { cleanup(4); printk(KERN_INFO "cm206 removed\n"); } + +module_init(__cm206_init); +module_exit(cm206_exit); #else /* !MODULE */ /* This setup function accepts either `auto' or numbers in the range * 3--11 (for irq) or 0x300--0x370 (for base port) or both. 
*/ -void __init cm206_setup(char *s, int *p) + +static int __init cm206_setup(char *s) { - int i; + int i, p[4]; + + (void)get_options(s, ARRAY_SIZE(p), p); + if (!strcmp(s, "auto")) auto_probe=1; for(i=1; i<=p[0]; i++) { if (0x300 <= p[i] && i<= 0x370 && p[i] % 0x10 == 0) { @@ -1461,8 +1474,12 @@ auto_probe = 0; } } + return 1; } -#endif /* MODULE */ + +__setup("cm206=", cm206_setup); + +#endif /* !MODULE */ /* * Local variables: * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -D__SMP__ -pipe -fno-strength-reduce -m486 -DCPU=486 -D__SMP__ -DMODULE -DMODVERSIONS -include /usr/src/linux/include/linux/modversions.h -c -o cm206.o cm206.c" diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/gscd.c linux/drivers/cdrom/gscd.c --- v2.3.31/linux/drivers/cdrom/gscd.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/gscd.c Mon Dec 13 14:08:40 1999 @@ -31,6 +31,13 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + -------------------------------------------------------------------- + + 9 November 1999 -- Make kernel-parameter implementation work with 2.3.x + Removed init_module & cleanup_module in favor of + module_init & module_exit. 
+ Torben Mathiasen */ @@ -86,7 +93,8 @@ /* Schnittstellen zum Kern/FS */ -static void do_gscd_request (void); +static void do_gscd_request (request_queue_t *); +static void __do_gscd_request (void); static int gscd_ioctl (struct inode *, struct file *, unsigned int, unsigned long); static int gscd_open (struct inode *, struct file *); static int gscd_release (struct inode *, struct file *); @@ -194,14 +202,24 @@ } -void __init gscd_setup (char *str, int *ints) +#ifndef MODULE +/* Using new interface for kernel-parameters */ + +static int __init gscd_setup (char *str) { + int ints[2]; + (void)get_options(str, ARRAY_SIZE(ints), ints); + if (ints[0] > 0) { gscd_port = ints[1]; } + return 1; } +__setup("gscd=", gscd_setup); + +#endif static int gscd_ioctl (struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg) { @@ -260,7 +278,12 @@ * I/O request routine called from Linux kernel. */ -static void do_gscd_request (void) +static void do_gscd_request (request_queue_t * q) +{ + __do_gscd_request(); +} + +static void __do_gscd_request (void) { unsigned int block,dev; unsigned int nsect; @@ -355,7 +378,7 @@ end_request(1); } } - SET_TIMER(do_gscd_request, 1); + SET_TIMER(__do_gscd_request, 1); } @@ -957,9 +980,8 @@ } #endif -#ifdef MODULE /* Init for the Module-Version */ -int init_module (void) +int init_gscd(void) { long err; @@ -978,7 +1000,7 @@ } } -void cleanup_module (void) +void __exit exit_gscd(void) { if ((unregister_blkdev(MAJOR_NR, "gscd" ) == -EINVAL)) @@ -990,7 +1012,11 @@ release_region (gscd_port,4); printk(KERN_INFO "GoldStar-module released.\n" ); } -#endif + +#ifdef MODULE +module_init(init_gscd); +#endif +module_exit(exit_gscd); /* Test for presence of drive and initialize it. Called only at boot time. 
*/ @@ -1060,7 +1086,7 @@ return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = gscd_blocksizes; read_ahead[MAJOR_NR] = 4; diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/isp16.c linux/drivers/cdrom/isp16.c --- v2.3.31/linux/drivers/cdrom/isp16.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/isp16.c Mon Dec 13 14:08:40 1999 @@ -11,6 +11,11 @@ * Removed sound configuration. * Added "module" support. * + * 9 November 1999 -- Make kernel-parameter implementation work with 2.3.x + * Removed init_module & cleanup_module in favor of + * module_init & module_exit. + * Torben Mathiasen + * * Detect cdrom interface on ISP16 sound card. * Configure cdrom interface. * @@ -69,17 +74,20 @@ MODULE_PARM(isp16_cdrom_irq, "i"); MODULE_PARM(isp16_cdrom_dma, "i"); MODULE_PARM(isp16_cdrom_type, "s"); -int init_module(void); -void cleanup_module(void); +void isp16_exit(void); #endif #define ISP16_IN(p) (outb(isp16_ctrl,ISP16_CTRL_PORT), inb(p)) #define ISP16_OUT(p,b) (outb(isp16_ctrl,ISP16_CTRL_PORT), outb(b,p)) +#ifndef MODULE -void __init -isp16_setup(char *str, int *ints) +static int +__init isp16_setup(char *str) { + int ints[4]; + + (void)get_options(str, ARRAY_SIZE(ints), ints); if ( ints[0] > 0 ) isp16_cdrom_base = ints[1]; if ( ints[0] > 1 ) @@ -88,8 +96,14 @@ isp16_cdrom_dma = ints[3]; if ( str ) isp16_cdrom_type = str; + + return 1; } +__setup("isp16=", isp16_setup); + +#endif /* MODULE */ + /* * ISP16 initialisation. 
* @@ -307,15 +321,15 @@ return(0); } -#ifdef MODULE -int init_module(void) -{ - return isp16_init(); -} - -void cleanup_module(void) +void __exit isp16_exit(void) { release_region(ISP16_IO_BASE, ISP16_IO_SIZE); printk(KERN_INFO "ISP16: module released.\n"); } -#endif /* MODULE */ + +#ifdef MODULE +module_init(isp16_init); +#endif +module_exit(isp16_exit); + + diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/isp16.h linux/drivers/cdrom/isp16.h --- v2.3.31/linux/drivers/cdrom/isp16.h Tue Dec 2 11:41:44 1997 +++ linux/drivers/cdrom/isp16.h Mon Dec 13 14:08:40 1999 @@ -71,5 +71,4 @@ #define ISP16_IO_BASE 0xF8D #define ISP16_IO_SIZE 5 /* ports used from 0xF8D up to 0xF91 */ -void isp16_setup(char *str, int *ints); int isp16_init(void); diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/mcd.c linux/drivers/cdrom/mcd.c --- v2.3.31/linux/drivers/cdrom/mcd.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/mcd.c Mon Dec 13 14:08:40 1999 @@ -68,6 +68,13 @@ November 1997 -- ported to the Uniform CD-ROM driver by Erik Andersen. March 1999 -- made io base and irq CONFIG_ options (Tigran Aivazian). + + November 1999 -- Make kernel-parameter implementation work with 2.3.x + Removed init_module & cleanup_module in favor of + module_init & module_exit. 
+ Torben Mathiasen + + */ #include @@ -229,9 +236,13 @@ "mcd", /* name of the device type */ }; - -void __init mcd_setup(char *str, int *ints) +#ifndef MODULE +static int __init mcd_setup(char *str) { + int ints[9]; + + (void)get_options(str, ARRAY_SIZE(ints), ints); + if (ints[0] > 0) mcd_port = ints[1]; if (ints[0] > 1) @@ -240,8 +251,13 @@ if (ints[0] > 2) mitsumi_bug_93_wait = ints[3]; #endif /* WORK_AROUND_MITSUMI_BUG_93 */ + + return 1; } +__setup("mcd=", mcd_setup); + +#endif /* MODULE */ static int mcd_media_changed(struct cdrom_device_info * cdi, int disc_nr) { @@ -648,7 +664,7 @@ static void -do_mcd_request(void) +do_mcd_request(request_queue_t * q) { #ifdef TEST2 printk(" do_mcd_request(%ld+%ld)\n", CURRENT -> sector, CURRENT -> nr_sectors); @@ -1127,7 +1143,7 @@ /* This routine gets called during initialization if things go wrong, - * and is used in cleanup_module as well. */ + * and is used in mcd_exit as well. */ static void cleanup(int level) { switch (level) { @@ -1179,7 +1195,7 @@ } blksize_size[MAJOR_NR] = mcd_blocksizes; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 4; /* check for card */ @@ -1635,14 +1651,15 @@ return limit > 0 ? 0 : -1; } -#ifdef MODULE -int init_module(void) -{ - return mcd_init(); -} -void cleanup_module(void) +void __exit mcd_exit(void) { cleanup(3); } -#endif MODULE + +#ifdef MODULE +module_init(mcd_init); +#endif +module_exit(mcd_exit); + + diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/mcdx.c linux/drivers/cdrom/mcdx.c --- v2.3.31/linux/drivers/cdrom/mcdx.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/mcdx.c Mon Dec 13 14:08:40 1999 @@ -44,6 +44,10 @@ * Marcin Dalecki (improved performance, shortened code) * ... somebody forgotten? * + * 9 November 1999 -- Make kernel-parameter implementation work with 2.3.x + * Removed init_module & cleanup_module in favor of + * module_init & module_exit. 
+ * Torben Mathiasen */ @@ -208,10 +212,8 @@ /* declared in blk.h */ int mcdx_init(void); -void do_mcdx_request(void); +void do_mcdx_request(request_queue_t * q); -/* already declared in init/main */ -void mcdx_setup(char *, int *); /* Indirect exported functions. These functions are exported by their addresses, such as mcdx_open and mcdx_close in the @@ -521,7 +523,7 @@ } } -void do_mcdx_request() +void do_mcdx_request(request_queue_t * q) { int dev; struct s_drive_stuff *stuffp; @@ -770,12 +772,21 @@ return 1; } -void __init mcdx_setup(char *str, int *pi) +#ifndef MODULE +static int __init mcdx_setup(char *str) { + int pi[4]; + (void)get_options(str, ARRAY_SIZE(pi), pi); + if (pi[0] > 0) mcdx_drive_map[0][0] = pi[1]; if (pi[0] > 1) mcdx_drive_map[0][1] = pi[2]; + return 1; } +__setup("mcdx=", mcdx_setup); + +#endif + /* DIRTY PART ******************************************************/ static void mcdx_delay(struct s_drive_stuff *stuff, long jifs) @@ -953,10 +964,10 @@ } /* MODULE STUFF ***********************************************************/ -#ifdef MODULE + EXPORT_NO_SYMBOLS; -int init_module(void) +int __mcdx_init(void) { int i; int drives = 0; @@ -976,7 +987,7 @@ return 0; } -void cleanup_module(void) +void __exit mcdx_exit(void) { int i; @@ -1009,7 +1020,11 @@ #endif } -#endif MODULE +#ifdef MODULE +module_init(__mcdx_init); +#endif +module_exit(mcdx_exit); + /* Support functions ************************************************/ @@ -1116,7 +1131,7 @@ return 1; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = READ_AHEAD; blksize_size[MAJOR_NR] = mcdx_blocksizes; diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/optcd.c linux/drivers/cdrom/optcd.c --- v2.3.31/linux/drivers/cdrom/optcd.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/optcd.c Mon Dec 13 14:08:40 1999 @@ -57,6 +57,11 @@ thanks to Luke McFarlane. Also tidied up some printk behaviour. 
ISP16 initialization is now handled by a separate driver. + + 09-11-99 Make kernel-parameter implementation work with 2.3.x + Removed init_module & cleanup_module in favor of + module_init & module_exit. + Torben Mathiasen */ /* Includes */ @@ -1360,7 +1365,7 @@ } -static void do_optcd_request(void) +static void do_optcd_request(request_queue_t * q) { DEBUG((DEBUG_REQUEST, "do_optcd_request(%ld+%ld)", CURRENT -> sector, CURRENT -> nr_sectors)); @@ -2020,14 +2025,23 @@ NULL /* revalidate */ }; - +#ifndef MODULE /* Get kernel parameter when used as a kernel driver */ -void __init optcd_setup(char *str, int *ints) +static int optcd_setup(char *str) { + int ints[4]; + (void)get_options(str, ARRAY_SIZE(ints), ints); + if (ints[0] > 0) optcd_port = ints[1]; + + return 1; } +__setup("optcd=", optcd_setup); + +#endif MODULE + /* Test for presence of drive and initialize it. Called at boot time or during module initialisation. */ int __init optcd_init(void) @@ -2067,7 +2081,7 @@ hardsect_size[MAJOR_NR] = &hsecsize; blksize_size[MAJOR_NR] = &blksize; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 4; request_region(optcd_port, 4, "optcd"); @@ -2076,14 +2090,7 @@ } -#ifdef MODULE -int init_module(void) -{ - return optcd_init(); -} - - -void cleanup_module(void) +void __exit optcd_exit(void) { if (unregister_blkdev(MAJOR_NR, "optcd") == -EINVAL) { printk(KERN_ERR "optcd: what's that: can't unregister\n"); @@ -2092,4 +2099,10 @@ release_region(optcd_port, 4); printk(KERN_INFO "optcd: module released.\n"); } -#endif MODULE + +#ifdef MODULE +module_init(optcd_init); +#endif +module_exit(optcd_exit); + + diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/sbpcd.c linux/drivers/cdrom/sbpcd.c --- v2.3.31/linux/drivers/cdrom/sbpcd.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/sbpcd.c Mon Dec 13 14:08:40 1999 @@ -307,6 +307,11 @@ * 4.62 Fix a bug where playing audio left the drive in an 
unusable state. * Heiko Eissfeldt * + * November 1999 -- Make kernel-parameter implementation work with 2.3.x + * Removed init_module & cleanup_module in favor of + * module_init & module_exit. + * Torben Mathiasen + * * * TODO * implement "read all subchannel data" (96 bytes per frame) @@ -324,6 +329,7 @@ #include +#include #include #include #include @@ -4794,7 +4800,7 @@ /* * I/O request routine, called from Linux kernel. */ -static void DO_SBPCD_REQUEST(void) +static void DO_SBPCD_REQUEST(request_queue_t * q) { u_int block; u_int nsect; @@ -5457,12 +5463,15 @@ * bytes above). * */ + #if (SBPCD_ISSUE-1) -static void __init sbpcd_setup(const char *s, int *p) +static int sbpcd_setup(char *s) #else -void __init sbpcd_setup(const char *s, int *p) +int sbpcd_setup(char *s) #endif { + int p[4]; + (void)get_options(s, ARRAY_SIZE(p), p); setup_done++; msg(DBG_INI,"sbpcd_setup called with %04X,%s\n",p[1], s); sbpro_type=0; /* default: "LaserMate" */ @@ -5494,7 +5503,13 @@ } } else CDi_data=sbpcd_ioaddr+2; + + return 1; } + +__setup("sbpcd=", sbpcd_setup); + + /*==========================================================================*/ /* * Sequoia S-1000 CD-ROM Interface Configuration @@ -5569,7 +5584,7 @@ * Called once at boot or load time. 
*/ #ifdef MODULE -int init_module(void) +int __init __SBPCD_INIT(void) #else int __init SBPCD_INIT(void) #endif MODULE @@ -5616,7 +5631,7 @@ else if (sbpcd[port_index+1]==1) type=str_sb; else if (sbpcd[port_index+1]==3) type=str_t16; else type=str_lm; - sbpcd_setup(type, addr); + sbpcd_setup((char *)type); #if DISTRIBUTION msg(DBG_INF,"Scanning 0x%X (%s)...\n", CDo_command, type); #endif DISTRIBUTION @@ -5725,7 +5740,7 @@ goto init_done; #endif MODULE } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = buffers * (CD_FRAMESIZE / 512); request_region(CDo_command,4,major_name); @@ -5808,7 +5823,7 @@ } /*==========================================================================*/ #ifdef MODULE -void cleanup_module(void) +void sbpcd_exit(void) { int j; @@ -5833,6 +5848,14 @@ } msg(DBG_INF, "%s module released.\n", major_name); } + + +#ifdef MODULE +module_init(__SBPCD_INIT) /*HACK!*/; +#endif +module_exit(sbpcd_exit); + + #endif MODULE /*==========================================================================*/ /* diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/sjcd.c linux/drivers/cdrom/sjcd.c --- v2.3.31/linux/drivers/cdrom/sjcd.c Tue Jul 6 19:05:48 1999 +++ linux/drivers/cdrom/sjcd.c Mon Dec 13 14:08:40 1999 @@ -49,6 +49,10 @@ * the previous version of this driver. Coded added by Anthony Barbachan * from bugfix tip originally suggested by Alan Cox. * + * November 1999 -- Make kernel-parameter implementation work with 2.3.x + * Removed init_module & cleanup_module in favor of + * module_init & module_exit. + * Torben Mathiasen */ #define SJCD_VERSION_MAJOR 1 @@ -163,12 +167,21 @@ * Set up device, i.e., use command line data to set * base address. 
*/ -void __init sjcd_setup( char *str, int *ints ) +#ifndef MODULE +static int __init sjcd_setup( char *str) { + int ints[2]; + (void)get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] > 0) sjcd_base = ints[1]; + + return 1; } +__setup("sjcd=", sjcd_setup); + +#endif + /* * Special converters. */ @@ -1272,7 +1285,7 @@ SJCD_SET_TIMER( sjcd_poll, 1 ); } -static void do_sjcd_request( void ){ +static void do_sjcd_request( request_queue_t * q ){ #if defined( SJCD_TRACE ) printk( "SJCD: do_sjcd_request(%ld+%ld)\n", CURRENT->sector, CURRENT->nr_sectors ); @@ -1475,7 +1488,7 @@ return( -EIO ); } - blk_dev[ MAJOR_NR ].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[ MAJOR_NR ] = 4; if( check_region( sjcd_base, 4 ) ){ @@ -1577,18 +1590,18 @@ return(0); } -#ifdef MODULE - -int init_module(void) -{ - return sjcd_init(); -} -void cleanup_module(void) +void __exit sjcd_exit(void) { if ( sjcd_cleanup() ) printk( "SJCD: module: cannot be removed.\n" ); else printk(KERN_INFO "SJCD: module: removed.\n"); } + +#ifdef MODULE +module_init(sjcd_init); #endif +module_exit(sjcd_exit); + + diff -u --recursive --new-file v2.3.31/linux/drivers/cdrom/sonycd535.c linux/drivers/cdrom/sonycd535.c --- v2.3.31/linux/drivers/cdrom/sonycd535.c Tue Aug 31 17:29:13 1999 +++ linux/drivers/cdrom/sonycd535.c Mon Dec 13 14:08:40 1999 @@ -31,6 +31,11 @@ * More changes to support CDU-510/515 series * (Claudio Porfiri) * + * November 1999 -- Make kernel-parameter implementation work with 2.3.x + * Removed init_module & cleanup_module in favor of + * module_init & module_exit. + * Torben Mathiasen + * * Things to do: * - handle errors and status better, put everything into a single word * - use interrupts (code mostly there, but a big hole still missing) @@ -781,7 +786,7 @@ * data access on a CD is done sequentially, this saves a lot of operations. 
*/ static void -do_cdu535_request(void) +do_cdu535_request(request_queue_t * q) { unsigned int dev; unsigned int read_size; @@ -1601,7 +1606,7 @@ MAJOR_NR, CDU535_MESSAGE_NAME); return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = &sonycd535_block_size; read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ @@ -1648,6 +1653,7 @@ } #ifndef MODULE + /* * accept "kernel command line" parameters * (added by emoenke@gwdg.de) @@ -1657,9 +1663,11 @@ * * the address value has to be the existing CDROM port address. */ -void __init -sonycd535_setup(char *strings, int *ints) +static int __init +sonycd535_setup(char *strings) { + int ints[3]; + (void)get_options(strings, ARRAY_SIZE(ints), ints); /* if IRQ change and default io base desired, * then call with io base of 0 */ @@ -1671,17 +1679,16 @@ if ((strings != NULL) && (*strings != '\0')) printk(CDU535_MESSAGE_NAME ": Warning: Unknown interface type: %s\n", strings); + + return 1; } -#else /* MODULE */ +__setup("sonycd535=", sonycd535_setup); -int init_module(void) -{ - return sony535_init(); -} +#endif /* MODULE */ -void -cleanup_module(void) +void __exit +sony535_exit(void) { int i; @@ -1696,4 +1703,10 @@ else printk(KERN_INFO CDU535_HANDLE " module released\n"); } -#endif /* MODULE */ + +#ifdef MODULE +module_init(sony535_init); +#endif +module_exit(sony535_exit); + + diff -u --recursive --new-file v2.3.31/linux/drivers/char/Config.in linux/drivers/char/Config.in --- v2.3.31/linux/drivers/char/Config.in Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/Config.in Mon Dec 13 14:10:09 1999 @@ -214,12 +214,10 @@ fi endmenu -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM - dep_tristate ' 3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM - if [ "$CONFIG_DRM" = "y" ]; then - dep_tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA m - fi +bool 'Direct Rendering 
Manager (XFree86 DRI support)' CONFIG_DRM +dep_tristate ' 3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM +if [ "$CONFIG_DRM" = "y" ]; then + dep_tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA m fi if [ "$CONFIG_PCMCIA" != "n" ]; then @@ -227,8 +225,8 @@ fi if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - dep_tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP m - if [ "$CONFIG_AGP" = "m" ]; then + tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP + if [ "$CONFIG_AGP" != "n" ]; then bool ' Intel 440LX/BX/GX support' CONFIG_AGP_INTEL bool ' Intel I810/I810 DC100/I810e support' CONFIG_AGP_I810 bool ' VIA VP3/MVP3/Apollo Pro support' CONFIG_AGP_VIA diff -u --recursive --new-file v2.3.31/linux/drivers/char/Makefile linux/drivers/char/Makefile --- v2.3.31/linux/drivers/char/Makefile Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/Makefile Thu Dec 9 17:00:37 1999 @@ -636,9 +636,15 @@ endif endif -ifeq ($(CONFIG_AGP), m) +ifeq ($(CONFIG_AGP), y) + SUB_DIRS += agp ALL_SUB_DIRS += agp MOD_SUB_DIRS += agp +else + ifeq ($(CONFIG_AGP), m) + ALL_SUB_DIRS += agp + MOD_SUB_DIRS += agp + endif endif include $(TOPDIR)/Rules.make diff -u --recursive --new-file v2.3.31/linux/drivers/char/agp/Makefile linux/drivers/char/agp/Makefile --- v2.3.31/linux/drivers/char/agp/Makefile Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/agp/Makefile Thu Dec 9 17:00:37 1999 @@ -3,30 +3,17 @@ # space ioctl interface to use agp memory. It also adds a kernel interface # that other drivers could use to manipulate agp memory. 
-M_OBJS := agpgart.o +O_TARGET := agp.o -CFLAGS_agp_backend.o := - -ifdef CONFIG_AGP_I810 -CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_I810 -endif -ifdef CONFIG_AGP_INTEL -CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_GENERIC -endif -ifdef CONFIG_AGP_VIA -CFLAGS_agp_backend.o += -DAGP_BUILD_VIA_GENERIC -endif -ifdef CONFIG_AGP_AMD -CFLAGS_agp_backend.o += -DAGP_BUILD_AMD_IRONGATE -endif -ifdef CONFIG_AGP_SIS -CFLAGS_agp_backend.o += -DAGP_BUILD_SIS_GENERIC -endif -ifdef CONFIG_AGP_ALI -CFLAGS_agp_backend.o += -DAGP_BUILD_ALI_M1541 +ifeq ($(CONFIG_AGP),y) + O_OBJS += agpgart.o +else + ifeq ($(CONFIG_AGP), m) + M_OBJS += agpgart.o + endif endif include $(TOPDIR)/Rules.make -agpgart.o: agp_backend.o agpgart_fe.o - $(LD) $(LD_RFLAG) -r -o $@ agp_backend.o agpgart_fe.o +agpgart.o: agpgart_be.o agpgart_fe.o + $(LD) $(LD_RFLAG) -r -o $@ agpgart_be.o agpgart_fe.o diff -u --recursive --new-file v2.3.31/linux/drivers/char/agp/agp.h linux/drivers/char/agp/agp.h --- v2.3.31/linux/drivers/char/agp/agp.h Wed Dec 31 16:00:00 1969 +++ linux/drivers/char/agp/agp.h Mon Dec 13 14:10:09 1999 @@ -0,0 +1,257 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AGP_BACKEND_PRIV_H +#define _AGP_BACKEND_PRIV_H 1 + +#include + +enum aper_size_type { + U8_APER_SIZE, + U16_APER_SIZE, + U32_APER_SIZE, + FIXED_APER_SIZE +}; + +typedef struct _gatt_mask { + unsigned long mask; + u32 type; + /* totally device specific, for integrated chipsets that + * might have different types of memory masks. For other + * devices this will probably be ignored */ +} gatt_mask; + +typedef struct _aper_size_info_8 { + int size; + int num_entries; + int page_order; + u8 size_value; +} aper_size_info_8; + +typedef struct _aper_size_info_16 { + int size; + int num_entries; + int page_order; + u16 size_value; +} aper_size_info_16; + +typedef struct _aper_size_info_32 { + int size; + int num_entries; + int page_order; + u32 size_value; +} aper_size_info_32; + +typedef struct _aper_size_info_fixed { + int size; + int num_entries; + int page_order; +} aper_size_info_fixed; + +struct agp_bridge_data { + agp_version *version; + void *aperture_sizes; + void *previous_size; + void *current_size; + void *dev_private_data; + struct pci_dev *dev; + gatt_mask *masks; + unsigned long *gatt_table; + unsigned long *gatt_table_real; + unsigned long scratch_page; + unsigned long gart_bus_addr; + unsigned long gatt_bus_addr; + u32 mode; + enum chipset_type type; + enum aper_size_type size_type; + u32 *key_list; + atomic_t current_memory_agp; + atomic_t agp_in_use; + int max_memory_agp; /* in number of pages */ + int needs_scratch_page; + int 
aperture_size_idx; + int num_aperture_sizes; + int num_of_masks; + int capndx; + + /* Links to driver specific functions */ + + int (*fetch_size) (void); + int (*configure) (void); + void (*agp_enable) (u32); + void (*cleanup) (void); + void (*tlb_flush) (agp_memory *); + unsigned long (*mask_memory) (unsigned long, int); + void (*cache_flush) (void); + int (*create_gatt_table) (void); + int (*free_gatt_table) (void); + int (*insert_memory) (agp_memory *, off_t, int); + int (*remove_memory) (agp_memory *, off_t, int); + agp_memory *(*alloc_by_type) (size_t, int); + void (*free_by_type) (agp_memory *); + + /* Links to vendor/device specific setup functions */ +#ifdef CONFIG_AGP_INTEL + void (*intel_generic_setup) (void); +#endif +#ifdef CONFIG_AGP_I810 + void (*intel_i810_setup) (struct pci_dev *); +#endif +#ifdef CONFIG_AGP_VIA + void (*via_generic_setup) (void); +#endif +#ifdef CONFIG_AGP_SIS + void (*sis_generic_setup) (void); +#endif +#ifdef CONFIG_AGP_AMD + void (*amd_irongate_setup) (void); +#endif +#ifdef CONFIG_AGP_ALI + void (*ali_generic_setup) (void); +#endif +}; + +#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val) +#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val) +#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val) + +#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr)) +#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr)) +#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr)) + +#define CACHE_FLUSH agp_bridge.cache_flush +#define A_SIZE_8(x) ((aper_size_info_8 *) x) +#define A_SIZE_16(x) ((aper_size_info_16 *) x) +#define A_SIZE_32(x) ((aper_size_info_32 *) x) +#define A_SIZE_FIX(x) ((aper_size_info_fixed *) x) +#define A_IDX8() (A_SIZE_8(agp_bridge.aperture_sizes) + i) +#define A_IDX16() (A_SIZE_16(agp_bridge.aperture_sizes) + i) +#define A_IDX32() (A_SIZE_32(agp_bridge.aperture_sizes) + i) +#define A_IDXFIX() (A_SIZE_FIX(agp_bridge.aperture_sizes) + i) 
+#define MAXKEY (4096 * 32) + +#ifndef min +#define min(a,b) (((a)<(b))?(a):(b)) +#endif + +#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) + +#ifndef PCI_DEVICE_ID_VIA_82C691_0 +#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 +#endif +#ifndef PCI_DEVICE_ID_VIA_82C691_1 +#define PCI_DEVICE_ID_VIA_82C691_1 0x8691 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_0 +#define PCI_DEVICE_ID_INTEL_810_0 0x7120 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0 +#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_E_0 +#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124 +#endif +#ifndef PCI_DEVICE_ID_INTEL_82443GX_0 +#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_1 +#define PCI_DEVICE_ID_INTEL_810_1 0x7121 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1 +#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_E_1 +#define PCI_DEVICE_ID_INTEL_810_E_1 0x7125 +#endif +#ifndef PCI_DEVICE_ID_INTEL_82443GX_1 +#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 +#endif +#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0 +#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 +#endif +#ifndef PCI_VENDOR_ID_AL +#define PCI_VENDOR_ID_AL 0x10b9 +#endif +#ifndef PCI_DEVICE_ID_AL_M1541_0 +#define PCI_DEVICE_ID_AL_M1541_0 0x1541 +#endif + +/* intel register */ +#define INTEL_APBASE 0x10 +#define INTEL_APSIZE 0xb4 +#define INTEL_ATTBASE 0xb8 +#define INTEL_AGPCTRL 0xb0 +#define INTEL_NBXCFG 0x50 +#define INTEL_ERRSTS 0x91 + +/* intel i810 registers */ +#define I810_GMADDR 0x10 +#define I810_MMADDR 0x14 +#define I810_PTE_BASE 0x10000 +#define I810_PTE_MAIN_UNCACHED 0x00000000 +#define I810_PTE_LOCAL 0x00000002 +#define I810_PTE_VALID 0x00000001 +#define I810_SMRAM_MISCC 0x70 +#define I810_GFX_MEM_WIN_SIZE 0x00010000 +#define I810_GFX_MEM_WIN_32M 0x00010000 +#define I810_GMS 0x000000c0 +#define I810_GMS_DISABLE 0x00000000 +#define I810_PGETBL_CTL 0x2020 +#define I810_PGETBL_ENABLED 0x00000001 +#define 
I810_DRAM_CTL 0x3000 +#define I810_DRAM_ROW_0 0x00000001 +#define I810_DRAM_ROW_0_SDRAM 0x00000001 + +/* VIA register */ +#define VIA_APBASE 0x10 +#define VIA_GARTCTRL 0x80 +#define VIA_APSIZE 0x84 +#define VIA_ATTBASE 0x88 + +/* SiS registers */ +#define SIS_APBASE 0x10 +#define SIS_ATTBASE 0x90 +#define SIS_APSIZE 0x94 +#define SIS_TLBCNTRL 0x97 +#define SIS_TLBFLUSH 0x98 + +/* AMD registers */ +#define AMD_APBASE 0x10 +#define AMD_MMBASE 0x14 +#define AMD_APSIZE 0xac +#define AMD_MODECNTL 0xb0 +#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ +#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ +#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ +#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ + +/* ALi registers */ +#define ALI_APBASE 0x10 +#define ALI_AGPCTRL 0xb8 +#define ALI_ATTBASE 0xbc +#define ALI_TLBCTRL 0xc0 + +#endif /* _AGP_BACKEND_PRIV_H */ diff -u --recursive --new-file v2.3.31/linux/drivers/char/agp/agp_backend.c linux/drivers/char/agp/agp_backend.c --- v2.3.31/linux/drivers/char/agp/agp_backend.c Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/agp/agp_backend.c Wed Dec 31 16:00:00 1969 @@ -1,1987 +0,0 @@ -/* - * AGPGART module version 0.99 - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight - * Copyright (C) 1999 Xi Graphics - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ -#define EXPORT_SYMTAB -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "agp_backendP.h" - -static struct agp_bridge_data agp_bridge; - -#define CACHE_FLUSH agp_bridge.cache_flush - -MODULE_AUTHOR("Jeff Hartmann "); -MODULE_PARM(agp_try_unsupported, "1i"); -EXPORT_SYMBOL(agp_free_memory); -EXPORT_SYMBOL(agp_allocate_memory); -EXPORT_SYMBOL(agp_copy_info); -EXPORT_SYMBOL(agp_bind_memory); -EXPORT_SYMBOL(agp_unbind_memory); -EXPORT_SYMBOL(agp_enable); -EXPORT_SYMBOL(agp_backend_acquire); -EXPORT_SYMBOL(agp_backend_release); - -static int agp_try_unsupported __initdata = 0; - -#ifdef __SMP__ -static atomic_t cpus_waiting; -#endif - -int agp_backend_acquire(void) -{ - atomic_inc(&(agp_bridge.agp_in_use)); - - if (atomic_read(&(agp_bridge.agp_in_use)) != 1) { - atomic_dec(&(agp_bridge.agp_in_use)); - return -EBUSY; - } - MOD_INC_USE_COUNT; - return 0; -} - -void agp_backend_release(void) -{ - atomic_dec(&(agp_bridge.agp_in_use)); - MOD_DEC_USE_COUNT; -} - -static void flush_cache(void) -{ - asm volatile ("wbinvd":::"memory"); -} - -#ifdef __SMP__ -static void ipi_handler(void *null) -{ - flush_cache(); - atomic_dec(&cpus_waiting); - while (atomic_read(&cpus_waiting) > 0) - barrier(); -} - -static void smp_flush_cache(void) -{ - atomic_set(&cpus_waiting, smp_num_cpus - 1); - if 
(smp_call_function(ipi_handler, NULL, 1, 0) != 0) - panic("agpgart: timed out waiting for the other CPUs!\n"); - flush_cache(); - while (atomic_read(&cpus_waiting) > 0) - barrier(); -} -#endif - -/* - * Basic Page Allocation Routines - - * These routines handle page allocation - * and by default they reserve the allocated - * memory. They also handle incrementing the - * current_memory_agp value, Which is checked - * against a maximum value. - */ - -static void *agp_alloc_page(void) -{ - void *pt; - - pt = (void *) __get_free_page(GFP_KERNEL); - if (pt == NULL) { - return NULL; - } - atomic_inc(&(mem_map[MAP_NR(pt)].count)); - set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); - atomic_inc(&(agp_bridge.current_memory_agp)); - return pt; -} - -static void agp_destroy_page(void *pt) -{ - if (pt == NULL) - return; - - atomic_dec(&(mem_map[MAP_NR(pt)].count)); - clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); - wake_up(&mem_map[MAP_NR(pt)].wait); - free_page((unsigned long) pt); - atomic_dec(&(agp_bridge.current_memory_agp)); -} - -/* End Basic Page Allocation Routines */ - -/* - * Generic routines for handling agp_memory structures - - * They use the basic page allocation routines to do the - * brunt of the work. 
- */ - -#define MAXKEY (4096 * 32) - -static void agp_free_key(int key) -{ - - if (key < 0) { - return; - } - if (key < MAXKEY) { - clear_bit(key, agp_bridge.key_list); - } -} - -static int agp_get_key(void) -{ - int bit; - - bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY); - if (bit < MAXKEY) { - set_bit(bit, agp_bridge.key_list); - return bit; - } - return -1; -} - -static agp_memory *agp_create_memory(int scratch_pages) -{ - agp_memory *new; - - new = kmalloc(sizeof(agp_memory), GFP_KERNEL); - - if (new == NULL) { - return NULL; - } - memset(new, 0, sizeof(agp_memory)); - new->key = agp_get_key(); - - if (new->key < 0) { - kfree(new); - return NULL; - } - new->memory = vmalloc(PAGE_SIZE * scratch_pages); - - if (new->memory == NULL) { - agp_free_key(new->key); - kfree(new); - return NULL; - } - new->num_scratch_pages = scratch_pages; - return new; -} - -void agp_free_memory(agp_memory * curr) -{ - int i; - - if (curr == NULL) { - return; - } - if (curr->is_bound == TRUE) { - agp_unbind_memory(curr); - } - if (curr->type != 0) { - agp_bridge.free_by_type(curr); - MOD_DEC_USE_COUNT; - return; - } - if (curr->page_count != 0) { - for (i = 0; i < curr->page_count; i++) { - curr->memory[i] &= ~(0x00000fff); - agp_destroy_page((void *) phys_to_virt(curr->memory[i])); - } - } - agp_free_key(curr->key); - vfree(curr->memory); - kfree(curr); - MOD_DEC_USE_COUNT; -} - -#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) - -agp_memory *agp_allocate_memory(size_t page_count, u32 type) -{ - int scratch_pages; - agp_memory *new; - int i; - - if ((atomic_read(&(agp_bridge.current_memory_agp)) + page_count) > - agp_bridge.max_memory_agp) { - return NULL; - } - if (type != 0) { - new = agp_bridge.alloc_by_type(page_count, type); - return new; - } - scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; - - new = agp_create_memory(scratch_pages); - - if (new == NULL) { - return NULL; - } - for (i = 0; i < page_count; i++) { - new->memory[i] = 
(unsigned long) agp_alloc_page(); - - if ((void *) new->memory[i] == NULL) { - /* Free this structure */ - agp_free_memory(new); - return NULL; - } - new->memory[i] = - agp_bridge.mask_memory(virt_to_phys((void *) new->memory[i]), type); - new->page_count++; - } - - MOD_INC_USE_COUNT; - return new; -} - -/* End - Generic routines for handling agp_memory structures */ - -static int agp_return_size(void) -{ - int current_size; - void *temp; - - temp = agp_bridge.current_size; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - current_size = ((aper_size_info_8 *) temp)->size; - break; - case U16_APER_SIZE: - current_size = ((aper_size_info_16 *) temp)->size; - break; - case U32_APER_SIZE: - current_size = ((aper_size_info_32 *) temp)->size; - break; - case FIXED_APER_SIZE: - current_size = ((aper_size_info_fixed *) temp)->size; - break; - default: - current_size = 0; - break; - } - - return current_size; -} - -/* Routine to copy over information structure */ - -void agp_copy_info(agp_kern_info * info) -{ - memset(info, 0, sizeof(agp_kern_info)); - info->version.major = agp_bridge.version->major; - info->version.minor = agp_bridge.version->minor; - info->device = agp_bridge.dev; - info->chipset = agp_bridge.type; - info->mode = agp_bridge.mode; - info->aper_base = agp_bridge.gart_bus_addr; - info->aper_size = agp_return_size(); - info->max_memory = agp_bridge.max_memory_agp; - info->current_memory = atomic_read(&agp_bridge.current_memory_agp); -} - -/* End - Routine to copy over information structure */ - -/* - * Routines for handling swapping of agp_memory into the GATT - - * These routines take agp_memory and insert them into the GATT. - * They call device specific routines to actually write to the GATT. 
- */ - -int agp_bind_memory(agp_memory * curr, off_t pg_start) -{ - int ret_val; - - if ((curr == NULL) || (curr->is_bound == TRUE)) { - return -EINVAL; - } - if (curr->is_flushed == FALSE) { - CACHE_FLUSH(); - curr->is_flushed = TRUE; - } - ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type); - - if (ret_val != 0) { - return ret_val; - } - curr->is_bound = TRUE; - curr->pg_start = pg_start; - return 0; -} - -int agp_unbind_memory(agp_memory * curr) -{ - int ret_val; - - if (curr == NULL) { - return -EINVAL; - } - if (curr->is_bound != TRUE) { - return -EINVAL; - } - ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type); - - if (ret_val != 0) { - return ret_val; - } - curr->is_bound = FALSE; - curr->pg_start = 0; - return 0; -} - -/* End - Routines for handling swapping of agp_memory into the GATT */ - -/* - * Driver routines - start - * Currently this module supports the - * i810, 440lx, 440bx, 440gx, via vp3, via mvp3, - * amd irongate, ALi M1541 and generic support for the - * SiS chipsets. - */ - -/* Generic Agp routines - Start */ - -static void agp_generic_agp_enable(u32 mode) -{ - struct pci_dev *device = NULL; - u32 command, scratch, cap_id; - u8 cap_ptr; - - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + 4, - &command); - - /* - * PASS1: go throu all devices that claim to be - * AGP devices and collect their data. - */ - - while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) { - pci_read_config_dword(device, 0x04, &scratch); - - if (!(scratch & 0x00100000)) - continue; - - pci_read_config_byte(device, 0x34, &cap_ptr); - - if (cap_ptr != 0x00) { - do { - pci_read_config_dword(device, cap_ptr, &cap_id); - - if ((cap_id & 0xff) != 0x02) - cap_ptr = (cap_id >> 8) & 0xff; - } - while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); - } - if (cap_ptr != 0x00) { - /* - * Ok, here we have a AGP device. Disable impossible settings, - * and adjust the readqueue to the minimum. 
- */ - - pci_read_config_dword(device, cap_ptr + 4, &scratch); - - /* adjust RQ depth */ - command = - ((command & ~0xff000000) | - min((mode & 0xff000000), min((command & 0xff000000), (scratch & 0xff000000)))); - - /* disable SBA if it's not supported */ - if (!((command & 0x00000200) && (scratch & 0x00000200) && (mode & 0x00000200))) - command &= ~0x00000200; - - /* disable FW if it's not supported */ - if (!((command & 0x00000010) && (scratch & 0x00000010) && (mode & 0x00000010))) - command &= ~0x00000010; - - if (!((command & 4) && (scratch & 4) && (mode & 4))) - command &= ~0x00000004; - - if (!((command & 2) && (scratch & 2) && (mode & 2))) - command &= ~0x00000002; - - if (!((command & 1) && (scratch & 1) && (mode & 1))) - command &= ~0x00000001; - } - } - /* - * PASS2: Figure out the 4X/2X/1X setting and enable the - * target (our motherboard chipset). - */ - - if (command & 4) { - command &= ~3; /* 4X */ - } - if (command & 2) { - command &= ~5; /* 2X */ - } - if (command & 1) { - command &= ~6; /* 1X */ - } - command |= 0x00000100; - - pci_write_config_dword(agp_bridge.dev, - agp_bridge.capndx + 8, - command); - - /* - * PASS3: Go throu all AGP devices and update the - * command registers. 
- */ - - while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) { - pci_read_config_dword(device, 0x04, &scratch); - - if (!(scratch & 0x00100000)) - continue; - - pci_read_config_byte(device, 0x34, &cap_ptr); - - if (cap_ptr != 0x00) { - do { - pci_read_config_dword(device, cap_ptr, &cap_id); - - if ((cap_id & 0xff) != 0x02) - cap_ptr = (cap_id >> 8) & 0xff; - } - while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); - } - if (cap_ptr != 0x00) - pci_write_config_dword(device, cap_ptr + 8, command); - } -} - -static int agp_generic_create_gatt_table(void) -{ - char *table; - char *table_end; - int size; - int page_order; - int num_entries; - int i; - void *temp; - - table = NULL; - i = agp_bridge.aperture_size_idx; - temp = agp_bridge.current_size; - size = page_order = num_entries = 0; - - if (agp_bridge.size_type != FIXED_APER_SIZE) { - do { - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - size = ((aper_size_info_8 *) temp)->size; - page_order = ((aper_size_info_8 *) temp)->page_order; - num_entries = ((aper_size_info_8 *) temp)->num_entries; - break; - case U16_APER_SIZE: - size = ((aper_size_info_16 *) temp)->size; - page_order = ((aper_size_info_16 *) temp)->page_order; - num_entries = ((aper_size_info_16 *) temp)->num_entries; - break; - case U32_APER_SIZE: - size = ((aper_size_info_32 *) temp)->size; - page_order = ((aper_size_info_32 *) temp)->page_order; - num_entries = ((aper_size_info_32 *) temp)->num_entries; - break; - /* This case will never really happen */ - case FIXED_APER_SIZE: - default: - size = page_order = num_entries = 0; - break; - } - - table = (char *) __get_free_pages(GFP_KERNEL, page_order); - - if (table == NULL) { - i++; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - agp_bridge.current_size = (((aper_size_info_8 *) agp_bridge.aperture_sizes) + i); - break; - case U16_APER_SIZE: - agp_bridge.current_size = (((aper_size_info_16 *) agp_bridge.aperture_sizes) + i); - break; - case U32_APER_SIZE: 
- agp_bridge.current_size = (((aper_size_info_32 *) agp_bridge.aperture_sizes) + i); - break; - /* This case will never really happen */ - case FIXED_APER_SIZE: - default: - size = page_order = num_entries = 0; - break; - } - } else { - agp_bridge.aperture_size_idx = i; - } - } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes)); - } else { - size = ((aper_size_info_fixed *) temp)->size; - page_order = ((aper_size_info_fixed *) temp)->page_order; - num_entries = ((aper_size_info_fixed *) temp)->num_entries; - table = (char *) __get_free_pages(GFP_KERNEL, page_order); - } - - if (table == NULL) { - return -ENOMEM; - } - table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); - - for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { - set_bit(PG_reserved, &mem_map[i].flags); - } - - agp_bridge.gatt_table_real = (unsigned long *) table; - CACHE_FLUSH(); - agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), - (PAGE_SIZE * (1 << page_order))); - CACHE_FLUSH(); - - if (agp_bridge.gatt_table == NULL) { - for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { - clear_bit(PG_reserved, &mem_map[i].flags); - } - - free_pages((unsigned long) table, page_order); - - return -ENOMEM; - } - agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); - - for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; - } - - return 0; -} - -static int agp_generic_free_gatt_table(void) -{ - int i; - int page_order; - char *table, *table_end; - void *temp; - - temp = agp_bridge.current_size; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - page_order = ((aper_size_info_8 *) temp)->page_order; - break; - case U16_APER_SIZE: - page_order = ((aper_size_info_16 *) temp)->page_order; - break; - case U32_APER_SIZE: - page_order = ((aper_size_info_32 *) temp)->page_order; - break; - case FIXED_APER_SIZE: - page_order = ((aper_size_info_fixed *) temp)->page_order; - break; - default: - page_order = 0; - break; 
- } - - /* Do not worry about freeing memory, because if this is - * called, then all agp memory is deallocated and removed - * from the table. - */ - - iounmap(agp_bridge.gatt_table); - table = (char *) agp_bridge.gatt_table_real; - table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); - - for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { - clear_bit(PG_reserved, &mem_map[i].flags); - } - - free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); - return 0; -} - -static int agp_generic_insert_memory(agp_memory * mem, - off_t pg_start, int type) -{ - int i, j, num_entries; - void *temp; - - temp = agp_bridge.current_size; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - num_entries = ((aper_size_info_8 *) temp)->num_entries; - break; - case U16_APER_SIZE: - num_entries = ((aper_size_info_16 *) temp)->num_entries; - break; - case U32_APER_SIZE: - num_entries = ((aper_size_info_32 *) temp)->num_entries; - break; - case FIXED_APER_SIZE: - num_entries = ((aper_size_info_fixed *) temp)->num_entries; - break; - default: - num_entries = 0; - break; - } - - if (type != 0 || mem->type != 0) { - /* The generic routines know nothing of memory types */ - return -EINVAL; - } - if ((pg_start + mem->page_count) > num_entries) { - return -EINVAL; - } - j = pg_start; - - while (j < (pg_start + mem->page_count)) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { - return -EBUSY; - } - j++; - } - - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - agp_bridge.gatt_table[j] = mem->memory[i]; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, - int type) -{ - int i; - - if (type != 0 || mem->type != 0) { - /* The generic routines know nothing of memory types */ - return -EINVAL; - } - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - agp_bridge.gatt_table[i] = (unsigned long) 
agp_bridge.scratch_page; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) -{ - return NULL; -} - -static void agp_generic_free_by_type(agp_memory * curr) -{ - if (curr->memory != NULL) { - vfree(curr->memory); - } - agp_free_key(curr->key); - kfree(curr); -} - -void agp_enable(u32 mode) -{ - agp_bridge.agp_enable(mode); -} - -/* End - Generic Agp routines */ - -#ifdef AGP_BUILD_INTEL_I810 - -static aper_size_info_fixed intel_i810_sizes[] = -{ - {64, 16384, 4}, - /* The 32M mode still requires a 64k gatt */ - {32, 8192, 4} -}; - -#define AGP_DCACHE_MEMORY 1 - -static gatt_mask intel_i810_masks[] = -{ - {I810_PTE_VALID, 0}, - {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY} -}; - -static struct _intel_i810_private { - struct pci_dev *i810_dev; /* device one */ - volatile unsigned char *registers; - int num_dcache_entries; -} intel_i810_private; - -static int intel_i810_fetch_size(void) -{ - u32 smram_miscc; - aper_size_info_fixed *values; - - pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); - values = (aper_size_info_fixed *) agp_bridge.aperture_sizes; - - if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { - printk("agpgart: i810 is disabled\n"); - return 0; - } - if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + 1); - agp_bridge.aperture_size_idx = 1; - return values[1].size; - } else { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values); - agp_bridge.aperture_size_idx = 0; - return values[0].size; - } - - return 0; -} - -static int intel_i810_configure(void) -{ - aper_size_info_fixed *current_size; - u32 temp; - int i; - - current_size = (aper_size_info_fixed *) agp_bridge.current_size; - - pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); - temp &= 0xfff80000; - - intel_i810_private.registers = - (volatile unsigned char 
*) ioremap(temp, 128 * 4096); - - if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL) - & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { - /* This will need to be dynamically assigned */ - printk("agpgart: detected 4MB dedicated video ram.\n"); - intel_i810_private.num_dcache_entries = 1024; - } - pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, - agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); - CACHE_FLUSH(); - - if (agp_bridge.needs_scratch_page == TRUE) { - for (i = 0; i < current_size->num_entries; i++) { - OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), - agp_bridge.scratch_page); - } - } - return 0; -} - -static void intel_i810_cleanup(void) -{ - OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0); - iounmap((void *) intel_i810_private.registers); -} - -static void intel_i810_tlbflush(agp_memory * mem) -{ - return; -} - -static void intel_i810_agp_enable(u32 mode) -{ - return; -} - -static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, - int type) -{ - int i, j, num_entries; - void *temp; - - temp = agp_bridge.current_size; - num_entries = ((aper_size_info_fixed *) temp)->num_entries; - - if ((pg_start + mem->page_count) > num_entries) { - return -EINVAL; - } - for (j = pg_start; j < (pg_start + mem->page_count); j++) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { - return -EBUSY; - } - } - - if (type != 0 || mem->type != 0) { - if ((type == AGP_DCACHE_MEMORY) && - (mem->type == AGP_DCACHE_MEMORY)) { - /* special insert */ - - for (i = pg_start; i < (pg_start + mem->page_count); i++) { - OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), - (i * 4096) | I810_PTE_LOCAL | I810_PTE_VALID); - } - - agp_bridge.tlb_flush(mem); - return 0; - } - return -EINVAL; - } - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - for (i = 0, j = 
pg_start; i < mem->page_count; i++, j++) { - OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (j * 4), mem->memory[i]); - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start, - int type) -{ - int i; - - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), - agp_bridge.scratch_page); - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) -{ - agp_memory *new; - - if (type == AGP_DCACHE_MEMORY) { - if (pg_count != intel_i810_private.num_dcache_entries) { - return NULL; - } - new = agp_create_memory(1); - - if (new == NULL) { - return NULL; - } - new->type = AGP_DCACHE_MEMORY; - new->page_count = pg_count; - new->num_scratch_pages = 0; - vfree(new->memory); - return new; - } - return NULL; -} - -static void intel_i810_free_by_type(agp_memory * curr) -{ - agp_free_key(curr->key); - kfree(curr); -} - -static unsigned long intel_i810_mask_memory(unsigned long addr, int type) -{ - /* Type checking must be done elsewhere */ - return addr | agp_bridge.masks[type].mask; -} - -static void intel_i810_setup(struct pci_dev *i810_dev) -{ - intel_i810_private.i810_dev = i810_dev; - - agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 2; - agp_bridge.aperture_sizes = (void *) intel_i810_sizes; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.num_aperture_sizes = 2; - agp_bridge.dev_private_data = (void *) &intel_i810_private; - agp_bridge.needs_scratch_page = TRUE; - agp_bridge.configure = intel_i810_configure; - agp_bridge.fetch_size = intel_i810_fetch_size; - agp_bridge.cleanup = intel_i810_cleanup; - agp_bridge.tlb_flush = intel_i810_tlbflush; - agp_bridge.mask_memory = intel_i810_mask_memory; - agp_bridge.agp_enable = intel_i810_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = 
flush_cache; -#endif - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = intel_i810_insert_entries; - agp_bridge.remove_memory = intel_i810_remove_entries; - agp_bridge.alloc_by_type = intel_i810_alloc_by_type; - agp_bridge.free_by_type = intel_i810_free_by_type; -} - -#endif - -#ifdef AGP_BUILD_INTEL_GENERIC - -static int intel_fetch_size(void) -{ - int i; - u16 temp; - aper_size_info_16 *values; - - pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); - (void *) values = agp_bridge.aperture_sizes; - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static void intel_tlbflush(agp_memory * mem) -{ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); -} - -static void intel_cleanup(void) -{ - u16 temp; - aper_size_info_16 *previous_size; - - previous_size = (aper_size_info_16 *) agp_bridge.previous_size; - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value); -} - -static int intel_configure(void) -{ - u32 temp; - u16 temp2; - aper_size_info_16 *current_size; - - current_size = (aper_size_info_16 *) agp_bridge.current_size; - - /* aperture size */ - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); - - /* agpctrl */ - 
pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); - - /* paccfg/nbxcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9)); - /* clear any possible error conditions */ - pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7); - return 0; -} - -static unsigned long intel_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - - -/* Setup function */ -static gatt_mask intel_generic_masks[] = -{ - {0x00000017, 0} -}; - -static aper_size_info_16 intel_generic_sizes[7] = -{ - {256, 65536, 6, 0}, - {128, 32768, 5, 32}, - {64, 16384, 4, 48}, - {32, 8192, 3, 56}, - {16, 4096, 2, 60}, - {8, 2048, 1, 62}, - {4, 1024, 0, 63} -}; - -static void intel_generic_setup(void) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_generic_sizes; - agp_bridge.size_type = U16_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_configure; - agp_bridge.fetch_size = intel_fetch_size; - agp_bridge.cleanup = intel_cleanup; - agp_bridge.tlb_flush = intel_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; -} - -#endif - -#ifdef AGP_BUILD_VIA_GENERIC - -static int via_fetch_size(void) -{ - int i; - u8 temp; - aper_size_info_8 *values; - 
- (void *) values = agp_bridge.aperture_sizes; - pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static int via_configure(void) -{ - u32 temp; - aper_size_info_8 *current_size; - - current_size = (aper_size_info_8 *) agp_bridge.current_size; - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, current_size->size_value); - /* address to map too */ - pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* GART control register */ - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); - - /* attbase - aperture GATT base */ - pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, - (agp_bridge.gatt_bus_addr & 0xfffff000) | 3); - return 0; -} - -static void via_cleanup(void) -{ - aper_size_info_8 *previous_size; - - previous_size = (aper_size_info_8 *) agp_bridge.previous_size; - pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0); - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value); -} - -static void via_tlbflush(agp_memory * mem) -{ - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f); - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); -} - -static unsigned long via_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static aper_size_info_8 via_generic_sizes[7] = -{ - {256, 65536, 6, 0}, - {128, 32768, 5, 128}, - {64, 16384, 4, 192}, - {32, 8192, 3, 224}, - {16, 4096, 2, 240}, - {8, 2048, 1, 248}, - {4, 1024, 0, 252} -}; - -static gatt_mask via_generic_masks[] = -{ - {0x00000000, 0} -}; - -static void via_generic_setup(void) -{ - agp_bridge.masks = 
via_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) via_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = via_configure; - agp_bridge.fetch_size = via_fetch_size; - agp_bridge.cleanup = via_cleanup; - agp_bridge.tlb_flush = via_tlbflush; - agp_bridge.mask_memory = via_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; -} - -#endif - -#ifdef AGP_BUILD_SIS_GENERIC - -static int sis_fetch_size(void) -{ - u8 temp_size; - int i; - aper_size_info_8 *values; - - pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size); - (void *) values = agp_bridge.aperture_sizes; - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if ((temp_size == values[i].size_value) || - ((temp_size & ~(0x03)) == (values[i].size_value & ~(0x03)))) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - - -static void sis_tlbflush(agp_memory * mem) -{ - pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02); -} - -static int sis_configure(void) -{ - u32 temp; - aper_size_info_8 *current_size; - - current_size = (aper_size_info_8 *) agp_bridge.current_size; - pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); - pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & 
PCI_BASE_ADDRESS_MEM_MASK); - pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, agp_bridge.gatt_bus_addr); - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, current_size->size_value); - return 0; -} - -static void sis_cleanup(void) -{ - aper_size_info_8 *previous_size; - - previous_size = (aper_size_info_8 *) agp_bridge.previous_size; - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, (previous_size->size_value & ~(0x03))); -} - -static unsigned long sis_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static aper_size_info_8 sis_generic_sizes[7] = -{ - {256, 65536, 6, 99}, - {128, 32768, 5, 83}, - {64, 16384, 4, 67}, - {32, 8192, 3, 51}, - {16, 4096, 2, 35}, - {8, 2048, 1, 19}, - {4, 1024, 0, 3} -}; - -static gatt_mask sis_generic_masks[] = -{ - {0x00000000, 0} -}; - -static void sis_generic_setup(void) -{ - agp_bridge.masks = sis_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) sis_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = sis_configure; - agp_bridge.fetch_size = sis_fetch_size; - agp_bridge.cleanup = sis_cleanup; - agp_bridge.tlb_flush = sis_tlbflush; - agp_bridge.mask_memory = sis_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; -} - -#endif - -#ifdef AGP_BUILD_AMD_IRONGATE - -static struct _amd_irongate_private { - volatile 
unsigned char *registers; -} amd_irongate_private; - -static int amd_irongate_fetch_size(void) -{ - int i; - u32 temp; - aper_size_info_32 *values; - - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = (temp & 0x0000000e); - (void *) values = agp_bridge.aperture_sizes; - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static int amd_irongate_configure(void) -{ - aper_size_info_32 *current_size; - u32 temp; - u16 enable_reg; - - current_size = (aper_size_info_32 *) agp_bridge.current_size; - - /* Get the memory mapped registers */ - pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); - temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); - amd_irongate_private.registers = (volatile unsigned char *) ioremap(temp, 4096); - - /* Write out the address of the gatt table */ - OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, agp_bridge.gatt_bus_addr); - - /* Write the Sync register */ - pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); - - /* Write the enable register */ - enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); - enable_reg = (enable_reg | 0x0004); - OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); - - /* Write out the size register */ - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); - pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); - - /* Flush the tlb */ - OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); - - /* Get the address for the gart region */ - pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp); - temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); - agp_bridge.gart_bus_addr = temp; - return 0; -} - -static void amd_irongate_cleanup(void) -{ - aper_size_info_32 
*previous_size; - u32 temp; - u16 enable_reg; - - previous_size = (aper_size_info_32 *) agp_bridge.previous_size; - - enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); - enable_reg = (enable_reg & ~(0x0004)); - OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); - - /* Write back the previous size and disable gart translation */ - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = ((temp & ~(0x0000000f)) | previous_size->size_value); - pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); - iounmap((void *) amd_irongate_private.registers); -} - -/* - * This routine could be implemented by taking the addresses - * written to the GATT, and flushing them individually. However - * currently it just flushes the whole table. Which is probably - * more efficent, since agp_memory blocks can be a large number of - * entries. - */ - -static void amd_irongate_tlbflush(agp_memory * temp) -{ - OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); -} - -static unsigned long amd_irongate_mask_memory(unsigned long addr, int type) -{ - /* Only type 0 is supported by the irongate */ - - return addr | agp_bridge.masks[0].mask; -} - -static aper_size_info_32 amd_irongate_sizes[7] = -{ - {2048, 524288, 9, 0x0000000c}, - {1024, 262144, 8, 0x0000000a}, - {512, 131072, 7, 0x00000008}, - {256, 65536, 6, 0x00000006}, - {128, 32768, 5, 0x00000004}, - {64, 16384, 4, 0x00000002}, - {32, 8192, 3, 0x00000000} -}; - -static gatt_mask amd_irongate_masks[] = -{ - {0x00000001, 0} -}; - -static void amd_irongate_setup(void) -{ - agp_bridge.masks = amd_irongate_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; - agp_bridge.size_type = U32_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = (void *) &amd_irongate_private; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = amd_irongate_configure; - agp_bridge.fetch_size = 
amd_irongate_fetch_size; - agp_bridge.cleanup = amd_irongate_cleanup; - agp_bridge.tlb_flush = amd_irongate_tlbflush; - agp_bridge.mask_memory = amd_irongate_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; -} - -#endif - -#ifdef AGP_BUILD_ALI_M1541 - -static int ali_fetch_size(void) -{ - int i; - u32 temp; - aper_size_info_32 *values; - - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); - temp &= ~(0xfffffff0); - (void *) values = agp_bridge.aperture_sizes; - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static void ali_tlbflush(agp_memory * mem) -{ - u32 temp; - - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, - ((temp & 0xffffff00) | 0x00000090)); - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, - ((temp & 0xffffff00) | 0x00000010)); -} - -static void ali_cleanup(void) -{ - aper_size_info_32 *previous_size; - u32 temp; - - previous_size = (aper_size_info_32 *) agp_bridge.previous_size; - - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, - ((temp & 0xffffff00) | 0x00000090)); - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, previous_size->size_value); -} - -static int ali_configure(void) -{ - u32 temp; - aper_size_info_32 *current_size; 
- - current_size = (aper_size_info_32 *) agp_bridge.current_size; - - /* aperture size and gatt addr */ - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, - agp_bridge.gatt_bus_addr | current_size->size_value); - - /* tlb control */ - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, - ((temp & 0xffffff00) | 0x00000010)); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - return 0; -} - -static unsigned long ali_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - - -/* Setup function */ -static gatt_mask ali_generic_masks[] = -{ - {0x00000000, 0} -}; - -static aper_size_info_32 ali_generic_sizes[7] = -{ - {256, 65536, 6, 10}, - {128, 32768, 5, 9}, - {64, 16384, 4, 8}, - {32, 8192, 3, 7}, - {16, 4096, 2, 6}, - {8, 2048, 1, 4}, - {4, 1024, 0, 3} -}; - -static void ali_generic_setup(void) -{ - agp_bridge.masks = ali_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) ali_generic_sizes; - agp_bridge.size_type = U32_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = ali_configure; - agp_bridge.fetch_size = ali_fetch_size; - agp_bridge.cleanup = ali_cleanup; - agp_bridge.tlb_flush = ali_tlbflush; - agp_bridge.mask_memory = ali_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - 
agp_bridge.free_by_type = agp_generic_free_by_type; -} - -#endif - - - -/* Supported Device Scanning routine */ - -static void agp_find_supported_device(void) -{ - struct pci_dev *dev = NULL; - u8 cap_ptr = 0x00; - u32 cap_id, scratch; - - if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) { - agp_bridge.type = NOT_SUPPORTED; - return; - } - agp_bridge.dev = dev; - - /* Need to test for I810 here */ -#ifdef AGP_BUILD_INTEL_I810 - if (dev->vendor == PCI_VENDOR_ID_INTEL) { - struct pci_dev *i810_dev; - - switch (dev->device) { - case PCI_DEVICE_ID_INTEL_810_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_810_1, - NULL); - if (i810_dev == NULL) { - printk("agpgart: Detected an Intel i810, but could not find the secondary device.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - printk("agpgart: Detected an Intel i810 Chipset.\n"); - agp_bridge.type = INTEL_I810; - agp_bridge.intel_i810_setup(i810_dev); - return; - - case PCI_DEVICE_ID_INTEL_810_DC100_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_810_DC100_1, - NULL); - if (i810_dev == NULL) { - printk("agpgart: Detected an Intel i810 DC100, but could not find the secondary device.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - printk("agpgart: Detected an Intel i810 DC100 Chipset.\n"); - agp_bridge.type = INTEL_I810; - agp_bridge.intel_i810_setup(i810_dev); - return; - - case PCI_DEVICE_ID_INTEL_810_E_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_810_E_1, - NULL); - if (i810_dev == NULL) { - printk("agpgart: Detected an Intel i810 E, but could not find the secondary device.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - printk("agpgart: Detected an Intel i810 E Chipset.\n"); - agp_bridge.type = INTEL_I810; - agp_bridge.intel_i810_setup(i810_dev); - return; - default: - break; - } - } -#endif - /* find capndx */ - pci_read_config_dword(dev, 0x04, &scratch); - - if (!(scratch & 0x00100000)) { - 
agp_bridge.type = NOT_SUPPORTED; - return; - } - pci_read_config_byte(dev, 0x34, &cap_ptr); - - if (cap_ptr != 0x00) { - do { - pci_read_config_dword(dev, cap_ptr, &cap_id); - - if ((cap_id & 0xff) != 0x02) - cap_ptr = (cap_id >> 8) & 0xff; - } - while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); - } - if (cap_ptr == 0x00) { - agp_bridge.type = NOT_SUPPORTED; - return; - } - agp_bridge.capndx = cap_ptr; - - /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + 4, - &agp_bridge.mode); - - switch (dev->vendor) { -#ifdef AGP_BUILD_INTEL_GENERIC - case PCI_VENDOR_ID_INTEL: - switch (dev->device) { - case PCI_DEVICE_ID_INTEL_82443LX_0: - agp_bridge.type = INTEL_LX; - printk("agpgart: Detected an Intel 440LX Chipset.\n"); - agp_bridge.intel_generic_setup(); - return; - - case PCI_DEVICE_ID_INTEL_82443BX_0: - agp_bridge.type = INTEL_BX; - printk("agpgart: Detected an Intel 440BX Chipset.\n"); - agp_bridge.intel_generic_setup(); - return; - - case PCI_DEVICE_ID_INTEL_82443GX_0: - agp_bridge.type = INTEL_GX; - printk("agpgart: Detected an Intel 440GX Chipset.\n"); - agp_bridge.intel_generic_setup(); - return; - - default: - if (agp_try_unsupported != 0) { - printk("agpgart: Trying generic intel routines for device id: %x\n", dev->device); - agp_bridge.type = INTEL_GENERIC; - agp_bridge.intel_generic_setup(); - return; - } else { - printk("agpgart: Unsupported intel chipset, you might want to try agp_try_unsupported=1.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - } - break; -#endif - -#ifdef AGP_BUILD_VIA_GENERIC - case PCI_VENDOR_ID_VIA: - switch (dev->device) { - case PCI_DEVICE_ID_VIA_82C597_0: - agp_bridge.type = VIA_VP3; - printk("agpgart: Detected a VIA VP3 Chipset.\n"); - agp_bridge.via_generic_setup(); - return; - - case PCI_DEVICE_ID_VIA_82C598_0: - agp_bridge.type = VIA_MVP3; - printk("agpgart: Detected a VIA MVP3 Chipset.\n"); - agp_bridge.via_generic_setup(); - return; - - case PCI_DEVICE_ID_VIA_82C691_0: - 
agp_bridge.type = VIA_APOLLO_PRO; - printk("agpgart: Detected a VIA Apollo Pro Chipset.\n"); - agp_bridge.via_generic_setup(); - return; - - default: - if (agp_try_unsupported != 0) { - printk("agpgart: Trying generic VIA routines for device id: %x\n", dev->device); - agp_bridge.type = VIA_GENERIC; - agp_bridge.via_generic_setup(); - return; - } else { - printk("agpgart: Unsupported VIA chipset, you might want to try agp_try_unsupported=1.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - } - break; -#endif - -#ifdef AGP_BUILD_SIS_GENERIC - case PCI_VENDOR_ID_SI: - switch (dev->device) { - /* ToDo need to find out the specific devices supported */ - default: - if (agp_try_unsupported != 0) { - printk("agpgart: Trying generic SiS routines for device id: %x\n", dev->device); - agp_bridge.type = SIS_GENERIC; - agp_bridge.sis_generic_setup(); - return; - } else { - printk("agpgart: Unsupported SiS chipset, you might want to try agp_try_unsupported=1.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - } - break; -#endif - -#ifdef AGP_BUILD_AMD_IRONGATE - case PCI_VENDOR_ID_AMD: - switch (dev->device) { - case PCI_DEVICE_ID_AMD_IRONGATE_0: - agp_bridge.type = AMD_IRONGATE; - printk("agpgart: Detected an AMD Irongate Chipset.\n"); - agp_bridge.amd_irongate_setup(); - return; - - default: - if (agp_try_unsupported != 0) { - printk("agpgart: Trying Amd irongate routines for device id: %x\n", dev->device); - agp_bridge.type = AMD_GENERIC; - agp_bridge.amd_irongate_setup(); - return; - } else { - printk("agpgart: Unsupported Amd chipset, you might want to try agp_try_unsupported=1.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - } - break; -#endif - -#ifdef AGP_BUILD_ALI_M1541 - case PCI_VENDOR_ID_AL: - switch (dev->device) { - case PCI_DEVICE_ID_AL_M1541_0: - agp_bridge.type = ALI_M1541; - printk("agpgart: Detected an ALi M1541 Chipset\n"); - agp_bridge.ali_generic_setup(); - return; - default: - if (agp_try_unsupported != 0) { - printk("agpgart: Trying 
ALi generic routines for device id: %x\n", dev->device); - agp_bridge.type = ALI_GENERIC; - agp_bridge.ali_generic_setup(); - return; - } else { - printk("agpgart: Unsupported ALi chipset, you might want to type agp_try_unsupported=1.\n"); - agp_bridge.type = NOT_SUPPORTED; - return; - } - } - break; -#endif - default: - agp_bridge.type = NOT_SUPPORTED; - return; - } -} - -struct agp_max_table { - int mem; - int agp; -}; - -static struct agp_max_table agp_maxes_table[9] = -{ - {0, 0}, - {32, 4}, - {64, 28}, - {128, 96}, - {256, 204}, - {512, 440}, - {1024, 942}, - {2048, 1920}, - {4096, 3932} -}; - -static int agp_find_max(void) -{ - int memory; - float t; - int index; - int result; - - memory = virt_to_phys(high_memory) / 0x100000; - index = 0; - - while ((memory > agp_maxes_table[index].mem) && - (index < 8)) { - index++; - } - - t = (memory - agp_maxes_table[index - 1].mem) / - (agp_maxes_table[index].mem - agp_maxes_table[index - 1].mem); - - result = agp_maxes_table[index - 1].agp + - (t * (agp_maxes_table[index].agp - agp_maxes_table[index - 1].agp)); - - printk("agpgart: Maximum main memory to use for agp memory: %dM\n", result); - result = (result * 0x100000) / 4096; - return result; -} - -#define AGPGART_VERSION_MAJOR 0 -#define AGPGART_VERSION_MINOR 99 - -static agp_version agp_current_version = -{ - AGPGART_VERSION_MAJOR, - AGPGART_VERSION_MINOR -}; - -static int agp_backend_initialize(void) -{ - int size_value; - - memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); - agp_bridge.type = NOT_SUPPORTED; -#ifdef AGP_BUILD_INTEL_GENERIC - agp_bridge.intel_generic_setup = intel_generic_setup; -#endif -#ifdef AGP_BUILD_INTEL_I810 - agp_bridge.intel_i810_setup = intel_i810_setup; -#endif -#ifdef AGP_BUILD_VIA_GENERIC - agp_bridge.via_generic_setup = via_generic_setup; -#endif -#ifdef AGP_BUILD_SIS_GENERIC - agp_bridge.sis_generic_setup = sis_generic_setup; -#endif -#ifdef AGP_BUILD_AMD_IRONGATE - agp_bridge.amd_irongate_setup = amd_irongate_setup; -#endif 
-#ifdef AGP_BUILD_ALI_M1541 - agp_bridge.ali_generic_setup = ali_generic_setup; -#endif - agp_bridge.max_memory_agp = agp_find_max(); - agp_bridge.version = &agp_current_version; - agp_find_supported_device(); - - if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page = (unsigned long) agp_alloc_page(); - - if ((void *) (agp_bridge.scratch_page) == NULL) { - printk("agpgart: unable to get memory for scratch page.\n"); - return -ENOMEM; - } - agp_bridge.scratch_page = virt_to_phys((void *) agp_bridge.scratch_page); - agp_bridge.scratch_page = agp_bridge.mask_memory(agp_bridge.scratch_page, 0); - } - if (agp_bridge.type == NOT_SUPPORTED) { - printk("agpgart: no supported devices found.\n"); - return -EINVAL; - } - size_value = agp_bridge.fetch_size(); - - if (size_value == 0) { - printk("agpgart: unable to detrimine aperture size.\n"); - return -EINVAL; - } - if (agp_bridge.create_gatt_table()) { - printk("agpgart: unable to get memory for graphics translation table.\n"); - return -ENOMEM; - } - agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); - - if (agp_bridge.key_list == NULL) { - printk("agpgart: error allocating memory for key lists.\n"); - agp_bridge.free_gatt_table(); - return -ENOMEM; - } - memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); - - if (agp_bridge.configure()) { - printk("agpgart: error configuring host chipset.\n"); - agp_bridge.free_gatt_table(); - vfree(agp_bridge.key_list); - return -EINVAL; - } - printk("agpgart: Physical address of the agp aperture: 0x%lx\n", agp_bridge.gart_bus_addr); - printk("agpgart: Agp aperture is %dM in size.\n", size_value); - return 0; -} - -static void agp_backend_cleanup(void) -{ - agp_bridge.cleanup(); - agp_bridge.free_gatt_table(); - vfree(agp_bridge.key_list); - - if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page &= ~(0x00000fff); - agp_destroy_page((void *) phys_to_virt(agp_bridge.scratch_page)); - } -} - -extern int agp_frontend_initialize(void); -extern void 
agp_frontend_cleanup(void); - -#ifdef MODULE -int init_module(void) -{ - int ret_val; - - printk("Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", - AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); - ret_val = agp_backend_initialize(); - - if (ret_val != 0) { - return ret_val; - } - ret_val = agp_frontend_initialize(); - - if (ret_val != 0) { - agp_backend_cleanup(); - return ret_val; - } - return 0; -} - -void cleanup_module(void) -{ - agp_frontend_cleanup(); - agp_backend_cleanup(); -} - -#endif diff -u --recursive --new-file v2.3.31/linux/drivers/char/agp/agp_backendP.h linux/drivers/char/agp/agp_backendP.h --- v2.3.31/linux/drivers/char/agp/agp_backendP.h Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/agp/agp_backendP.h Wed Dec 31 16:00:00 1969 @@ -1,244 +0,0 @@ -/* - * AGPGART module version 0.99 - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight - * Copyright (C) 1999 Xi Graphics - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _AGP_BACKEND_PRIV_H -#define _AGP_BACKEND_PRIV_H 1 - -enum aper_size_type { - U8_APER_SIZE, - U16_APER_SIZE, - U32_APER_SIZE, - FIXED_APER_SIZE -}; - -typedef struct _gatt_mask { - unsigned long mask; - u32 type; - /* totally device specific, for integrated chipsets that - * might have different types of memory masks. For other - * devices this will probably be ignored */ -} gatt_mask; - -typedef struct _aper_size_info_8 { - int size; - int num_entries; - int page_order; - u8 size_value; -} aper_size_info_8; - -typedef struct _aper_size_info_16 { - int size; - int num_entries; - int page_order; - u16 size_value; -} aper_size_info_16; - -typedef struct _aper_size_info_32 { - int size; - int num_entries; - int page_order; - u32 size_value; -} aper_size_info_32; - -typedef struct _aper_size_info_fixed { - int size; - int num_entries; - int page_order; -} aper_size_info_fixed; - -struct agp_bridge_data { - agp_version *version; - void *aperture_sizes; - void *previous_size; - void *current_size; - void *dev_private_data; - struct pci_dev *dev; - gatt_mask *masks; - unsigned long *gatt_table; - unsigned long *gatt_table_real; - unsigned long scratch_page; - unsigned long gart_bus_addr; - unsigned long gatt_bus_addr; - u32 mode; - enum chipset_type type; - enum aper_size_type size_type; - u32 *key_list; - atomic_t current_memory_agp; - atomic_t agp_in_use; - int max_memory_agp; /* in number of pages */ - int needs_scratch_page; - int aperture_size_idx; - int num_aperture_sizes; - int num_of_masks; - int capndx; - - /* Links to driver specific functions */ - - int (*fetch_size) (void); /* returns the index into the size table */ - int (*configure) (void); - void 
(*agp_enable) (u32); - void (*cleanup) (void); - void (*tlb_flush) (agp_memory *); - unsigned long (*mask_memory) (unsigned long, int); - void (*cache_flush) (void); - int (*create_gatt_table) (void); - int (*free_gatt_table) (void); - int (*insert_memory) (agp_memory *, off_t, int); - int (*remove_memory) (agp_memory *, off_t, int); - agp_memory *(*alloc_by_type) (size_t, int); - void (*free_by_type) (agp_memory *); - - /* Links to vendor/device specific setup functions */ -#ifdef AGP_BUILD_INTEL_GENERIC - void (*intel_generic_setup) (void); -#endif -#ifdef AGP_BUILD_INTEL_I810 - void (*intel_i810_setup) (struct pci_dev *); -#endif -#ifdef AGP_BUILD_VIA_GENERIC - void (*via_generic_setup) (void); -#endif -#ifdef AGP_BUILD_SIS_GENERIC - void (*sis_generic_setup) (void); -#endif -#ifdef AGP_BUILD_AMD_IRONGATE - void (*amd_irongate_setup) (void); -#endif -#ifdef AGP_BUILD_ALI_M1541 - void (*ali_generic_setup) (void); -#endif -}; - -#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val) -#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val) -#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val) - -#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr)) -#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr)) -#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr)) - -#ifndef min -#define min(a,b) (((a)<(b))?(a):(b)) -#endif - -#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) - -#ifndef PCI_DEVICE_ID_VIA_82C691_0 -#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 -#endif -#ifndef PCI_DEVICE_ID_VIA_82C691_1 -#define PCI_DEVICE_ID_VIA_82C691_1 0x8691 -#endif -#ifndef PCI_DEVICE_ID_INTEL_810_0 -#define PCI_DEVICE_ID_INTEL_810_0 0x7120 -#endif -#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0 -#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122 -#endif -#ifndef PCI_DEVICE_ID_INTEL_810_E_0 -#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124 -#endif -#ifndef PCI_DEVICE_ID_INTEL_82443GX_0 -#define 
PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 -#endif -#ifndef PCI_DEVICE_ID_INTEL_810_1 -#define PCI_DEVICE_ID_INTEL_810_1 0x7121 -#endif -#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1 -#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123 -#endif -#ifndef PCI_DEVICE_ID_INTEL_810_E_1 -#define PCI_DEVICE_ID_INTEL_810_E_1 0x7125 -#endif -#ifndef PCI_DEVICE_ID_INTEL_82443GX_1 -#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 -#endif -#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0 -#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 -#endif -#ifndef PCI_VENDOR_ID_AL -#define PCI_VENDOR_ID_AL 0x10b9 -#endif -#ifndef PCI_DEVICE_ID_AL_M1541_0 -#define PCI_DEVICE_ID_AL_M1541_0 0x1541 -#endif - -/* intel register */ -#define INTEL_APBASE 0x10 -#define INTEL_APSIZE 0xb4 -#define INTEL_ATTBASE 0xb8 -#define INTEL_AGPCTRL 0xb0 -#define INTEL_NBXCFG 0x50 -#define INTEL_ERRSTS 0x91 - -/* intel i810 registers */ -#define I810_GMADDR 0x10 -#define I810_MMADDR 0x14 -#define I810_PTE_BASE 0x10000 -#define I810_PTE_MAIN_UNCACHED 0x00000000 -#define I810_PTE_LOCAL 0x00000002 -#define I810_PTE_VALID 0x00000001 -#define I810_SMRAM_MISCC 0x70 -#define I810_GFX_MEM_WIN_SIZE 0x00010000 -#define I810_GFX_MEM_WIN_32M 0x00010000 -#define I810_GMS 0x000000c0 -#define I810_GMS_DISABLE 0x00000000 -#define I810_PGETBL_CTL 0x2020 -#define I810_PGETBL_ENABLED 0x00000001 -#define I810_DRAM_CTL 0x3000 -#define I810_DRAM_ROW_0 0x00000001 -#define I810_DRAM_ROW_0_SDRAM 0x00000001 - -/* VIA register */ -#define VIA_APBASE 0x10 -#define VIA_GARTCTRL 0x80 -#define VIA_APSIZE 0x84 -#define VIA_ATTBASE 0x88 - -/* SiS registers */ -#define SIS_APBASE 0x10 -#define SIS_ATTBASE 0x90 -#define SIS_APSIZE 0x94 -#define SIS_TLBCNTRL 0x97 -#define SIS_TLBFLUSH 0x98 - -/* AMD registers */ -#define AMD_APBASE 0x10 -#define AMD_MMBASE 0x14 -#define AMD_APSIZE 0xac -#define AMD_MODECNTL 0xb0 -#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ -#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ -#define AMD_TLBFLUSH 0x0c /* In mmio 
region (32-bit register) */ -#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ - -/* ALi registers */ -#define ALI_APBASE 0x10 -#define ALI_AGPCTRL 0xb8 -#define ALI_ATTBASE 0xbc -#define ALI_TLBCTRL 0xc0 - -#endif /* _AGP_BACKEND_PRIV_H */ diff -u --recursive --new-file v2.3.31/linux/drivers/char/agp/agpgart_be.c linux/drivers/char/agp/agpgart_be.c --- v2.3.31/linux/drivers/char/agp/agpgart_be.c Wed Dec 31 16:00:00 1969 +++ linux/drivers/char/agp/agpgart_be.c Mon Dec 13 14:10:09 1999 @@ -0,0 +1,2052 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#define EXPORT_SYMTAB +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "agp.h" + +MODULE_AUTHOR("Jeff Hartmann "); +MODULE_PARM(agp_try_unsupported, "1i"); +EXPORT_SYMBOL(agp_free_memory); +EXPORT_SYMBOL(agp_allocate_memory); +EXPORT_SYMBOL(agp_copy_info); +EXPORT_SYMBOL(agp_bind_memory); +EXPORT_SYMBOL(agp_unbind_memory); +EXPORT_SYMBOL(agp_enable); +EXPORT_SYMBOL(agp_backend_acquire); +EXPORT_SYMBOL(agp_backend_release); + +static void flush_cache(void); + +static struct agp_bridge_data agp_bridge; +static int agp_try_unsupported __initdata = 0; +#ifdef __SMP__ +static atomic_t cpus_waiting; + +static void ipi_handler(void *null) +{ + flush_cache(); + atomic_dec(&cpus_waiting); + while (atomic_read(&cpus_waiting) > 0) + barrier(); +} + +static void smp_flush_cache(void) +{ + atomic_set(&cpus_waiting, smp_num_cpus - 1); + if (smp_call_function(ipi_handler, NULL, 1, 0) != 0) + panic("agpgart: timed out waiting for the other CPUs!\n"); + flush_cache(); + while (atomic_read(&cpus_waiting) > 0) + barrier(); +} +#define global_cache_flush smp_flush_cache +#else /* __SMP__ */ +#define global_cache_flush flush_cache +#endif /* __SMP__ */ + +static void flush_cache(void) +{ + asm volatile ("wbinvd":::"memory"); +} + + +int agp_backend_acquire(void) +{ + atomic_inc(&agp_bridge.agp_in_use); + + if (atomic_read(&agp_bridge.agp_in_use) != 1) { + atomic_dec(&agp_bridge.agp_in_use); + return -EBUSY; + } + MOD_INC_USE_COUNT; + return 0; +} + +void agp_backend_release(void) +{ + atomic_dec(&agp_bridge.agp_in_use); + MOD_DEC_USE_COUNT; +} + +/* + * Basic Page Allocation Routines - + * These routines handle page allocation + * and by default they reserve the allocated + * memory. They also handle incrementing the + * current_memory_agp value, Which is checked + * against a maximum value. 
+ */ + +static unsigned long agp_alloc_page(void) +{ + void *pt; + + pt = (void *) __get_free_page(GFP_KERNEL); + if (pt == NULL) { + return 0; + } + atomic_inc(&mem_map[MAP_NR(pt)].count); + set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); + atomic_inc(&agp_bridge.current_memory_agp); + return (unsigned long) pt; +} + +static void agp_destroy_page(unsigned long page) +{ + void *pt = (void *) page; + + if (pt == NULL) { + return; + } + atomic_dec(&mem_map[MAP_NR(pt)].count); + clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); + wake_up(&mem_map[MAP_NR(pt)].wait); + free_page((unsigned long) pt); + atomic_dec(&agp_bridge.current_memory_agp); +} + +/* End Basic Page Allocation Routines */ + +/* + * Generic routines for handling agp_memory structures - + * They use the basic page allocation routines to do the + * brunt of the work. + */ + + +static void agp_free_key(int key) +{ + + if (key < 0) { + return; + } + if (key < MAXKEY) { + clear_bit(key, agp_bridge.key_list); + } +} + +static int agp_get_key(void) +{ + int bit; + + bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY); + if (bit < MAXKEY) { + set_bit(bit, agp_bridge.key_list); + return bit; + } + return -1; +} + +static agp_memory *agp_create_memory(int scratch_pages) +{ + agp_memory *new; + + new = kmalloc(sizeof(agp_memory), GFP_KERNEL); + + if (new == NULL) { + return NULL; + } + memset(new, 0, sizeof(agp_memory)); + new->key = agp_get_key(); + + if (new->key < 0) { + kfree(new); + return NULL; + } + new->memory = vmalloc(PAGE_SIZE * scratch_pages); + + if (new->memory == NULL) { + agp_free_key(new->key); + kfree(new); + return NULL; + } + new->num_scratch_pages = scratch_pages; + return new; +} + +void agp_free_memory(agp_memory * curr) +{ + int i; + + if (curr == NULL) { + return; + } + if (curr->is_bound == TRUE) { + agp_unbind_memory(curr); + } + if (curr->type != 0) { + agp_bridge.free_by_type(curr); + MOD_DEC_USE_COUNT; + return; + } + if (curr->page_count != 0) { + for (i = 0; i < 
curr->page_count; i++) { + curr->memory[i] &= ~(0x00000fff); + agp_destroy_page((unsigned long) + phys_to_virt(curr->memory[i])); + } + } + agp_free_key(curr->key); + vfree(curr->memory); + kfree(curr); + MOD_DEC_USE_COUNT; +} + +#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) + +agp_memory *agp_allocate_memory(size_t page_count, u32 type) +{ + int scratch_pages; + agp_memory *new; + int i; + + if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) > + agp_bridge.max_memory_agp) { + return NULL; + } + if (type != 0) { + new = agp_bridge.alloc_by_type(page_count, type); + return new; + } + scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; + + new = agp_create_memory(scratch_pages); + + if (new == NULL) { + return NULL; + } + for (i = 0; i < page_count; i++) { + new->memory[i] = agp_alloc_page(); + + if (new->memory[i] == 0) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = + agp_bridge.mask_memory( + virt_to_phys((void *) new->memory[i]), + type); + new->page_count++; + } + + MOD_INC_USE_COUNT; + return new; +} + +/* End - Generic routines for handling agp_memory structures */ + +static int agp_return_size(void) +{ + int current_size; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + current_size = A_SIZE_8(temp)->size; + break; + case U16_APER_SIZE: + current_size = A_SIZE_16(temp)->size; + break; + case U32_APER_SIZE: + current_size = A_SIZE_32(temp)->size; + break; + case FIXED_APER_SIZE: + current_size = A_SIZE_FIX(temp)->size; + break; + default: + current_size = 0; + break; + } + + return current_size; +} + +/* Routine to copy over information structure */ + +void agp_copy_info(agp_kern_info * info) +{ + memset(info, 0, sizeof(agp_kern_info)); + info->version.major = agp_bridge.version->major; + info->version.minor = agp_bridge.version->minor; + info->device = agp_bridge.dev; + info->chipset = agp_bridge.type; + 
info->mode = agp_bridge.mode; + info->aper_base = agp_bridge.gart_bus_addr; + info->aper_size = agp_return_size(); + info->max_memory = agp_bridge.max_memory_agp; + info->current_memory = atomic_read(&agp_bridge.current_memory_agp); +} + +/* End - Routine to copy over information structure */ + +/* + * Routines for handling swapping of agp_memory into the GATT - + * These routines take agp_memory and insert them into the GATT. + * They call device specific routines to actually write to the GATT. + */ + +int agp_bind_memory(agp_memory * curr, off_t pg_start) +{ + int ret_val; + + if ((curr == NULL) || (curr->is_bound == TRUE)) { + return -EINVAL; + } + if (curr->is_flushed == FALSE) { + CACHE_FLUSH(); + curr->is_flushed = TRUE; + } + ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type); + + if (ret_val != 0) { + return ret_val; + } + curr->is_bound = TRUE; + curr->pg_start = pg_start; + return 0; +} + +int agp_unbind_memory(agp_memory * curr) +{ + int ret_val; + + if (curr == NULL) { + return -EINVAL; + } + if (curr->is_bound != TRUE) { + return -EINVAL; + } + ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type); + + if (ret_val != 0) { + return ret_val; + } + curr->is_bound = FALSE; + curr->pg_start = 0; + return 0; +} + +/* End - Routines for handling swapping of agp_memory into the GATT */ + +/* + * Driver routines - start + * Currently this module supports the + * i810, 440lx, 440bx, 440gx, via vp3, via mvp3, + * amd irongate, ALi M1541 and generic support for the + * SiS chipsets. + */ + +/* Generic Agp routines - Start */ + +static void agp_generic_agp_enable(u32 mode) +{ + struct pci_dev *device = NULL; + u32 command, scratch, cap_id; + u8 cap_ptr; + + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + 4, + &command); + + /* + * PASS1: go throu all devices that claim to be + * AGP devices and collect their data. 
+ */ + + while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, + device)) != NULL) { + pci_read_config_dword(device, 0x04, &scratch); + + if (!(scratch & 0x00100000)) + continue; + + pci_read_config_byte(device, 0x34, &cap_ptr); + + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(device, + cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr != 0x00) { + /* + * Ok, here we have a AGP device. Disable impossible + * settings, and adjust the readqueue to the minimum. + */ + + pci_read_config_dword(device, cap_ptr + 4, &scratch); + + /* adjust RQ depth */ + command = + ((command & ~0xff000000) | + min((mode & 0xff000000), + min((command & 0xff000000), + (scratch & 0xff000000)))); + + /* disable SBA if it's not supported */ + if (!((command & 0x00000200) && + (scratch & 0x00000200) && + (mode & 0x00000200))) + command &= ~0x00000200; + + /* disable FW if it's not supported */ + if (!((command & 0x00000010) && + (scratch & 0x00000010) && + (mode & 0x00000010))) + command &= ~0x00000010; + + if (!((command & 4) && + (scratch & 4) && + (mode & 4))) + command &= ~0x00000004; + + if (!((command & 2) && + (scratch & 2) && + (mode & 2))) + command &= ~0x00000002; + + if (!((command & 1) && + (scratch & 1) && + (mode & 1))) + command &= ~0x00000001; + } + } + /* + * PASS2: Figure out the 4X/2X/1X setting and enable the + * target (our motherboard chipset). + */ + + if (command & 4) { + command &= ~3; /* 4X */ + } + if (command & 2) { + command &= ~5; /* 2X */ + } + if (command & 1) { + command &= ~6; /* 1X */ + } + command |= 0x00000100; + + pci_write_config_dword(agp_bridge.dev, + agp_bridge.capndx + 8, + command); + + /* + * PASS3: Go throu all AGP devices and update the + * command registers. 
+ */ + + while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, + device)) != NULL) { + pci_read_config_dword(device, 0x04, &scratch); + + if (!(scratch & 0x00100000)) + continue; + + pci_read_config_byte(device, 0x34, &cap_ptr); + + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(device, + cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr != 0x00) + pci_write_config_dword(device, cap_ptr + 8, command); + } +} + +static int agp_generic_create_gatt_table(void) +{ + char *table; + char *table_end; + int size; + int page_order; + int num_entries; + int i; + void *temp; + + table = NULL; + i = agp_bridge.aperture_size_idx; + temp = agp_bridge.current_size; + size = page_order = num_entries = 0; + + if (agp_bridge.size_type != FIXED_APER_SIZE) { + do { + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + size = A_SIZE_8(temp)->size; + page_order = + A_SIZE_8(temp)->page_order; + num_entries = + A_SIZE_8(temp)->num_entries; + break; + case U16_APER_SIZE: + size = A_SIZE_16(temp)->size; + page_order = A_SIZE_16(temp)->page_order; + num_entries = A_SIZE_16(temp)->num_entries; + break; + case U32_APER_SIZE: + size = A_SIZE_32(temp)->size; + page_order = A_SIZE_32(temp)->page_order; + num_entries = A_SIZE_32(temp)->num_entries; + break; + /* This case will never really happen. */ + case FIXED_APER_SIZE: + default: + size = page_order = num_entries = 0; + break; + } + + table = (char *) __get_free_pages(GFP_KERNEL, + page_order); + + if (table == NULL) { + i++; + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + agp_bridge.current_size = A_IDX8(); + break; + case U16_APER_SIZE: + agp_bridge.current_size = A_IDX16(); + break; + case U32_APER_SIZE: + agp_bridge.current_size = A_IDX32(); + break; + /* This case will never really + * happen. 
+ */ + case FIXED_APER_SIZE: + default: + agp_bridge.current_size = + agp_bridge.current_size; + break; + } + } else { + agp_bridge.aperture_size_idx = i; + } + } while ((table == NULL) && + (i < agp_bridge.num_aperture_sizes)); + } else { + size = ((aper_size_info_fixed *) temp)->size; + page_order = ((aper_size_info_fixed *) temp)->page_order; + num_entries = ((aper_size_info_fixed *) temp)->num_entries; + table = (char *) __get_free_pages(GFP_KERNEL, page_order); + } + + if (table == NULL) { + return -ENOMEM; + } + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { + set_bit(PG_reserved, &mem_map[i].flags); + } + + agp_bridge.gatt_table_real = (unsigned long *) table; + CACHE_FLUSH(); + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + CACHE_FLUSH(); + + if (agp_bridge.gatt_table == NULL) { + for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { + clear_bit(PG_reserved, &mem_map[i].flags); + } + + free_pages((unsigned long) table, page_order); + + return -ENOMEM; + } + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = + (unsigned long) agp_bridge.scratch_page; + } + + return 0; +} + +static int agp_generic_free_gatt_table(void) +{ + int i; + int page_order; + char *table, *table_end; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + page_order = A_SIZE_8(temp)->page_order; + break; + case U16_APER_SIZE: + page_order = A_SIZE_16(temp)->page_order; + break; + case U32_APER_SIZE: + page_order = A_SIZE_32(temp)->page_order; + break; + case FIXED_APER_SIZE: + page_order = A_SIZE_FIX(temp)->page_order; + break; + default: + page_order = 0; + break; + } + + /* Do not worry about freeing memory, because if this is + * called, then all agp memory is deallocated and removed + * from the table. 
+ */ + + iounmap(agp_bridge.gatt_table); + table = (char *) agp_bridge.gatt_table_real; + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { + clear_bit(PG_reserved, &mem_map[i].flags); + } + + free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); + return 0; +} + +static int agp_generic_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + num_entries = A_SIZE_8(temp)->num_entries; + break; + case U16_APER_SIZE: + num_entries = A_SIZE_16(temp)->num_entries; + break; + case U32_APER_SIZE: + num_entries = A_SIZE_32(temp)->num_entries; + break; + case FIXED_APER_SIZE: + num_entries = A_SIZE_FIX(temp)->num_entries; + break; + default: + num_entries = 0; + break; + } + + if (type != 0 || mem->type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + j = pg_start; + + while (j < (pg_start + mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + agp_bridge.gatt_table[j] = mem->memory[i]; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + + if (type != 0 || mem->type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + agp_bridge.gatt_table[i] = + (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) +{ + return NULL; +} + +static void 
agp_generic_free_by_type(agp_memory * curr) +{ + if (curr->memory != NULL) { + vfree(curr->memory); + } + agp_free_key(curr->key); + kfree(curr); +} + +void agp_enable(u32 mode) +{ + agp_bridge.agp_enable(mode); +} + +/* End - Generic Agp routines */ + +#ifdef CONFIG_AGP_I810 +static aper_size_info_fixed intel_i810_sizes[] = +{ + {64, 16384, 4}, + /* The 32M mode still requires a 64k gatt */ + {32, 8192, 4} +}; + +#define AGP_DCACHE_MEMORY 1 + +static gatt_mask intel_i810_masks[] = +{ + {I810_PTE_VALID, 0}, + {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY} +}; + +static struct _intel_i810_private { + struct pci_dev *i810_dev; /* device one */ + volatile u8 *registers; + int num_dcache_entries; +} intel_i810_private; + +static int intel_i810_fetch_size(void) +{ + u32 smram_miscc; + aper_size_info_fixed *values; + + pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); + values = A_SIZE_FIX(agp_bridge.aperture_sizes); + + if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { + printk("agpgart: i810 is disabled\n"); + return 0; + } + if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + 1); + agp_bridge.aperture_size_idx = 1; + return values[1].size; + } else { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values); + agp_bridge.aperture_size_idx = 0; + return values[0].size; + } + + return 0; +} + +static int intel_i810_configure(void) +{ + aper_size_info_fixed *current_size; + u32 temp; + int i; + + current_size = A_SIZE_FIX(agp_bridge.current_size); + + pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); + temp &= 0xfff80000; + + intel_i810_private.registers = + (volatile u8 *) ioremap(temp, 128 * 4096); + + if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL) + & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { + /* This will need to be dynamically assigned */ + printk(KERN_INFO + "agpgart: detected 4MB dedicated 
video ram.\n"); + intel_i810_private.num_dcache_entries = 1024; + } + pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, + agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); + CACHE_FLUSH(); + + if (agp_bridge.needs_scratch_page == TRUE) { + for (i = 0; i < current_size->num_entries; i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + agp_bridge.scratch_page); + } + } + return 0; +} + +static void intel_i810_cleanup(void) +{ + OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0); + iounmap((void *) intel_i810_private.registers); +} + +static void intel_i810_tlbflush(agp_memory * mem) +{ + return; +} + +static void intel_i810_agp_enable(u32 mode) +{ + return; +} + +static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, + int type) +{ + int i, j, num_entries; + void *temp; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_FIX(temp)->num_entries; + + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + for (j = pg_start; j < (pg_start + mem->page_count); j++) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + } + + if (type != 0 || mem->type != 0) { + if ((type == AGP_DCACHE_MEMORY) && + (mem->type == AGP_DCACHE_MEMORY)) { + /* special insert */ + + for (i = pg_start; + i < (pg_start + mem->page_count); i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + (i * 4096) | I810_PTE_LOCAL | + I810_PTE_VALID); + } + + agp_bridge.tlb_flush(mem); + return 0; + } + return -EINVAL; + } + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (j * 4), mem->memory[i]); + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int intel_i810_remove_entries(agp_memory * mem, off_t 
pg_start, + int type) +{ + int i; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + agp_bridge.scratch_page); + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) +{ + agp_memory *new; + + if (type == AGP_DCACHE_MEMORY) { + if (pg_count != intel_i810_private.num_dcache_entries) { + return NULL; + } + new = agp_create_memory(1); + + if (new == NULL) { + return NULL; + } + new->type = AGP_DCACHE_MEMORY; + new->page_count = pg_count; + new->num_scratch_pages = 0; + vfree(new->memory); + return new; + } + return NULL; +} + +static void intel_i810_free_by_type(agp_memory * curr) +{ + agp_free_key(curr->key); + kfree(curr); +} + +static unsigned long intel_i810_mask_memory(unsigned long addr, int type) +{ + /* Type checking must be done elsewhere */ + return addr | agp_bridge.masks[type].mask; +} + +static void intel_i810_setup(struct pci_dev *i810_dev) +{ + intel_i810_private.i810_dev = i810_dev; + + agp_bridge.masks = intel_i810_masks; + agp_bridge.num_of_masks = 2; + agp_bridge.aperture_sizes = (void *) intel_i810_sizes; + agp_bridge.size_type = FIXED_APER_SIZE; + agp_bridge.num_aperture_sizes = 2; + agp_bridge.dev_private_data = (void *) &intel_i810_private; + agp_bridge.needs_scratch_page = TRUE; + agp_bridge.configure = intel_i810_configure; + agp_bridge.fetch_size = intel_i810_fetch_size; + agp_bridge.cleanup = intel_i810_cleanup; + agp_bridge.tlb_flush = intel_i810_tlbflush; + agp_bridge.mask_memory = intel_i810_mask_memory; + agp_bridge.agp_enable = intel_i810_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = intel_i810_insert_entries; + agp_bridge.remove_memory = intel_i810_remove_entries; + agp_bridge.alloc_by_type = intel_i810_alloc_by_type; + 
agp_bridge.free_by_type = intel_i810_free_by_type; +} + +#endif + +#ifdef CONFIG_AGP_INTEL + +static int intel_fetch_size(void) +{ + int i; + u16 temp; + aper_size_info_16 *values; + + pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); + values = A_SIZE_16(agp_bridge.aperture_sizes); + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void intel_tlbflush(agp_memory * mem) +{ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); +} + +static void intel_cleanup(void) +{ + u16 temp; + aper_size_info_16 *previous_size; + + previous_size = A_SIZE_16(agp_bridge.previous_size); + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, + previous_size->size_value); +} + +static int intel_configure(void) +{ + u32 temp; + u16 temp2; + aper_size_info_16 *current_size; + + current_size = A_SIZE_16(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); + + /* paccfg/nbxcfg */ + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, + (temp2 & ~(1 << 10)) | (1 << 9)); + /* clear any possible error conditions */ + pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 
7); + return 0; +} + +static unsigned long intel_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + + +/* Setup function */ +static gatt_mask intel_generic_masks[] = +{ + {0x00000017, 0} +}; + +static aper_size_info_16 intel_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56}, + {16, 4096, 2, 60}, + {8, 2048, 1, 62}, + {4, 1024, 0, 63} +}; + +static void intel_generic_setup(void) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_generic_sizes; + agp_bridge.size_type = U16_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_configure; + agp_bridge.fetch_size = intel_fetch_size; + agp_bridge.cleanup = intel_cleanup; + agp_bridge.tlb_flush = intel_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef CONFIG_AGP_VIA + +static int via_fetch_size(void) +{ + int i; + u8 temp; + aper_size_info_8 *values; + + values = A_SIZE_8(agp_bridge.aperture_sizes); + pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int via_configure(void) +{ + u32 
temp; + aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + current_size->size_value); + /* address to map too */ + pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* GART control register */ + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); + + /* attbase - aperture GATT base */ + pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, + (agp_bridge.gatt_bus_addr & 0xfffff000) | 3); + return 0; +} + +static void via_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0); + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + previous_size->size_value); +} + +static void via_tlbflush(agp_memory * mem) +{ + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f); + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); +} + +static unsigned long via_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static aper_size_info_8 via_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 128}, + {64, 16384, 4, 192}, + {32, 8192, 3, 224}, + {16, 4096, 2, 240}, + {8, 2048, 1, 248}, + {4, 1024, 0, 252} +}; + +static gatt_mask via_generic_masks[] = +{ + {0x00000000, 0} +}; + +static void via_generic_setup(void) +{ + agp_bridge.masks = via_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) via_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = via_configure; + agp_bridge.fetch_size = via_fetch_size; + agp_bridge.cleanup = via_cleanup; + agp_bridge.tlb_flush = via_tlbflush; + agp_bridge.mask_memory 
= via_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef CONFIG_AGP_SIS + +static int sis_fetch_size(void) +{ + u8 temp_size; + int i; + aper_size_info_8 *values; + + pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size); + values = A_SIZE_8(agp_bridge.aperture_sizes); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if ((temp_size == values[i].size_value) || + ((temp_size & ~(0x03)) == + (values[i].size_value & ~(0x03)))) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + + +static void sis_tlbflush(agp_memory * mem) +{ + pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02); +} + +static int sis_configure(void) +{ + u32 temp; + aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); + pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, + agp_bridge.gatt_bus_addr); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + current_size->size_value); + return 0; +} + +static void sis_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + (previous_size->size_value & ~(0x03))); +} + +static unsigned long sis_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return 
addr | agp_bridge.masks[0].mask; +} + +static aper_size_info_8 sis_generic_sizes[7] = +{ + {256, 65536, 6, 99}, + {128, 32768, 5, 83}, + {64, 16384, 4, 67}, + {32, 8192, 3, 51}, + {16, 4096, 2, 35}, + {8, 2048, 1, 19}, + {4, 1024, 0, 3} +}; + +static gatt_mask sis_generic_masks[] = +{ + {0x00000000, 0} +}; + +static void sis_generic_setup(void) +{ + agp_bridge.masks = sis_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) sis_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = sis_configure; + agp_bridge.fetch_size = sis_fetch_size; + agp_bridge.cleanup = sis_cleanup; + agp_bridge.tlb_flush = sis_tlbflush; + agp_bridge.mask_memory = sis_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef CONFIG_AGP_AMD + +static struct _amd_irongate_private { + volatile u8 *registers; +} amd_irongate_private; + +static int amd_irongate_fetch_size(void) +{ + int i; + u32 temp; + aper_size_info_32 *values; + + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = (temp & 0x0000000e); + values = A_SIZE_32(agp_bridge.aperture_sizes); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int amd_irongate_configure(void) +{ + aper_size_info_32 *current_size; + u32 
temp; + u16 enable_reg; + + current_size = A_SIZE_32(agp_bridge.current_size); + + /* Get the memory mapped registers */ + pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096); + + /* Write out the address of the gatt table */ + OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* Write the Sync register */ + pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); + + /* Write the enable register */ + enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); + enable_reg = (enable_reg | 0x0004); + OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); + + /* Write out the size register */ + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); + pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + + /* Flush the tlb */ + OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); + + /* Get the address for the gart region */ + pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + agp_bridge.gart_bus_addr = temp; + return 0; +} + +static void amd_irongate_cleanup(void) +{ + aper_size_info_32 *previous_size; + u32 temp; + u16 enable_reg; + + previous_size = A_SIZE_32(agp_bridge.previous_size); + + enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); + enable_reg = (enable_reg & ~(0x0004)); + OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); + + /* Write back the previous size and disable gart translation */ + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + iounmap((void *) amd_irongate_private.registers); +} + +/* + * This routine could be implemented by taking the 
addresses + * written to the GATT, and flushing them individually. However + * currently it just flushes the whole table. Which is probably + * more efficent, since agp_memory blocks can be a large number of + * entries. + */ + +static void amd_irongate_tlbflush(agp_memory * temp) +{ + OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); +} + +static unsigned long amd_irongate_mask_memory(unsigned long addr, int type) +{ + /* Only type 0 is supported by the irongate */ + + return addr | agp_bridge.masks[0].mask; +} + +static aper_size_info_32 amd_irongate_sizes[7] = +{ + {2048, 524288, 9, 0x0000000c}, + {1024, 262144, 8, 0x0000000a}, + {512, 131072, 7, 0x00000008}, + {256, 65536, 6, 0x00000006}, + {128, 32768, 5, 0x00000004}, + {64, 16384, 4, 0x00000002}, + {32, 8192, 3, 0x00000000} +}; + +static gatt_mask amd_irongate_masks[] = +{ + {0x00000001, 0} +}; + +static void amd_irongate_setup(void) +{ + agp_bridge.masks = amd_irongate_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; + agp_bridge.size_type = U32_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = (void *) &amd_irongate_private; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = amd_irongate_configure; + agp_bridge.fetch_size = amd_irongate_fetch_size; + agp_bridge.cleanup = amd_irongate_cleanup; + agp_bridge.tlb_flush = amd_irongate_tlbflush; + agp_bridge.mask_memory = amd_irongate_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef CONFIG_AGP_ALI + +static int ali_fetch_size(void) 
+{ + int i; + u32 temp; + aper_size_info_32 *values; + + pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); + temp &= ~(0xfffffff0); + values = A_SIZE_32(agp_bridge.aperture_sizes); + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void ali_tlbflush(agp_memory * mem) +{ + u32 temp; + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000090)); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000010)); +} + +static void ali_cleanup(void) +{ + aper_size_info_32 *previous_size; + u32 temp; + + previous_size = A_SIZE_32(agp_bridge.previous_size); + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000090)); + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, + previous_size->size_value); +} + +static int ali_configure(void) +{ + u32 temp; + aper_size_info_32 *current_size; + + current_size = A_SIZE_32(agp_bridge.current_size); + + /* aperture size and gatt addr */ + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, + agp_bridge.gatt_bus_addr | current_size->size_value); + + /* tlb control */ + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000010)); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + return 0; +} + +static unsigned long ali_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + + +/* Setup function */ +static gatt_mask ali_generic_masks[] = +{ 
+ {0x00000000, 0} +}; + +static aper_size_info_32 ali_generic_sizes[7] = +{ + {256, 65536, 6, 10}, + {128, 32768, 5, 9}, + {64, 16384, 4, 8}, + {32, 8192, 3, 7}, + {16, 4096, 2, 6}, + {8, 2048, 1, 4}, + {4, 1024, 0, 3} +}; + +static void ali_generic_setup(void) +{ + agp_bridge.masks = ali_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) ali_generic_sizes; + agp_bridge.size_type = U32_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = ali_configure; + agp_bridge.fetch_size = ali_fetch_size; + agp_bridge.cleanup = ali_cleanup; + agp_bridge.tlb_flush = ali_tlbflush; + agp_bridge.mask_memory = ali_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + + + +/* Supported Device Scanning routine */ + +static void agp_find_supported_device(void) +{ + struct pci_dev *dev = NULL; + u8 cap_ptr = 0x00; + u32 cap_id, scratch; + + if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) { + agp_bridge.type = NOT_SUPPORTED; + return; + } + agp_bridge.dev = dev; + + /* Need to test for I810 here */ +#ifdef CONFIG_AGP_I810 + if (dev->vendor == PCI_VENDOR_ID_INTEL) { + struct pci_dev *i810_dev; + + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_810_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_1, + NULL); + if (i810_dev == NULL) { + printk("agpgart: Detected an Intel i810," + " but could not find the secondary" + " device.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + 
printk(KERN_INFO "agpgart: Detected an Intel " + "i810 Chipset.\n"); + agp_bridge.type = INTEL_I810; + agp_bridge.intel_i810_setup(i810_dev); + return; + + case PCI_DEVICE_ID_INTEL_810_DC100_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_DC100_1, + NULL); + if (i810_dev == NULL) { + printk("agpgart: Detected an Intel i810 " + "DC100, but could not find the " + "secondary device.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + printk(KERN_INFO "agpgart: Detected an Intel i810 " + "DC100 Chipset.\n"); + agp_bridge.type = INTEL_I810; + agp_bridge.intel_i810_setup(i810_dev); + return; + + case PCI_DEVICE_ID_INTEL_810_E_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_E_1, + NULL); + if (i810_dev == NULL) { + printk("agpgart: Detected an Intel i810 E" + ", but could not find the secondary " + "device.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + printk(KERN_INFO "agpgart: Detected an Intel i810 E " + "Chipset.\n"); + agp_bridge.type = INTEL_I810; + agp_bridge.intel_i810_setup(i810_dev); + return; + default: + break; + } + } +#endif + /* find capndx */ + pci_read_config_dword(dev, 0x04, &scratch); + + if (!(scratch & 0x00100000)) { + agp_bridge.type = NOT_SUPPORTED; + return; + } + pci_read_config_byte(dev, 0x34, &cap_ptr); + + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(dev, cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr == 0x00) { + agp_bridge.type = NOT_SUPPORTED; + return; + } + agp_bridge.capndx = cap_ptr; + + /* Fill in the mode register */ + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + 4, + &agp_bridge.mode); + + switch (dev->vendor) { +#ifdef CONFIG_AGP_INTEL + case PCI_VENDOR_ID_INTEL: + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_82443LX_0: + agp_bridge.type = INTEL_LX; + printk(KERN_INFO "agpgart: Detected an Intel 440LX" + " 
Chipset.\n"); + agp_bridge.intel_generic_setup(); + return; + + case PCI_DEVICE_ID_INTEL_82443BX_0: + agp_bridge.type = INTEL_BX; + printk(KERN_INFO "agpgart: Detected an Intel 440BX " + "Chipset.\n"); + agp_bridge.intel_generic_setup(); + return; + + case PCI_DEVICE_ID_INTEL_82443GX_0: + agp_bridge.type = INTEL_GX; + printk(KERN_INFO "agpgart: Detected an Intel 440GX " + "Chipset.\n"); + agp_bridge.intel_generic_setup(); + return; + + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying generic intel " + "routines for device id: %x\n", + dev->device); + agp_bridge.type = INTEL_GENERIC; + agp_bridge.intel_generic_setup(); + return; + } else { + printk("agpgart: Unsupported intel chipset," + " you might want to try " + "agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef CONFIG_AGP_VIA + case PCI_VENDOR_ID_VIA: + switch (dev->device) { + case PCI_DEVICE_ID_VIA_82C597_0: + agp_bridge.type = VIA_VP3; + printk(KERN_INFO "agpgart: Detected a VIA VP3 " + "Chipset.\n"); + agp_bridge.via_generic_setup(); + return; + + case PCI_DEVICE_ID_VIA_82C598_0: + agp_bridge.type = VIA_MVP3; + printk(KERN_INFO "agpgart: Detected a VIA MVP3 " + "Chipset.\n"); + agp_bridge.via_generic_setup(); + return; + + case PCI_DEVICE_ID_VIA_82C691_0: + agp_bridge.type = VIA_APOLLO_PRO; + printk(KERN_INFO "agpgart: Detected a VIA Apollo " + "Pro Chipset.\n"); + agp_bridge.via_generic_setup(); + return; + + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying generic VIA routines" + " for device id: %x\n", dev->device); + agp_bridge.type = VIA_GENERIC; + agp_bridge.via_generic_setup(); + return; + } else { + printk("agpgart: Unsupported VIA chipset," + " you might want to try " + "agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef CONFIG_AGP_SIS + case PCI_VENDOR_ID_SI: + switch (dev->device) { + /* ToDo need to find out the + * specific devices supported. 
+ */ + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying generic SiS routines" + " for device id: %x\n", dev->device); + agp_bridge.type = SIS_GENERIC; + agp_bridge.sis_generic_setup(); + return; + } else { + printk("agpgart: Unsupported SiS chipset, " + "you might want to try " + "agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef CONFIG_AGP_AMD + case PCI_VENDOR_ID_AMD: + switch (dev->device) { + case PCI_DEVICE_ID_AMD_IRONGATE_0: + agp_bridge.type = AMD_IRONGATE; + printk(KERN_INFO "agpgart: Detected an AMD Irongate" + " Chipset.\n"); + agp_bridge.amd_irongate_setup(); + return; + + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying Amd irongate" + " routines for device id: %x\n", + dev->device); + agp_bridge.type = AMD_GENERIC; + agp_bridge.amd_irongate_setup(); + return; + } else { + printk("agpgart: Unsupported Amd chipset," + " you might want to try " + "agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef CONFIG_AGP_ALI + case PCI_VENDOR_ID_AL: + switch (dev->device) { + case PCI_DEVICE_ID_AL_M1541_0: + agp_bridge.type = ALI_M1541; + printk(KERN_INFO "agpgart: Detected an ALi M1541" + " Chipset\n"); + agp_bridge.ali_generic_setup(); + return; + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying ALi generic routines" + " for device id: %x\n", dev->device); + agp_bridge.type = ALI_GENERIC; + agp_bridge.ali_generic_setup(); + return; + } else { + printk("agpgart: Unsupported ALi chipset," + " you might want to type " + "agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + default: + agp_bridge.type = NOT_SUPPORTED; + return; + } +} + +struct agp_max_table { + int mem; + int agp; +}; + +static struct agp_max_table maxes_table[9] = +{ + {0, 0}, + {32, 4}, + {64, 28}, + {128, 96}, + {256, 204}, + {512, 440}, + {1024, 942}, + {2048, 1920}, + {4096, 3932} 
+}; + +static int agp_find_max(void) +{ + int memory; + float t; + int index; + int result; + + memory = virt_to_phys(high_memory) / 0x100000; + index = 0; + + while ((memory > maxes_table[index].mem) && + (index < 8)) { + index++; + } + + t = (memory - maxes_table[index - 1].mem) / + (maxes_table[index].mem - maxes_table[index - 1].mem); + + result = maxes_table[index - 1].agp + + (t * (maxes_table[index].agp - maxes_table[index - 1].agp)); + + printk(KERN_INFO "agpgart: Maximum main memory to use " + "for agp memory: %dM\n", result); + result = (result * 0x100000) / 4096; + return result; +} + +#define AGPGART_VERSION_MAJOR 0 +#define AGPGART_VERSION_MINOR 99 + +static agp_version agp_current_version = +{ + AGPGART_VERSION_MAJOR, + AGPGART_VERSION_MINOR +}; + +static int agp_backend_initialize(void) +{ + int size_value; + + memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); + agp_bridge.type = NOT_SUPPORTED; +#ifdef CONFIG_AGP_INTEL + agp_bridge.intel_generic_setup = intel_generic_setup; +#endif +#ifdef CONFIG_AGP_I810 + agp_bridge.intel_i810_setup = intel_i810_setup; +#endif +#ifdef CONFIG_AGP_VIA + agp_bridge.via_generic_setup = via_generic_setup; +#endif +#ifdef CONFIG_AGP_SIS + agp_bridge.sis_generic_setup = sis_generic_setup; +#endif +#ifdef CONFIG_AGP_AMD + agp_bridge.amd_irongate_setup = amd_irongate_setup; +#endif +#ifdef CONFIG_AGP_ALI + agp_bridge.ali_generic_setup = ali_generic_setup; +#endif + agp_bridge.max_memory_agp = agp_find_max(); + agp_bridge.version = &agp_current_version; + agp_find_supported_device(); + + if (agp_bridge.needs_scratch_page == TRUE) { + agp_bridge.scratch_page = agp_alloc_page(); + + if (agp_bridge.scratch_page == 0) { + printk("agpgart: unable to get memory for " + "scratch page.\n"); + return -ENOMEM; + } + agp_bridge.scratch_page = + virt_to_phys((void *) agp_bridge.scratch_page); + agp_bridge.scratch_page = + agp_bridge.mask_memory(agp_bridge.scratch_page, 0); + } + if (agp_bridge.type == NOT_SUPPORTED) { + 
printk("agpgart: no supported devices found.\n"); + return -EINVAL; + } + size_value = agp_bridge.fetch_size(); + + if (size_value == 0) { + printk("agpgart: unable to detrimine aperture size.\n"); + return -EINVAL; + } + if (agp_bridge.create_gatt_table()) { + printk("agpgart: unable to get memory for graphics " + "translation table.\n"); + return -ENOMEM; + } + agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); + + if (agp_bridge.key_list == NULL) { + printk("agpgart: error allocating memory for key lists.\n"); + agp_bridge.free_gatt_table(); + return -ENOMEM; + } + memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); + + if (agp_bridge.configure()) { + printk("agpgart: error configuring host chipset.\n"); + agp_bridge.free_gatt_table(); + vfree(agp_bridge.key_list); + return -EINVAL; + } + printk(KERN_INFO "agpgart: Physical address of the agp aperture:" + " 0x%lx\n", agp_bridge.gart_bus_addr); + printk(KERN_INFO "agpgart: Agp aperture is %dM in size.\n", + size_value); + return 0; +} + +static void agp_backend_cleanup(void) +{ + agp_bridge.cleanup(); + agp_bridge.free_gatt_table(); + vfree(agp_bridge.key_list); + + if (agp_bridge.needs_scratch_page == TRUE) { + agp_bridge.scratch_page &= ~(0x00000fff); + agp_destroy_page((unsigned long) + phys_to_virt(agp_bridge.scratch_page)); + } +} + +extern int agp_frontend_initialize(void); +extern void agp_frontend_cleanup(void); + +static int __init agp_init(void) +{ + int ret_val; + + printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", + AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); + ret_val = agp_backend_initialize(); + + if (ret_val != 0) { + return ret_val; + } + ret_val = agp_frontend_initialize(); + + if (ret_val != 0) { + agp_backend_cleanup(); + return ret_val; + } + return 0; +} + +static void __exit agp_cleanup(void) +{ + agp_frontend_cleanup(); + agp_backend_cleanup(); +} + +module_init(agp_init); +module_exit(agp_cleanup); diff -u --recursive --new-file v2.3.31/linux/drivers/char/agp/agpgart_fe.c 
linux/drivers/char/agp/agpgart_fe.c --- v2.3.31/linux/drivers/char/agp/agpgart_fe.c Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/agp/agpgart_fe.c Thu Dec 9 17:02:05 1999 @@ -1,8 +1,8 @@ /* * AGPGART module frontend version 0.99 * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight - * Copyright (C) 1999 Xi Graphics + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -25,7 +25,6 @@ */ #define __NO_VERSION__ -#include #include #include #include @@ -44,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -187,7 +185,8 @@ agp_segment *user_seg; int i; - seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL); + seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), + GFP_KERNEL); if (seg == NULL) { kfree(region->seg_list); return -ENOMEM; @@ -373,8 +372,8 @@ priv = agp_find_private(temp->pid); if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &(priv->access_flags)); - clear_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); + clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); } client = client->next; kfree(temp); @@ -439,8 +438,8 @@ priv = agp_find_private(clients->pid); if (priv != NULL) { - set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); - set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); } clients = clients->next; } @@ -453,7 +452,7 @@ { agp_client *clients; - clear_bit(AGP_FF_IS_VALID, &(controller_priv->access_flags)); + clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); clients = controller->clients; while (clients != NULL) { @@ -462,7 +461,7 @@ priv = agp_find_private(clients->pid); if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, 
&(priv->access_flags)); + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); } clients = clients->next; } @@ -610,7 +609,7 @@ AGP_UNLOCK(); return -EPERM; } - if (!(test_bit(AGP_FF_IS_VALID, &(priv->access_flags)))) { + if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) { AGP_UNLOCK(); return -EPERM; } @@ -620,7 +619,7 @@ current_size = current_size * 0x100000; offset = vma->vm_pgoff << PAGE_SHIFT; - if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { if ((size + offset) > current_size) { AGP_UNLOCK(); return -EINVAL; @@ -631,11 +630,13 @@ AGP_UNLOCK(); return -EPERM; } - if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) { + if (!agp_find_seg_in_client(client, offset, + size, vma->vm_page_prot)) { AGP_UNLOCK(); return -EINVAL; } - if (remap_page_range(vma->vm_start, (kerninfo.aper_base + offset), + if (remap_page_range(vma->vm_start, + (kerninfo.aper_base + offset), size, vma->vm_page_prot)) { AGP_UNLOCK(); return -EAGAIN; @@ -643,7 +644,7 @@ AGP_UNLOCK(); return 0; } - if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { if (size != current_size) { AGP_UNLOCK(); return -EINVAL; @@ -666,19 +667,20 @@ AGP_LOCK(); - if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { agp_controller *controller; controller = agp_find_controller_by_pid(priv->my_pid); if (controller != NULL) { if (controller == agp_fe.current_controller) { - agp_controller_release_current(controller, priv); + agp_controller_release_current(controller, + priv); } agp_remove_controller(controller); } } - if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { agp_remove_client(priv->my_pid); } agp_remove_file_private(priv); @@ -707,18 +709,18 @@ return -ENOMEM; } memset(priv, 0, sizeof(agp_file_private)); - 
set_bit(AGP_FF_ALLOW_CLIENT, &(priv->access_flags)); + set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); priv->my_pid = current->pid; if ((current->uid == 0) || (current->suid == 0)) { /* Root priv, can be controller */ - set_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)); + set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); } client = agp_find_client_by_pid(current->pid); if (client != NULL) { - set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); } file->private_data = (void *) priv; agp_insert_file_private(priv); @@ -754,7 +756,8 @@ userinfo.version.major = kerninfo.version.major; userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16); + userinfo.bridge_id = kerninfo.device->vendor | + (kerninfo.device->device << 16); userinfo.agp_mode = kerninfo.mode; userinfo.aper_base = kerninfo.aper_base; userinfo.aper_size = kerninfo.aper_size; @@ -770,7 +773,7 @@ static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg) { agp_controller *controller; - if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)))) { + if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) { return -EPERM; } if (agp_fe.current_controller != NULL) { @@ -798,8 +801,8 @@ agp_controller_make_current(controller); } - set_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); return 0; } @@ -837,8 +840,10 @@ client_priv = agp_find_private(reserve.pid); if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, + &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, + 
&client_priv->access_flags); } if (client == NULL) { /* client is already removed */ @@ -848,12 +853,14 @@ } else { agp_segment *segment; - segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), GFP_KERNEL); + segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), + GFP_KERNEL); if (segment == NULL) { return -ENOMEM; } - if (copy_from_user(segment, (void *) reserve.seg_list, GFP_KERNEL)) { + if (copy_from_user(segment, (void *) reserve.seg_list, + GFP_KERNEL)) { kfree(segment); return -EFAULT; } @@ -870,8 +877,10 @@ client_priv = agp_find_private(reserve.pid); if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, + &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, + &client_priv->access_flags); } return agp_create_segment(client, &reserve); } else { @@ -972,10 +981,12 @@ return -EBUSY; } if (cmd != AGPIOC_ACQUIRE) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, &(curr_priv->access_flags)))) { + if (!(test_bit(AGP_FF_IS_CONTROLLER, + &curr_priv->access_flags))) { return -EPERM; } - /* Use the original pid of the controller, in case it's threaded */ + /* Use the original pid of the controller, + * in case it's threaded */ if (agp_fe.current_controller->pid != curr_priv->my_pid) { return -EBUSY; diff -u --recursive --new-file v2.3.31/linux/drivers/char/busmouse.c linux/drivers/char/busmouse.c --- v2.3.31/linux/drivers/char/busmouse.c Sun Nov 7 16:37:34 1999 +++ linux/drivers/char/busmouse.c Mon Dec 13 16:26:27 1999 @@ -49,7 +49,6 @@ /*#define BROKEN_MOUSE*/ extern int sun_mouse_init(void); -extern void mouse_rpc_init (void); struct busmouse_data { struct miscdevice miscdev; @@ -431,9 +430,6 @@ { #ifdef CONFIG_SUN_MOUSE sun_mouse_init(); -#endif -#ifdef CONFIG_RPCMOUSE - mouse_rpc_init(); #endif return 0; } diff -u --recursive --new-file v2.3.31/linux/drivers/char/drm/drmP.h linux/drivers/char/drm/drmP.h --- 
v2.3.31/linux/drivers/char/drm/drmP.h Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/drm/drmP.h Tue Dec 14 00:55:06 1999 @@ -116,7 +116,6 @@ #endif /* Generic cmpxchg added in 2.3.x */ -#if CPU != 386 #ifndef __HAVE_ARCH_CMPXCHG /* Include this here so that driver can be used with older kernels. */ @@ -151,10 +150,6 @@ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ (unsigned long)(n),sizeof(*(ptr)))) #endif -#else - /* Compiling for a 386 proper... */ -#error DRI not supported on Intel 80386 -#endif /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) \ @@ -468,6 +463,7 @@ /* Misc. support (init.c) */ extern int drm_flags; extern void drm_parse_options(char *s); +extern int drm_cpu_valid(void); /* Device support (fops.c) */ diff -u --recursive --new-file v2.3.31/linux/drivers/char/drm/fops.c linux/drivers/char/drm/fops.c --- v2.3.31/linux/drivers/char/drm/fops.c Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/drm/fops.c Fri Dec 10 15:34:45 1999 @@ -40,6 +40,7 @@ drm_file_t *priv; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ + if (!drm_cpu_valid()) return -EINVAL; DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor); diff -u --recursive --new-file v2.3.31/linux/drivers/char/drm/init.c linux/drivers/char/drm/init.c --- v2.3.31/linux/drivers/char/drm/init.c Wed Dec 8 14:11:25 1999 +++ linux/drivers/char/drm/init.c Fri Dec 10 15:34:45 1999 @@ -97,3 +97,13 @@ } } +/* drm_cpu_valid returns non-zero if the DRI will run on this CPU, and 0 + * otherwise. 
*/ + +int drm_cpu_valid(void) +{ +#if defined(__i386__) + if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */ +#endif + return 1; +} diff -u --recursive --new-file v2.3.31/linux/drivers/char/misc.c linux/drivers/char/misc.c --- v2.3.31/linux/drivers/char/misc.c Tue Nov 23 22:42:20 1999 +++ linux/drivers/char/misc.c Mon Dec 13 16:26:27 1999 @@ -79,9 +79,6 @@ extern int pc110pad_init(void); extern int pmu_device_init(void); extern int qpmouse_init(void); -extern int ds1620_init(void); -extern int nwbutton_init(void); -extern int nwflash_init(void); static int misc_read_proc(char *buf, char **start, off_t offset, int len, int *eof, void *private) @@ -237,15 +234,6 @@ #endif #ifdef CONFIG_SGI streamable_init (); -#endif -#ifdef CONFIG_DS1620 - ds1620_init(); -#endif -#ifdef CONFIG_NWBUTTON - nwbutton_init(); -#endif -#ifdef CONFIG_NWFLASH - nwflash_init(); #endif #ifdef CONFIG_SGI_NEWPORT_GFX gfx_register (); diff -u --recursive --new-file v2.3.31/linux/drivers/char/msp3400.c linux/drivers/char/msp3400.c --- v2.3.31/linux/drivers/char/msp3400.c Tue Aug 31 17:29:13 1999 +++ linux/drivers/char/msp3400.c Wed Dec 8 15:17:55 1999 @@ -76,6 +76,7 @@ int mode; int norm; int stereo; + int nicam_on; int main, second; /* sound carrier */ int left, right; /* volume */ @@ -151,27 +152,39 @@ static int msp3400c_read(struct i2c_bus *bus, int dev, int addr) { - int ret=0; - short val = 0; - i2c_start(bus); - if (0 != i2c_sendbyte(bus, I2C_MSP3400C,2000) || - 0 != i2c_sendbyte(bus, dev+1, 0) || - 0 != i2c_sendbyte(bus, addr >> 8, 0) || - 0 != i2c_sendbyte(bus, addr & 0xff, 0)) { - ret = -1; - } else { + int err,ret; + short val=0; + + for (err = 0; err < 3;) { + ret = 0; i2c_start(bus); - if (0 != i2c_sendbyte(bus, I2C_MSP3400C+1,2000)) { + if (0 != i2c_sendbyte(bus, I2C_MSP3400C,2000) || + 0 != i2c_sendbyte(bus, dev+1, 0) || + 0 != i2c_sendbyte(bus, addr >> 8, 0) || + 0 != i2c_sendbyte(bus, addr & 0xff, 0)) { ret = -1; } else { - val |= (int)i2c_readbyte(bus,0) << 8; - val 
|= (int)i2c_readbyte(bus,1); + i2c_start(bus); + if (0 != i2c_sendbyte(bus, I2C_MSP3400C+1,2000)) { + ret = -1; + } else { + val |= (int)i2c_readbyte(bus,0) << 8; + val |= (int)i2c_readbyte(bus,1); + } } + i2c_stop(bus); + if (0 == ret) + break; + + /* some I/O error */ + err++; + printk(KERN_WARNING "msp34xx: I/O error #%d (read 0x%02x/0x%02x)\n", + err, dev, addr); + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(HZ/10); } - i2c_stop(bus); if (-1 == ret) { - printk(KERN_WARNING "msp3400: I/O error, trying reset (read %s 0x%x)\n", - (dev == I2C_MSP3400C_DEM) ? "Demod" : "Audio", addr); + printk(KERN_WARNING "msp34xx: giving up, reseting chip. Sound will go off, sorry folks :-|\n"); msp3400c_reset(bus); } return val; @@ -180,20 +193,31 @@ static int msp3400c_write(struct i2c_bus *bus, int dev, int addr, int val) { - int ret = 0; + int ret,err; - i2c_start(bus); - if (0 != i2c_sendbyte(bus, I2C_MSP3400C,2000) || - 0 != i2c_sendbyte(bus, dev, 0) || - 0 != i2c_sendbyte(bus, addr >> 8, 0) || - 0 != i2c_sendbyte(bus, addr & 0xff, 0) || - 0 != i2c_sendbyte(bus, val >> 8, 0) || - 0 != i2c_sendbyte(bus, val & 0xff, 0)) - ret = -1; - i2c_stop(bus); + for (err = 0; err < 3;) { + ret = 0; + i2c_start(bus); + if (0 != i2c_sendbyte(bus, I2C_MSP3400C,2000) || + 0 != i2c_sendbyte(bus, dev, 0) || + 0 != i2c_sendbyte(bus, addr >> 8, 0) || + 0 != i2c_sendbyte(bus, addr & 0xff, 0) || + 0 != i2c_sendbyte(bus, val >> 8, 0) || + 0 != i2c_sendbyte(bus, val & 0xff, 0)) + ret = -1; + i2c_stop(bus); + if (0 == ret) + break; + + /* some I/O error */ + err++; + printk(KERN_WARNING "msp34xx: I/O error #%d (write 0x%02x/0x%02x)\n", + err, dev, addr); + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(HZ/10); + } if (-1 == ret) { - printk(KERN_WARNING "msp3400: I/O error, trying reset (write %s 0x%x)\n", - (dev == I2C_MSP3400C_DEM) ? "Demod" : "Audio", addr); + printk(KERN_WARNING "msp34xx: giving up, reseting chip. 
Sound will go off, sorry folks :-|\n"); msp3400c_reset(bus); } return ret; @@ -383,12 +407,12 @@ static void msp3400c_setstereo(struct msp3400c *msp, int mode) { int nicam=0; /* channel source: FM/AM or nicam */ - + int src=0; + /* switch demodulator */ switch (msp->mode) { case MSP_MODE_FM_TERRA: dprintk("msp3400: FM setstereo: %d\n",mode); - msp->stereo = mode; msp3400c_setcarrier(msp->bus,msp->second,msp->main); switch (mode) { case VIDEO_SOUND_STEREO: @@ -403,7 +427,6 @@ break; case MSP_MODE_FM_SAT: dprintk("msp3400: SAT setstereo: %d\n",mode); - msp->stereo = mode; switch (mode) { case VIDEO_SOUND_MONO: msp3400c_setcarrier(msp->bus, MSP_CARRIER(6.5), MSP_CARRIER(6.5)); @@ -424,7 +447,8 @@ dprintk("msp3400: NICAM setstereo: %d\n",mode); msp->stereo = mode; msp3400c_setcarrier(msp->bus,msp->second,msp->main); - nicam=0x0100; + if (msp->nicam_on) + nicam=0x0100; break; default: /* can't do stereo - abort here */ @@ -434,25 +458,22 @@ /* switch audio */ switch (mode) { case VIDEO_SOUND_STEREO: - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0008,0x0020|nicam); - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0009,0x0020|nicam); - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x000a,0x0020|nicam); + src = 0x0020 | nicam; #if 0 msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0005,0x4000); #endif break; case VIDEO_SOUND_MONO: case VIDEO_SOUND_LANG1: - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0008,0x0000|nicam); - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0009,0x0000|nicam); - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x000a,0x0000|nicam); + src = 0x0000 | nicam; break; case VIDEO_SOUND_LANG2: - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0008,0x0010|nicam); - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0009,0x0010|nicam); - msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x000a,0x0010|nicam); + src = 0x0010 | nicam; break; } + msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0008,src); + msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0009,src); + msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 
0x000a,src); } static void @@ -491,6 +512,77 @@ { 0x0057, "ERROR_RATE" }, }; +static int +autodetect_stereo(struct msp3400c *msp) +{ + int val; + int newstereo = msp->stereo; + int newnicam = msp->nicam_on; + int update = 0; + + switch (msp->mode) { + case MSP_MODE_FM_TERRA: + val = msp3400c_read(msp->bus, I2C_MSP3400C_DFP, 0x18); + dprintk("msp3400: stereo detect register: %d\n",val); + + if (val > 4096) { + newstereo = VIDEO_SOUND_STEREO | VIDEO_SOUND_MONO; + } else if (val < -4096) { + newstereo = VIDEO_SOUND_LANG1 | VIDEO_SOUND_LANG2; + } else { + newstereo = VIDEO_SOUND_MONO; + } + newnicam = 0; + break; + case MSP_MODE_FM_NICAM1: + case MSP_MODE_FM_NICAM2: + val = msp3400c_read(msp->bus, I2C_MSP3400C_DEM, 0x23); + dprintk("msp3400: nicam sync=%d, mode=%d\n",val & 1, (val & 0x1e) >> 1); + + if (val & 1) { + /* nicam synced */ + switch ((val & 0x1e) >> 1) { + case 0: + case 8: + newstereo = VIDEO_SOUND_STEREO; + break; + case 1: + case 9: + newstereo = VIDEO_SOUND_MONO + | VIDEO_SOUND_LANG1; + break; + case 2: + case 10: + newstereo = VIDEO_SOUND_MONO + | VIDEO_SOUND_LANG1 + | VIDEO_SOUND_LANG2; + break; + default: + newstereo = VIDEO_SOUND_MONO; + break; + } + newnicam=1; + } else { + newnicam=0; + newstereo = VIDEO_SOUND_MONO; + } + break; + } + if (newstereo != msp->stereo) { + update = 1; + dprintk("msp3400: watch: stereo %d => %d\n", + msp->stereo,newstereo); + msp->stereo = newstereo; + } + if (newnicam != msp->nicam_on) { + update = 1; + dprintk("msp3400: watch: nicam %d => %d\n", + msp->nicam_on,newnicam); + msp->nicam_on = newnicam; + } + return update; +} + /* * A kernel thread for msp3400 control -- we don't want to block the * in the ioctl while doing the sound carrier & stereo detect @@ -503,13 +595,34 @@ wake_up_interruptible(&msp->wq); } +/* stereo/multilang monitoring */ +static void watch_stereo(struct msp3400c *msp) +{ + LOCK_FLAGS; + + LOCK_I2C_BUS(msp->bus); + if (autodetect_stereo(msp)) { + if (msp->stereo & VIDEO_SOUND_STEREO) + 
msp3400c_setstereo(msp,VIDEO_SOUND_STEREO); + else if (msp->stereo & VIDEO_SOUND_LANG1) + msp3400c_setstereo(msp,VIDEO_SOUND_LANG1); + else + msp3400c_setstereo(msp,VIDEO_SOUND_MONO); + } + UNLOCK_I2C_BUS(msp->bus); + if (msp->watch_stereo) { + del_timer(&msp->wake_stereo); + msp->wake_stereo.expires = jiffies + 5*HZ; + add_timer(&msp->wake_stereo); + } +} + static int msp3400c_thread(void *data) { struct msp3400c *msp = data; struct CARRIER_DETECT *cd; int count, max1,max2,val1,val2, val,this; - int newstereo; LOCK_FLAGS; #ifdef __SMP__ @@ -529,7 +642,7 @@ unlock_kernel(); #endif - dprintk("msp3400: thread: start\n"); + printk("msp3400: daemon started\n"); if(msp->notify != NULL) up(msp->notify); @@ -550,48 +663,7 @@ msp->active = 1; if (msp->watch_stereo) { - /* do that stereo/multilang handling */ - LOCK_I2C_BUS(msp->bus); - newstereo = msp->stereo; - switch (msp->mode) { - case MSP_MODE_FM_TERRA: - val = msp3400c_read(msp->bus, I2C_MSP3400C_DFP, 0x18); - dprintk("msp3400: stereo detect register: %d\n",val); - - if (val > 4096) { - newstereo = VIDEO_SOUND_STEREO; - } else if (val < -4096) { - newstereo = VIDEO_SOUND_LANG1; - } else { - newstereo = VIDEO_SOUND_MONO; - } - break; - case MSP_MODE_FM_NICAM1: - case MSP_MODE_FM_NICAM2: - val = msp3400c_read(msp->bus, I2C_MSP3400C_DEM, 0x23); - switch ((val & 0x1e) >> 1) { - case 0: - case 8: - newstereo = VIDEO_SOUND_STEREO; - break; - default: - newstereo = VIDEO_SOUND_MONO; - break; - } - break; - } - if (msp->stereo != newstereo) { - dprintk("msp3400: watch: stereo %d ==> %d\n", - msp->stereo,newstereo); - msp3400c_setstereo(msp,newstereo); - } - UNLOCK_I2C_BUS(msp->bus); - if (msp->watch_stereo) { - del_timer(&msp->wake_stereo); - msp->wake_stereo.expires = jiffies + 5*HZ; - add_timer(&msp->wake_stereo); - } - + watch_stereo(msp); msp->active = 0; continue; } @@ -599,7 +671,7 @@ restart: LOCK_I2C_BUS(msp->bus); msp3400c_setvolume(msp->bus, 0, 0); - msp3400c_setmode(msp, MSP_MODE_AM_DETECT); + 
msp3400c_setmode(msp, MSP_MODE_AM_DETECT /* +1 */ ); val1 = val2 = 0; max1 = max2 = -1; del_timer(&msp->wake_stereo); @@ -626,7 +698,7 @@ val1 = val, max1 = this; dprintk("msp3400: carrier1 val: %5d / %s\n", val,cd[this].name); } - + /* carrier detect pass #2 -- second (stereo) carrier */ switch (max1) { case 1: /* 5.5 */ @@ -669,12 +741,14 @@ /* B/G FM-stereo */ msp->second = carrier_detect_55[max2].cdo; msp3400c_setmode(msp, MSP_MODE_FM_TERRA); + msp->nicam_on = 0; msp3400c_setstereo(msp, VIDEO_SOUND_MONO); msp->watch_stereo = 1; } else if (max2 == 1 && msp->nicam) { /* B/G NICAM */ msp->second = carrier_detect_55[max2].cdo; msp3400c_setmode(msp, MSP_MODE_FM_NICAM1); + msp->nicam_on = 1; msp3400c_setcarrier(msp->bus, msp->second, msp->main); msp->watch_stereo = 1; } else { @@ -685,6 +759,7 @@ /* PAL I NICAM */ msp->second = MSP_CARRIER(6.552); msp3400c_setmode(msp, MSP_MODE_FM_NICAM2); + msp->nicam_on = 1; msp3400c_setcarrier(msp->bus, msp->second, msp->main); msp->watch_stereo = 1; break; @@ -693,12 +768,14 @@ /* D/K FM-stereo */ msp->second = carrier_detect_65[max2].cdo; msp3400c_setmode(msp, MSP_MODE_FM_TERRA); + msp->nicam_on = 0; msp3400c_setstereo(msp, VIDEO_SOUND_MONO); msp->watch_stereo = 1; } else if (max2 == 0 && msp->nicam) { /* D/K NICAM */ msp->second = carrier_detect_65[max2].cdo; msp3400c_setmode(msp, MSP_MODE_FM_NICAM1); + msp->nicam_on = 1; msp3400c_setcarrier(msp->bus, msp->second, msp->main); msp->watch_stereo = 1; } else { @@ -710,7 +787,10 @@ no_second: msp->second = carrier_detect_main[max1].cdo; msp3400c_setmode(msp, MSP_MODE_FM_TERRA); + msp->nicam_on = 0; msp3400c_setcarrier(msp->bus, msp->second, msp->main); + msp->stereo = VIDEO_SOUND_MONO; + msp3400c_setstereo(msp, VIDEO_SOUND_MONO); break; } @@ -720,7 +800,7 @@ if (msp->watch_stereo) { del_timer(&msp->wake_stereo); - msp->wake_stereo.expires = jiffies + 2*HZ; + msp->wake_stereo.expires = jiffies + 5*HZ; add_timer(&msp->wake_stereo); } @@ -740,7 +820,6 @@ return 0; } - #if 0 /* not 
finished yet */ static int msp3410d_thread(void *data) @@ -1047,11 +1126,6 @@ return -1; } - msp3400c_setmode(msp, MSP_MODE_FM_TERRA); - msp3400c_setvolume(msp->bus, msp->left, msp->right); - msp3400c_setbass(msp->bus, msp->bass); - msp3400c_settreble(msp->bus, msp->treble); - #if 0 /* this will turn on a 1kHz beep - might be useful for debugging... */ msp3400c_write(msp->bus,I2C_MSP3400C_DFP, 0x0014, 0x1040); diff -u --recursive --new-file v2.3.31/linux/drivers/char/synclink.c linux/drivers/char/synclink.c --- v2.3.31/linux/drivers/char/synclink.c Fri Oct 15 15:25:13 1999 +++ linux/drivers/char/synclink.c Sun Dec 12 22:55:54 1999 @@ -1,7 +1,7 @@ /* * linux/drivers/char/synclink.c * - * ==FILEDATE 19990901== + * ==FILEDATE 19991207== * * Device driver for Microgate SyncLink ISA and PCI * high speed multiprotocol serial adapters. @@ -925,7 +925,7 @@ #endif static char *driver_name = "SyncLink serial driver"; -static char *driver_version = "1.14"; +static char *driver_version = "1.15"; static struct tty_driver serial_driver, callout_driver; static int serial_refcount; @@ -6981,7 +6981,6 @@ spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); - spin_unlock_irqrestore(&info->irq_spinlock,flags); /* Verify the reset state of some registers. */ @@ -7015,7 +7014,6 @@ } } - spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); @@ -7035,7 +7033,6 @@ spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); - spin_unlock_irqrestore(&info->irq_spinlock,flags); /* * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 
@@ -7057,6 +7054,8 @@ usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED); usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + EndTime=100; while( EndTime-- && !info->irq_occurred ) { set_current_state(TASK_INTERRUPTIBLE); @@ -7359,7 +7358,9 @@ } } + spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset( info ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); /* restore current port options */ memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); diff -u --recursive --new-file v2.3.31/linux/drivers/i2o/i2o_block.c linux/drivers/i2o/i2o_block.c --- v2.3.31/linux/drivers/i2o/i2o_block.c Tue Nov 23 22:42:20 1999 +++ linux/drivers/i2o/i2o_block.c Sun Dec 12 22:58:00 1999 @@ -118,7 +118,7 @@ static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int); static void i2ob_end_request(struct request *); -static void i2ob_request(void); +static void i2ob_request(request_queue_t * q); /* * Dump messages. @@ -135,7 +135,6 @@ printk(KERN_INFO "\n"); } - /* * Get a message */ @@ -154,8 +153,8 @@ { struct i2o_controller *c = dev->controller; int tid = dev->tid; - u32 *msg; - u32 *mptr; + unsigned long msg; + unsigned long mptr; u64 offset; struct request *req = ireq->req; struct buffer_head *bh = req->bh; @@ -167,22 +166,22 @@ /* * Build the message based on the request. 
*/ - __raw_writel(i2ob_context|(unit<<8), &msg[2]); - __raw_writel(ireq->num, &msg[3]); - __raw_writel(req->nr_sectors << 9, &msg[5]); + __raw_writel(i2ob_context|(unit<<8), msg+8); + __raw_writel(ireq->num, msg+12); + __raw_writel(req->nr_sectors << 9, msg+20); /* This can be optimised later - just want to be sure its right for starters */ offset = ((u64)(req->sector+base)) << 9; - __raw_writel( offset & 0xFFFFFFFF, &msg[6]); - __raw_writel(offset>>32, &msg[7]); + __raw_writel( offset & 0xFFFFFFFF, msg+24); + __raw_writel(offset>>32, msg+28); mptr=msg+8; if(req->cmd == READ) { - __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, &msg[1]); + __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4); /* We don't yet do cache/readahead and other magic */ - __raw_writel(1<<16, &msg[4]); + __raw_writel(1<<16, msg+16); while(bh!=NULL) { /* @@ -191,31 +190,33 @@ * sucky to read. */ if(bh->b_reqnext) - __raw_writel(0x10000000|(bh->b_size), mptr++); + __raw_writel(0x10000000|(bh->b_size), mptr); else - __raw_writel(0xD0000000|(bh->b_size), mptr++); + __raw_writel(0xD0000000|(bh->b_size), mptr); - __raw_writel(virt_to_bus(bh->b_data), mptr++); + __raw_writel(virt_to_bus(bh->b_data), mptr+4); + mptr+=8; count -= bh->b_size; bh = bh->b_reqnext; } } else if(req->cmd == WRITE) { - __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, &msg[1]); - __raw_writel(1<<16, &msg[4]); + __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4); + __raw_writel(1<<16, msg+16); while(bh!=NULL) { if(bh->b_reqnext) - __raw_writel(0x14000000|(bh->b_size), mptr++); + __raw_writel(0x14000000|(bh->b_size), mptr); else - __raw_writel(0xD4000000|(bh->b_size), mptr++); + __raw_writel(0xD4000000|(bh->b_size), mptr); count -= bh->b_size; - __raw_writel(virt_to_bus(bh->b_data), mptr++); + __raw_writel(virt_to_bus(bh->b_data), mptr+4); + mptr+=8; bh = bh->b_reqnext; } } - __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8, &msg[0]); + __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | 
SGL_OFFSET_8, msg); if(req->current_nr_sectors > 8) printk("Gathered sectors %ld.\n", @@ -223,8 +224,7 @@ if(count != 0) { - printk("Request count botched by %d.\n", count); - msg[5] -= count; + printk(KERN_ERR "Request count botched by %d.\n", count); } i2o_post_message(c,m); @@ -399,7 +399,7 @@ */ atomic_dec(&queue_depth); - i2ob_request(); + i2ob_request(NULL); spin_unlock_irqrestore(&io_request_lock, flags); } @@ -437,7 +437,7 @@ /* * Restart any requests. */ - i2ob_request(); + i2ob_request(NULL); /* * Free the lock. @@ -453,7 +453,7 @@ * we use it. */ -static void i2ob_request(void) +static void i2ob_request(request_queue_t * q) { struct request *req; struct i2ob_request *ireq; @@ -527,7 +527,6 @@ } } - /* * SCSI-CAM for ioctl geometry mapping * Duplicated with SCSI - this should be moved into somewhere common @@ -1086,7 +1085,9 @@ blk_size[MAJOR_NR] = i2ob_sizes; max_sectors[MAJOR_NR] = i2ob_max_sectors; - blk_dev[MAJOR_NR].request_fn = i2ob_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), i2ob_request); + blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0); + for (i = 0; i < MAX_I2OB << 4; i++) { i2ob_dev[i].refcnt = 0; i2ob_dev[i].flags = 0; diff -u --recursive --new-file v2.3.31/linux/drivers/i2o/i2o_core.c linux/drivers/i2o/i2o_core.c --- v2.3.31/linux/drivers/i2o/i2o_core.c Tue Nov 23 22:42:20 1999 +++ linux/drivers/i2o/i2o_core.c Wed Dec 8 15:17:55 1999 @@ -1515,7 +1515,7 @@ { niop = iop->next; #ifdef DRIVERDEBUG - printk(KERN_INFO "Enableing iop%d\n", iop->unit); + printk(KERN_INFO "Enabling iop%d\n", iop->unit); #endif if(i2o_enable_controller(iop)) { @@ -2680,6 +2680,7 @@ EXPORT_SYMBOL(i2o_activate_controller); EXPORT_SYMBOL(i2o_online_controller); EXPORT_SYMBOL(i2o_get_class_name); +EXPORT_SYMBOL(i2o_status_get); EXPORT_SYMBOL(i2o_query_scalar); EXPORT_SYMBOL(i2o_set_scalar); diff -u --recursive --new-file v2.3.31/linux/drivers/net/pcmcia/ray_cs.c linux/drivers/net/pcmcia/ray_cs.c --- v2.3.31/linux/drivers/net/pcmcia/ray_cs.c Tue Nov 23 
22:42:20 1999 +++ linux/drivers/net/pcmcia/ray_cs.c Wed Dec 8 13:59:17 1999 @@ -312,8 +312,9 @@ static void cs_error(client_handle_t handle, int func, int ret) { error_info_t err = { func, ret }; - CardServices(ReportError, handle, &err); + pcmcia_report_error(handle, &err); } + /*============================================================================= ray_attach() creates an "instance" of the driver, allocating local data structures for one device. The device is registered @@ -409,7 +410,7 @@ init_timer(&local->timer); - ret = CardServices(RegisterClient, &link->handle, &client_reg); + ret = pcmcia_register_client(&link->handle, &client_reg); if (ret != 0) { printk("ray_cs ray_attach RegisterClient unhappy - detaching\n"); cs_error(link->handle, RegisterClient, ret); @@ -462,7 +463,7 @@ /* Break the link with Card Services */ if (link->handle) - CardServices(DeregisterClient, link->handle); + pcmcia_deregister_client(link->handle); /* Unlink device structure, free pieces */ *linkp = link->next; @@ -482,14 +483,14 @@ ethernet device available to the system. =============================================================================*/ #define CS_CHECK(fn, args...) 
\ -while ((last_ret=CardServices(last_fn=(fn),args))!=0) goto cs_failed +while ((last_ret=fn(args))!=0) goto cs_failed #define MAX_TUPLE_SIZE 128 static void ray_config(dev_link_t *link) { client_handle_t handle = link->handle; tuple_t tuple; cisparse_t parse; - int last_fn, last_ret; + int last_fn = 0, last_ret = 0; int i; u_char buf[MAX_TUPLE_SIZE]; win_req_t req; @@ -501,23 +502,23 @@ /* This reads the card's CONFIG tuple to find its configuration regs */ tuple.DesiredTuple = CISTPL_CONFIG; - CS_CHECK(GetFirstTuple, handle, &tuple); + CS_CHECK(pcmcia_get_first_tuple, handle, &tuple); tuple.TupleData = buf; tuple.TupleDataMax = MAX_TUPLE_SIZE; tuple.TupleOffset = 0; - CS_CHECK(GetTupleData, handle, &tuple); - CS_CHECK(ParseTuple, handle, &tuple, &parse); + CS_CHECK(pcmcia_get_tuple_data, handle, &tuple); + CS_CHECK(pcmcia_parse_tuple, handle, &tuple, &parse); link->conf.ConfigBase = parse.config.base; link->conf.Present = parse.config.rmask[0]; /* Determine card type and firmware version */ buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0; tuple.DesiredTuple = CISTPL_VERS_1; - CS_CHECK(GetFirstTuple, handle, &tuple); + CS_CHECK(pcmcia_get_first_tuple, handle, &tuple); tuple.TupleData = buf; tuple.TupleDataMax = MAX_TUPLE_SIZE; tuple.TupleOffset = 2; - CS_CHECK(GetTupleData, handle, &tuple); + CS_CHECK(pcmcia_get_tuple_data, handle, &tuple); for (i=0; ihandle, &link->irq); + CS_CHECK(pcmcia_request_irq, link->handle, &link->irq); dev->irq = link->irq.AssignedIRQ; /* This actually configures the PCMCIA socket -- setting up the I/O windows and the interrupt mapping. 
*/ - CS_CHECK(RequestConfiguration, link->handle, &link->conf); + CS_CHECK(pcmcia_request_configuration, link->handle, &link->conf); /*** Set up 32k window for shared memory (transmit and control) ************/ req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; @@ -543,9 +544,9 @@ req.Size = 0x8000; req.AccessSpeed = ray_mem_speed; link->win = (window_handle_t)link->handle; - CS_CHECK(RequestWindow, &link->win, &req); + CS_CHECK(pcmcia_request_window, &link->win, &req); mem.CardOffset = 0x0000; mem.Page = 0; - CS_CHECK(MapMemPage, link->win, &mem); + CS_CHECK(pcmcia_map_mem_page, link->win, &mem); local->sram = (UCHAR *)(ioremap(req.Base,req.Size)); /*** Set up 16k window for shared memory (receive buffer) ***************/ @@ -554,9 +555,9 @@ req.Size = 0x4000; req.AccessSpeed = ray_mem_speed; local->rmem_handle = (window_handle_t)link->handle; - CS_CHECK(RequestWindow, &local->rmem_handle, &req); + CS_CHECK(pcmcia_request_window, &local->rmem_handle, &req); mem.CardOffset = 0x8000; mem.Page = 0; - CS_CHECK(MapMemPage, local->rmem_handle, &mem); + CS_CHECK(pcmcia_map_mem_page, local->rmem_handle, &mem); local->rmem = (UCHAR *)(ioremap(req.Base,req.Size)); /*** Set up window for attribute memory ***********************************/ @@ -565,9 +566,9 @@ req.Size = 0x1000; req.AccessSpeed = ray_mem_speed; local->amem_handle = (window_handle_t)link->handle; - CS_CHECK(RequestWindow, &local->amem_handle, &req); + CS_CHECK(pcmcia_request_window, &local->amem_handle, &req); mem.CardOffset = 0x0000; mem.Page = 0; - CS_CHECK(MapMemPage, local->amem_handle, &mem); + CS_CHECK(pcmcia_map_mem_page, local->amem_handle, &mem); local->amem = (UCHAR *)(ioremap(req.Base,req.Size)); DEBUG(3,"ray_config sram=%p\n",local->sram); @@ -893,15 +894,15 @@ iounmap(local->rmem); iounmap(local->amem); /* Do bother checking to see if these succeed or not */ - i = CardServices(ReleaseWindow, link->win); + i = pcmcia_release_window(link->win); if ( i != CS_SUCCESS 
) DEBUG(0,"ReleaseWindow(link->win) ret = %x\n",i); - i = CardServices(ReleaseWindow, local->amem_handle); + i = pcmcia_release_window(local->amem_handle); if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->amem) ret = %x\n",i); - i = CardServices(ReleaseWindow, local->rmem_handle); + i = pcmcia_release_window(local->rmem_handle); if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->rmem) ret = %x\n",i); - i = CardServices(ReleaseConfiguration, link->handle); + i = pcmcia_release_configuration(link->handle); if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseConfiguration ret = %x\n",i); - i = CardServices(ReleaseIRQ, link->handle, &link->irq); + i = pcmcia_release_irq(link->handle, &link->irq); if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseIRQ ret = %x\n",i); link->state &= ~DEV_CONFIG; @@ -950,7 +951,7 @@ dev->tbusy = 1; dev->start = 0; } - CardServices(ReleaseConfiguration, link->handle); + pcmcia_release_configuration(link->handle); } break; case CS_EVENT_PM_RESUME: @@ -958,7 +959,7 @@ /* Fall through... 
*/ case CS_EVENT_CARD_RESET: if (link->state & DEV_CONFIG) { - CardServices(RequestConfiguration, link->handle, &link->conf); + pcmcia_request_configuration(link->handle, &link->conf); if (link->open) { ray_reset(dev); dev->tbusy = 0; @@ -2671,14 +2672,8 @@ static int __init init_ray_cs(void) { int rc; - servinfo_t serv; DEBUG(1, "%s\n", rcsid); - CardServices(GetCardServicesInfo, &serv); - if (serv.Revision != CS_RELEASE_CODE) { - printk(KERN_NOTICE "ray: Card Services release does not match!\n"); - return -1; - } rc = register_pcmcia_driver(&dev_info, &ray_attach, &ray_detach); DEBUG(1, "raylink init_module register_pcmcia_driver returns 0x%x\n",rc); #ifdef CONFIG_PROC_FS diff -u --recursive --new-file v2.3.31/linux/drivers/net/rclanmtl.c linux/drivers/net/rclanmtl.c --- v2.3.31/linux/drivers/net/rclanmtl.c Mon Apr 12 16:18:27 1999 +++ linux/drivers/net/rclanmtl.c Wed Dec 8 15:17:55 1999 @@ -741,7 +741,7 @@ } break; default: - printk("Unknown private I2O msg received: 0x%x\n", + printk("Unknown private I2O msg received: 0x%lx\n", p32[5]); break; } @@ -1216,7 +1216,7 @@ if (!timeout--) { kprintf("Timeout waiting for promiscuous mode from adapter\n"); - kprintf("0x%08.8ulx\n", p32[0]); + kprintf("0x%8.8lx\n", p32[0]); return RC_RTN_NO_LINK_SPEED; } } @@ -1337,7 +1337,7 @@ if (!timeout--) { kprintf("Timeout waiting for promiscuous mode from adapter\n"); - kprintf("0x%08.8ulx\n", p32[0]); + kprintf("0x%8.8lx\n", p32[0]); return RC_RTN_NO_LINK_SPEED; } } @@ -1421,7 +1421,7 @@ if (!timeout--) { kprintf("Timeout waiting for link speed from IOP\n"); - kprintf("0x%08.8ulx\n", p32[0]); + kprintf("0x%8.8lx\n", p32[0]); return RC_RTN_NO_LINK_SPEED; } } diff -u --recursive --new-file v2.3.31/linux/drivers/net/sb1000.c linux/drivers/net/sb1000.c --- v2.3.31/linux/drivers/net/sb1000.c Mon Oct 4 15:49:29 1999 +++ linux/drivers/net/sb1000.c Wed Dec 8 15:17:55 1999 @@ -56,8 +56,10 @@ #include #include #include +#include /* for SIOGCM/SIOSCM stuff */ + #include #ifdef SB1000_DEBUG 
@@ -93,8 +95,6 @@ static struct enet_statistics *sb1000_stats(struct net_device *dev); static int sb1000_close(struct net_device *dev); -/* Plug-n-Play routine */ -static inline unsigned char read_resource_data(void); /* SB1000 hardware routines to be used during open/configuration phases */ static inline void nicedelay(unsigned long usecs); @@ -138,160 +138,132 @@ static inline int sb1000_rx(struct net_device *dev); static inline void sb1000_error_dpc(struct net_device *dev); - -/* Plug-n-Play constants */ -static const int READ_DATA_PORT = 0x203; /* This port number may change!!! */ -static const int ADDRESS_PORT = 0x279; -static const int WRITE_DATA_PORT = 0xa79; - -/* Plug-n-Play read resource mechanism */ -static inline unsigned char -read_resource_data(void) { - /* poll */ - outb(0x05, ADDRESS_PORT); /* Select PnP status register. */ - while (!(inb(READ_DATA_PORT) & 0x1)) ; - /* read resource data */ - outb(0x04, ADDRESS_PORT); /* Select PnP resource data register. */ - return inb(READ_DATA_PORT); -} - /* probe for SB1000 using Plug-n-Play mechanism */ int sb1000_probe(struct net_device *dev) { unsigned short ioaddr[2], irq; - short i, csn; + struct pci_dev *idev=NULL; unsigned int serial_number; + + while(1) + { + /* + * Find the card + */ + + idev=isapnp_find_dev(NULL, ISAPNP_VENDOR('G','I','C'), + ISAPNP_FUNCTION(0x1000), idev); + + /* + * No card + */ + + if(idev==NULL) + return -ENODEV; + + /* + * Bring it online + */ + + idev->prepare(idev); + idev->activate(idev); + + /* + * Ports free ? 
+ */ + + if(!idev->resource[0].start || check_region(idev->resource[0].start, 16)) + continue; + if(!idev->resource[1].start || check_region(idev->resource[1].start, 16)) + continue; + + serial_number = idev->bus->serial; + + ioaddr[0]=idev->resource[0].start; + ioaddr[1]=idev->resource[1].start; + + irq = idev->irq; + + /* check I/O base and IRQ */ + if (dev->base_addr != 0 && dev->base_addr != ioaddr[0]) + continue; + if (dev->rmem_end != 0 && dev->rmem_end != ioaddr[1]) + continue; + if (dev->irq != 0 && dev->irq != irq) + continue; + + /* + * Ok set it up. + */ + + + dev->base_addr = ioaddr[0]; + /* rmem_end holds the second I/O address - fv */ + dev->rmem_end = ioaddr[1]; + dev->irq = irq; + + if (sb1000_debug > 0) + printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), " + "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr, + dev->rmem_end, serial_number, dev->irq); + + dev = init_etherdev(dev, 0); + + /* Make up a SB1000-specific-data structure. */ + dev->priv = kmalloc(sizeof(struct sb1000_private), GFP_KERNEL); + if (dev->priv == NULL) + return -ENOMEM; + memset(dev->priv, 0, sizeof(struct sb1000_private)); + + if (sb1000_debug > 0) + printk(KERN_NOTICE "%s", version); + + /* The SB1000-specific entries in the device structure. */ + dev->open = sb1000_open; + dev->do_ioctl = sb1000_dev_ioctl; + dev->hard_start_xmit = sb1000_start_xmit; + dev->stop = sb1000_close; + dev->get_stats = sb1000_stats; + + /* Fill in the generic fields of the device structure. 
*/ + dev->change_mtu = NULL; + dev->hard_header = NULL; + dev->rebuild_header = NULL; + dev->set_mac_address = NULL; + dev->header_cache_update= NULL; + + dev->type = ARPHRD_ETHER; + dev->hard_header_len = 0; + dev->mtu = 1500; + dev->addr_len = ETH_ALEN; + /* hardware address is 0:0:serial_number */ + dev->dev_addr[0] = 0; + dev->dev_addr[1] = 0; + dev->dev_addr[2] = serial_number >> 24 & 0xff; + dev->dev_addr[3] = serial_number >> 16 & 0xff; + dev->dev_addr[4] = serial_number >> 8 & 0xff; + dev->dev_addr[5] = serial_number >> 0 & 0xff; + dev->tx_queue_len = 0; + + /* New-style flags. */ + dev->flags = IFF_POINTOPOINT|IFF_NOARP; - const unsigned char initiation_key[] = { 0x00, 0x00, 0x6a, 0xb5, 0xda, - 0xed, 0xf6, 0xfb, 0x7d, 0xbe, 0xdf, 0x6f, 0x37, 0x1b, 0x0d, - 0x86, 0xc3, 0x61, 0xb0, 0x58, 0x2c, 0x16, 0x8b, 0x45, 0xa2, - 0xd1, 0xe8, 0x74, 0x3a, 0x9d, 0xce, 0xe7, 0x73, 0x39 }; - const unsigned char sb1000_vendor_ID[] = { - 0x1d, 0x23, 0x10, 0x00 }; /* "GIC1000" */ - - /* Reset the ISA PnP mechanism */ - outb(0x02, ADDRESS_PORT); /* Select PnP config control register. */ - outb(0x02, WRITE_DATA_PORT); /* Return to WaitForKey state. */ - - /* send initiation key */ - for (i = 0; i < sizeof(initiation_key) / sizeof(initiation_key[0]); i++) { - outb(initiation_key[i], ADDRESS_PORT); - } - - /* set card CSN into configuration mode */ - for (csn = 1; csn <= 255; csn++) { - outb(0x03, ADDRESS_PORT); /* Select PnP wake[CSN] register. */ - outb(csn, WRITE_DATA_PORT); /* Wake[CSN] */ - /* check card ID */ - for (i = 0; i < 4; i++) { - if (read_resource_data() != sb1000_vendor_ID[i]) break; - } - if (i == 4) break; - } - - /* SB1000 not found */ - if (csn > 255) { - /* return to WaitForKey state */ - outb(0x02, ADDRESS_PORT); /* Select PnP config control register. */ - outb(0x02, WRITE_DATA_PORT);/* Return to WaitForKey state. 
*/ - return -ENODEV; - } + /* Lock resources */ - /* found: get serial number and skip checksum */ - serial_number = 0; - for (i = 0; i < 4; i++) { - serial_number |= read_resource_data() << (8 * i); - } - read_resource_data(); - - /* get I/O port base address */ - outb(0x60, ADDRESS_PORT); /* Select PnP I/O port base address 0. */ - ioaddr[0] = inb(READ_DATA_PORT) << 8; - outb(0x61, ADDRESS_PORT); - ioaddr[0] |= inb(READ_DATA_PORT); - outb(0x62, ADDRESS_PORT); /* Select PnP I/O port base address 1. */ - ioaddr[1] = inb(READ_DATA_PORT) << 8; - outb(0x63, ADDRESS_PORT); - ioaddr[1] |= inb(READ_DATA_PORT); - - /* get IRQ */ - outb(0x70, ADDRESS_PORT); /* Select PnP IRQ level select 0. */ - irq = inb(READ_DATA_PORT); - - /* return to WaitForKey state */ - outb(0x02, ADDRESS_PORT); /* Select PnP config control register. */ - outb(0x02, WRITE_DATA_PORT); /* Return to WaitForKey state. */ + request_region(ioaddr[0], 16, dev->name); + request_region(ioaddr[1], 16, dev->name); - /* check I/O base and IRQ */ - if (dev->base_addr != 0 && dev->base_addr != ioaddr[0]) { - return -ENODEV; - } - if (dev->rmem_end != 0 && dev->rmem_end != ioaddr[1]) { - return -ENODEV; - } - if (dev->irq != 0 && dev->irq != irq) { - return -ENODEV; + return 0; } - - dev->base_addr = ioaddr[0]; - /* rmem_end holds the second I/O address - fv */ - dev->rmem_end = ioaddr[1]; - dev->irq = irq; - - if (sb1000_debug > 0) - printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), csn %d, " - "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr, - dev->rmem_end, csn, serial_number, dev->irq); - - dev = init_etherdev(dev, 0); - - /* Make up a SB1000-specific-data structure. */ - dev->priv = kmalloc(sizeof(struct sb1000_private), GFP_KERNEL); - if (dev->priv == NULL) - return -ENOMEM; - memset(dev->priv, 0, sizeof(struct sb1000_private)); - - if (sb1000_debug > 0) - printk(KERN_NOTICE "%s", version); - - /* The SB1000-specific entries in the device structure. 
*/ - dev->open = sb1000_open; - dev->do_ioctl = sb1000_dev_ioctl; - dev->hard_start_xmit = sb1000_start_xmit; - dev->stop = sb1000_close; - dev->get_stats = sb1000_stats; - - /* Fill in the generic fields of the device structure. */ - dev->change_mtu = NULL; - dev->hard_header = NULL; - dev->rebuild_header = NULL; - dev->set_mac_address = NULL; - dev->header_cache_update= NULL; - - dev->type = ARPHRD_ETHER; - dev->hard_header_len = 0; - dev->mtu = 1500; - dev->addr_len = ETH_ALEN; - /* hardware address is 0:0:serial_number */ - dev->dev_addr[0] = 0; - dev->dev_addr[1] = 0; - dev->dev_addr[2] = serial_number >> 24 & 0xff; - dev->dev_addr[3] = serial_number >> 16 & 0xff; - dev->dev_addr[4] = serial_number >> 8 & 0xff; - dev->dev_addr[5] = serial_number >> 0 & 0xff; - dev->tx_queue_len = 0; - - /* New-style flags. */ - dev->flags = IFF_POINTOPOINT|IFF_NOARP; - return 0; } /* * SB1000 hardware routines to be used during open/configuration phases */ + const int TimeOutJiffies = (int)(8.75 * HZ); static inline void nicedelay(unsigned long usecs) @@ -1279,6 +1251,8 @@ void cleanup_module(void) { unregister_netdev(&dev_sb1000); + release_region(&dev_sb1000.base_addr, 16); + release_region(&dev_sb1000.rmem_end, 16); kfree_s(dev_sb1000.priv, sizeof(struct sb1000_private)); dev_sb1000.priv = NULL; } diff -u --recursive --new-file v2.3.31/linux/drivers/net/tlan.c linux/drivers/net/tlan.c --- v2.3.31/linux/drivers/net/tlan.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/net/tlan.c Sun Dec 12 22:55:54 1999 @@ -31,6 +31,8 @@ * new PCI BIOS interface. * Alan Cox : Fixed the out of memory * handling. + * + * Torben Mathiasen New Maintainer! 
* ********************************************************************/ @@ -43,7 +45,7 @@ #include #include #include - +#include typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 ); @@ -234,13 +236,15 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) { TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv; + unsigned long flags; - cli(); + spin_lock_irqsave(&priv->lock, flags); if ( priv->timer.function != NULL ) { + spin_unlock_irqrestore(&priv->lock, flags); return; } priv->timer.function = &TLan_Timer; - sti(); + spin_unlock_irqrestore(&priv->lock, flags); priv->timer.data = (unsigned long) dev; priv->timer.expires = jiffies + ticks; @@ -336,6 +340,8 @@ priv->speed = speed; priv->sa_int = sa_int; priv->debug = debug; + + spin_lock_init(&priv->lock); ether_setup( dev ); @@ -464,9 +470,6 @@ priv = (TLanPrivateInfo *) dev->priv; - dev->name = priv->devName; - strcpy( priv->devName, " " ); - dev = init_etherdev( dev, sizeof(TLanPrivateInfo) ); dev->base_addr = io_base; @@ -485,7 +488,7 @@ } priv->sa_int = dev->mem_start & 0x02; priv->debug = dev->mem_end; - + spin_lock_init(&priv->lock); printk("TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n", TLanVersionMajor, @@ -770,6 +773,7 @@ TLanList *tail_list; u8 *tail_buffer; int pad; + unsigned long flags; if ( ! priv->phyOnline ) { TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s PHY is not ready\n", dev->name ); @@ -810,7 +814,7 @@ tail_list->buffer[1].address = 0; } - cli(); + spin_lock_irqsave(&priv->lock, flags); tail_list->cStat = TLAN_CSTAT_READY; if ( ! 
priv->txInProgress ) { priv->txInProgress = 1; @@ -826,7 +830,7 @@ ( priv->txList + ( priv->txTail - 1 ) )->forward = virt_to_bus( tail_list ); } } - sti(); + spin_unlock_irqrestore(&priv->lock, flags); CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); @@ -870,10 +874,12 @@ u32 host_cmd; u16 host_int; int type; + TLanPrivateInfo *priv; dev = (struct net_device *) dev_id; + priv = (TLanPrivateInfo *) dev->priv; - cli(); + spin_lock(&priv->lock); if ( dev->interrupt ) { printk( "TLAN: Re-entering interrupt handler for %s: %ld.\n" , dev->name, dev->interrupt ); } @@ -892,7 +898,7 @@ } dev->interrupt--; - sti(); + spin_unlock(&priv->lock); } /* TLan_HandleInterrupts */ @@ -1558,6 +1564,7 @@ struct net_device *dev = (struct net_device *) data; TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv; u32 elapsed; + unsigned long flags; priv->timer.function = NULL; @@ -1581,7 +1588,7 @@ TLan_FinishReset( dev ); break; case TLAN_TIMER_ACTIVITY: - cli(); + spin_lock_irqsave(&priv->lock, flags); if ( priv->timer.function == NULL ) { elapsed = jiffies - priv->timerSetAt; if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { @@ -1589,11 +1596,12 @@ } else { priv->timer.function = &TLan_Timer; priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY; - sti(); + spin_unlock_irqrestore(&priv->lock, flags); add_timer( &priv->timer ); + break; } } - sti(); + spin_unlock_irqrestore(&priv->lock, flags); break; default: break; @@ -2435,16 +2443,19 @@ { u8 nack; u16 sio, tmp; - u32 i; + u32 i; int err; int minten; + TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv; + int irq; + unsigned long flags; err = FALSE; outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; if ( dev->interrupt == 0 ) - cli(); + spin_lock_irqsave(&priv->lock, flags); dev->interrupt++; TLan_MiiSync(dev->base_addr); @@ -2494,7 +2505,7 @@ dev->interrupt--; if ( dev->interrupt == 0 ) - sti(); + spin_unlock_irqrestore(&priv->lock, flags); return err; @@ -2606,12 +2617,14 @@ { 
u16 sio; int minten; + unsigned long flags; + TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv; outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; if ( dev->interrupt == 0 ) - cli(); + spin_lock_irqsave(&priv->lock, flags); dev->interrupt++; TLan_MiiSync( dev->base_addr ); @@ -2636,7 +2649,7 @@ dev->interrupt--; if ( dev->interrupt == 0 ) - sti(); + spin_unlock_irqrestore(&priv->lock, flags); } /* TLan_MiiWriteReg */ @@ -2834,29 +2847,41 @@ int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) { int err; + TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv; + unsigned long flags; + int ret=0; if ( dev->interrupt == 0 ) - cli(); + spin_lock_irqsave(&priv->lock, flags); dev->interrupt++; TLan_EeSendStart( dev->base_addr ); err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); if (err) - return 1; + { + ret=1; + goto fail; + } err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); if (err) - return 2; + { + ret=2; + goto fail; + } TLan_EeSendStart( dev->base_addr ); err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); if (err) - return 3; + { + ret=3; + goto fail; + } TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); - +fail: dev->interrupt--; if ( dev->interrupt == 0 ) - sti(); + spin_unlock_irqrestore(&priv->lock, flags); - return 0; + return ret; } /* TLan_EeReadByte */ diff -u --recursive --new-file v2.3.31/linux/drivers/net/tlan.h linux/drivers/net/tlan.h --- v2.3.31/linux/drivers/net/tlan.h Wed Aug 18 11:36:45 1999 +++ linux/drivers/net/tlan.h Sat Dec 11 07:39:10 1999 @@ -14,6 +14,10 @@ * ** This file is best viewed/edited with tabstop=4, colums>=132 * + * + * Dec 10, 1999 Torben Mathiasen + * New Maintainer + * ********************************************************************/ @@ -187,6 +191,7 @@ u8 tlanRev; u8 tlanFullDuplex; char devName[8]; + spinlock_t lock; } TLanPrivateInfo; diff -u --recursive --new-file 
v2.3.31/linux/drivers/net/tokenring/ibmtr.c linux/drivers/net/tokenring/ibmtr.c --- v2.3.31/linux/drivers/net/tokenring/ibmtr.c Mon Oct 11 15:38:15 1999 +++ linux/drivers/net/tokenring/ibmtr.c Sun Dec 12 22:55:54 1999 @@ -238,7 +238,7 @@ { short i, j; for (i=0, j=0; i<24; i++, j+=stride) - printk("%1x", ((int)readb(pcid + j)) & 0x0f); + printk("%1x", ((int)isa_readb(pcid + j)) & 0x0f); printk("\n"); } @@ -267,14 +267,8 @@ */ if (ibmtr_probe1(dev, base_addr)) - { -#ifndef MODULE -#ifndef PCMCIA - tr_freedev(dev); -#endif -#endif return -ENODEV; - } else + else return 0; } else if (base_addr != 0) /* Don't probe at all. */ @@ -285,13 +279,7 @@ int ioaddr = ibmtr_portlist[i]; if (check_region(ioaddr, IBMTR_IO_EXTENT)) continue; - if (ibmtr_probe1(dev, ioaddr)) { -#ifndef MODULE -#ifndef PCMCIA - tr_freedev(dev); -#endif -#endif - } else + if (!ibmtr_probe1(dev, ioaddr)) return 0; } @@ -351,7 +339,7 @@ * Suboptimize knowing first byte different */ - ctemp = readb(cd_chanid) & 0x0f; + ctemp = isa_readb(cd_chanid) & 0x0f; if (ctemp != *tchanid) { /* NOT ISA card, try MCA */ tchanid=mcchannelid; cardpresent=TR_MCA; @@ -366,7 +354,7 @@ */ for (i=2,j=1; i<=46; i=i+2,j++) { - if ((readb(cd_chanid+i) & 0x0f) != tchanid[j]) { + if ((isa_readb(cd_chanid+i) & 0x0f) != tchanid[j]) { cardpresent=NOTOK; /* match failed, not TR card */ break; } @@ -378,7 +366,7 @@ * as it has different IRQ settings */ - if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio)==0x0e)) + if (cardpresent == TR_ISA && (isa_readb(AIPFID + t_mmio)==0x0e)) cardpresent=TR_ISAPNP; if (cardpresent == NOTOK) { /* "channel_id" did not match, report */ @@ -461,14 +449,14 @@ if (intr==3) irq=11; timeout = jiffies + TR_SPIN_INTERVAL; - while(!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)) + while(!isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)) if (time_after(jiffies, timeout)) { DPRINTK("Hardware timeout during initialization.\n"); kfree_s(ti, sizeof(struct tok_info)); return -ENODEV; } - 
ti->sram=((__u32)readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12); + ti->sram=((__u32)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12); ti->global_int_enable=PIOaddr+ADAPTINTREL; ti->adapter_int_enable=PIOaddr+ADAPTINTREL; break; @@ -492,7 +480,7 @@ for (i=0; i<0x18; i=i+2) { /* technical reference states to do this */ - temp = readb(ti->mmio + AIP + i) & 0x0f; + temp = isa_readb(ti->mmio + AIP + i) & 0x0f; #if !TR_NEWFORMAT printk("%1X",ti->hw_address[j]=temp); #else @@ -507,13 +495,13 @@ #endif /* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,...*/ - ti->adapter_type = readb(ti->mmio + AIPADAPTYPE); + ti->adapter_type = isa_readb(ti->mmio + AIPADAPTYPE); /* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */ - ti->data_rate = readb(ti->mmio + AIPDATARATE); + ti->data_rate = isa_readb(ti->mmio + AIPDATARATE); /* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */ - ti->token_release = readb(ti->mmio + AIPEARLYTOKEN); + ti->token_release = isa_readb(ti->mmio + AIPEARLYTOKEN); /* How much shared RAM is on adapter ? */ #ifdef PCMCIA @@ -524,10 +512,10 @@ #endif /* We need to set or do a bunch of work here based on previous results.. */ /* Support paging? 
What sizes?: F=no, E=16k, D=32k, C=16 & 32k */ - ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE); + ti->shared_ram_paging = isa_readb(ti->mmio + AIPSHRAMPAGE); /* Available DHB 4Mb size: F=2048, E=4096, D=4464 */ - switch (readb(ti->mmio + AIP4MBDHB)) { + switch (isa_readb(ti->mmio + AIP4MBDHB)) { case 0xe : ti->dhb_size4mb = 4096; break; @@ -540,7 +528,7 @@ } /* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */ - switch (readb(ti->mmio + AIP16MBDHB)) { + switch (isa_readb(ti->mmio + AIP16MBDHB)) { case 0xe : ti->dhb_size16mb = 4096; break; @@ -576,7 +564,7 @@ /* * determine how much of total RAM is mapped into PC space */ - ti->mapped_ram_size=1<<((((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4); + ti->mapped_ram_size=1<<((((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4); ti->page_mask=0; if (ti->shared_ram_paging == 0xf) { /* No paging in adapter */ ti->mapped_ram_size = ti->avail_shared_ram; @@ -635,7 +623,7 @@ static __u32 ram_bndry_mask[]={0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000}; __u32 new_base, rrr_32, chk_base, rbm; - rrr_32 = ((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003; + rrr_32 = ((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003; rbm = ram_bndry_mask[rrr_32]; new_base = (ibmtr_mem_base + (~rbm)) & rbm; /* up to boundary */ chk_base = new_base + (ti->mapped_ram_size<<9); @@ -765,11 +753,11 @@ 'B' - 64KB less 512 bytes at top (WARNING ... 
must zero top bytes in INIT */ - avail_sram_code=0xf-readb(adapt_info->mmio + AIPAVAILSHRAM); + avail_sram_code=0xf-isa_readb(adapt_info->mmio + AIPAVAILSHRAM); if (avail_sram_code) return size_code[avail_sram_code]; else /* for code 'F', must compute size from RRR(3,2) bits */ - return 1<<((readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4); + return 1<<((isa_readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4); } static int __init trdev_init(struct net_device *dev) @@ -816,20 +804,20 @@ } SET_PAGE(ti->srb); for (i=0; isrb+i); + isa_writeb(0, ti->srb+i); - writeb(DIR_SET_FUNC_ADDR, + isa_writeb(DIR_SET_FUNC_ADDR, ti->srb + offsetof(struct srb_set_funct_addr, command)); DPRINTK("Setting functional address: "); for (i=0; i<4; i++) { - writeb(address[i], + isa_writeb(address[i], ti->srb + offsetof(struct srb_set_funct_addr, funct_address)+i); printk("%02X ", address[i]); } - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); printk("\n"); } @@ -838,7 +826,7 @@ struct tok_info *ti=(struct tok_info *)dev->priv; /* init the spinlock */ - ti->lock = (spinlock_t) SPIN_LOCK_UNLOCKED; + spin_lock_init(&ti->lock); if (ti->open_status==CLOSED) tok_init_card(dev); @@ -862,17 +850,17 @@ struct tok_info *ti=(struct tok_info *) dev->priv; - writeb(DIR_CLOSE_ADAPTER, + isa_writeb(DIR_CLOSE_ADAPTER, ti->srb + offsetof(struct srb_close_adapter, command)); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); ti->open_status=CLOSED; sleep_on(&ti->wait_for_tok_int); - if (readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))) + if (isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))) DPRINTK("close adapter failed: %02X\n", - (int)readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))); + (int)isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))); dev->start = 0; 
#ifdef PCMCIA @@ -899,7 +887,7 @@ /* Disable interrupts till processing is finished */ dev->interrupt=1; - writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + isa_writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); /* Reset interrupt for ISA boards */ if (ti->adapter_int_enable) @@ -916,7 +904,7 @@ the extra levels of logic and call depth for the original solution. */ - status=readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD); + status=isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD); #ifdef PCMCIA /* Check if the PCMCIA card was pulled. */ if (status == 0xFF) @@ -928,7 +916,7 @@ } /* Check ISRP EVEN too. */ - if ( readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF) + if ( isa_readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF) { DPRINTK("PCMCIA card removed.\n"); spin_unlock(&(ti->lock)); @@ -943,26 +931,26 @@ int i; __u32 check_reason; - check_reason=ti->mmio + ntohs(readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN)); + check_reason=ti->mmio + ntohs(isa_readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN)); DPRINTK("Adapter check interrupt\n"); DPRINTK("8 reason bytes follow: "); for(i=0; i<8; i++, check_reason++) - printk("%02X ", (int)readb(check_reason)); + printk("%02X ", (int)isa_readb(check_reason)); printk("\n"); - writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); dev->interrupt=0; - } else if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) + } else if (isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) & (TCR_INT | ERR_INT | ACCESS_INT)) { DPRINTK("adapter error: ISRP_EVEN : %02x\n", - (int)readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)); - writeb(~(TCR_INT | ERR_INT | ACCESS_INT), + (int)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)); + 
isa_writeb(~(TCR_INT | ERR_INT | ACCESS_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); dev->interrupt=0; } else if (status @@ -971,12 +959,12 @@ if (status & SRB_RESP_INT) { /* SRB response */ - switch(readb(ti->srb)) { /* SRB command check */ + switch(isa_readb(ti->srb)) { /* SRB command check */ case XMIT_DIR_FRAME: { unsigned char xmit_ret_code; - xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code)); + xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code)); if (xmit_ret_code != 0xff) { DPRINTK("error on xmit_dir_frame request: %02X\n", xmit_ret_code); @@ -993,7 +981,7 @@ case XMIT_UI_FRAME: { unsigned char xmit_ret_code; - xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code)); + xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code)); if (xmit_ret_code != 0xff) { DPRINTK("error on xmit_ui_frame request: %02X\n", xmit_ret_code); @@ -1011,14 +999,14 @@ unsigned char open_ret_code; __u16 open_error_code; - ti->srb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr))); - ti->ssb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr))); - ti->arb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr))); - ti->asb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr))); + ti->srb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr))); + ti->ssb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr))); + ti->arb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr))); + ti->asb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr))); ti->current_skb=NULL; - open_ret_code = readb(ti->init_srb +offsetof(struct srb_open_response, ret_code)); - 
open_error_code = ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, error_code))); + open_ret_code = isa_readb(ti->init_srb +offsetof(struct srb_open_response, ret_code)); + open_error_code = ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, error_code))); if (open_ret_code==7) { @@ -1049,9 +1037,9 @@ #else DPRINTK("Adapter initialized and opened.\n"); #endif - writeb(~(SRB_RESP_INT), + isa_writeb(~(SRB_RESP_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(~(CMD_IN_SRB), + isa_writeb(~(CMD_IN_SRB), ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); open_sap(EXTENDED_SAP,dev); @@ -1073,13 +1061,13 @@ break; case DLC_OPEN_SAP: - if (readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) { + if (isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) { DPRINTK("open_sap failed: ret_code = %02X,retrying\n", - (int)readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))); + (int)isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))); ibmtr_reset_timer(&(ti->tr_timer), dev); } else { ti->exsap_station_id= - readw(ti->srb+offsetof(struct dlc_open_sap, station_id)); + isa_readw(ti->srb+offsetof(struct dlc_open_sap, station_id)); ti->open_status=SUCCESS; /* TR adapter is now available */ wake_up(&ti->wait_for_reset); } @@ -1090,16 +1078,16 @@ case DIR_SET_GRP_ADDR: case DIR_SET_FUNC_ADDR: case DLC_CLOSE_SAP: - if (readb(ti->srb+offsetof(struct srb_interrupt, ret_code))) + if (isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code))) DPRINTK("error on %02X: %02X\n", - (int)readb(ti->srb+offsetof(struct srb_interrupt, command)), - (int)readb(ti->srb+offsetof(struct srb_interrupt, ret_code))); + (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, command)), + (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code))); break; case DIR_READ_LOG: - if (readb(ti->srb+offsetof(struct srb_read_log, ret_code))) + if (isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code))) DPRINTK("error on dir_read_log: %02X\n", - 
(int)readb(ti->srb+offsetof(struct srb_read_log, ret_code))); + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code))); else if (IBMTR_DEBUG_MESSAGES) { DPRINTK( @@ -1107,24 +1095,24 @@ "A/C errors %02X, Abort delimiters %02X, Lost frames %02X\n" "Receive congestion count %02X, Frame copied errors %02X\n" "Frequency errors %02X, Token errors %02X\n", - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, line_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, internal_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, burst_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)), + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, abort_delimiters)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, lost_frames)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, recv_congest_count)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, frame_copied_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, frequency_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, token_errors))); } dev->tbusy=0; @@ -1132,19 +1120,19 @@ default: DPRINTK("Unknown command %02X encountered\n", - (int)readb(ti->srb)); + (int)isa_readb(ti->srb)); } /* SRB command check */ - writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); - writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + 
ISRA_ODD); + isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); skip_reset: } /* SRB response */ if (status & ASB_FREE_INT) { /* ASB response */ - switch(readb(ti->asb)) { /* ASB command check */ + switch(isa_readb(ti->asb)) { /* ASB command check */ case REC_DATA: case XMIT_UI_FRAME: @@ -1153,25 +1141,25 @@ default: DPRINTK("unknown command in asb %02X\n", - (int)readb(ti->asb)); + (int)isa_readb(ti->asb)); } /* ASB command check */ - if (readb(ti->asb+2)!=0xff) /* checks ret_code */ + if (isa_readb(ti->asb+2)!=0xff) /* checks ret_code */ DPRINTK("ASB error %02X in cmd %02X\n", - (int)readb(ti->asb+2),(int)readb(ti->asb)); - writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + (int)isa_readb(ti->asb+2),(int)isa_readb(ti->asb)); + isa_writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); } /* ASB response */ if (status & ARB_CMD_INT) { /* ARB response */ - switch (readb(ti->arb)) { /* ARB command check */ + switch (isa_readb(ti->arb)) { /* ARB command check */ case DLC_STATUS: DPRINTK("DLC_STATUS new status: %02X on station %02X\n", - ntohs(readw(ti->arb + offsetof(struct arb_dlc_status, status))), - ntohs(readw(ti->arb + ntohs(isa_readw(ti->arb + offsetof(struct arb_dlc_status, status))), + ntohs(isa_readw(ti->arb +offsetof(struct arb_dlc_status, station_id)))); break; @@ -1182,7 +1170,7 @@ case RING_STAT_CHANGE: { unsigned short ring_status; - ring_status=ntohs(readw(ti->arb + ring_status=ntohs(isa_readw(ti->arb +offsetof(struct arb_ring_stat_change, ring_status))); if (ring_status & (SIGNAL_LOSS | LOBE_FAULT)) { @@ -1209,46 +1197,46 @@ default: DPRINTK("Unknown command %02X in arb\n", - (int)readb(ti->arb)); + (int)isa_readb(ti->arb)); break; } /* ARB command check */ - writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(ARB_FREE, ti->mmio + 
ACA_OFFSET + ACA_SET + ISRA_ODD); } /* ARB response */ if (status & SSB_RESP_INT) { /* SSB response */ unsigned char retcode; - switch (readb(ti->ssb)) { /* SSB command check */ + switch (isa_readb(ti->ssb)) { /* SSB command check */ case XMIT_DIR_FRAME: case XMIT_UI_FRAME: - retcode = readb(ti->ssb+2); + retcode = isa_readb(ti->ssb+2); if (retcode && (retcode != 0x22)) /* checks ret_code */ DPRINTK("xmit ret_code: %02X xmit error code: %02X\n", - (int)retcode, (int)readb(ti->ssb+6)); + (int)retcode, (int)isa_readb(ti->ssb+6)); else ti->tr_stats.tx_packets++; break; case XMIT_XID_CMD: - DPRINTK("xmit xid ret_code: %02X\n", (int)readb(ti->ssb+2)); + DPRINTK("xmit xid ret_code: %02X\n", (int)isa_readb(ti->ssb+2)); default: - DPRINTK("Unknown command %02X in ssb\n", (int)readb(ti->ssb)); + DPRINTK("Unknown command %02X in ssb\n", (int)isa_readb(ti->ssb)); } /* SSB command check */ - writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } /* SSB response */ } /* SRB, ARB, ASB or SSB response */ dev->interrupt=0; - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); break; case FIRST_INT: @@ -1278,12 +1266,12 @@ /* we assign the shared-ram address for ISA devices */ if(!ti->sram) { - writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN); + isa_writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN); ti->sram=((__u32)ti->sram_base << 12); } ti->init_srb=ti->sram - +ntohs((unsigned short)readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN)); - SET_PAGE(ntohs((unsigned short)readw(ti->mmio+ACA_OFFSET + WRBR_EVEN))); + +ntohs((unsigned short)isa_readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN)); + SET_PAGE(ntohs((unsigned short)isa_readw(ti->mmio+ACA_OFFSET + WRBR_EVEN))); 
dev->mem_start = ti->sram; dev->mem_end = ti->sram + (ti->mapped_ram_size<<9) - 1; @@ -1292,12 +1280,12 @@ { int i; DPRINTK("init_srb(%p):", ti->init_srb); - for (i=0;i<17;i++) printk("%02X ", (int)readb(ti->init_srb+i)); + for (i=0;i<17;i++) printk("%02X ", (int)isa_readb(ti->init_srb+i)); printk("\n"); } #endif - hw_encoded_addr = readw(ti->init_srb + hw_encoded_addr = isa_readw(ti->init_srb + offsetof(struct srb_init_response, encoded_address)); #if !TR_NEWFORMAT @@ -1307,7 +1295,7 @@ #endif encoded_addr=(ti->sram + ntohs(hw_encoded_addr)); - ti->ring_speed = readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4; + ti->ring_speed = isa_readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4; #if !TR_NEWFORMAT DPRINTK("encoded addr (%04X,%04X,%08X): ", hw_encoded_addr, ntohs(hw_encoded_addr), encoded_addr); @@ -1316,12 +1304,12 @@ ti->ring_speed, ti->sram); #endif - ti->auto_ringspeedsave=readb(ti->init_srb + ti->auto_ringspeedsave=isa_readb(ti->init_srb +offsetof(struct srb_init_response, init_status_2)) & 0x4 ? TRUE : FALSE; #if !TR_NEWFORMAT for(i=0;idev_addr[i]=readb(encoded_addr + i); + dev->dev_addr[i]=isa_readb(encoded_addr + i); printk("%02X%s", dev->dev_addr[i], (i==TR_ALEN-1) ? 
"" : ":" ); } printk("\n"); @@ -1346,10 +1334,10 @@ #ifdef ENABLE_PAGING if(ti->page_mask) - writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); + isa_writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); #endif - writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + isa_writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); #if !TR_NEWFORMAT DPRINTK("resetting card\n"); @@ -1364,7 +1352,7 @@ #endif ti->open_status=IN_PROGRESS; - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); return 0; } @@ -1375,18 +1363,18 @@ SET_PAGE(ti->srb); for (i=0; isrb+i); + isa_writeb(0, ti->srb+i); - writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command)); - writew(htons(MAX_I_FIELD), + isa_writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command)); + isa_writew(htons(MAX_I_FIELD), ti->srb + offsetof(struct dlc_open_sap, max_i_field)); - writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, + isa_writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb + offsetof(struct dlc_open_sap, sap_options)); - writeb(SAP_OPEN_STATION_CNT, + isa_writeb(SAP_OPEN_STATION_CNT, ti->srb + offsetof(struct dlc_open_sap, station_count)); - writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value)); + isa_writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value)); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } @@ -1403,42 +1391,42 @@ DPRINTK("now opening the board...\n"); #endif - writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); + isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); for (i=0; iinit_srb+i); + isa_writeb(0, ti->init_srb+i); - 
writeb(DIR_OPEN_ADAPTER, + isa_writeb(DIR_OPEN_ADAPTER, ti->init_srb + offsetof(struct dir_open_adapter, command)); - writew(htons(OPEN_PASS_BCON_MAC), + isa_writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + offsetof(struct dir_open_adapter, open_options)); if (ti->ring_speed == 16) { - writew(htons(ti->dhb_size16mb), + isa_writew(htons(ti->dhb_size16mb), ti->init_srb + offsetof(struct dir_open_adapter, dhb_length)); - writew(htons(ti->rbuf_cnt16), + isa_writew(htons(ti->rbuf_cnt16), ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf)); - writew(htons(ti->rbuf_len16), + isa_writew(htons(ti->rbuf_len16), ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len)); } else { - writew(htons(ti->dhb_size4mb), + isa_writew(htons(ti->dhb_size4mb), ti->init_srb + offsetof(struct dir_open_adapter, dhb_length)); - writew(htons(ti->rbuf_cnt4), + isa_writew(htons(ti->rbuf_cnt4), ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf)); - writew(htons(ti->rbuf_len4), + isa_writew(htons(ti->rbuf_len4), ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len)); } - writeb(NUM_DHB, /* always 2 */ + isa_writeb(NUM_DHB, /* always 2 */ ti->init_srb + offsetof(struct dir_open_adapter, num_dhb)); - writeb(DLC_MAX_SAP, + isa_writeb(DLC_MAX_SAP, ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sap)); - writeb(DLC_MAX_STA, + isa_writeb(DLC_MAX_STA, ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sta)); ti->srb=ti->init_srb; /* We use this one in the interrupt handler */ - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } @@ -1452,7 +1440,7 @@ int i; struct trllc *llc; - if (readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF) + if (isa_readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF) DPRINTK("ASB not 
free !!!\n"); /* in providing the transmit interrupts, @@ -1461,7 +1449,7 @@ to stuff with data. Here we compute the effective address where we will place data.*/ dhb=ti->sram - +ntohs(readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address))); + +ntohs(isa_readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address))); /* Figure out the size of the 802.5 header */ if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */ @@ -1472,28 +1460,28 @@ llc = (struct trllc *)(ti->current_skb->data + hdr_len); - xmit_command = readb(ti->srb + offsetof(struct srb_xmit, command)); + xmit_command = isa_readb(ti->srb + offsetof(struct srb_xmit, command)); - writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command)); - writew(readb(ti->srb + offsetof(struct srb_xmit, station_id)), + isa_writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command)); + isa_writew(isa_readb(ti->srb + offsetof(struct srb_xmit, station_id)), ti->asb + offsetof(struct asb_xmit_resp, station_id)); - writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value)); - writeb(readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)), + isa_writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value)); + isa_writeb(isa_readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)), ti->asb + offsetof(struct asb_xmit_resp, cmd_corr)); - writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code)); + isa_writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code)); if ((xmit_command==XMIT_XID_CMD) || (xmit_command==XMIT_TEST_CMD)) { - writew(htons(0x11), + isa_writew(htons(0x11), ti->asb + offsetof(struct asb_xmit_resp, frame_length)); - writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); - writeb(AC, dhb); - writeb(LLC_FRAME, dhb+1); + isa_writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); + isa_writeb(AC, dhb); + isa_writeb(LLC_FRAME, dhb+1); - for (i=0; immio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + 
ISRA_ODD); return; } @@ -1502,13 +1490,13 @@ * the token ring packet is copied from sk_buff to the adapter * buffer identified in the command data received with the interrupt. */ - writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); - writew(htons(ti->current_skb->len), + isa_writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); + isa_writew(htons(ti->current_skb->len), ti->asb + offsetof(struct asb_xmit_resp, frame_length)); - memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len); + isa_memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); ti->tr_stats.tx_bytes+=ti->current_skb->len; dev->tbusy=0; dev_kfree_skb(ti->current_skb); @@ -1531,19 +1519,19 @@ struct iphdr *iph; rbuffer=(ti->sram - +ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2; + +ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2; - if(readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF) + if(isa_readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF) DPRINTK("ASB not free !!!\n"); - writeb(REC_DATA, + isa_writeb(REC_DATA, ti->asb + offsetof(struct asb_rec, command)); - writew(readw(ti->arb + offsetof(struct arb_rec_req, station_id)), + isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, station_id)), ti->asb + offsetof(struct asb_rec, station_id)); - writew(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)), + isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)), ti->asb + offsetof(struct asb_rec, rec_buf_addr)); - lan_hdr_len=readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len)); + lan_hdr_len=isa_readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len)); hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr); llc=(rbuffer + offsetof(struct rec_buf, data) + lan_hdr_len); @@ -1552,28 +1540,28 @@ 
DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n", (unsigned int)offsetof(struct rec_buf,data), (unsigned int)lan_hdr_len); DPRINTK("llc: %08X rec_buf_addr: %04X ti->sram: %p\n", llc, - ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))), + ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))), ti->sram); DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, " "ethertype: %04X\n", - (int)readb(llc + offsetof(struct trllc, dsap)), - (int)readb(llc + offsetof(struct trllc, ssap)), - (int)readb(llc + offsetof(struct trllc, llc)), - (int)readb(llc + offsetof(struct trllc, protid)), - (int)readb(llc + offsetof(struct trllc, protid)+1), - (int)readb(llc + offsetof(struct trllc, protid)+2), - (int)readw(llc + offsetof(struct trllc, ethertype))); + (int)isa_readb(llc + offsetof(struct trllc, dsap)), + (int)isa_readb(llc + offsetof(struct trllc, ssap)), + (int)isa_readb(llc + offsetof(struct trllc, llc)), + (int)isa_readb(llc + offsetof(struct trllc, protid)), + (int)isa_readb(llc + offsetof(struct trllc, protid)+1), + (int)isa_readb(llc + offsetof(struct trllc, protid)+2), + (int)isa_readw(llc + offsetof(struct trllc, ethertype))); #endif - if (readb(llc + offsetof(struct trllc, llc))!=UI_CMD) { - writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); + if (isa_readb(llc + offsetof(struct trllc, llc))!=UI_CMD) { + isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); ti->tr_stats.rx_dropped++; - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); return; } - length = ntohs(readw(ti->arb+offsetof(struct arb_rec_req, frame_len))); - if ((readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) && - (readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) && + length = ntohs(isa_readw(ti->arb+offsetof(struct arb_rec_req, frame_len))); + if ((isa_readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) && + (isa_readb(llc + 
offsetof(struct trllc, ssap))==EXTENDED_SAP) && (length>=hdr_len)) { IPv4_p = 1; } @@ -1588,20 +1576,20 @@ DPRINTK("Probably non-IP frame received.\n"); DPRINTK("ssap: %02X dsap: %02X saddr: %02X:%02X:%02X:%02X:%02X:%02X " "daddr: %02X:%02X:%02X:%02X:%02X:%02X\n", - (int)readb(llc + offsetof(struct trllc, ssap)), - (int)readb(llc + offsetof(struct trllc, dsap)), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+1), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+2), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+3), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+4), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+5), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+1), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+2), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+3), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+4), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+5)); + (int)isa_readb(llc + offsetof(struct trllc, ssap)), + (int)isa_readb(llc + offsetof(struct trllc, dsap)), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+1), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+2), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+3), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+4), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+5), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+1), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+2), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+3), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+4), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+5)); } #endif @@ -1610,8 +1598,8 @@ if (!(skb=dev_alloc_skb(skb_size))) { DPRINTK("out of memory. 
frame dropped.\n"); ti->tr_stats.rx_dropped++; - writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); return; } @@ -1619,12 +1607,12 @@ skb_reserve(skb, sizeof(struct trh_hdr)-lan_hdr_len+sizeof(struct trllc)); skb->dev=dev; data=skb->data; - rbuffer_len=ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len))); + rbuffer_len=ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len))); rbufdata = rbuffer + offsetof(struct rec_buf,data); if (IPv4_p) { /* Copy the headers without checksumming */ - memcpy_fromio(data, rbufdata, hdr_len); + isa_memcpy_fromio(data, rbufdata, hdr_len); /* Watch for padded packets and bogons */ iph=(struct iphdr*)(data + lan_hdr_len + sizeof(struct trllc)); @@ -1644,20 +1632,20 @@ length < rbuffer_len ? length : rbuffer_len, chksum); else - memcpy_fromio(data, rbufdata, rbuffer_len); - rbuffer = ntohs(readw(rbuffer)); + isa_memcpy_fromio(data, rbufdata, rbuffer_len); + rbuffer = ntohs(isa_readw(rbuffer)); if (!rbuffer) break; length -= rbuffer_len; data += rbuffer_len; rbuffer += ti->sram; - rbuffer_len = ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len))); + rbuffer_len = ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len))); rbufdata = rbuffer + offsetof(struct rec_buf, data); } - writeb(0, ti->asb + offsetof(struct asb_rec, ret_code)); + isa_writeb(0, ti->asb + offsetof(struct asb_rec, ret_code)); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); ti->tr_stats.rx_bytes += skb->len; ti->tr_stats.rx_packets++; @@ -1696,10 +1684,10 @@ /* Save skb; we'll need it when the adapter asks for the data */ ti->current_skb=skb; - writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command)); - writew(ti->exsap_station_id, 
ti->srb + isa_writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command)); + isa_writew(ti->exsap_station_id, ti->srb +offsetof(struct srb_xmit, station_id)); - writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD)); + isa_writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD)); spin_unlock_irqrestore(&(ti->lock), flags); dev->trans_start=jiffies; @@ -1721,9 +1709,9 @@ ti=(struct tok_info *) dev->priv; ti->readlog_pending = 0; - writeb(DIR_READ_LOG, ti->srb); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(DIR_READ_LOG, ti->srb); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); dev->tbusy=1; /* really srb busy... */ } diff -u --recursive --new-file v2.3.31/linux/drivers/net/tokenring/ibmtr.h linux/drivers/net/tokenring/ibmtr.h --- v2.3.31/linux/drivers/net/tokenring/ibmtr.h Mon Oct 11 15:38:15 1999 +++ linux/drivers/net/tokenring/ibmtr.h Sun Dec 12 22:55:54 1999 @@ -162,7 +162,7 @@ #define ACA_RW 0x00 #ifdef ENABLE_PAGING -#define SET_PAGE(x) (writeb(((x>>8)&ti.page_mask), \ +#define SET_PAGE(x) (isa_writeb(((x>>8)&ti.page_mask), \ ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN)) #else #define SET_PAGE(x) diff -u --recursive --new-file v2.3.31/linux/drivers/parport/share.c linux/drivers/parport/share.c --- v2.3.31/linux/drivers/parport/share.c Sun Nov 7 16:37:34 1999 +++ linux/drivers/parport/share.c Mon Dec 13 16:00:32 1999 @@ -107,6 +107,13 @@ for (port = portlist; port; port = port->next) drv->attach (port); + /* For compatibility with 2.2, check the (obsolete) parport_lowlevel + * alias in case some people haven't changed to post-install rules + * yet. parport_enumerate (itself deprecated) will printk a + * friendly reminder. 
*/ + if (!portlist) + parport_enumerate (); + return 0; } diff -u --recursive --new-file v2.3.31/linux/drivers/pci/names.c linux/drivers/pci/names.c --- v2.3.31/linux/drivers/pci/names.c Wed Dec 8 14:11:26 1999 +++ linux/drivers/pci/names.c Thu Dec 9 13:29:05 1999 @@ -5,6 +5,7 @@ * David Mosberger-Tang, Martin Mares */ +#include #include #include #include diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/bulkmem.c linux/drivers/pcmcia/bulkmem.c --- v2.3.31/linux/drivers/pcmcia/bulkmem.c Thu Nov 11 20:11:42 1999 +++ linux/drivers/pcmcia/bulkmem.c Wed Dec 8 13:25:25 1999 @@ -294,9 +294,9 @@ { switch (func) { case MTDRequestWindow: - return CardServices(RequestWindow, a1, a2, NULL); + return pcmcia_request_window(a1, a2); case MTDReleaseWindow: - return CardServices(ReleaseWindow, a1, NULL, NULL); + return pcmcia_release_window(a1); case MTDModifyWindow: return mtd_modify_window(a1, a2); break; case MTDSetVpp: @@ -403,7 +403,7 @@ return CS_NO_MORE_ITEMS; } /* match_region */ -int get_first_region(client_handle_t handle, region_info_t *rgn) +int pcmcia_get_first_region(client_handle_t handle, region_info_t *rgn) { socket_info_t *s = SOCKET(handle); if (CHECK_HANDLE(handle)) @@ -422,7 +422,7 @@ return match_region(handle, s->c_region, rgn); } /* get_first_region */ -int get_next_region(client_handle_t handle, region_info_t *rgn) +int pcmcia_get_next_region(client_handle_t handle, region_info_t *rgn) { if (CHECK_HANDLE(handle)) return CS_BAD_HANDLE; @@ -435,7 +435,7 @@ ======================================================================*/ -int register_mtd(client_handle_t handle, mtd_reg_t *reg) +int pcmcia_register_mtd(client_handle_t handle, mtd_reg_t *reg) { memory_handle_t list; socket_info_t *s; @@ -470,7 +470,7 @@ ======================================================================*/ -int register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header) +int pcmcia_register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header) { eraseq_t 
*queue; @@ -485,7 +485,7 @@ return CS_SUCCESS; } /* register_erase_queue */ -int deregister_erase_queue(eraseq_handle_t eraseq) +int pcmcia_deregister_erase_queue(eraseq_handle_t eraseq) { int i; if (CHECK_ERASEQ(eraseq)) @@ -499,7 +499,7 @@ return CS_SUCCESS; } /* deregister_erase_queue */ -int check_erase_queue(eraseq_handle_t eraseq) +int pcmcia_check_erase_queue(eraseq_handle_t eraseq) { int i; if (CHECK_ERASEQ(eraseq)) @@ -517,7 +517,7 @@ ======================================================================*/ -int open_memory(client_handle_t *handle, open_mem_t *open) +int pcmcia_open_memory(client_handle_t *handle, open_mem_t *open) { socket_info_t *s; memory_handle_t region; @@ -550,7 +550,7 @@ ======================================================================*/ -int close_memory(memory_handle_t handle) +int pcmcia_close_memory(memory_handle_t handle) { DEBUG(1, "cs: close_memory(0x%p)\n", handle); if (CHECK_REGION(handle)) @@ -565,7 +565,7 @@ ======================================================================*/ -int read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf) +int pcmcia_read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf) { mtd_request_t mtd; if (CHECK_REGION(handle)) @@ -591,7 +591,7 @@ ======================================================================*/ -int write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf) +int pcmcia_write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf) { mtd_request_t mtd; if (CHECK_REGION(handle)) @@ -616,7 +616,7 @@ ======================================================================*/ -int copy_memory(memory_handle_t handle, copy_op_t *req) +int pcmcia_copy_memory(memory_handle_t handle, copy_op_t *req) { if (CHECK_REGION(handle)) return CS_BAD_HANDLE; diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/cb_enabler.c linux/drivers/pcmcia/cb_enabler.c --- v2.3.31/linux/drivers/pcmcia/cb_enabler.c Thu Nov 11 20:11:42 1999 +++ 
linux/drivers/pcmcia/cb_enabler.c Wed Dec 8 13:59:35 1999 @@ -113,7 +113,7 @@ static void cs_error(client_handle_t handle, int func, int ret) { error_info_t err = { func, ret }; - CardServices(ReportError, handle, &err); + pcmcia_report_error(handle, &err); } /*====================================================================*/ @@ -148,7 +148,7 @@ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME; client_reg.Version = 0x0210; client_reg.event_callback_args.client_data = link; - ret = CardServices(RegisterClient, &link->handle, &client_reg); + ret = pcmcia_register_client(&link->handle, &client_reg); if (ret != 0) { cs_error(link->handle, RegisterClient, ret); cb_detach(link); @@ -183,7 +183,7 @@ } if (link->handle) - CardServices(DeregisterClient, link->handle); + pcmcia_deregister_client(link->handle); *linkp = link->next; kfree_s(link, sizeof(struct dev_link_t)); @@ -206,7 +206,7 @@ link->state |= DEV_CONFIG; /* Get PCI bus info */ - CardServices(GetConfigurationInfo, handle, &config); + pcmcia_get_configuration_info(handle, &config); bus = config.Option; devfn = config.Function; /* Is this a new bus? */ @@ -221,14 +221,14 @@ b->ncfg = b->nuse = 1; /* Special hook: CS know what to do... 
*/ - i = CardServices(RequestIO, handle, NULL); + i = pcmcia_request_io(handle, NULL); if (i != CS_SUCCESS) { cs_error(handle, RequestIO, i); return; } b->flags |= DID_REQUEST; b->owner = link; - i = CardServices(RequestConfiguration, handle, &link->conf); + i = pcmcia_request_configuration(handle, &link->conf); if (i != CS_SUCCESS) { cs_error(handle, RequestConfiguration, i); return; @@ -267,12 +267,11 @@ if (link->state & DEV_SUSPEND) b->flags &= ~DID_CONFIG; else if ((b->flags & DID_CONFIG) && (--b->ncfg == 0)) { - CardServices(ReleaseConfiguration, b->owner->handle, - &b->owner->conf); + pcmcia_release_configuration(b->owner->handle); b->flags &= ~DID_CONFIG; } if ((b->flags & DID_REQUEST) && (--b->nuse == 0)) { - CardServices(ReleaseIO, b->owner->handle, NULL); + pcmcia_release_io(b->owner->handle, NULL); b->flags &= ~DID_REQUEST; } if (b->flags == 0) { @@ -314,8 +313,7 @@ drv->ops->suspend(link->dev); b->ncfg--; if (b->ncfg == 0) - CardServices(ReleaseConfiguration, link->handle, - &link->conf); + pcmcia_release_configuration(link->handle); } break; case CS_EVENT_PM_RESUME: @@ -325,7 +323,7 @@ if (link->state & DEV_CONFIG) { b->ncfg++; if (b->ncfg == 1) - CardServices(RequestConfiguration, link->handle, + pcmcia_request_configuration(link->handle, &link->conf); if (drv->ops->resume != NULL) drv->ops->resume(link->dev); @@ -379,7 +377,7 @@ { servinfo_t serv; DEBUG(0, "%s\n", version); - CardServices(GetCardServicesInfo, &serv); + pcmcia_get_card_services_info(&serv); if (serv.Revision != CS_RELEASE_CODE) { printk(KERN_NOTICE "cb_enabler: Card Services release " "does not match!\n"); diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/cistpl.c linux/drivers/pcmcia/cistpl.c --- v2.3.31/linux/drivers/pcmcia/cistpl.c Thu Nov 18 20:25:37 1999 +++ linux/drivers/pcmcia/cistpl.c Wed Dec 8 13:51:11 1999 @@ -158,7 +158,7 @@ vs->cis_mem.sys_start = base; vs->cis_mem.sys_stop = base+vs->cap.map_size-1; vs->cis_virt = bus_ioremap(vs->cap.bus, base, 
vs->cap.map_size); - ret = validate_cis(vs->clients, &info1); + ret = pcmcia_validate_cis(vs->clients, &info1); /* invalidate mapping and CIS cache */ bus_iounmap(vs->cap.bus, vs->cis_virt); vs->cis_used = 0; if ((ret != 0) || (info1.Chains == 0)) @@ -167,7 +167,7 @@ vs->cis_mem.sys_stop = base+2*vs->cap.map_size-1; vs->cis_virt = bus_ioremap(vs->cap.bus, base+vs->cap.map_size, vs->cap.map_size); - ret = validate_cis(vs->clients, &info2); + ret = pcmcia_validate_cis(vs->clients, &info2); bus_iounmap(vs->cap.bus, vs->cis_virt); vs->cis_used = 0; return ((ret == 0) && (info1.Chains == info2.Chains)); } @@ -315,7 +315,7 @@ ======================================================================*/ -int replace_cis(client_handle_t handle, cisdump_t *cis) +int pcmcia_replace_cis(client_handle_t handle, cisdump_t *cis) { socket_info_t *s; if (CHECK_HANDLE(handle)) @@ -353,9 +353,9 @@ #define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn) #define SPACE(f) (((tuple_flags *)(&(f)))->space) -int get_next_tuple(client_handle_t handle, tuple_t *tuple); +int pcmcia_get_next_tuple(client_handle_t handle, tuple_t *tuple); -int get_first_tuple(client_handle_t handle, tuple_t *tuple) +int pcmcia_get_first_tuple(client_handle_t handle, tuple_t *tuple) { socket_info_t *s; if (CHECK_HANDLE(handle)) @@ -381,15 +381,15 @@ !(tuple->Attributes & TUPLE_RETURN_COMMON)) { cisdata_t req = tuple->DesiredTuple; tuple->DesiredTuple = CISTPL_LONGLINK_MFC; - if (get_next_tuple(handle, tuple) == CS_SUCCESS) { + if (pcmcia_get_next_tuple(handle, tuple) == CS_SUCCESS) { tuple->DesiredTuple = CISTPL_LINKTARGET; - if (get_next_tuple(handle, tuple) != CS_SUCCESS) + if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS) return CS_NO_MORE_ITEMS; } else tuple->CISOffset = tuple->TupleLink = 0; tuple->DesiredTuple = req; } - return get_next_tuple(handle, tuple); + return pcmcia_get_next_tuple(handle, tuple); } static int follow_link(socket_info_t *s, tuple_t *tuple) @@ -430,7 +430,7 @@ return ofs; } -int 
get_next_tuple(client_handle_t handle, tuple_t *tuple) +int pcmcia_get_next_tuple(client_handle_t handle, tuple_t *tuple) { socket_info_t *s; u_char link[2], tmp; @@ -510,7 +510,7 @@ ofs += link[1] + 2; } if (i == MAX_TUPLES) { - DEBUG(1, "cs: overrun in get_next_tuple for socket %d\n", + DEBUG(1, "cs: overrun in pcmcia_get_next_tuple for socket %d\n", handle->Socket); return CS_NO_MORE_ITEMS; } @@ -525,7 +525,7 @@ #define _MIN(a, b) (((a) < (b)) ? (a) : (b)) -int get_tuple_data(client_handle_t handle, tuple_t *tuple) +int pcmcia_get_tuple_data(client_handle_t handle, tuple_t *tuple) { socket_info_t *s; u_int len; @@ -1234,7 +1234,7 @@ /*====================================================================*/ -int parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) +int pcmcia_parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) { int ret = CS_SUCCESS; @@ -1326,14 +1326,14 @@ tuple.DesiredTuple = code; tuple.Attributes = TUPLE_RETURN_COMMON; - ret = CardServices(GetFirstTuple, handle, &tuple, NULL); + ret = pcmcia_get_first_tuple(handle, &tuple); if (ret != CS_SUCCESS) return ret; tuple.TupleData = buf; tuple.TupleOffset = 0; tuple.TupleDataMax = sizeof(buf); - ret = CardServices(GetTupleData, handle, &tuple, NULL); + ret = pcmcia_get_tuple_data(handle, &tuple); if (ret != CS_SUCCESS) return ret; - ret = CardServices(ParseTuple, handle, &tuple, parse); + ret = pcmcia_parse_tuple(handle, &tuple, parse); return ret; } @@ -1347,7 +1347,7 @@ ======================================================================*/ -int validate_cis(client_handle_t handle, cisinfo_t *info) +int pcmcia_validate_cis(client_handle_t handle, cisinfo_t *info) { tuple_t tuple; cisparse_t p; @@ -1359,7 +1359,7 @@ info->Chains = reserved = errors = 0; tuple.DesiredTuple = RETURN_FIRST_TUPLE; tuple.Attributes = TUPLE_RETURN_COMMON; - ret = get_first_tuple(handle, &tuple); + ret = pcmcia_get_first_tuple(handle, &tuple); if (ret != CS_SUCCESS) return CS_SUCCESS; 
@@ -1380,7 +1380,7 @@ return CS_SUCCESS; for (info->Chains = 1; info->Chains < MAX_TUPLES; info->Chains++) { - ret = get_next_tuple(handle, &tuple); + ret = pcmcia_get_next_tuple(handle, &tuple); if (ret != CS_SUCCESS) break; if (((tuple.TupleCode > 0x23) && (tuple.TupleCode < 0x40)) || ((tuple.TupleCode > 0x47) && (tuple.TupleCode < 0x80)) || diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/cs.c linux/drivers/pcmcia/cs.c --- v2.3.31/linux/drivers/pcmcia/cs.c Tue Dec 7 09:32:45 1999 +++ linux/drivers/pcmcia/cs.c Wed Dec 8 13:59:43 1999 @@ -316,6 +316,7 @@ s->cis_mem.flags = 0; s->cis_mem.speed = cis_speed; s->erase_busy.next = s->erase_busy.prev = &s->erase_busy; + spin_lock_init(&s->lock); for (i = 0; i < sockets; i++) if (socket_table[i] == NULL) break; @@ -779,7 +780,7 @@ ======================================================================*/ -static int access_configuration_register(client_handle_t handle, +int pcmcia_access_configuration_register(client_handle_t handle, conf_reg_t *reg) { socket_info_t *s; @@ -826,7 +827,7 @@ ======================================================================*/ -static int bind_device(bind_req_t *req) +int pcmcia_bind_device(bind_req_t *req) { client_t *client; socket_info_t *s; @@ -861,7 +862,7 @@ ======================================================================*/ -static int bind_mtd(mtd_bind_t *req) +int pcmcia_bind_mtd(mtd_bind_t *req) { socket_info_t *s; memory_handle_t region; @@ -890,7 +891,7 @@ /*====================================================================*/ -static int deregister_client(client_handle_t handle) +int pcmcia_deregister_client(client_handle_t handle) { client_t **client; socket_info_t *s; @@ -945,7 +946,7 @@ /*====================================================================*/ -static int get_configuration_info(client_handle_t handle, +int pcmcia_get_configuration_info(client_handle_t handle, config_info_t *config) { socket_info_t *s; @@ -1016,7 +1017,7 @@ 
======================================================================*/ -static int get_card_services_info(servinfo_t *info) +int pcmcia_get_card_services_info(servinfo_t *info) { info->Signature[0] = 'C'; info->Signature[1] = 'S'; @@ -1034,7 +1035,7 @@ ======================================================================*/ -static int get_first_client(client_handle_t *handle, client_req_t *req) +int pcmcia_get_first_client(client_handle_t *handle, client_req_t *req) { socket_t s; if (req->Attributes & CLIENT_THIS_SOCKET) @@ -1051,7 +1052,7 @@ /*====================================================================*/ -static int get_next_client(client_handle_t *handle, client_req_t *req) +int pcmcia_get_next_client(client_handle_t *handle, client_req_t *req) { socket_info_t *s; if ((handle == NULL) || CHECK_HANDLE(*handle)) @@ -1070,7 +1071,7 @@ /*====================================================================*/ -static int get_window(window_handle_t *handle, int idx, win_req_t *req) +int pcmcia_get_window(window_handle_t *handle, int idx, win_req_t *req) { socket_info_t *s; window_t *win; @@ -1103,18 +1104,18 @@ return CS_SUCCESS; } /* get_window */ -static int get_first_window(client_handle_t *handle, win_req_t *req) +int pcmcia_get_first_window(client_handle_t *handle, win_req_t *req) { if ((handle == NULL) || CHECK_HANDLE(*handle)) return CS_BAD_HANDLE; - return get_window((window_handle_t *)handle, 0, req); + return pcmcia_get_window((window_handle_t *)handle, 0, req); } -static int get_next_window(window_handle_t *win, win_req_t *req) +int pcmcia_get_next_window(window_handle_t *win, win_req_t *req) { if ((win == NULL) || ((*win)->magic != WINDOW_MAGIC)) return CS_BAD_HANDLE; - return get_window(win, (*win)->index+1, req); + return pcmcia_get_window(win, (*win)->index+1, req); } /*====================================================================== @@ -1124,7 +1125,7 @@ ======================================================================*/ -static 
int get_status(client_handle_t handle, cs_status_t *status) +int pcmcia_get_status(client_handle_t handle, cs_status_t *status) { socket_info_t *s; config_t *c; @@ -1194,7 +1195,7 @@ ======================================================================*/ -static int get_mem_page(window_handle_t win, memreq_t *req) +int pcmcia_get_mem_page(window_handle_t win, memreq_t *req) { if ((win == NULL) || (win->magic != WINDOW_MAGIC)) return CS_BAD_HANDLE; @@ -1203,7 +1204,7 @@ return CS_SUCCESS; } /* get_mem_page */ -static int map_mem_page(window_handle_t win, memreq_t *req) +int pcmcia_map_mem_page(window_handle_t win, memreq_t *req) { socket_info_t *s; if ((win == NULL) || (win->magic != WINDOW_MAGIC)) @@ -1223,7 +1224,7 @@ ======================================================================*/ -static int modify_configuration(client_handle_t handle, +int pcmcia_modify_configuration(client_handle_t handle, modconf_t *mod) { socket_info_t *s; @@ -1272,7 +1273,7 @@ ======================================================================*/ -static int modify_window(window_handle_t win, modwin_t *req) +int pcmcia_modify_window(window_handle_t win, modwin_t *req) { if ((win == NULL) || (win->magic != WINDOW_MAGIC)) return CS_BAD_HANDLE; @@ -1301,7 +1302,7 @@ ======================================================================*/ -static int register_client(client_handle_t *handle, client_reg_t *req) +int pcmcia_register_client(client_handle_t *handle, client_reg_t *req) { client_t *client; socket_info_t *s; @@ -1378,8 +1379,7 @@ /*====================================================================*/ -static int release_configuration(client_handle_t handle, - socket_t *Socket) +int pcmcia_release_configuration(client_handle_t handle) { pccard_io_map io; socket_info_t *s; @@ -1435,7 +1435,7 @@ ======================================================================*/ -static int release_io(client_handle_t handle, io_req_t *req) +int pcmcia_release_io(client_handle_t handle, 
io_req_t *req) { socket_info_t *s; @@ -1472,7 +1472,7 @@ /*====================================================================*/ -static int cs_release_irq(client_handle_t handle, irq_req_t *req) +int pcmcia_release_irq(client_handle_t handle, irq_req_t *req) { socket_info_t *s; if (CHECK_HANDLE(handle) || !(handle->state & CLIENT_IRQ_REQ)) @@ -1508,7 +1508,7 @@ /*====================================================================*/ -static int release_window(window_handle_t win) +int pcmcia_release_window(window_handle_t win) { socket_info_t *s; @@ -1534,7 +1534,7 @@ /*====================================================================*/ -static int request_configuration(client_handle_t handle, +int pcmcia_request_configuration(client_handle_t handle, config_req_t *req) { int i; @@ -1672,7 +1672,7 @@ ======================================================================*/ -static int request_io(client_handle_t handle, io_req_t *req) +int pcmcia_request_io(client_handle_t handle, io_req_t *req) { socket_info_t *s; config_t *c; @@ -1738,7 +1738,7 @@ ======================================================================*/ -static int cs_request_irq(client_handle_t handle, irq_req_t *req) +int pcmcia_request_irq(client_handle_t handle, irq_req_t *req) { socket_info_t *s; config_t *c; @@ -1814,7 +1814,7 @@ ======================================================================*/ -static int request_window(client_handle_t *handle, win_req_t *req) +int pcmcia_request_window(client_handle_t *handle, win_req_t *req) { socket_info_t *s; window_t *win; @@ -1889,7 +1889,7 @@ ======================================================================*/ -static int reset_card(client_handle_t handle, client_req_t *req) +int pcmcia_reset_card(client_handle_t handle, client_req_t *req) { int i, ret; socket_info_t *s; @@ -1924,7 +1924,7 @@ ======================================================================*/ -static int suspend_card(client_handle_t handle, client_req_t *req) +int 
pcmcia_suspend_card(client_handle_t handle, client_req_t *req) { int i; socket_info_t *s; @@ -1945,7 +1945,7 @@ return CS_SUCCESS; } /* suspend_card */ -static int resume_card(client_handle_t handle, client_req_t *req) +int pcmcia_resume_card(client_handle_t handle, client_req_t *req) { int i; socket_info_t *s; @@ -1970,7 +1970,7 @@ ======================================================================*/ -static int eject_card(client_handle_t handle, client_req_t *req) +int pcmcia_eject_card(client_handle_t handle, client_req_t *req) { int i, ret; socket_info_t *s; @@ -1996,7 +1996,7 @@ } /* eject_card */ -static int insert_card(client_handle_t handle, client_req_t *req) +int pcmcia_insert_card(client_handle_t handle, client_req_t *req) { int i, status; socket_info_t *s; @@ -2034,7 +2034,7 @@ ======================================================================*/ -static int set_event_mask(client_handle_t handle, eventmask_t *mask) +int pcmcia_set_event_mask(client_handle_t handle, eventmask_t *mask) { u_int events, bit; if (CHECK_HANDLE(handle)) @@ -2054,7 +2054,7 @@ /*====================================================================*/ -static int report_error(client_handle_t handle, error_info_t *err) +int pcmcia_report_error(client_handle_t handle, error_info_t *err) { int i; char *serv; @@ -2101,103 +2101,103 @@ #endif switch (func) { case AccessConfigurationRegister: - return access_configuration_register(a1, a2); break; + return pcmcia_access_configuration_register(a1, a2); break; case AdjustResourceInfo: - return adjust_resource_info(a1, a2); break; + return pcmcia_adjust_resource_info(a1, a2); break; case CheckEraseQueue: - return check_erase_queue(a1); break; + return pcmcia_check_erase_queue(a1); break; case CloseMemory: - return close_memory(a1); break; + return pcmcia_close_memory(a1); break; case CopyMemory: - return copy_memory(a1, a2); break; + return pcmcia_copy_memory(a1, a2); break; case DeregisterClient: - return deregister_client(a1); break; 
+ return pcmcia_deregister_client(a1); break; case DeregisterEraseQueue: - return deregister_erase_queue(a1); break; + return pcmcia_deregister_erase_queue(a1); break; case GetFirstClient: - return get_first_client(a1, a2); break; + return pcmcia_get_first_client(a1, a2); break; case GetCardServicesInfo: - return get_card_services_info(a1); break; + return pcmcia_get_card_services_info(a1); break; case GetConfigurationInfo: - return get_configuration_info(a1, a2); break; + return pcmcia_get_configuration_info(a1, a2); break; case GetNextClient: - return get_next_client(a1, a2); break; + return pcmcia_get_next_client(a1, a2); break; case GetFirstRegion: - return get_first_region(a1, a2); break; + return pcmcia_get_first_region(a1, a2); break; case GetFirstTuple: - return get_first_tuple(a1, a2); break; + return pcmcia_get_first_tuple(a1, a2); break; case GetNextRegion: - return get_next_region(a1, a2); break; + return pcmcia_get_next_region(a1, a2); break; case GetNextTuple: - return get_next_tuple(a1, a2); break; + return pcmcia_get_next_tuple(a1, a2); break; case GetStatus: - return get_status(a1, a2); break; + return pcmcia_get_status(a1, a2); break; case GetTupleData: - return get_tuple_data(a1, a2); break; + return pcmcia_get_tuple_data(a1, a2); break; case MapMemPage: - return map_mem_page(a1, a2); break; + return pcmcia_map_mem_page(a1, a2); break; case ModifyConfiguration: - return modify_configuration(a1, a2); break; + return pcmcia_modify_configuration(a1, a2); break; case ModifyWindow: - return modify_window(a1, a2); break; + return pcmcia_modify_window(a1, a2); break; case OpenMemory: - return open_memory(a1, a2); + return pcmcia_open_memory(a1, a2); case ParseTuple: - return parse_tuple(a1, a2, a3); break; + return pcmcia_parse_tuple(a1, a2, a3); break; case ReadMemory: - return read_memory(a1, a2, a3); break; + return pcmcia_read_memory(a1, a2, a3); break; case RegisterClient: - return register_client(a1, a2); break; + return pcmcia_register_client(a1, 
a2); break; case RegisterEraseQueue: - return register_erase_queue(a1, a2); break; + return pcmcia_register_erase_queue(a1, a2); break; case RegisterMTD: - return register_mtd(a1, a2); break; + return pcmcia_register_mtd(a1, a2); break; case ReleaseConfiguration: - return release_configuration(a1, a2); break; + return pcmcia_release_configuration(a1); break; case ReleaseIO: - return release_io(a1, a2); break; + return pcmcia_release_io(a1, a2); break; case ReleaseIRQ: - return cs_release_irq(a1, a2); break; + return pcmcia_release_irq(a1, a2); break; case ReleaseWindow: - return release_window(a1); break; + return pcmcia_release_window(a1); break; case RequestConfiguration: - return request_configuration(a1, a2); break; + return pcmcia_request_configuration(a1, a2); break; case RequestIO: - return request_io(a1, a2); break; + return pcmcia_request_io(a1, a2); break; case RequestIRQ: - return cs_request_irq(a1, a2); break; + return pcmcia_request_irq(a1, a2); break; case RequestWindow: - return request_window(a1, a2); break; + return pcmcia_request_window(a1, a2); break; case ResetCard: - return reset_card(a1, a2); break; + return pcmcia_reset_card(a1, a2); break; case SetEventMask: - return set_event_mask(a1, a2); break; + return pcmcia_set_event_mask(a1, a2); break; case ValidateCIS: - return validate_cis(a1, a2); break; + return pcmcia_validate_cis(a1, a2); break; case WriteMemory: - return write_memory(a1, a2, a3); break; + return pcmcia_write_memory(a1, a2, a3); break; case BindDevice: - return bind_device(a1); break; + return pcmcia_bind_device(a1); break; case BindMTD: - return bind_mtd(a1); break; + return pcmcia_bind_mtd(a1); break; case ReportError: - return report_error(a1, a2); break; + return pcmcia_report_error(a1, a2); break; case SuspendCard: - return suspend_card(a1, a2); break; + return pcmcia_suspend_card(a1, a2); break; case ResumeCard: - return resume_card(a1, a2); break; + return pcmcia_resume_card(a1, a2); break; case EjectCard: - return 
eject_card(a1, a2); break; + return pcmcia_eject_card(a1, a2); break; case InsertCard: - return insert_card(a1, a2); break; + return pcmcia_insert_card(a1, a2); break; case ReplaceCIS: - return replace_cis(a1, a2); break; + return pcmcia_replace_cis(a1, a2); break; case GetFirstWindow: - return get_first_window(a1, a2); break; + return pcmcia_get_first_window(a1, a2); break; case GetNextWindow: - return get_next_window(a1, a2); break; + return pcmcia_get_next_window(a1, a2); break; case GetMemPage: - return get_mem_page(a1, a2); break; + return pcmcia_get_mem_page(a1, a2); break; default: return CS_UNSUPPORTED_FUNCTION; break; } diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/ds.c linux/drivers/pcmcia/ds.c --- v2.3.31/linux/drivers/pcmcia/ds.c Tue Dec 7 09:32:45 1999 +++ linux/drivers/pcmcia/ds.c Wed Dec 8 13:50:19 1999 @@ -128,7 +128,7 @@ static void cs_error(client_handle_t handle, int func, int ret) { error_info_t err = { func, ret }; - CardServices(ReportError, handle, &err); + pcmcia_report_error(handle, &err); } /*====================================================================== @@ -340,7 +340,7 @@ bind_req.Attributes = mtd_info->Attributes; bind_req.Socket = i; bind_req.CardOffset = mtd_info->CardOffset; - ret = CardServices(BindMTD, &bind_req); + ret = pcmcia_bind_mtd(&bind_req); if (ret != CS_SUCCESS) { cs_error(NULL, BindMTD, ret); printk(KERN_NOTICE "ds: unable to bind MTD '%s' to socket %d" @@ -394,7 +394,7 @@ bind_req.Socket = i; bind_req.Function = bind_info->function; bind_req.dev_info = &driver->dev_info; - ret = CardServices(BindDevice, &bind_req); + ret = pcmcia_bind_device(&bind_req); if (ret != CS_SUCCESS) { cs_error(NULL, BindDevice, ret); printk(KERN_NOTICE "ds: unable to bind '%s' to socket %d\n", @@ -687,78 +687,74 @@ switch (cmd) { case DS_ADJUST_RESOURCE_INFO: - ret = CardServices(AdjustResourceInfo, s->handle, &buf.adjust); + ret = pcmcia_adjust_resource_info(s->handle, &buf.adjust); break; case DS_GET_CARD_SERVICES_INFO: 
- ret = CardServices(GetCardServicesInfo, &buf.servinfo); + ret = pcmcia_get_card_services_info(&buf.servinfo); break; case DS_GET_CONFIGURATION_INFO: - ret = CardServices(GetConfigurationInfo, s->handle, &buf.config); + ret = pcmcia_get_configuration_info(s->handle, &buf.config); break; case DS_GET_FIRST_TUPLE: - ret = CardServices(GetFirstTuple, s->handle, &buf.tuple); + ret = pcmcia_get_first_tuple(s->handle, &buf.tuple); break; case DS_GET_NEXT_TUPLE: - ret = CardServices(GetNextTuple, s->handle, &buf.tuple); + ret = pcmcia_get_next_tuple(s->handle, &buf.tuple); break; case DS_GET_TUPLE_DATA: buf.tuple.TupleData = buf.tuple_parse.data; buf.tuple.TupleDataMax = sizeof(buf.tuple_parse.data); - ret = CardServices(GetTupleData, s->handle, &buf.tuple); + ret = pcmcia_get_tuple_data(s->handle, &buf.tuple); break; case DS_PARSE_TUPLE: buf.tuple.TupleData = buf.tuple_parse.data; - ret = CardServices(ParseTuple, s->handle, &buf.tuple, - &buf.tuple_parse.parse); + ret = pcmcia_parse_tuple(s->handle, &buf.tuple, &buf.tuple_parse.parse); break; case DS_RESET_CARD: - ret = CardServices(ResetCard, s->handle, NULL); + ret = pcmcia_reset_card(s->handle, NULL); break; case DS_GET_STATUS: - ret = CardServices(GetStatus, s->handle, &buf.status); + ret = pcmcia_get_status(s->handle, &buf.status); break; case DS_VALIDATE_CIS: - ret = CardServices(ValidateCIS, s->handle, &buf.cisinfo); + ret = pcmcia_validate_cis(s->handle, &buf.cisinfo); break; case DS_SUSPEND_CARD: - ret = CardServices(SuspendCard, s->handle, NULL); + ret = pcmcia_suspend_card(s->handle, NULL); break; case DS_RESUME_CARD: - ret = CardServices(ResumeCard, s->handle, NULL); + ret = pcmcia_resume_card(s->handle, NULL); break; case DS_EJECT_CARD: - ret = CardServices(EjectCard, s->handle, NULL); + ret = pcmcia_eject_card(s->handle, NULL); break; case DS_INSERT_CARD: - ret = CardServices(InsertCard, s->handle, NULL); + ret = pcmcia_insert_card(s->handle, NULL); break; case DS_ACCESS_CONFIGURATION_REGISTER: if 
((buf.conf_reg.Action == CS_WRITE) && !suser()) return -EPERM; - ret = CardServices(AccessConfigurationRegister, s->handle, - &buf.conf_reg); + ret = pcmcia_access_configuration_register(s->handle, &buf.conf_reg); break; case DS_GET_FIRST_REGION: - ret = CardServices(GetFirstRegion, s->handle, &buf.region); + ret = pcmcia_get_first_region(s->handle, &buf.region); break; case DS_GET_NEXT_REGION: - ret = CardServices(GetNextRegion, s->handle, &buf.region); + ret = pcmcia_get_next_region(s->handle, &buf.region); break; case DS_GET_FIRST_WINDOW: buf.win_info.handle = (window_handle_t)s->handle; - ret = CardServices(GetFirstWindow, &buf.win_info.handle, - &buf.win_info.window); + ret = pcmcia_get_first_window(&buf.win_info.handle, &buf.win_info.window); break; case DS_GET_NEXT_WINDOW: - ret = CardServices(GetNextWindow, &buf.win_info.handle, - &buf.win_info.window); + ret = pcmcia_get_next_window(&buf.win_info.handle, &buf.win_info.window); break; case DS_GET_MEM_PAGE: - ret = CardServices(GetMemPage, buf.win_info.handle, + ret = pcmcia_get_mem_page(buf.win_info.handle, &buf.win_info.map); break; case DS_REPLACE_CIS: - ret = CardServices(ReplaceCIS, s->handle, &buf.cisdump); + ret = pcmcia_replace_cis(s->handle, &buf.cisdump); break; case DS_BIND_REQUEST: if (!suser()) return -EPERM; @@ -838,7 +834,7 @@ DEBUG(0, "%s\n", version); - CardServices(GetCardServicesInfo, &serv); + pcmcia_get_card_services_info(&serv); if (serv.Revision != CS_RELEASE_CODE) { printk(KERN_NOTICE "ds: Card Services release does not match!\n"); return -1; @@ -876,13 +872,13 @@ for (i = 0; i < sockets; i++) { bind.Socket = i; bind.Function = BIND_FN_ALL; - ret = CardServices(BindDevice, &bind); + ret = pcmcia_bind_device(&bind); if (ret != CS_SUCCESS) { cs_error(NULL, BindDevice, ret); break; } client_reg.event_callback_args.client_data = &socket_table[i]; - ret = CardServices(RegisterClient, &socket_table[i].handle, + ret = pcmcia_register_client(&socket_table[i].handle, &client_reg); if (ret != 
CS_SUCCESS) { cs_error(NULL, RegisterClient, ret); @@ -923,7 +919,7 @@ if (major_dev != -1) unregister_chrdev(major_dev, "pcmcia"); for (i = 0; i < sockets; i++) - CardServices(DeregisterClient, socket_table[i].handle); + pcmcia_deregister_client(socket_table[i].handle); sockets = 0; kfree(socket_table); } diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/i82365.c linux/drivers/pcmcia/i82365.c --- v2.3.31/linux/drivers/pcmcia/i82365.c Tue Dec 7 09:32:45 1999 +++ linux/drivers/pcmcia/i82365.c Wed Dec 8 13:34:15 1999 @@ -2703,7 +2703,7 @@ static int __init init_i82365(void) { servinfo_t serv; - CardServices(GetCardServicesInfo, &serv); + pcmcia_get_card_services_info(&serv); if (serv.Revision != CS_RELEASE_CODE) { printk(KERN_NOTICE "i82365: Card Services release " "does not match!\n"); diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/rsrc_mgr.c linux/drivers/pcmcia/rsrc_mgr.c --- v2.3.31/linux/drivers/pcmcia/rsrc_mgr.c Thu Nov 18 20:25:37 1999 +++ linux/drivers/pcmcia/rsrc_mgr.c Wed Dec 8 11:45:11 1999 @@ -629,7 +629,7 @@ /*====================================================================*/ -int adjust_resource_info(client_handle_t handle, adjust_t *adj) +int pcmcia_adjust_resource_info(client_handle_t handle, adjust_t *adj) { if (CHECK_HANDLE(handle)) return CS_BAD_HANDLE; diff -u --recursive --new-file v2.3.31/linux/drivers/pcmcia/tcic.c linux/drivers/pcmcia/tcic.c --- v2.3.31/linux/drivers/pcmcia/tcic.c Thu Nov 11 20:11:43 1999 +++ linux/drivers/pcmcia/tcic.c Wed Dec 8 13:35:15 1999 @@ -379,7 +379,7 @@ servinfo_t serv; DEBUG(0, "%s\n", version); - CardServices(GetCardServicesInfo, &serv); + pcmcia_get_card_services_info(&serv); if (serv.Revision != CS_RELEASE_CODE) { printk(KERN_NOTICE "tcic: Card Services release " "does not match!\n"); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/53c8xx_d.h linux/drivers/scsi/53c8xx_d.h --- v2.3.31/linux/drivers/scsi/53c8xx_d.h Mon Nov 1 13:56:26 1999 +++ linux/drivers/scsi/53c8xx_d.h Wed 
Dec 8 15:17:55 1999 @@ -1,3 +1,4 @@ +/* DO NOT EDIT - Generated automatically by script_asm.pl */ static u32 SCRIPT[] = { /* diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/Config.in linux/drivers/scsi/Config.in --- v2.3.31/linux/drivers/scsi/Config.in Fri Oct 15 15:25:14 1999 +++ linux/drivers/scsi/Config.in Mon Dec 13 14:38:06 1999 @@ -10,8 +10,12 @@ comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs' -bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN +#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + bool 'Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES +#fi +bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN + bool ' Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS bool ' SCSI logging facility' CONFIG_SCSI_LOGGING diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/Makefile linux/drivers/scsi/Makefile --- v2.3.31/linux/drivers/scsi/Makefile Thu Nov 11 20:11:43 1999 +++ linux/drivers/scsi/Makefile Sun Dec 12 23:04:20 1999 @@ -40,8 +40,8 @@ OX_OBJS := scsi_syms.o endif L_OBJS += scsi_n_syms.o hosts.o scsi_ioctl.o constants.o scsicam.o - L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o - L_OBJS += scsi_proc.o + L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o + L_OBJS += scsi_merge.o scsi_proc.o else ifeq ($(CONFIG_SCSI),m) MIX_OBJS += scsi_syms.o @@ -721,10 +721,11 @@ $(CC) $(CFLAGS) -c megaraid.c scsi_mod.o: $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o constants.o \ - scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o scsi_queue.o + scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o \ + scsi_queue.o scsi_lib.o scsi_merge.o $(LD) $(LD_RFLAG) -r -o $@ $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o \ - constants.o scsicam.o scsi_proc.o \ - scsi_error.o scsi_obsolete.o scsi_queue.o \ + constants.o scsicam.o scsi_proc.o scsi_merge.o \ + scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o sr_mod.o: sr.o sr_ioctl.o sr_vendor.o $(LD) $(LD_RFLAG) -r -o $@ sr.o 
sr_ioctl.o sr_vendor.o diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/advansys.c linux/drivers/scsi/advansys.c --- v2.3.31/linux/drivers/scsi/advansys.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/advansys.c Sun Dec 12 23:04:20 1999 @@ -1,5 +1,5 @@ -/* $Id: advansys.c,v 1.67 1999/11/18 20:13:15 bobf Exp bobf $ */ -#define ASC_VERSION "3.2K" /* AdvanSys Driver Version */ +/* $Id: advansys.c,v 1.68 1999/11/19 01:57:47 bobf Exp bobf $ */ +#define ASC_VERSION "3.2L" /* AdvanSys Driver Version */ /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters @@ -668,6 +668,11 @@ 4. Increase Wide board scatter-gather list maximum length to 255 when the driver is compiled into the kernel. + 3.2L (11/18/99): + 1. Fix bug in adv_get_sglist() that caused an assertion failure + at line 7475. The reqp->sgblkp pointer must be initialized + to NULL in adv_get_sglist(). + J. Known Problems/Fix List (XXX) 1. Need to add memory mapping workaround. Test the memory mapping. @@ -7471,8 +7476,8 @@ slp = (struct scatterlist *) scp->request_buffer; sg_elem_cnt = scp->use_sg; prev_sg_block = NULL; + reqp->sgblkp = NULL; - ASC_ASSERT(reqp->sgblkp == NULL); do { /* diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/aha1542.c linux/drivers/scsi/aha1542.c --- v2.3.31/linux/drivers/scsi/aha1542.c Thu Nov 11 20:11:44 1999 +++ linux/drivers/scsi/aha1542.c Sun Dec 12 23:04:20 1999 @@ -1292,7 +1292,9 @@ * check for timeout, and if we are doing something like this * we are pretty desperate anyways. */ + spin_unlock_irq(&io_request_lock); scsi_sleep(4*HZ); + spin_lock_irq(&io_request_lock); WAIT(STATUS(SCpnt->host->io_port), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF); @@ -1359,7 +1361,9 @@ * check for timeout, and if we are doing something like this * we are pretty desperate anyways. 
*/ + spin_unlock_irq(&io_request_lock); scsi_sleep(4*HZ); + spin_lock_irq(&io_request_lock); WAIT(STATUS(SCpnt->host->io_port), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/atp870u.c linux/drivers/scsi/atp870u.c --- v2.3.31/linux/drivers/scsi/atp870u.c Thu Nov 11 20:11:45 1999 +++ linux/drivers/scsi/atp870u.c Sun Dec 12 23:02:23 1999 @@ -36,47 +36,59 @@ * static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/atp870u.c,v 1.0 1997/05/07 15:22:00 root Exp root $"; */ -static unsigned char admaxu = 1, host_idu[2], chip_veru[2], scam_on[2], global_map[2]; -static unsigned short int active_idu[2], wide_idu[2], sync_idu, ultra_map[2]; -static int workingu[2] = {0, 0}; - -static Scsi_Cmnd *querequ[2][qcnt], *curr_req[2][16]; - -static unsigned char devspu[2][16] = { - {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}, - {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20} -}; +static unsigned char admaxu = 1; +static unsigned short int sync_idu; -static unsigned char dirctu[2][16], last_cmd[2], in_snd[2], in_int[2]; -static unsigned char ata_cdbu[2][16]; -static unsigned int ioportu[2] = {0, 0}; static unsigned int irqnumu[2] = {0, 0}; -static unsigned short int pciportu[2]; -static unsigned long prdaddru[2][16], tran_lenu[2][16], last_lenu[2][16]; -static unsigned char prd_tableu[2][16][1024]; -static unsigned char *prd_posu[2][16]; -static unsigned char quhdu[2], quendu[2]; -static unsigned char devtypeu[2][16] = +struct atp_unit { - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + unsigned long ioport; + unsigned long irq; + unsigned long pciport; + unsigned char last_cmd; + unsigned char in_snd; + unsigned char in_int; + unsigned char quhdu; + unsigned char quendu; + unsigned char scam_on; + unsigned char global_map; + unsigned char 
chip_veru; + unsigned char host_idu; + int working; + unsigned short wide_idu; + unsigned short active_idu; + unsigned short ultra_map; + unsigned char ata_cdbu[16]; + Scsi_Cmnd *querequ[qcnt]; + struct atp_id + { + unsigned char dirctu; + unsigned char devspu; + unsigned char devtypeu; + unsigned long prdaddru; + unsigned long tran_lenu; + unsigned long last_lenu; + unsigned char *prd_posu; + unsigned char *prd_tableu; + Scsi_Cmnd *curr_req; + } id[16]; }; static struct Scsi_Host *atp_host[2] = {NULL, NULL}; +static struct atp_unit atp_unit[2]; static void atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *regs) { unsigned long flags; unsigned short int tmpcip, id; - unsigned char i, j, h, tarid, lun; + unsigned char i, j, h, target_id, lun; unsigned char *prd; Scsi_Cmnd *workrequ; unsigned int workportu, tmport; unsigned long adrcntu, k; int errstus; + struct atp_unit *dev = dev_id; for (h = 0; h < 2; h++) { if (irq == irqnumu[h]) { @@ -85,59 +97,84 @@ } return; irq_numok: - in_int[h] = 1; - workportu = ioportu[h]; + dev->in_int = 1; + workportu = dev->ioport; tmport = workportu; - - if (workingu[h] != 0) + + if (dev->working != 0) { tmport += 0x1f; j = inb(tmport); - tmpcip = pciportu[h]; + tmpcip = dev->pciport; if ((inb(tmpcip) & 0x08) != 0) { tmpcip += 0x2; - while ((inb(tmpcip) & 0x08) != 0); + for (k=0; k < 1000; k++) + { + if ((inb(tmpcip) & 0x08) == 0) + { + goto stop_dma; + } + if ((inb(tmpcip) & 0x01) == 0) + { + goto stop_dma; + } + } } - tmpcip = pciportu[h]; +stop_dma: + tmpcip = dev->pciport; outb(0x00, tmpcip); tmport -= 0x08; i = inb(tmport); if ((j & 0x40) == 0) { - if ((last_cmd[h] & 0x40) == 0) + if ((dev->last_cmd & 0x40) == 0) { - last_cmd[h] = 0xff; + dev->last_cmd = 0xff; } } - else last_cmd[h] |= 0x40; + else dev->last_cmd |= 0x40; tmport -= 0x02; - tarid = inb(tmport); + target_id = inb(tmport); tmport += 0x02; - if ((tarid & 0x40) != 0) { - tarid = (tarid & 0x07) | 0x08; + /* + * Remap wide devices onto id numbers + */ + + if 
((target_id & 0x40) != 0) { + target_id = (target_id & 0x07) | 0x08; } else { - tarid &= 0x07; + target_id &= 0x07; } + if (i == 0x85) { - if (wide_idu[h] != 0) + /* + * Flip wide + */ + if (dev->wide_idu != 0) { tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; j |= 0x01; outb(j, tmport); } - if (((quhdu[h] != quendu[h]) || (last_cmd[h] != 0xff)) && - (in_snd[h] == 0)) + /* + * Issue more commands + */ + if (((dev->quhdu != dev->quendu) || (dev->last_cmd != 0xff)) && + (dev->in_snd == 0)) { send_s870(h); } - in_int[h] = 0; + /* + * Done + */ + dev->in_int = 0; return; } if (i == 0x21) @@ -147,15 +184,15 @@ ((unsigned char *) &adrcntu)[2] = inb(tmport++); ((unsigned char *) &adrcntu)[1] = inb(tmport++); ((unsigned char *) &adrcntu)[0] = inb(tmport); - k = last_lenu[h][tarid]; + k = dev->id[target_id].last_lenu; k -= adrcntu; - tran_lenu[h][tarid] = k; - last_lenu[h][tarid] = adrcntu; + dev->id[target_id].tran_lenu = k; + dev->id[target_id].last_lenu = adrcntu; tmport -= 0x04; outb(0x41, tmport); tmport += 0x08; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } if ((i == 0x80) || (i == 0x8f)) @@ -163,7 +200,7 @@ lun = 0; tmport -= 0x07; j = inb(tmport); - if (j == 0x44) { + if (j == 0x44 || i==0x80) { tmport += 0x0d; lun = inb(tmport) & 0x07; } else { @@ -174,71 +211,80 @@ ((unsigned char *) &adrcntu)[2] = inb(tmport++); ((unsigned char *) &adrcntu)[1] = inb(tmport++); ((unsigned char *) &adrcntu)[0] = inb(tmport); - k = last_lenu[h][tarid]; + k = dev->id[target_id].last_lenu; k -= adrcntu; - tran_lenu[h][tarid] = k; - last_lenu[h][tarid] = adrcntu; + dev->id[target_id].tran_lenu = k; + dev->id[target_id].last_lenu = adrcntu; tmport += 0x04; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } else { outb(0x46, tmport); - dirctu[h][tarid] = 0x00; + dev->id[target_id].dirctu = 0x00; tmport += 0x02; outb(0x00, tmport++); outb(0x00, tmport++); outb(0x00, tmport++); tmport += 0x03; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; 
return; } } tmport = workportu + 0x10; outb(0x45, tmport); tmport += 0x06; - tarid = inb(tmport); - if ((tarid & 0x10) != 0) + target_id = inb(tmport); + /* + * Remap wide identifiers + */ + if ((target_id & 0x10) != 0) { - tarid = (tarid & 0x07) | 0x08; + target_id = (target_id & 0x07) | 0x08; } else { - tarid &= 0x07; + target_id &= 0x07; } - workrequ = curr_req[h][tarid]; + workrequ = dev->id[target_id].curr_req; tmport = workportu + 0x0f; outb(lun, tmport); tmport += 0x02; - outb(devspu[h][tarid], tmport++); - adrcntu = tran_lenu[h][tarid]; - k = last_lenu[h][tarid]; + outb(dev->id[target_id].devspu, tmport++); + adrcntu = dev->id[target_id].tran_lenu; + k = dev->id[target_id].last_lenu; outb(((unsigned char *) &k)[2], tmport++); outb(((unsigned char *) &k)[1], tmport++); outb(((unsigned char *) &k)[0], tmport++); - j = tarid; - if (tarid > 7) { + /* Remap wide */ + j = target_id; + if (target_id > 7) { j = (j & 0x07) | 0x40; } - j |= dirctu[h][tarid]; + /* Add direction */ + j |= dev->id[target_id].dirctu; outb(j, tmport++); outb(0x80, tmport); tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; id = 1; - id = id << tarid; - if ((id & wide_idu[h]) != 0) { + id = id << target_id; + /* + * Is this a wide device + */ + if ((id & dev->wide_idu) != 0) { j |= 0x01; } outb(j, tmport); - if (last_lenu[h][tarid] == 0) { + + if (dev->id[target_id].last_lenu == 0) { tmport = workportu + 0x18; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } - prd = prd_posu[h][tarid]; + prd = dev->id[target_id].prd_posu; while (adrcntu != 0) { id = ((unsigned short int *) (prd))[2]; @@ -252,35 +298,44 @@ (k - adrcntu); ((unsigned long *) (prd))[0] += adrcntu; adrcntu = 0; - prd_posu[h][tarid] = prd; + dev->id[target_id].prd_posu = prd; } else { adrcntu -= k; - prdaddru[h][tarid] += 0x08; + dev->id[target_id].prdaddru += 0x08; prd += 0x08; if (adrcntu == 0) { - prd_posu[h][tarid] = prd; + dev->id[target_id].prd_posu = prd; } } } - tmpcip = pciportu[h] + 0x04; - 
outl(prdaddru[h][tarid], tmpcip); + tmpcip = dev->pciport + 0x04; + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip -= 0x02; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip -= 0x02; tmport = workportu + 0x18; - if (dirctu[h][tarid] != 0) { + /* + * Check transfer direction + */ + if (dev->id[target_id].dirctu != 0) { outb(0x08, tmport); outb(0x01, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } outb(0x08, tmport); outb(0x09, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } - workrequ = curr_req[h][tarid]; + + /* + * Current scsi request on this target + */ + + workrequ = dev->id[target_id].curr_req; + if (i == 0x42) { errstus = 0x02; workrequ->result = errstus; @@ -293,24 +348,36 @@ errstus = inb(tmport); workrequ->result = errstus; go_42: + /* + * Complete the command + */ spin_lock_irqsave(&io_request_lock, flags); (*workrequ->scsi_done) (workrequ); spin_unlock_irqrestore(&io_request_lock, flags); - curr_req[h][tarid] = 0; - workingu[h]--; - if (wide_idu[h] != 0) { + /* + * Clear it off the queue + */ + dev->id[target_id].curr_req = 0; + dev->working--; + /* + * Take it back wide + */ + if (dev->wide_idu != 0) { tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; j |= 0x01; outb(j, tmport); } - if (((last_cmd[h] != 0xff) || (quhdu[h] != quendu[h])) && - (in_snd[h] == 0)) + /* + * If there is stuff to send and nothing going then send it + */ + if (((dev->last_cmd != 0xff) || (dev->quhdu != dev->quendu)) && + (dev->in_snd == 0)) { send_s870(h); } - in_int[h] = 0; + dev->in_int = 0; return; } if (i == 0x4f) { @@ -319,23 +386,23 @@ i &= 0x0f; if (i == 0x09) { tmpcip = tmpcip + 4; - outl(prdaddru[h][tarid], tmpcip); + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip = tmpcip - 2; tmport = workportu + 0x10; outb(0x41, tmport); - dirctu[h][tarid] = 0x00; + dev->id[target_id].dirctu = 0x00; tmport += 0x08; outb(0x08, tmport); outb(0x09, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } if (i 
== 0x08) { tmpcip = tmpcip + 4; - outl(prdaddru[h][tarid], tmpcip); + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); @@ -344,11 +411,11 @@ outb(0x41, tmport); tmport += 0x05; outb((unsigned char) (inb(tmport) | 0x20), tmport); - dirctu[h][tarid] = 0x20; + dev->id[target_id].dirctu = 0x20; tmport += 0x03; outb(0x08, tmport); outb(0x01, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } tmport -= 0x07; @@ -357,20 +424,20 @@ } else { outb(0x46, tmport); } - dirctu[h][tarid] = 0x00; + dev->id[target_id].dirctu = 0x00; tmport += 0x02; outb(0x00, tmport++); outb(0x00, tmport++); outb(0x00, tmport++); tmport += 0x03; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } else { tmport = workportu + 0x17; inb(tmport); - workingu[h] = 0; - in_int[h] = 0; + dev->working = 0; + dev->in_int = 0; return; } } @@ -381,6 +448,7 @@ unsigned long flags; unsigned short int m; unsigned int tmport; + struct atp_unit *dev; for (h = 0; h <= admaxu; h++) { if (req_p->host == atp_host[h]) { @@ -394,9 +462,15 @@ done(req_p); return 0; } + dev = &atp_unit[h]; m = 1; m = m << req_p->target; - if ((m & active_idu[h]) == 0) { + + /* + * Fake a timeout for missing targets + */ + + if ((m & dev->active_idu) == 0) { req_p->result = 0x00040000; done(req_p); return 0; @@ -404,30 +478,36 @@ if (done) { req_p->scsi_done = done; } else { - printk("atp870u_queuecommand: done can't be NULL\n"); + printk(KERN_WARNING "atp870u_queuecommand: done can't be NULL\n"); req_p->result = 0; done(req_p); return 0; } - quendu[h]++; - if (quendu[h] >= qcnt) { - quendu[h] = 0; + /* + * Count new command + */ + dev->quendu++; + if (dev->quendu >= qcnt) { + dev->quendu = 0; } + /* + * Check queue state + */ wait_que_empty: - if (quhdu[h] == quendu[h]) { + if (dev->quhdu == dev->quendu) { goto wait_que_empty; } save_flags(flags); cli(); - querequ[h][quendu[h]] = req_p; - if (quendu[h] == 0) { + dev->querequ[dev->quendu] = req_p; + if (dev->quendu == 
0) { i = qcnt - 1; } else { - i = quendu[h] - 1; + i = dev->quendu - 1; } - tmport = ioportu[h] + 0x1c; + tmport = dev->ioport + 0x1c; restore_flags(flags); - if ((inb(tmport) == 0) && (in_int[h] == 0) && (in_snd[h] == 0)) { + if ((inb(tmport) == 0) && (dev->in_int == 0) && (dev->in_snd == 0)) { send_s870(h); } return 0; @@ -447,44 +527,45 @@ Scsi_Cmnd *workrequ; unsigned long flags; unsigned int i; - unsigned char j, tarid; + unsigned char j, target_id; unsigned char *prd; unsigned short int tmpcip, w; unsigned long l, bttl; unsigned int workportu; struct scatterlist *sgpnt; + struct atp_unit *dev = &atp_unit[h]; save_flags(flags); cli(); - if (in_snd[h] != 0) { + if (dev->in_snd != 0) { restore_flags(flags); return; } - in_snd[h] = 1; - if ((last_cmd[h] != 0xff) && ((last_cmd[h] & 0x40) != 0)) { - last_cmd[h] &= 0x0f; - workrequ = curr_req[h][last_cmd[h]]; + dev->in_snd = 1; + if ((dev->last_cmd != 0xff) && ((dev->last_cmd & 0x40) != 0)) { + dev->last_cmd &= 0x0f; + workrequ = dev->id[dev->last_cmd].curr_req; goto cmd_subp; } - workingu[h]++; - j = quhdu[h]; - quhdu[h]++; - if (quhdu[h] >= qcnt) { - quhdu[h] = 0; - } - workrequ = querequ[h][quhdu[h]]; - if (curr_req[h][workrequ->target] == 0) { - curr_req[h][workrequ->target] = workrequ; - last_cmd[h] = workrequ->target; + dev->working++; + j = dev->quhdu; + dev->quhdu++; + if (dev->quhdu >= qcnt) { + dev->quhdu = 0; + } + workrequ = dev->querequ[dev->quhdu]; + if (dev->id[workrequ->target].curr_req == 0) { + dev->id[workrequ->target].curr_req = workrequ; + dev->last_cmd = workrequ->target; goto cmd_subp; } - quhdu[h] = j; - workingu[h]--; - in_snd[h] = 0; + dev->quhdu = j; + dev->working--; + dev->in_snd = 0; restore_flags(flags); return; cmd_subp: - workportu = ioportu[h]; + workportu = dev->ioport; tmport = workportu + 0x1f; if ((inb(tmport) & 0xb0) != 0) { goto abortsnd; @@ -494,43 +575,63 @@ goto oktosend; } abortsnd: - last_cmd[h] |= 0x40; - in_snd[h] = 0; + dev->last_cmd |= 0x40; + dev->in_snd = 0; 
restore_flags(flags); return; oktosend: - memcpy(&ata_cdbu[h][0], &workrequ->cmnd[0], workrequ->cmd_len); - if (ata_cdbu[h][0] == 0x25) { + memcpy(&dev->ata_cdbu[0], &workrequ->cmnd[0], workrequ->cmd_len); + if (dev->ata_cdbu[0] == READ_CAPACITY) { if (workrequ->request_bufflen > 8) { workrequ->request_bufflen = 0x08; } } - if (ata_cdbu[h][0] == 0x12) { + /* + * Why limit this ???? + */ + if (dev->ata_cdbu[0] == INQUIRY) { if (workrequ->request_bufflen > 0x24) { workrequ->request_bufflen = 0x24; - ata_cdbu[h][4] = 0x24; + dev->ata_cdbu[4] = 0x24; } } + tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; - tarid = workrequ->target; + target_id = workrequ->target; + + /* + * Wide ? + */ w = 1; - w = w << tarid; - if ((w & wide_idu[h]) != 0) { + w = w << target_id; + if ((w & dev->wide_idu) != 0) { j |= 0x01; - } + } outb(j, tmport); + + /* + * Write the command + */ + tmport = workportu; outb(workrequ->cmd_len, tmport++); outb(0x2c, tmport++); outb(0xcf, tmport++); for (i = 0; i < workrequ->cmd_len; i++) { - outb(ata_cdbu[h][i], tmport++); + outb(dev->ata_cdbu[i], tmport++); } tmport = workportu + 0x0f; - outb(0x00, tmport); + outb(workrequ->lun, tmport); tmport += 0x02; - outb(devspu[h][tarid], tmport++); + /* + * Write the target + */ + outb(dev->id[target_id].devspu, tmport++); + + /* + * Figure out the transfer size + */ if (workrequ->use_sg) { l = 0; @@ -546,38 +647,54 @@ } else { l = workrequ->request_bufflen; } + /* + * Write transfer size + */ outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++); outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++); outb((unsigned char) (((unsigned char *) (&l))[0]), tmport++); - j = tarid; - last_lenu[h][j] = l; - tran_lenu[h][j] = 0; + j = target_id; + dev->id[j].last_lenu = l; + dev->id[j].tran_lenu = 0; + /* + * Flip the wide bits + */ if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } - if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) || - (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) { + /* 
+ * Check transfer direction + */ + if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) || + (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) { outb((unsigned char) (j | 0x20), tmport++); } else { outb(j, tmport++); } + outb((unsigned char)(inb(tmport) | 0x80),tmport); outb(0x80, tmport); tmport = workportu + 0x1c; - dirctu[h][tarid] = 0; + dev->id[target_id].dirctu = 0; if (l == 0) { if (inb(tmport) == 0) { tmport = workportu + 0x18; outb(0x08, tmport); } else { - last_cmd[h] |= 0x40; + dev->last_cmd |= 0x40; } - in_snd[h] = 0; + dev->in_snd = 0; restore_flags(flags); return; } - tmpcip = pciportu[h]; - prd = &prd_tableu[h][tarid][0]; - prd_posu[h][tarid] = prd; + tmpcip = dev->pciport; + prd = dev->id[target_id].prd_tableu; + dev->id[target_id].prd_posu = prd; + + /* + * Now write the request list. Either as scatter/gather or as + * a linear chain. + */ + if (workrequ->use_sg) { sgpnt = (struct scatterlist *) workrequ->request_buffer; @@ -590,6 +707,9 @@ } (unsigned short int) (((unsigned short int *) (prd))[i - 1]) = 0x8000; } else { + /* + * For a linear request write a chain of blocks + */ bttl = virt_to_bus(workrequ->request_buffer); l = workrequ->request_bufflen; i = 0; @@ -606,24 +726,24 @@ (unsigned long) (((unsigned long *) (prd))[i >> 1]) = bttl; } tmpcip = tmpcip + 4; - prdaddru[h][tarid] = virt_to_bus(&prd_tableu[h][tarid][0]); - outl(prdaddru[h][tarid], tmpcip); + dev->id[target_id].prdaddru = virt_to_bus(dev->id[target_id].prd_tableu); + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip = tmpcip - 2; - if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) || - (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) + if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) || + (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) { - dirctu[h][tarid] = 0x20; + dev->id[target_id].dirctu = 0x20; if (inb(tmport) == 0) { tmport = 
workportu + 0x18; outb(0x08, tmport); outb(0x01, tmpcip); } else { - last_cmd[h] |= 0x40; + dev->last_cmd |= 0x40; } - in_snd[h] = 0; + dev->in_snd = 0; restore_flags(flags); return; } @@ -633,9 +753,9 @@ outb(0x08, tmport); outb(0x09, tmpcip); } else { - last_cmd[h] |= 0x40; + dev->last_cmd |= 0x40; } - in_snd[h] = 0; + dev->in_snd = 0; restore_flags(flags); return; @@ -657,13 +777,13 @@ return SCpnt->result; } -unsigned char fun_scam(unsigned char host, unsigned short int *val) +unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val) { unsigned int tmport; unsigned short int i, k; unsigned char j; - tmport = ioportu[host] + 0x1c; + tmport = dev->ioport + 0x1c; outw(*val, tmport); FUN_D7: for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ @@ -706,32 +826,34 @@ unsigned long n; unsigned short int m, assignid_map, val; unsigned char mbuf[33], quintet[2]; - static unsigned char g2q_tab[8] = - {0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27}; + struct atp_unit *dev = &atp_unit[host]; + static unsigned char g2q_tab[8] = { + 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27 + }; for (i = 0; i < 0x10; i++) { mydlyu(0xffff); } - tmport = ioportu[host] + 1; + tmport = dev->ioport + 1; outb(0x08, tmport++); outb(0x7f, tmport); - tmport = ioportu[host] + 0x11; + tmport = dev->ioport + 0x11; outb(0x20, tmport); - if ((scam_on[host] & 0x40) == 0) { + if ((dev->scam_on & 0x40) == 0) { return; } m = 1; - m <<= host_idu[host]; + m <<= dev->host_idu; j = 16; - if (chip_veru[host] < 4) { + if (dev->chip_veru < 4) { m |= 0xff00; j = 8; } assignid_map = m; - tmport = ioportu[host] + 0x02; + tmport = dev->ioport + 0x02; outb(0x02, tmport++); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ outb(0, tmport++); outb(0, tmport++); @@ -746,7 +868,7 @@ if ((m & assignid_map) != 0) { continue; } - tmport = ioportu[host] + 0x0f; + tmport = dev->ioport + 0x0f; outb(0, tmport++); tmport += 0x02; outb(0, tmport++); @@ -758,14 +880,14 @@ k = i; } outb(k, tmport++); - tmport = 
ioportu[host] + 0x1b; - if (chip_veru[host] == 4) { + tmport = dev->ioport + 0x1b; + if (dev->chip_veru == 4) { outb((unsigned char) ((inb(tmport) & 0x0e) | 0x01), tmport); } else { outb((unsigned char) (inb(tmport) & 0x0e), tmport); } wait_rdyok: - tmport = ioportu[host] + 0x18; + tmport = dev->ioport + 0x18; outb(0x09, tmport); tmport += 0x07; @@ -776,22 +898,22 @@ if ((k == 0x85) || (k == 0x42)) { continue; } - tmport = ioportu[host] + 0x10; + tmport = dev->ioport + 0x10; outb(0x41, tmport); goto wait_rdyok; } assignid_map |= m; } - tmport = ioportu[host] + 0x02; + tmport = dev->ioport + 0x02; outb(0x7f, tmport); - tmport = ioportu[host] + 0x1b; + tmport = dev->ioport + 0x1b; outb(0x02, tmport); outb(0, 0x80); val = 0x0080; /* bsy */ - tmport = ioportu[host] + 0x1c; + tmport = dev->ioport + 0x1c; outw(val, tmport); val |= 0x0040; /* sel */ outw(val, tmport); @@ -836,7 +958,7 @@ if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */ outw(0, tmport--); outb(0, tmport); - tmport = ioportu[host] + 0x15; + tmport = dev->ioport + 0x15; outb(0, tmport); tmport += 0x03; outb(0x09, tmport); @@ -848,11 +970,11 @@ } val &= 0x00ff; /* synchronization */ val |= 0x3f00; - fun_scam(host, &val); + fun_scam(dev, &val); outb(3, 0x80); val &= 0x00ff; /* isolation */ val |= 0x2000; - fun_scam(host, &val); + fun_scam(dev, &val); outb(4, 0x80); i = 8; j = 0; @@ -863,7 +985,7 @@ outb(5, 0x80); val &= 0x00ff; /* get ID_STRING */ val |= 0x2000; - k = fun_scam(host, &val); + k = fun_scam(dev, &val); if ((k & 0x03) == 0) { goto TCM_5; } @@ -927,11 +1049,11 @@ val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */ m = quintet[0] << 8; val |= m; - fun_scam(host, &val); + fun_scam(dev, &val); val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */ m = quintet[1] << 8; val |= m; - fun_scam(host, &val); + fun_scam(dev, &val); goto TCM_SYNC; @@ -949,25 +1071,26 @@ static unsigned char synu[6] = {0x80, 1, 3, 1, 0x0c, 0x0e}; static unsigned char synw[6] = {0x80, 1, 3, 1, 0x0c, 0x07}; static unsigned char 
wide[6] = {0x80, 1, 2, 3, 1, 0}; + struct atp_unit *dev = &atp_unit[host]; sync_idu = 0; tmport = wkport + 0x3a; outb((unsigned char) (inb(tmport) | 0x10), tmport); for (i = 0; i < 16; i++) { - if ((chip_veru[host] != 4) && (i > 7)) { + if ((dev->chip_veru != 4) && (i > 7)) { break; } m = 1; m = m << i; - if ((m & active_idu[host]) != 0) { + if ((m & dev->active_idu) != 0) { continue; } - if (i == host_idu[host]) { - printk(" ID: %2d Host Adapter\n", host_idu[host]); + if (i == dev->host_idu) { + printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_idu); continue; } - if (chip_veru[host] == 4) { + if (dev->chip_veru == 4) { tmport = wkport + 0x1b; j = (inb(tmport) & 0x0e) | 0x01; outb(j, tmport); @@ -984,7 +1107,7 @@ tmport += 0x06; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); @@ -1003,7 +1126,7 @@ continue; } while (inb(tmport) != 0x8e); - active_idu[host] |= m; + dev->active_idu |= m; tmport = wkport + 0x10; outb(0x30, tmport); @@ -1033,7 +1156,7 @@ tmport += 0x07; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(inqd[6], tmport++); outb(inqd[7], tmport++); @@ -1046,7 +1169,7 @@ continue; } while (inb(tmport) != 0x8e); - if (chip_veru[host] == 4) { + if (dev->chip_veru == 4) { tmport = wkport + 0x1b; j = inb(tmport) & 0x0e; outb(j, tmport); @@ -1087,16 +1210,16 @@ } inq_ok: mbuf[36] = 0; - printk(" ID: %2d %s\n", i, &mbuf[8]); - devtypeu[host][i] = mbuf[0]; + printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); + dev->id[i].devtypeu = mbuf[0]; rmb = mbuf[1]; - if (chip_veru[host] != 4) { + if (dev->chip_veru != 4) { goto not_wide; } if ((mbuf[7] & 0x60) == 0) { goto not_wide; } - if ((global_map[host] & 0x20) == 0) { + if ((dev->global_map & 0x20) == 0) { goto not_wide; } tmport = wkport + 0x1b; @@ -1112,7 +1235,7 @@ tmport += 0x06; outb(0, tmport); tmport += 
0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); @@ -1238,16 +1361,16 @@ } m = 1; m = m << i; - wide_idu[host] |= m; + dev->wide_idu |= m; not_wide: - if ((devtypeu[host][i] == 0x00) || (devtypeu[host][i] == 0x07)) { + if ((dev->id[i].devtypeu == 0x00) || (dev->id[i].devtypeu == 0x07)) { goto set_sync; } continue; set_sync: tmport = wkport + 0x1b; j = inb(tmport) & 0x0e; - if ((m & wide_idu[host]) != 0) { + if ((m & dev->wide_idu) != 0) { j |= 0x01; } outb(j, tmport); @@ -1261,7 +1384,7 @@ tmport += 0x06; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); @@ -1289,10 +1412,10 @@ if (rmb != 0) { outb(synn[j++], tmport); } else { - if ((m & wide_idu[host]) != 0) { + if ((m & dev->wide_idu) != 0) { outb(synw[j++], tmport); } else { - if ((m & ultra_map[host]) != 0) { + if ((m & dev->ultra_map) != 0) { outb(synu[j++], tmport); } else { outb(synn[j++], tmport); @@ -1407,7 +1530,7 @@ if (mbuf[4] > 0x0c) { mbuf[4] = 0x0c; } - devspu[host][i] = mbuf[4]; + dev->id[i].devspu = mbuf[4]; if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; @@ -1426,7 +1549,7 @@ } j = 0x60; set_syn_ok: - devspu[host][i] = (devspu[host][i] & 0x0f) | j; + dev->id[i].devspu = (dev->id[i].devspu & 0x0f) | j; } tmport = wkport + 0x3a; outb((unsigned char) (inb(tmport) & 0xef), tmport); @@ -1439,124 +1562,124 @@ unsigned long flags; unsigned int base_io, error, tmport; unsigned short index = 0; - unsigned char pci_bus[3], pci_device_fn[3], chip_ver[3], host_id; + struct pci_dev *pdev[3]; + unsigned char chip_ver[3], host_id; struct Scsi_Host *shpnt = NULL; + int tmpcnt = 0; int count = 0; - static unsigned short devid[7] = - {0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0}; - static struct pci_dev *pdev = NULL, *acard_pdev[3]; + int result; + + static unsigned short 
devid[7] = { + 0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0 + }; - printk("aec671x_detect: \n"); + printk(KERN_INFO "aec671x_detect: \n"); if (!pci_present()) { - printk(" NO BIOS32 SUPPORT.\n"); + printk(KERN_INFO" NO PCI SUPPORT.\n"); return count; } tpnt->proc_name = "atp870u"; for (h = 0; h < 2; h++) { - active_idu[h] = 0; - wide_idu[h] = 0; - host_idu[h] = 0x07; - quhdu[h] = 0; - quendu[h] = 0; - pci_bus[h] = 0; - pci_device_fn[h] = 0xff; - chip_ver[h] = 0; - last_cmd[h] = 0xff; - in_snd[h] = 0; - in_int[h] = 0; + struct atp_unit *dev = &atp_unit[h]; + for(k=0;k<16;k++) + { + dev->id[k].prd_tableu = kmalloc(1024, GFP_KERNEL); + dev->id[k].devspu=0x20; + dev->id[k].devtypeu = 0; + dev->id[k].curr_req = NULL; + } + dev->active_idu = 0; + dev->wide_idu = 0; + dev->host_idu = 0x07; + dev->quhdu = 0; + dev->quendu = 0; + pdev[h]=NULL; + pdev[2]=NULL; + dev->chip_veru = 0; + dev->last_cmd = 0xff; + dev->in_snd = 0; + dev->in_int = 0; for (k = 0; k < qcnt; k++) { - querequ[h][k] = 0; + dev->querequ[k] = 0; } for (k = 0; k < 16; k++) { - curr_req[h][k] = 0; + dev->id[k].curr_req = 0; } } h = 0; while (devid[h] != 0) { - pdev = pci_find_device(0x1191, devid[h], pdev); - if (pdev == NULL) { + pdev[2] = pci_find_device(0x1191, devid[h], pdev[2]); + if (pdev[2] == NULL) { h++; index = 0; continue; } chip_ver[2] = 0; - /* To avoid messing with the things below... 
*/ - acard_pdev[2] = pdev; - pci_device_fn[2] = pdev->devfn; - pci_bus[2] = pdev->bus->number; - if (devid[h] == 0x8002) { - error = pci_read_config_byte(pdev, 0x08, &chip_ver[2]); + error = pci_read_config_byte(pdev[2], 0x08, &chip_ver[2]); if (chip_ver[2] < 2) { goto nxt_devfn; } } - if (devid[h] == 0x8010) { + if (devid[h] == 0x8010 || devid[h] == 0x8050) { chip_ver[2] = 0x04; } - if (pci_device_fn[2] < pci_device_fn[0]) { - acard_pdev[1] = acard_pdev[0]; - pci_bus[1] = pci_bus[0]; - pci_device_fn[1] = pci_device_fn[0]; - chip_ver[1] = chip_ver[0]; - acard_pdev[0] = acard_pdev[2]; - pci_bus[0] = pci_bus[2]; - pci_device_fn[0] = pci_device_fn[2]; - chip_ver[0] = chip_ver[2]; - } else if (pci_device_fn[2] < pci_device_fn[1]) { - acard_pdev[1] = acard_pdev[2]; - pci_bus[1] = pci_bus[2]; - pci_device_fn[1] = pci_device_fn[2]; - chip_ver[1] = chip_ver[2]; - } + pdev[tmpcnt] = pdev[2]; + chip_ver[tmpcnt] = chip_ver[2]; + tmpcnt++; nxt_devfn: index++; if (index > 3) { index = 0; h++; } + if(tmpcnt>1) + break; } for (h = 0; h < 2; h++) { - if (pci_device_fn[h] == 0xff) { + struct atp_unit *dev=&atp_unit[h]; + if (pdev[h]==NULL) { return count; } - pdev = acard_pdev[h]; - pdev->devfn = pci_device_fn[h]; - pdev->bus->number = pci_bus[h]; /* Found an atp870u/w. 
*/ - error = pci_read_config_dword(pdev, 0x10, &base_io); - error += pci_read_config_byte(pdev, 0x3c, &irq); - error += pci_read_config_byte(pdev, 0x49, &host_id); + base_io = pdev[h]->resource[0].start; + irq = pdev[h]->irq; + error = pci_read_config_byte(pdev[h],0x49,&host_id); base_io &= 0xfffffff8; - printk(" ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d IO:%x, IRQ:%d.\n" + + if (check_region(base_io,0x40) != 0) + { + return 0; + } + printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d IO:%x, IRQ:%d.\n" ,h, base_io, irq); - ioportu[h] = base_io; - pciportu[h] = base_io + 0x20; + dev->ioport = base_io; + dev->pciport = base_io + 0x20; irqnumu[h] = irq; host_id &= 0x07; - host_idu[h] = host_id; - chip_veru[h] = chip_ver[h]; + dev->host_idu = host_id; + dev->chip_veru = chip_ver[h]; tmport = base_io + 0x22; - scam_on[h] = inb(tmport); + dev->scam_on = inb(tmport); tmport += 0x0b; - global_map[h] = inb(tmport++); - ultra_map[h] = inw(tmport); - if (ultra_map[h] == 0) { - scam_on[h] = 0x00; - global_map[h] = 0x20; - ultra_map[h] = 0xffff; + dev->global_map = inb(tmport++); + dev->ultra_map = inw(tmport); + if (dev->ultra_map == 0) { + dev->scam_on = 0x00; + dev->global_map = 0x20; + dev->ultra_map = 0xffff; } shpnt = scsi_register(tpnt, 4); save_flags(flags); cli(); - if (request_irq(irq, atp870u_intr_handle, 0, "atp870u", NULL)) { - printk("Unable to allocate IRQ for Acard controller.\n"); + if (request_irq(irq, atp870u_intr_handle, SA_SHIRQ, "atp870u", dev)) { + printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n"); goto unregister; } tmport = base_io + 0x3a; @@ -1584,9 +1707,11 @@ is870(h, base_io); tmport = base_io + 0x3a; outb((inb(tmport) & 0xef), tmport); + tmport++; + outb((inb(tmport) | 0x20),tmport); atp_host[h] = shpnt; - if (chip_ver[h] == 4) { + if (dev->chip_veru == 4) { shpnt->max_id = 16; } shpnt->this_id = host_id; @@ -1617,7 +1742,7 @@ { unsigned char h, j; unsigned int tmport; -/* printk(" atp870u_abort: \n"); */ 
+ struct atp_unit *dev; for (h = 0; h <= admaxu; h++) { if (SCpnt->host == atp_host[h]) { goto find_adp; @@ -1625,20 +1750,23 @@ } panic("Abort host not found !"); find_adp: - printk(" workingu=%x last_cmd=%x ", workingu[h], last_cmd[h]); - printk(" quhdu=%x quendu=%x ", quhdu[h], quendu[h]); - tmport = ioportu[h]; + dev=&atp_unit[h]; + printk(KERN_DEBUG "working=%x last_cmd=%x ", dev->working, dev->last_cmd); + printk(" quhdu=%x quendu=%x ", dev->quhdu, dev->quendu); + tmport = dev->ioport; for (j = 0; j < 0x17; j++) { printk(" r%2x=%2x", j, inb(tmport++)); } tmport += 0x05; printk(" r1c=%2x", inb(tmport)); tmport += 0x03; - printk(" r1f=%2x in_snd=%2x ", inb(tmport), in_snd[h]); + printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd); tmport++; printk(" r20=%2x", inb(tmport)); tmport += 0x02; - printk(" r22=%2x \n", inb(tmport)); + printk(" r22=%2x", inb(tmport)); + tmport += 0x18; + printk(" r3a=%2x \n",inb(tmport)); return (SCSI_ABORT_SNOOZE); } @@ -1648,7 +1776,6 @@ /* * See if a bus reset was suggested. 
*/ -/* printk("atp870u_reset: \n"); */ for (h = 0; h <= admaxu; h++) { if (SCpnt->host == atp_host[h]) { goto find_host; @@ -1658,9 +1785,9 @@ find_host: /* SCpnt->result = 0x00080000; SCpnt->scsi_done(SCpnt); - workingu[h]=0; - quhdu[h]=0; - quendu[h]=0; + dev->working=0; + dev->quhdu=0; + dev->quendu=0; return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET); */ return (SCSI_RESET_SNOOZE); } @@ -1669,14 +1796,14 @@ { static char buffer[128]; - strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V1.0 "); + strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V2.0+ac "); return buffer; } int atp870u_set_info(char *buffer, int length, struct Scsi_Host *HBAptr) { - return (-ENOSYS); /* Currently this is a no-op */ + return -ENOSYS; /* Currently this is a no-op */ } #define BLS buffer + len + size @@ -1714,7 +1841,7 @@ if (offset == 0) { memset(buff, 0, sizeof(buff)); } - size += sprintf(BLS, "ACARD AEC-671X Driver Version: 1.0\n"); + size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.0+ac\n"); len += size; pos = begin + len; size = 0; @@ -1727,7 +1854,7 @@ pos = begin + len; size = 0; - stop_output: +stop_output: *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); /* Start slop */ if (len > length) { @@ -1756,6 +1883,26 @@ ip[2] = cylinders; return 0; +} + + +int atp870u_release (struct Scsi_Host *pshost) +{ + int h; + for (h = 0; h <= admaxu; h++) + { + if (pshost == atp_host[h]) { + int k; + free_irq (pshost->irq, &atp_unit[h]); + release_region (pshost->io_port, pshost->n_io_port); + scsi_unregister(pshost); + for(k=0;k<16;k++) + kfree(atp_unit[h].id[k].prd_tableu); + return 0; + } + } + panic("atp870u: bad scsi host passed.\n"); + } #ifdef MODULE diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/atp870u.h linux/drivers/scsi/atp870u.h --- v2.3.31/linux/drivers/scsi/atp870u.h Thu Nov 11 20:11:45 1999 +++ linux/drivers/scsi/atp870u.h Sun Dec 12 23:02:23 1999 @@ -24,10 +24,11 @@ int 
atp870u_abort(Scsi_Cmnd *); int atp870u_reset(Scsi_Cmnd *, unsigned int); int atp870u_biosparam(Disk *, kdev_t, int *); +int atp870u_release(struct Scsi_Host *); void send_s870(unsigned char); -#define qcnt 32 -#define ATP870U_SCATTER 127 +#define qcnt 32 +#define ATP870U_SCATTER 128 #define ATP870U_CMDLUN 1 #ifndef NULL @@ -38,31 +39,33 @@ extern int atp870u_proc_info(char *, char **, off_t, int, int, int); -#define ATP870U { \ - proc_name: "atp870u", \ - proc_info: atp870u_proc_info, \ - name: NULL, \ - detect: atp870u_detect, \ - release: NULL, \ - info: atp870u_info, \ - command: atp870u_command, \ - queuecommand: atp870u_queuecommand, \ - eh_strategy_handler: NULL, \ - eh_abort_handler: NULL, \ - eh_device_reset_handler: NULL, \ - eh_bus_reset_handler: NULL, \ - eh_host_reset_handler: NULL, \ - abort: atp870u_abort, \ - reset: atp870u_reset, \ - slave_attach: NULL, \ - bios_param: atp870u_biosparam, \ - can_queue: qcnt, \ - this_id: 1, \ - sg_tablesize: ATP870U_SCATTER, \ - cmd_per_lun: ATP870U_CMDLUN, \ - present: 0, \ - unchecked_isa_dma: 0, \ - use_clustering: ENABLE_CLUSTERING, \ - use_new_eh_code: 0 \ +#define ATP870U { \ + next: NULL, \ + module: NULL, \ + proc_info: atp870u_proc_info, \ + name: NULL, \ + detect: atp870u_detect, \ + release: atp870u_release, \ + info: atp870u_info, \ + command: atp870u_command, \ + queuecommand: atp870u_queuecommand, \ + eh_strategy_handler: NULL, \ + eh_abort_handler: NULL, \ + eh_device_reset_handler: NULL, \ + eh_bus_reset_handler: NULL, \ + eh_host_reset_handler: NULL, \ + abort: atp870u_abort, \ + reset: atp870u_reset, \ + slave_attach: NULL, \ + bios_param: atp870u_biosparam, \ + can_queue: qcnt, /* max simultaneous cmds */\ + this_id: 7, /* scsi id of host adapter */\ + sg_tablesize: ATP870U_SCATTER, /* max scatter-gather cmds */\ + cmd_per_lun: ATP870U_CMDLUN, /* cmds per lun (linked cmds) */\ + present: 0, /* number of 7xxx's present */\ + unchecked_isa_dma: 0, /* no memory DMA restrictions */\ + use_clustering: 
ENABLE_CLUSTERING, \ + use_new_eh_code: 0 \ } + #endif diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/eata.c linux/drivers/scsi/eata.c --- v2.3.31/linux/drivers/scsi/eata.c Thu Nov 11 20:11:46 1999 +++ linux/drivers/scsi/eata.c Sun Dec 12 23:04:20 1999 @@ -1049,7 +1049,7 @@ sh[j]->unchecked_isa_dma = FALSE; else { unsigned long flags; - sh[j]->wish_block = TRUE; +//FIXME// sh[j]->wish_block = TRUE; sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/eata_dma.c linux/drivers/scsi/eata_dma.c --- v2.3.31/linux/drivers/scsi/eata_dma.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/eata_dma.c Sun Dec 12 23:04:20 1999 @@ -1297,7 +1297,7 @@ else hd->primary = TRUE; - sh->wish_block = FALSE; +//FIXME// sh->wish_block = FALSE; if (hd->bustype != IS_ISA) { sh->unchecked_isa_dma = FALSE; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/g_NCR5380.c linux/drivers/scsi/g_NCR5380.c --- v2.3.31/linux/drivers/scsi/g_NCR5380.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/g_NCR5380.c Sun Dec 12 23:02:23 1999 @@ -16,6 +16,9 @@ * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl * + * Added ISAPNP support for DTC436 adapters, + * Thomas Sailer, sailer@ife.ee.ethz.ch + * * ALPHA RELEASE 1. 
* * For more information, please consult @@ -117,7 +120,8 @@ #include "sd.h" #include #include -#include +#include +#include #define NCR_NOT_SET 0 static int ncr_irq=NCR_NOT_SET; @@ -280,6 +284,36 @@ else if (dtc_3181e != NCR_NOT_SET) overrides[0].board=BOARD_DTC3181E; + if (!current_override && isapnp_present()) { + struct pci_dev *dev = NULL; + count = 0; + while ((dev = isapnp_find_dev(NULL, ISAPNP_VENDOR('D','T','C'), ISAPNP_FUNCTION(0x436e), dev))) { + if (count >= NO_OVERRIDES) + break; + if (!dev->active && dev->prepare(dev) < 0) { + printk(KERN_ERR "dtc436e probe: prepare failed\n"); + continue; + } + if (!(dev->resource[0].flags & IORESOURCE_IO)) + continue; + if (!dev->active && dev->activate(dev) < 0) { + printk(KERN_ERR "dtc436e probe: activate failed\n"); + continue; + } + if (dev->irq_resource[0].flags & IORESOURCE_IRQ) + overrides[count].irq=dev->irq_resource[0].start; + else + overrides[count].irq=IRQ_NONE; + if (dev->dma_resource[0].flags & IORESOURCE_DMA) + overrides[count].dma=dev->dma_resource[0].start; + else + overrides[count].dma=DMA_NONE; + overrides[count].NCR5380_map_name=(NCR5380_map_type)dev->resource[0].start; + overrides[count].board=BOARD_DTC3181E; + count++; + } + } + tpnt->proc_name = "g_NCR5380"; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { @@ -304,6 +338,7 @@ break; } +#ifdef CONFIG_SCSI_G_NCR5380_PORT if (ports) { /* wakeup sequence for the NCR53C400A and DTC3181E*/ @@ -343,7 +378,13 @@ request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380"); - +#else + if(check_mem_region(overrides[current_override].NCR5380_map_name, + NCR5380_region_size)) + continue; + request_mem_region(overrides[current_override].NCR5380_map_name, + NCR5380_region_size, "ncr5380"); +#endif instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name; @@ -393,7 +434,11 @@ NCR5380_setup(instance); +#ifdef 
CONFIG_SCSI_G_NCR5380_PORT release_region(instance->NCR5380_instance_name, NCR5380_region_size); +#else + release_mem_region(instance->NCR5380_instance_name, NCR5380_region_size); +#endif if (instance->irq != IRQ_NONE) free_irq(instance->irq, NULL); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/g_NCR5380.h linux/drivers/scsi/g_NCR5380.h --- v2.3.31/linux/drivers/scsi/g_NCR5380.h Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/g_NCR5380.h Wed Dec 8 15:17:55 1999 @@ -123,7 +123,7 @@ #define NCR5380_map_config memory -#define NCR5380_map_type volatile unsigned char* +#define NCR5380_map_type unsigned long #define NCR5380_map_name base diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/gdth_proc.c linux/drivers/scsi/gdth_proc.c --- v2.3.31/linux/drivers/scsi/gdth_proc.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/gdth_proc.c Sun Dec 12 23:04:20 1999 @@ -934,10 +934,8 @@ scp->request.rq_status = RQ_SCSI_BUSY; scp->request.sem = &sem; scp->SCp.this_residual = IOCTL_PRI; - GDTH_LOCK_SCSI_DOCMD(); scsi_do_cmd(scp, cmnd, gdtcmd, sizeof(gdth_cmd_str), gdth_scsi_done, timeout*HZ, 1); - GDTH_UNLOCK_SCSI_DOCMD(); down(&sem); } diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/hosts.c linux/drivers/scsi/hosts.c --- v2.3.31/linux/drivers/scsi/hosts.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/hosts.c Sun Dec 12 23:04:20 1999 @@ -696,8 +696,6 @@ atomic_set(&retval->host_active,0); retval->host_busy = 0; retval->host_failed = 0; - retval->block = NULL; - retval->wish_block = 0; if(j > 0xffff) panic("Too many extra bytes requested\n"); retval->extra_bytes = j; retval->loaded_as_module = scsi_loadable_module_flag; @@ -723,11 +721,8 @@ retval->ehandler = NULL; /* Initial value until the thing starts up. */ retval->eh_notify = NULL; /* Who we notify when we exit. */ - /* - * Initialize the fields used for mid-level queueing. 
- */ - retval->pending_commands = NULL; - retval->host_busy = FALSE; + + retval->host_blocked = FALSE; #ifdef DEBUG printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j); @@ -783,6 +778,7 @@ kernel_thread((int (*)(void *))scsi_error_handler, (void *) shpnt, 0); + /* * Now wait for the kernel error thread to initialize itself * as it might be needed when we scan the bus. @@ -873,7 +869,6 @@ printk ("scsi : %d host%s.\n", next_scsi_host, (next_scsi_host == 1) ? "" : "s"); - scsi_make_blocked_list(); /* Now attach the high level drivers */ #ifdef CONFIG_BLK_DEV_SD diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/hosts.h linux/drivers/scsi/hosts.h --- v2.3.31/linux/drivers/scsi/hosts.h Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/hosts.h Tue Dec 14 00:56:43 1999 @@ -1,6 +1,6 @@ /* * hosts.h Copyright (C) 1992 Drew Eckhardt - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale * * mid to low-level SCSI driver interface header * Initial versions: Drew Eckhardt @@ -8,7 +8,7 @@ * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. * @@ -301,13 +301,7 @@ */ struct Scsi_Host * next; Scsi_Device * host_queue; - /* - * List of commands that have been rejected because either the host - * or the device was busy. These need to be retried relatively quickly, - * but we need to hold onto it for a short period until the host/device - * is available. - */ - Scsi_Cmnd * pending_commands; + struct task_struct * ehandler; /* Error recovery thread. */ struct semaphore * eh_wait; /* The error recovery thread waits on @@ -340,13 +334,6 @@ unsigned int max_lun; unsigned int max_channel; - /* - * Pointer to a circularly linked list - this indicates the hosts - * that should be locked out of performing I/O while we have an active - * command on this host. 
- */ - struct Scsi_Host * block; - unsigned wish_block:1; /* These parameters should be set by the detect routine */ unsigned long base; @@ -391,9 +378,14 @@ * Host uses correct SCSI ordering not PC ordering. The bit is * set for the minority of drivers whose authors actually read the spec ;) */ - unsigned reverse_ordering:1; - + + /* + * Indicates that one or more devices on this host were starved, and + * when the device becomes less busy that we need to feed them. + */ + unsigned some_device_starved:1; + void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *); /* @@ -412,7 +404,6 @@ extern void build_proc_dir_entries(Scsi_Host_Template *); - /* * scsi_init initializes the scsi hosts. */ @@ -456,6 +447,7 @@ void (*finish)(void); /* Perform initialization after attachment */ int (*attach)(Scsi_Device *); /* Attach devices to arrays */ void (*detach)(Scsi_Device *); + int (*init_command)(Scsi_Cmnd *); /* Used by new queueing code. */ }; extern struct Scsi_Device_Template sd_template; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/ibmmca.c linux/drivers/scsi/ibmmca.c --- v2.3.31/linux/drivers/scsi/ibmmca.c Thu Nov 11 20:11:47 1999 +++ linux/drivers/scsi/ibmmca.c Sun Dec 12 23:04:20 1999 @@ -747,7 +747,7 @@ static int device_inquiry(int host_index, int ldn) { int retries; - Scsi_Cmnd cmd; + Scsi_Cmnd *cmd; struct im_scb *scb; struct im_tsb *tsb; unsigned char *buf; @@ -757,12 +757,18 @@ buf = (unsigned char *)(&(ld(host_index)[ldn].buf)); ld(host_index)[ldn].tsb.dev_status = 0; /* prepare stusblock */ + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL|GFP_DMA); + if(cmd==NULL) + { + printk(KERN_ERR "ibmmca: out of memory for inquiry.\n"); + return 0; + } if (bypass_controller) { /* fill the commonly known field for device-inquiry SCSI cmnd */ - cmd.cmd_len = 6; - memset (&(cmd.cmnd), 0x0, sizeof(char) * cmd.cmd_len); - cmd.cmnd[0] = INQUIRY; /* device inquiry */ - cmd.cmnd[4] = 0xff; /* return buffer size = 255 */ + cmd->cmd_len = 6; + memset (&(cmd->cmnd), 
0x0, sizeof(char) * cmd->cmd_len); + cmd->cmnd[0] = INQUIRY; /* device inquiry */ + cmd->cmnd[4] = 0xff; /* return buffer size = 255 */ } for (retries = 0; retries < 3; retries++) { @@ -770,8 +776,8 @@ { /* bypass the hardware integrated command set */ scb->command = IM_OTHER_SCSI_CMD_CMD; scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT; - scb->u1.scsi_cmd_length = cmd.cmd_len; - memcpy (scb->u2.scsi_command, &(cmd.cmnd), cmd.cmd_len); + scb->u1.scsi_cmd_length = cmd->cmd_len; + memcpy (scb->u2.scsi_command, &(cmd->cmnd), cmd->cmd_len); last_scsi_command(host_index)[ldn] = INQUIRY; last_scsi_type(host_index)[ldn] = IM_SCB; } @@ -800,6 +806,7 @@ return 1; } } + kfree(cmd); /*if all three retries failed, return "no device at this ldn" */ if (retries >= 3) @@ -1406,7 +1413,7 @@ } else if (special == INTEGRATED_SCSI) { /* if the integrated subsystem has been found automatically: */ - len += sprintf (buf + len, "Adapter cathegory: integrated\n"); + len += sprintf (buf + len, "Adapter category: integrated\n"); len += sprintf (buf + len, "Chip revision level: %d\n", ((pos2 & 0xf0) >> 4)); len += sprintf (buf + len, "Chip status: %s\n", @@ -1417,7 +1424,7 @@ else if ((special>=0)&& (special<(sizeof(subsys_list)/sizeof(struct subsys_list_struct)))) { /* if the subsystem is a slot adapter */ - len += sprintf (buf + len, "Adapter cathegory: slot-card\n"); + len += sprintf (buf + len, "Adapter category: slot-card\n"); len += sprintf (buf + len, "Chip revision level: %d\n", ((pos2 & 0xf0) >> 4)); len += sprintf (buf + len, "Chip status: %s\n", @@ -1427,14 +1434,14 @@ } else { - len += sprintf (buf + len, "Adapter cathegory: unknown\n"); + len += sprintf (buf + len, "Adapter category: unknown\n"); } /* common subsystem information to write to the slotn file */ len += sprintf (buf + len, "Subsystem PUN: %d\n", shpnt->this_id); len += sprintf (buf + len, "I/O base address range: 0x%x-0x%x", (unsigned int)(shpnt->io_port), (unsigned int)(shpnt->io_port+7)); - /* Now 
make sure, the bufferlength is devideable by 4 to avoid + /* Now make sure, the bufferlength is divisible by 4 to avoid * paging problems of the buffer. */ while ( len % sizeof( int ) != ( sizeof ( int ) - 1 ) ) { @@ -1483,7 +1490,7 @@ ((struct ibmmca_hostdata *)shpnt->hostdata)->_pos3 = 0; ((struct ibmmca_hostdata *)shpnt->hostdata)->_special = FORCED_DETECTION; - mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter"); + mca_set_adapter_name(MCA_INTEGSCSI, "forcibly detected SCSI Adapter"); mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt); mca_mark_as_used(MCA_INTEGSCSI); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/ide-scsi.c linux/drivers/scsi/ide-scsi.c --- v2.3.31/linux/drivers/scsi/ide-scsi.c Thu Nov 18 20:25:37 1999 +++ linux/drivers/scsi/ide-scsi.c Wed Dec 8 15:17:55 1999 @@ -602,11 +602,14 @@ { struct Scsi_Host *host; int id; + int last_lun = 0; host_template->proc_name = "ide-scsi"; host = scsi_register(host_template, 0); - for (id = 0; id < MAX_HWIFS * MAX_DRIVES && idescsi_drives[id]; id++); + for (id = 0; id < MAX_HWIFS * MAX_DRIVES && idescsi_drives[id]; id++) + last_lun = IDE_MAX(last_lun, idescsi_drives[id]->last_lun); host->max_id = id; + host->max_lun = last_lun + 1; host->can_queue = host->cmd_per_lun * id; return 1; } diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/inia100.c linux/drivers/scsi/inia100.c --- v2.3.31/linux/drivers/scsi/inia100.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/scsi/inia100.c Sun Dec 12 23:30:55 1999 @@ -351,6 +351,7 @@ pHCB->pSRB_head = NULL; /* Initial SRB save queue */ pHCB->pSRB_tail = NULL; /* Initial SRB save queue */ pHCB->pSRB_lock = SPIN_LOCK_UNLOCKED; /* SRB save queue lock */ + pHCB->BitAllocFlagLock = SPIN_LOCK_UNLOCKED; /* Get total memory needed for SCB */ sz = orc_num_scb * sizeof(ORC_SCB); if ((pHCB->HCS_virScbArray = (PVOID) kmalloc(sz, GFP_ATOMIC | GFP_DMA)) == NULL) { diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/ips.c 
linux/drivers/scsi/ips.c --- v2.3.31/linux/drivers/scsi/ips.c Thu Nov 11 20:11:47 1999 +++ linux/drivers/scsi/ips.c Sun Dec 12 23:04:20 1999 @@ -396,7 +396,7 @@ sh->cmd_per_lun = sh->hostt->cmd_per_lun; sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; sh->use_clustering = sh->hostt->use_clustering; - sh->wish_block = FALSE; +//FIXME// sh->wish_block = FALSE; /* Store info in HA structure */ ha->io_addr = io_addr; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/megaraid.c linux/drivers/scsi/megaraid.c --- v2.3.31/linux/drivers/scsi/megaraid.c Tue Dec 7 09:32:45 1999 +++ linux/drivers/scsi/megaraid.c Sun Dec 12 23:04:20 1999 @@ -287,9 +287,9 @@ static Scsi_Cmnd *qCompleted = NULL; #if SERDEBUG -volatile static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED; #endif -volatile static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED; #if SERDEBUG static char strbuf[MAX_SERBUF + 1]; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi.c linux/drivers/scsi/scsi.c --- v2.3.31/linux/drivers/scsi/scsi.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/scsi/scsi.c Mon Dec 13 14:08:40 1999 @@ -132,7 +132,8 @@ unsigned long scsi_pid = 0; Scsi_Cmnd *last_cmnd = NULL; /* Command groups 3 and 4 are reserved and should never be used. 
*/ -const unsigned char scsi_command_size[8] = { +const unsigned char scsi_command_size[8] = +{ 6, 10, 10, 12, 12, 12, 10, 10 }; @@ -278,9 +279,9 @@ {"REGAL", "CDC-4X", "*", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"NAKAMICH", "MJ-4.8S", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, {"NAKAMICH", "MJ-5.16S", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, - {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, - {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, - {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, {"EMULEX", "MD21/S2 ESDI", "*", BLIST_SINGLELUN}, {"CANON", "IPUBJD", "*", BLIST_SPARSELUN}, {"nCipher", "Fastness Crypto", "*", BLIST_FORCELUN}, @@ -291,6 +292,8 @@ {"MATSHITA","PD-2 LF-D100","*", BLIST_GHOST}, {"HITACHI", "GF-1050","*", BLIST_GHOST}, /* Hitachi SCSI DVD-RAM */ {"TOSHIBA","CDROM","*", BLIST_ISROM}, + {"Toshiba","DVD-RAM SD-W1101","*", BLIST_GHOST}, + {"Toshiba","DVD-RAM SD-W1111","*", BLIST_GHOST}, /* * Must be at end of list... @@ -322,87 +325,6 @@ return 0; } -/* - * Function: scsi_make_blocked_list - * - * Purpose: Build linked list of hosts that require blocking. - * - * Arguments: None. - * - * Returns: Nothing - * - * Notes: Blocking is sort of a hack that is used to prevent more than one - * host adapter from being active at one time. This is used in cases - * where the ISA bus becomes unreliable if you have more than one - * host adapter really pumping data through. - * - * We spent a lot of time examining the problem, and I *believe* that - * the problem is bus related as opposed to being a driver bug. - * - * The blocked list is used as part of the synchronization object - * that we use to ensure that only one host is active at one time. 
- * I (ERY) would like to make this go away someday, but this would - * require that we have a recursive mutex object. - */ - -void scsi_make_blocked_list(void) -{ - int block_count = 0, index; - struct Scsi_Host *sh[128], *shpnt; - - /* - * Create a circular linked list from the scsi hosts which have - * the "wish_block" field in the Scsi_Host structure set. - * The blocked list should include all the scsi hosts using ISA DMA. - * In some systems, using two dma channels simultaneously causes - * unpredictable results. - * Among the scsi hosts in the blocked list, only one host at a time - * is allowed to have active commands queued. The transition from - * one active host to the next one is allowed only when host_busy == 0 - * for the active host (which implies host_busy == 0 for all the hosts - * in the list). Moreover for block devices the transition to a new - * active host is allowed only when a request is completed, since a - * block device request can be divided into multiple scsi commands - * (when there are few sg lists or clustering is disabled). - * - * (DB, 4 Feb 1995) - */ - - - host_active = NULL; - - for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { - -#if 0 - /* - * Is this is a candidate for the blocked list? - * Useful to put into the blocked list all the hosts whose driver - * does not know about the host->block feature. 
- */ - if (shpnt->unchecked_isa_dma) - shpnt->wish_block = 1; -#endif - - if (shpnt->wish_block) - sh[block_count++] = shpnt; - } - - if (block_count == 1) - sh[0]->block = NULL; - - else if (block_count > 1) { - - for (index = 0; index < block_count - 1; index++) { - sh[index]->block = sh[index + 1]; - printk("scsi%d : added to blocked host list.\n", - sh[index]->host_no); - } - - sh[block_count - 1]->block = sh[0]; - printk("scsi%d : added to blocked host list.\n", - sh[index]->host_no); - } -} static void scan_scsis_done(Scsi_Cmnd * SCpnt) { @@ -414,10 +336,11 @@ up(SCpnt->request.sem); } +#ifdef MODULE MODULE_PARM(scsi_logging_level, "i"); MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero"); -#ifndef MODULE +#else static int __init scsi_logging_setup(char *str) { @@ -443,10 +366,12 @@ static int max_scsi_luns = 1; #endif +#ifdef MODULE + MODULE_PARM(max_scsi_luns, "i"); MODULE_PARM_DESC(max_scsi_luns, "last scsi LUN (should be between 1 and 8)"); -#ifndef MODULE +#else static int __init scsi_luns_setup(char *str) { @@ -474,15 +399,12 @@ void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *), int timeout, int retries) { - unsigned long flags; DECLARE_MUTEX_LOCKED(sem); SCpnt->request.sem = &sem; SCpnt->request.rq_status = RQ_SCSI_BUSY; - spin_lock_irqsave(&io_request_lock, flags); scsi_do_cmd (SCpnt, (void *) cmnd, buffer, bufflen, done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); down (&sem); SCpnt->request.sem = NULL; } @@ -519,6 +441,16 @@ SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof(Scsi_Device), GFP_ATOMIC); if (SDpnt) { + /* + * Register the queue for the device. All I/O requests will come + * in through here. We also need to register a pointer to + * ourselves, since the queue handler won't know what device + * the queue actually represents. We could look it up, but it + * is pointless work. 
+ */ + blk_init_queue(&SDpnt->request_queue, scsi_request_fn); + blk_queue_headactive(&SDpnt->request_queue, 0); + SDpnt->request_queue.queuedata = (void *) SDpnt; /* Make sure we have something that is valid for DMA purposes */ scsi_result = ((!shpnt->unchecked_isa_dma) ? &scsi_result0[0] : scsi_init_malloc(512, GFP_DMA)); @@ -536,6 +468,8 @@ SDpnt->host = shpnt; SDpnt->online = TRUE; + initialize_merge_fn(SDpnt); + init_waitqueue_head(&SDpnt->device_wait); /* @@ -581,7 +515,6 @@ if (sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init) (); - oldSDpnt->scsi_request_fn = NULL; for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) { if (sdtpnt->attach) { (*sdtpnt->attach) (oldSDpnt); @@ -727,6 +660,7 @@ SDpnt->borken = 1; SDpnt->was_reset = 0; SDpnt->expecting_cc_ua = 0; + SDpnt->starved = 0; scsi_cmd[0] = TEST_UNIT_READY; scsi_cmd[1] = lun << 5; @@ -948,6 +882,19 @@ return 0; } /* + * Register the queue for the device. All I/O requests will come + * in through here. We also need to register a pointer to + * ourselves, since the queue handler won't know what device + * the queue actually represents. We could look it up, but it + * is pointless work. + */ + blk_init_queue(&SDpnt->request_queue, scsi_request_fn); + blk_queue_headactive(&SDpnt->request_queue, 0); + SDpnt->request_queue.queuedata = (void *) SDpnt; + SDpnt->host = shpnt; + initialize_merge_fn(SDpnt); + + /* * And hook up our command block to the new device we will be testing * for. */ @@ -1044,127 +991,6 @@ * of the calling code to ensure that this is the case. */ -Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device) -{ - Scsi_Cmnd *SCpnt = NULL; - int tablesize; - Scsi_Cmnd *found = NULL; - struct buffer_head *bh, *bhp; - - if (!device) - panic("No device passed to scsi_request_queueable().\n"); - - if (req && req->rq_status == RQ_INACTIVE) - panic("Inactive in scsi_request_queueable"); - - /* - * Look for a free command block. 
If we have been instructed not to queue - * multiple commands to multi-lun devices, then check to see what else is - * going for this device first. - */ - - if (!device->single_lun) { - SCpnt = device->device_queue; - while (SCpnt) { - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - SCpnt = SCpnt->next; - } - } else { - SCpnt = device->device_queue; - while (SCpnt) { - if (SCpnt->channel == device->channel - && SCpnt->target == device->id) { - if (SCpnt->lun == device->lun) { - if (found == NULL - && SCpnt->request.rq_status == RQ_INACTIVE) { - found = SCpnt; - } - } - if (SCpnt->request.rq_status != RQ_INACTIVE) { - /* - * I think that we should really limit things to one - * outstanding command per device - this is what tends - * to trip up buggy firmware. - */ - return NULL; - } - } - SCpnt = SCpnt->next; - } - SCpnt = found; - } - - if (!SCpnt) - return NULL; - - if (SCSI_BLOCK(device, device->host)) - return NULL; - - if (req) { - memcpy(&SCpnt->request, req, sizeof(struct request)); - tablesize = device->host->sg_tablesize; - bhp = bh = req->bh; - if (!tablesize) - bh = NULL; - /* Take a quick look through the table to see how big it is. - * We already have our copy of req, so we can mess with that - * if we want to. - */ - while (req->nr_sectors && bh) { - bhp = bhp->b_reqnext; - if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp)) - tablesize--; - req->nr_sectors -= bh->b_size >> 9; - req->sector += bh->b_size >> 9; - if (!tablesize) - break; - bh = bhp; - } - if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? 
*/ - SCpnt->request.bhtail = bh; - req->bh = bh->b_reqnext; /* Divide request */ - bh->b_reqnext = NULL; - bh = req->bh; - - /* Now reset things so that req looks OK */ - SCpnt->request.nr_sectors -= req->nr_sectors; - req->current_nr_sectors = bh->b_size >> 9; - req->buffer = bh->b_data; - SCpnt->request.sem = NULL; /* Wait until whole thing done */ - } else { - req->rq_status = RQ_INACTIVE; - wake_up(&wait_for_request); - } - } else { - SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */ - SCpnt->request.sem = NULL; /* And no one is waiting for the device - * either */ - } - - atomic_inc(&SCpnt->host->host_active); - SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", SCpnt->target, - atomic_read(&SCpnt->host->host_active))); - SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ - SCpnt->old_use_sg = 0; - SCpnt->transfersize = 0; - SCpnt->resid = 0; - SCpnt->underflow = 0; - SCpnt->cmd_len = 0; - - /* - * Since not everyone seems to set the device info correctly - * before Scsi_Cmnd gets send out to scsi_do_command, we do it here. - */ - - SCpnt->channel = device->channel; - SCpnt->lun = device->lun; - SCpnt->target = device->id; - SCpnt->state = SCSI_STATE_INITIALIZING; - SCpnt->owner = SCSI_OWNER_HIGHLEVEL; - - return SCpnt; -} /* This function returns a structure pointer that will be valid for * the device. The wait parameter tells us whether we should wait for @@ -1176,168 +1002,158 @@ * of the packets for each device */ -Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device, - int wait) -{ - kdev_t dev; - struct request *req = NULL; - int tablesize; - struct buffer_head *bh, *bhp; - struct Scsi_Host *host; - Scsi_Cmnd *SCpnt = NULL; - Scsi_Cmnd *SCwait = NULL; - Scsi_Cmnd *found = NULL; - - if (!device) - panic("No device passed to scsi_allocate_device().\n"); - - if (reqp) - req = *reqp; +/* + * This lock protects the freelist for all devices on the system. 
+ * We could make this finer grained by having a single lock per + * device if it is ever found that there is excessive contention + * on this lock. + */ +static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED; - /* - * See if this request has already been queued by an - * interrupt routine - */ - - if (req) { - if (req->rq_status == RQ_INACTIVE) - return NULL; - dev = req->rq_dev; - } else - dev = 0; /* unused */ +/* + * Used for access to internal allocator used for DMA safe buffers. + */ +static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED; - host = device->host; +/* + * Used to protect insertion into and removal from the queue of + * commands to be processed by the bottom half handler. + */ +static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED; - if (in_interrupt() && SCSI_BLOCK(device, host)) - return NULL; +/* + * Function: scsi_allocate_device + * + * Purpose: Allocate a command descriptor. + * + * Arguments: device - device for which we want a command descriptor + * wait - 1 if we should wait in the event that none + * are available. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Pointer to command descriptor. + * + * Notes: Prior to the new queue code, this function was not SMP-safe. 
+ */ +Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait) +{ + struct Scsi_Host *host; + Scsi_Cmnd *SCpnt = NULL; + Scsi_Device *SDpnt; + unsigned long flags; + + if (!device) + panic("No device passed to scsi_allocate_device().\n"); + + host = device->host; + + spin_lock_irqsave(&device_request_lock, flags); + while (1 == 1) { - if (!device->single_lun) { - SCpnt = device->device_queue; - while (SCpnt) { - SCwait = SCpnt; - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - SCpnt = SCpnt->next; - } - } else { - SCpnt = device->device_queue; - while (SCpnt) { - if (SCpnt->channel == device->channel - && SCpnt->target == device->id) { - if (SCpnt->lun == device->lun) { - SCwait = SCpnt; - if (found == NULL - && SCpnt->request.rq_status == RQ_INACTIVE) { - found = SCpnt; + SCpnt = NULL; + if (!device->device_blocked) { + if (device->single_lun) { + /* + * FIXME(eric) - this is not at all optimal. Given that + * single lun devices are rare and usually slow + * (i.e. CD changers), this is good enough for now, but + * we may want to come back and optimize this later. + * + * Scan through all of the devices attached to this + * host, and see if any are active or not. If so, + * we need to defer this command. + * + * We really need a busy counter per device. This would + * allow us to more easily figure out whether we should + * do anything here or not. + */ + for (SDpnt = host->host_queue; + SDpnt; + SDpnt = SDpnt->next) { + /* + * Only look for other devices on the same bus + * with the same target ID. + */ + if (SDpnt->channel != device->channel + || SDpnt->id != device->id + || SDpnt == device) { + continue; + } + for (SCpnt = SDpnt->device_queue; + SCpnt; + SCpnt = SCpnt->next) { + if (SCpnt->request.rq_status != RQ_INACTIVE) { + break; } } - if (SCpnt->request.rq_status != RQ_INACTIVE) { - /* - * I think that we should really limit things to one - * outstanding command per device - this is what tends - * to trip up buggy firmware. 
- */ - found = NULL; + if (SCpnt) { break; } } - SCpnt = SCpnt->next; + if (SDpnt) { + /* + * Some other device in this cluster is busy. + * If asked to wait, we need to wait, otherwise + * return NULL. + */ + SCpnt = NULL; + break; + } + } + /* + * Now we can check for a free command block for this device. + */ + for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) { + if (SCpnt->request.rq_status == RQ_INACTIVE) + break; } - SCpnt = found; } - - /* See if this request has already been queued by an interrupt routine + /* + * If we couldn't find a free command block, and we have been + * asked to wait, then do so. */ - if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) { - return NULL; + if (SCpnt) { + break; } - if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) { /* Might have changed */ - if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE) { - DECLARE_WAITQUEUE(wait,current); - add_wait_queue(&device->device_wait,&wait); - current->state=TASK_UNINTERRUPTIBLE; - spin_unlock(&io_request_lock); - schedule(); - current->state=TASK_RUNNING; - remove_wait_queue(&device->device_wait,&wait); - spin_lock_irq(&io_request_lock); - } else { - if (!wait) - return NULL; - if (!SCwait) { - printk("Attempt to allocate device channel %d," - " target %d, lun %d\n", device->channel, - device->id, device->lun); - panic("No device found in scsi_allocate_device\n"); - } - } + /* + * If we have been asked to wait for a free block, then + * wait here. + */ + spin_unlock_irqrestore(&device_request_lock, flags); + if (wait) { + /* + * This should block until a device command block + * becomes available. + */ + sleep_on(&device->device_wait); + spin_lock_irqsave(&device_request_lock, flags); } else { - if (req) { - memcpy(&SCpnt->request, req, sizeof(struct request)); - tablesize = device->host->sg_tablesize; - bhp = bh = req->bh; - if (!tablesize) - bh = NULL; - /* Take a quick look through the table to see how big it is. 
- * We already have our copy of req, so we can mess with that - * if we want to. - */ - while (req->nr_sectors && bh) { - bhp = bhp->b_reqnext; - if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp)) - tablesize--; - req->nr_sectors -= bh->b_size >> 9; - req->sector += bh->b_size >> 9; - if (!tablesize) - break; - bh = bhp; - } - if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? */ - SCpnt->request.bhtail = bh; - req->bh = bh->b_reqnext; /* Divide request */ - bh->b_reqnext = NULL; - bh = req->bh; - /* Now reset things so that req looks OK */ - SCpnt->request.nr_sectors -= req->nr_sectors; - req->current_nr_sectors = bh->b_size >> 9; - req->buffer = bh->b_data; - SCpnt->request.sem = NULL; /* Wait until whole thing done */ - } else { - req->rq_status = RQ_INACTIVE; - *reqp = req->next; - wake_up(&wait_for_request); - } - } else { - SCpnt->request.rq_status = RQ_SCSI_BUSY; - SCpnt->request.sem = NULL; /* And no one is waiting for this - * to complete */ - } - atomic_inc(&SCpnt->host->host_active); - SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", - SCpnt->target, - atomic_read(&SCpnt->host->host_active))); - break; + return NULL; } } + SCpnt->request.rq_status = RQ_SCSI_BUSY; + SCpnt->request.sem = NULL; /* And no one is waiting for this + * to complete */ + atomic_inc(&SCpnt->host->host_active); + SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ SCpnt->old_use_sg = 0; SCpnt->transfersize = 0; /* No default transfer size */ SCpnt->cmd_len = 0; - SCpnt->resid = 0; - SCpnt->underflow = 0; /* Do not flag underflow conditions */ - /* Since not everyone seems to set the device info correctly - * before Scsi_Cmnd gets send out to scsi_do_command, we do it here. - * FIXME(eric) This doesn't make any sense. 
- */ - SCpnt->channel = device->channel; - SCpnt->lun = device->lun; - SCpnt->target = device->id; + SCpnt->underflow = 0; /* Do not flag underflow conditions */ SCpnt->state = SCSI_STATE_INITIALIZING; SCpnt->owner = SCSI_OWNER_HIGHLEVEL; + spin_unlock_irqrestore(&device_request_lock, flags); + + SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", + SCpnt->target, + atomic_read(&SCpnt->host->host_active))); + return SCpnt; } @@ -1354,6 +1170,9 @@ */ void scsi_release_command(Scsi_Cmnd * SCpnt) { + unsigned long flags; + spin_lock_irqsave(&device_request_lock, flags); + SCpnt->request.rq_status = RQ_INACTIVE; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; @@ -1379,21 +1198,25 @@ atomic_read(&SCpnt->host->eh_wait->count))); up(SCpnt->host->eh_wait); } + spin_unlock_irqrestore(&device_request_lock, flags); } /* * This is inline because we have stack problemes if we recurse to deeply. */ -inline int internal_cmnd(Scsi_Cmnd * SCpnt) +int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt) { #ifdef DEBUG_DELAY unsigned long clock; #endif struct Scsi_Host *host; int rtn = 0; + unsigned long flags; unsigned long timeout; + ASSERT_LOCK(&io_request_lock, 0); + #if DEBUG unsigned long *ret = 0; #ifdef __mips__ @@ -1427,11 +1250,9 @@ * interrupt handler (assuming there is one irq-level per * host). */ - spin_unlock_irq(&io_request_lock); while (--ticks_remaining >= 0) mdelay(1 + 999 / HZ); host->resetting = 0; - spin_lock_irq(&io_request_lock); } if (host->hostt->use_new_eh_code) { scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out); @@ -1444,7 +1265,7 @@ * We will use a queued command if possible, otherwise we will emulate the * queuing and calling of completion function ourselves. 
*/ - SCSI_LOG_MLQUEUE(3, printk("internal_cmnd (host = %d, channel = %d, target = %d, " + SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, " "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n", SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd, SCpnt->buffer, SCpnt->bufflen, SCpnt->done)); @@ -1460,35 +1281,42 @@ * passes a meaningful return value. */ if (host->hostt->use_new_eh_code) { + spin_lock_irqsave(&io_request_lock, flags); rtn = host->hostt->queuecommand(SCpnt, scsi_done); + spin_unlock_irqrestore(&io_request_lock, flags); if (rtn != 0) { + scsi_delete_timer(SCpnt); scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY); } } else { + spin_lock_irqsave(&io_request_lock, flags); host->hostt->queuecommand(SCpnt, scsi_old_done); + spin_unlock_irqrestore(&io_request_lock, flags); } } else { int temp; SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command)); + spin_lock_irqsave(&io_request_lock, flags); temp = host->hostt->command(SCpnt); SCpnt->result = temp; #ifdef DEBUG_DELAY + spin_unlock_irqrestore(&io_request_lock, flags); clock = jiffies + 4 * HZ; - spin_unlock_irq(&io_request_lock); while (time_before(jiffies, clock)) barrier(); - spin_lock_irq(&io_request_lock); printk("done(host = %d, result = %04x) : routine at %p\n", host->host_no, temp, host->hostt->command); + spin_lock_irqsave(&io_request_lock, flags); #endif if (host->hostt->use_new_eh_code) { scsi_done(SCpnt); } else { scsi_old_done(SCpnt); } + spin_unlock_irqrestore(&io_request_lock, flags); } - SCSI_LOG_MLQUEUE(3, printk("leaving internal_cmnd()\n")); + SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n")); return rtn; } @@ -1499,6 +1327,32 @@ * drivers go for the same host at the same time. */ +/* + * Function: scsi_do_cmd + * + * Purpose: Queue a SCSI command + * + * Arguments: SCpnt - command descriptor. + * cmnd - actual SCSI command to be performed. + * buffer - data buffer. 
+ * bufflen - size of data buffer. + * done - completion function to be run. + * timeout - how long to let it run before timeout. + * retries - number of retries we allow. + * + * Lock status: With the new queueing code, this is SMP-safe, and no locks + * need be held upon entry. The old queueing code the lock was + * assumed to be held upon entry. + * + * Returns: Pointer to command descriptor. + * + * Notes: Prior to the new queue code, this function was not SMP-safe. + * Also, this function is now only used for queueing requests + * for things like ioctls and character device requests - this + * is because we essentially just inject a request into the + * queue for the device. Normal block device handling manipulates + * the queue directly. + */ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *), int timeout, int retries) @@ -1506,6 +1360,8 @@ struct Scsi_Host *host = SCpnt->host; Scsi_Device *device = SCpnt->device; + ASSERT_LOCK(&io_request_lock, 0); + SCpnt->owner = SCSI_OWNER_MIDLEVEL; SCSI_LOG_MLQUEUE(4, @@ -1533,16 +1389,6 @@ * ourselves. */ - SCpnt->pid = scsi_pid++; - - while (SCSI_BLOCK((Scsi_Device *) NULL, host)) { - spin_unlock(&io_request_lock); /* FIXME!!! */ - SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host)); - spin_lock_irq(&io_request_lock); /* FIXME!!! */ - } - - if (host->block) - host_active = host; host->host_busy++; device->device_busy++; @@ -1583,39 +1429,61 @@ SCpnt->internal_timeout = NORMAL_TIMEOUT; SCpnt->abort_reason = 0; SCpnt->result = 0; - internal_cmnd(SCpnt); + + /* + * At this point, we merely set up the command, stick it in the normal + * request queue, and return. Eventually that request will come to the + * top of the list, and will be dispatched. 
+ */ + scsi_insert_special_cmd(SCpnt, 0); SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n")); } -/* This function is the mid-level interrupt routine, which decides how +/* + * This function is the mid-level interrupt routine, which decides how * to handle error conditions. Each invocation of this function must * do one and *only* one of the following: * * 1) Insert command in BH queue. * 2) Activate error handler for host. * - * FIXME(eric) - I am concerned about stack overflow (still). An interrupt could - * come while we are processing the bottom queue, which would cause another command - * to be stuffed onto the bottom queue, and it would in turn be processed as that - * interrupt handler is returning. Given a sufficiently steady rate of returning - * commands, this could cause the stack to overflow. I am not sure what is the most - * appropriate solution here - we should probably keep a depth count, and not process - * any commands while we still have a bottom handler active higher in the stack. - * - * There is currently code in the bottom half handler to monitor recursion in the bottom - * handler and report if it ever happens. If this becomes a problem, it won't be hard to - * engineer something to deal with it so that only the outer layer ever does any real - * processing. + * FIXME(eric) - I am concerned about stack overflow (still). An + * interrupt could come while we are processing the bottom queue, + * which would cause another command to be stuffed onto the bottom + * queue, and it would in turn be processed as that interrupt handler + * is returning. Given a sufficiently steady rate of returning + * commands, this could cause the stack to overflow. I am not sure + * what is the most appropriate solution here - we should probably + * keep a depth count, and not process any commands while we still + * have a bottom handler active higher in the stack. 
+ * + * There is currently code in the bottom half handler to monitor + * recursion in the bottom handler and report if it ever happens. If + * this becomes a problem, it won't be hard to engineer something to + * deal with it so that only the outer layer ever does any real + * processing. */ void scsi_done(Scsi_Cmnd * SCpnt) { + unsigned long flags; + int tstatus; /* * We don't have to worry about this one timing out any more. */ - scsi_delete_timer(SCpnt); + tstatus = scsi_delete_timer(SCpnt); + /* + * If we are unable to remove the timer, it means that the command + * has already timed out. In this case, we have no choice but to + * let the timeout function run, as we have no idea where in fact + * that function could really be. It might be on another processor, + * etc, etc. + */ + if (!tstatus) { + return; + } /* Set the serial numbers back to zero */ SCpnt->serial_number = 0; @@ -1631,6 +1499,8 @@ SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt)); return; } + spin_lock_irqsave(&scsi_bhqueue_lock, flags); + SCpnt->serial_number_at_timeout = 0; SCpnt->state = SCSI_STATE_BHQUEUE; SCpnt->owner = SCSI_OWNER_BH_HANDLER; @@ -1646,6 +1516,10 @@ * We already have the io_request_lock here, since we are called from the * interrupt handler or the error handler. (DB) * + * This may be true at the moment, but I would like to wean all of the low + * level drivers away from using io_request_lock. Technically they should + * all use their own locking. I am adding a small spinlock to protect + * this datastructure to make it safe for that day. (ERY) */ if (!scsi_bh_queue_head) { scsi_bh_queue_head = SCpnt; @@ -1655,6 +1529,7 @@ scsi_bh_queue_tail = SCpnt; } + spin_unlock_irqrestore(&scsi_bhqueue_lock, flags); /* * Mark the bottom half handler to be run. */ @@ -1676,6 +1551,13 @@ * race condition when scsi_done is called after a command has already * timed out but before the time out is processed by the error handler. 
* (DB) + * + * I believe I have corrected this. We simply monitor the return status of + * del_timer() - if this comes back as 0, it means that the timer has fired + * and that a timeout is in progress. I have modified scsi_done() such + * that in this instance the command is never inserted in the bottom + * half queue. Thus the only time we hold the lock here is when + * we wish to atomically remove the contents of the queue. */ void scsi_bottom_half_handler(void) { @@ -1683,14 +1565,14 @@ Scsi_Cmnd *SCnext; unsigned long flags; - spin_lock_irqsave(&io_request_lock, flags); while (1 == 1) { + spin_lock_irqsave(&scsi_bhqueue_lock, flags); SCpnt = scsi_bh_queue_head; scsi_bh_queue_head = NULL; + spin_unlock_irqrestore(&scsi_bhqueue_lock, flags); if (SCpnt == NULL) { - spin_unlock_irqrestore(&io_request_lock, flags); return; } SCnext = SCpnt->bh_next; @@ -1774,8 +1656,6 @@ } /* while(1==1) */ - spin_unlock_irqrestore(&io_request_lock, flags); - } /* @@ -1796,9 +1676,7 @@ SCpnt->request_bufflen = SCpnt->bufflen; SCpnt->use_sg = SCpnt->old_use_sg; SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->result = 0; - memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer); - return internal_cmnd(SCpnt); + return scsi_dispatch_cmd(SCpnt); } /* @@ -1813,32 +1691,14 @@ struct Scsi_Host *host; Scsi_Device *device; + ASSERT_LOCK(&io_request_lock, 0); + host = SCpnt->host; device = SCpnt->device; host->host_busy--; /* Indicate that we are free */ device->device_busy--; /* Decrement device usage counter. */ - if (host->block && host->host_busy == 0) { - host_active = NULL; - - /* For block devices "wake_up" is done in end_scsi_request */ - if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) { - struct Scsi_Host *next; - - for (next = host->block; next != host; next = next->block) - wake_up(&next->host_wait); - } - } - /* - * Now try and drain the mid-level queue if any commands have been - * inserted. 
Check to see whether the queue even has anything in - * it first, as otherwise this is useless overhead. - */ - if (SCpnt->host->pending_commands != NULL) { - scsi_mlqueue_finish(SCpnt->host, SCpnt->device); - } - wake_up(&host->host_wait); /* * If we have valid sense information, then some kind of recovery @@ -1864,9 +1724,34 @@ static void scsi_unregister_host(Scsi_Host_Template *); #endif +/* + * Function: scsi_malloc + * + * Purpose: Allocate memory from the DMA-safe pool. + * + * Arguments: len - amount of memory we need. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Pointer to memory block. + * + * Notes: Prior to the new queue code, this function was not SMP-safe. + * This function can only allocate in units of sectors + * (i.e. 512 bytes). + * + * We cannot use the normal system allocator becuase we need + * to be able to guarantee that we can process a complete disk + * I/O request without touching the system allocator. Think + * about it - if the system were heavily swapping, and tried to + * write out a block of memory to disk, and the SCSI code needed + * to allocate more memory in order to be able to write the + * data to disk, you would wedge the system. 
+ */ void *scsi_malloc(unsigned int len) { unsigned int nbits, mask; + unsigned long flags; + int i, j; if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE) return NULL; @@ -1874,6 +1759,8 @@ nbits = len >> 9; mask = (1 << nbits) - 1; + spin_lock_irqsave(&allocator_request_lock, flags); + for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++) for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) { if ((dma_malloc_freelist[i] & (mask << j)) == 0) { @@ -1883,15 +1770,37 @@ SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9))); printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)); #endif + spin_unlock_irqrestore(&allocator_request_lock, flags); return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9)); } } + spin_unlock_irqrestore(&allocator_request_lock, flags); return NULL; /* Nope. No more */ } +/* + * Function: scsi_free + * + * Purpose: Free memory into the DMA-safe pool. + * + * Arguments: ptr - data block we are freeing. + * len - size of block we are freeing. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Nothing + * + * Notes: This function *must* only be used to free memory + * allocated from scsi_malloc(). + * + * Prior to the new queue code, this function was not SMP-safe. + * This function can only allocate in units of sectors + * (i.e. 512 bytes). 
+ */ int scsi_free(void *obj, unsigned int len) { unsigned int page, sector, nbits, mask; + unsigned long flags; #ifdef DEBUG unsigned long ret = 0; @@ -1905,6 +1814,8 @@ SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len)); #endif + spin_lock_irqsave(&allocator_request_lock, flags); + for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) { unsigned long page_addr = (unsigned long) dma_malloc_pages[page]; if ((unsigned long) obj >= page_addr && @@ -1927,6 +1838,7 @@ } scsi_dma_free_sectors += nbits; dma_malloc_freelist[page] &= ~(mask << sector); + spin_unlock_irqrestore(&allocator_request_lock, flags); return 0; } } @@ -1977,10 +1889,13 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt) { + unsigned long flags; struct Scsi_Host *host = SDpnt->host; int j; Scsi_Cmnd *SCpnt; + spin_lock_irqsave(&device_request_lock, flags); + if (SDpnt->queue_depth == 0) SDpnt->queue_depth = host->cmd_per_lun; SDpnt->device_queue = NULL; @@ -2020,8 +1935,10 @@ SDpnt->queue_depth, j); SDpnt->queue_depth = j; SDpnt->has_cmdblocks = (0 != j); - } else + } else { SDpnt->has_cmdblocks = 1; + } + spin_unlock_irqrestore(&device_request_lock, flags); } static ssize_t proc_scsi_gen_write(struct file * file, const char * buf, @@ -2450,6 +2367,7 @@ if (HBA_ptr->host_queue == scd) { HBA_ptr->host_queue = scd->next; } + blk_cleanup_queue(&scd->request_queue); scsi_init_free((char *) scd, sizeof(Scsi_Device)); } else { goto out; @@ -2464,13 +2382,27 @@ #endif /* - * Go through the device list and recompute the most appropriate size - * for the dma pool. Then grab more memory (as required). + * Function: resize_dma_pool + * + * Purpose: Ensure that the DMA pool is sufficiently large to be + * able to guarantee that we can always process I/O requests + * without calling the system allocator. + * + * Arguments: None. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. 
+ * + * Returns: Nothing + * + * Notes: Prior to the new queue code, this function was not SMP-safe. + * Go through the device list and recompute the most appropriate + * size for the dma pool. Then grab more memory (as required). */ static void resize_dma_pool(void) { int i, k; unsigned long size; + unsigned long flags; struct Scsi_Host *shpnt; struct Scsi_Host *host = NULL; Scsi_Device *SDpnt; @@ -2480,6 +2412,8 @@ unsigned char **new_dma_malloc_pages = NULL; int out_of_space = 0; + spin_lock_irqsave(&allocator_request_lock, flags); + if (!scsi_hostlist) { /* * Free up the DMA pool. @@ -2499,6 +2433,7 @@ dma_malloc_freelist = NULL; dma_sectors = 0; scsi_dma_free_sectors = 0; + spin_unlock_irqrestore(&allocator_request_lock, flags); return; } /* Next, check to see if we need to extend the DMA buffer pool */ @@ -2569,8 +2504,10 @@ if (new_dma_sectors < dma_sectors) new_dma_sectors = dma_sectors; #endif - if (new_dma_sectors <= dma_sectors) + if (new_dma_sectors <= dma_sectors) { + spin_unlock_irqrestore(&allocator_request_lock, flags); return; /* best to quit while we are in front */ + } for (k = 0; k < 20; ++k) { /* just in case */ out_of_space = 0; @@ -2621,6 +2558,7 @@ break; /* found space ... */ } /* end of for loop */ if (out_of_space) { + spin_unlock_irqrestore(&allocator_request_lock, flags); scsi_need_isa_buffer = new_need_isa_buffer; /* some useful info */ printk(" WARNING, not enough memory, pool not expanded\n"); return; @@ -2645,6 +2583,8 @@ dma_sectors = new_dma_sectors; scsi_need_isa_buffer = new_need_isa_buffer; + spin_unlock_irqrestore(&allocator_request_lock, flags); + #ifdef DEBUG_INIT printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors); printk("resize_dma_pool: dma sectors = %d\n", dma_sectors); @@ -2747,8 +2687,6 @@ printk("scsi : %d host%s.\n", next_scsi_host, (next_scsi_host == 1) ? "" : "s"); - scsi_make_blocked_list(); - /* The next step is to call scan_scsis here. 
This generates the * Scsi_Devices entries */ @@ -2961,6 +2899,7 @@ } SDpnt->has_cmdblocks = 0; + blk_cleanup_queue(&SDpnt->request_queue); /* Next free up the Scsi_Device structures for this host */ shpnt->host_queue = SDpnt->next; scsi_init_free((char *) SDpnt, sizeof(Scsi_Device)); @@ -3016,7 +2955,6 @@ (scsi_memory_upper_value - scsi_init_memory_start) / 1024); #endif - scsi_make_blocked_list(); /* There were some hosts that were loaded at boot time, so we cannot do any more than this */ @@ -3249,12 +3187,11 @@ printk("Dump of scsi host parameters:\n"); i = 0; for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { - printk(" %d %d %d : %d %p\n", + printk(" %d %d %d : %d\n", shpnt->host_failed, shpnt->host_busy, atomic_read(&shpnt->host_active), - shpnt->host_blocked, - shpnt->pending_commands); + shpnt->host_blocked); } @@ -3300,10 +3237,10 @@ /* Now dump the request lists for each block device */ printk("Dump of pending block device requests\n"); for (i = 0; i < MAX_BLKDEV; i++) { - if (blk_dev[i].current_request) { + if (blk_dev[i].request_queue.current_request) { struct request *req; printk("%d: ", i); - req = blk_dev[i].current_request; + req = blk_dev[i].request_queue.current_request; while (req) { printk("(%s %d %ld %ld %ld) ", kdevname(req->rq_dev), @@ -3318,7 +3255,7 @@ } } } - /* printk("wait_for_request = %p\n", &wait_for_request); */ + printk("wait_for_request = %p\n", &wait_for_request); #endif /* CONFIG_SCSI_LOGGING */ /* } */ } #endif /* CONFIG_PROC_FS */ diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi.h linux/drivers/scsi/scsi.h --- v2.3.31/linux/drivers/scsi/scsi.h Thu Nov 11 20:11:48 1999 +++ linux/drivers/scsi/scsi.h Tue Dec 14 00:56:43 1999 @@ -1,13 +1,13 @@ /* * scsi.h Copyright (C) 1992 Drew Eckhardt - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale * generic SCSI package header file by * Initial versions: Drew Eckhardt * Subsequent revisions: Eric Youngdale * * 
* - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. */ @@ -50,6 +50,21 @@ #endif /* + * Used for debugging the new queueing code. We want to make sure + * that the lock state is consistent with design. Only do this in + * the user space simulator. + */ +#define ASSERT_LOCK(_LOCK, _COUNT) + +#if defined(__SMP__) && defined(CONFIG_USER_DEBUG) +#undef ASSERT_LOCK +#define ASSERT_LOCK(_LOCK,_COUNT) \ + { if( (_LOCK)->lock != _COUNT ) \ + panic("Lock count inconsistent %s %d\n", __FILE__, __LINE__); \ + } +#endif + +/* * Use these to separate status msg and our bytes * * These are set by: @@ -378,6 +393,18 @@ extern int scsi_decide_disposition(Scsi_Cmnd * SCpnt); extern int scsi_block_when_processing_errors(Scsi_Device *); extern void scsi_sleep(int); +extern int scsi_partsize(struct buffer_head *bh, unsigned long capacity, + unsigned int *cyls, unsigned int *hds, + unsigned int *secs); + +/* + * Prototypes for functions in scsi_lib.c + */ +extern void initialize_merge_fn(Scsi_Device * SDpnt); +extern void scsi_request_fn(request_queue_t * q); + +extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int); +extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt); /* * scsi_abort aborts the current command that is executing on host host. 
@@ -386,17 +413,18 @@ */ extern void scsi_do_cmd(Scsi_Cmnd *, const void *cmnd, - void *buffer, unsigned bufflen, - void (*done)(struct scsi_cmnd *), - int timeout, int retries); - -extern void scsi_wait_cmd (Scsi_Cmnd *, const void *cmnd , void *buffer, unsigned bufflen, void (*done) (struct scsi_cmnd *), int timeout, int retries); +extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd, + void *buffer, unsigned bufflen, + void (*done) (struct scsi_cmnd *), + int timeout, int retries); -extern Scsi_Cmnd *scsi_allocate_device(struct request **, Scsi_Device *, int); +extern void scsi_request_fn(request_queue_t * q); + +extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int); extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *); @@ -428,9 +456,10 @@ wait_queue_head_t device_wait; /* Used to wait if device is busy */ struct Scsi_Host *host; + request_queue_t request_queue; volatile unsigned short device_busy; /* commands actually active on low-level */ - void (*scsi_request_fn) (void); /* Used to jumpstart things after an - * ioctl */ + int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize + new request */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ /* public: */ @@ -438,6 +467,8 @@ unsigned int manufacturer; /* Manufacturer of device, for using * vendor-specific cmd's */ + unsigned sector_size; /* size in bytes */ + int attached; /* # of high level drivers attached to * this */ int access_count; /* Count of open channels/mounts */ @@ -475,6 +506,10 @@ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN * because we did a bus reset. */ unsigned device_blocked:1; /* Device returned QUEUE_FULL. */ + unsigned ten:1; /* support ten byte read / write */ + unsigned remap:1; /* support remapping */ + unsigned starved:1; /* unable to process commands because + host busy */ }; @@ -577,16 +612,17 @@ reconnects. 
Probably == sector size */ - int resid; /* Number of bytes requested to be + int resid; /* Number of bytes requested to be transferred less actual number transferred (0 if not supported) */ struct request request; /* A copy of the command we are working on */ - unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE when - CHECK CONDITION is received on - original command (auto-sense) */ + unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE + * when CHECK CONDITION is + * received on original command + * (auto-sense) */ unsigned flags; @@ -630,6 +666,14 @@ unsigned long pid; /* Process ID, starts at 0 */ }; +/* + * Flag bits for the internal_timeout array + */ +#define NORMAL_TIMEOUT 0 +#define IN_ABORT 1 +#define IN_RESET 2 +#define IN_RESET2 4 +#define IN_RESET3 8 /* * Definitions and prototypes used for scsi mid-level queue. @@ -640,60 +684,15 @@ extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason); extern int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device); +extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, + int sectors); + +extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, + int block_sectors); + #if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR) #include "hosts.h" - -static Scsi_Cmnd *end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors) -{ - struct request *req; - struct buffer_head *bh; - - req = &SCpnt->request; - req->errors = 0; - if (!uptodate) { - printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n", - kdevname(req->rq_dev), req->sector); - } - do { - if ((bh = req->bh) != NULL) { - req->bh = bh->b_reqnext; - req->nr_sectors -= bh->b_size >> 9; - req->sector += bh->b_size >> 9; - bh->b_reqnext = NULL; - bh->b_end_io(bh, uptodate); - sectors -= bh->b_size >> 9; - if ((bh = req->bh) != NULL) { - req->current_nr_sectors = bh->b_size >> 9; - if (req->nr_sectors < req->current_nr_sectors) { - req->nr_sectors = req->current_nr_sectors; - printk("end_scsi_request: 
buffer-list destroyed\n"); - } - } - } - } while (sectors && bh); - if (req->bh) { - req->buffer = bh->b_data; - return SCpnt; - } - DEVICE_OFF(req->rq_dev); - if (req->sem != NULL) { - up(req->sem); - } - add_blkdev_randomness(MAJOR(req->rq_dev)); - - if (SCpnt->host->block) { - struct Scsi_Host *next; - - for (next = SCpnt->host->block; next != SCpnt->host; - next = next->block) - wake_up(&next->host_wait); - } - wake_up(&wait_for_request); - wake_up(&SCpnt->device->device_wait); - scsi_release_command(SCpnt); - return NULL; -} /* This is just like INIT_REQUEST, but we need to be aware of the fact diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_debug.c linux/drivers/scsi/scsi_debug.c --- v2.3.31/linux/drivers/scsi/scsi_debug.c Thu Nov 11 20:11:48 1999 +++ linux/drivers/scsi/scsi_debug.c Mon Dec 13 14:10:09 1999 @@ -7,6 +7,7 @@ * anything out of the ordinary is seen. */ +#include #include #include @@ -32,16 +33,18 @@ /* A few options that we want selected */ -#define NR_HOSTS_PRESENT 20 -#define NR_FAKE_DISKS 6 -#define N_HEAD 32 -#define N_SECTOR 64 -#define DISK_READONLY(TGT) (1) +#define NR_HOSTS_PRESENT 1 +#define NR_FAKE_DISKS 3 +#define N_HEAD 255 +#define N_SECTOR 63 +#define N_CYLINDER 524 +#define DISK_READONLY(TGT) (0) #define DISK_REMOVEABLE(TGT) (1) +#define DEVICE_TYPE(TGT) (TGT == 2 ? TYPE_TAPE : TYPE_DISK); /* Do not attempt to use a timer to simulate a real disk with latency */ /* Only use this in the actual kernel, not in the simulator. */ -/* #define IMMEDIATE */ +#define IMMEDIATE /* Skip some consistency checking. Good for benchmarking */ #define SPEEDY @@ -58,11 +61,15 @@ #define START_PARTITION 4 /* Time to wait before completing a command */ -#define DISK_SPEED (HZ/10) /* 100ms */ -#define CAPACITY (0x80000) +#define DISK_SPEED (HZ/10) /* 100ms */ +#define CAPACITY (N_HEAD * N_SECTOR * N_CYLINDER) +#define SIZE(TGT) (TGT == 2 ? 
2248 : 512) static int starts[] = -{N_HEAD, N_HEAD * N_SECTOR, 50000, CAPACITY, 0}; +{N_SECTOR, + N_HEAD * N_SECTOR, /* Single cylinder */ + N_HEAD * N_SECTOR * 4, + CAPACITY, 0}; static int npart = 0; #include "scsi_debug.h" @@ -112,21 +119,25 @@ typedef void (*done_fct_t) (Scsi_Cmnd *); -static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] = {NULL,}; +static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] = +{NULL,}; static void scsi_debug_intr_handle(unsigned long); static struct timer_list timeout[SCSI_DEBUG_MAILBOXES]; -Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] = {NULL,}; -static char SCrst[SCSI_DEBUG_MAILBOXES] = {0,}; +Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] = +{NULL,}; +static char SCrst[SCSI_DEBUG_MAILBOXES] = +{0,}; /* * Semaphore used to simulate bus lockups. */ static int scsi_debug_lockup = 0; -static char sense_buffer[128] = {0,}; +static char sense_buffer[128] = +{0,}; static void scsi_dump(Scsi_Cmnd * SCpnt, int flag) { @@ -197,6 +208,14 @@ sgcount = 0; sgpnt = NULL; + /* + * The io_request_lock *must* be held at this point. + */ + if( io_request_lock.lock == 0 ) + { + printk("Warning - io_request_lock is not held in queuecommand\n"); + } + /* * If we are being notified of the mid-level reposessing a command due to timeout, * just return. @@ -242,6 +261,10 @@ SCpnt->result = 0; done(SCpnt); return 0; + case START_STOP: + SCSI_LOG_LLQUEUE(3, printk("START_STOP\n")); + scsi_debug_errsts = 0; + break; case ALLOW_MEDIUM_REMOVAL: if (cmd[4]) { SCSI_LOG_LLQUEUE(2, printk("Medium removal inhibited...")); @@ -253,7 +276,7 @@ case INQUIRY: SCSI_LOG_LLQUEUE(3, printk("Inquiry...(%p %d)\n", buff, bufflen)); memset(buff, 0, bufflen); - buff[0] = TYPE_DISK; + buff[0] = DEVICE_TYPE(target); buff[1] = DISK_REMOVEABLE(target) ? 
0x80 : 0; /* Removable disk */ buff[2] = 1; buff[4] = 33 - 5; @@ -277,7 +300,10 @@ buff[1] = (CAPACITY >> 16) & 0xff; buff[2] = (CAPACITY >> 8) & 0xff; buff[3] = CAPACITY & 0xff; - buff[6] = 2; /* 512 byte sectors */ + buff[4] = 0; + buff[5] = 0; + buff[6] = (SIZE(target) >> 8) & 0xff; /* 512 byte sectors */ + buff[7] = SIZE(target) & 0xff; scsi_debug_errsts = 0; break; case READ_10: @@ -327,15 +353,23 @@ p = (struct partition *) (buff + 0x1be); i = 0; while (starts[i + 1]) { + int start_cyl, end_cyl; + + start_cyl = starts[i] / N_HEAD / N_SECTOR; + end_cyl = (starts[i + 1] - 1) / N_HEAD / N_SECTOR; + p->boot_ind = 0; + + p->head = (i == 0 ? 1 : 0); + p->sector = 1 | ((start_cyl >> 8) << 6); + p->cyl = (start_cyl & 0xff); + + p->end_head = N_HEAD - 1; + p->end_sector = N_SECTOR | ((end_cyl >> 8) << 6); + p->end_cyl = (end_cyl & 0xff); + p->start_sect = starts[i]; p->nr_sects = starts[i + 1] - starts[i]; p->sys_ind = 0x81; /* Linux partition */ - p->head = (i == 0 ? 1 : 0); - p->sector = 1; - p->cyl = starts[i] / N_HEAD / N_SECTOR; - p->end_head = N_HEAD - 1; - p->end_sector = N_SECTOR; - p->end_cyl = starts[i + 1] / N_HEAD / N_SECTOR; p++; i++; }; @@ -465,6 +499,8 @@ #ifdef IMMEDIATE if (!scsi_debug_lockup) { SCpnt->result = scsi_debug_errsts; + SCint[i] = SCpnt; + do_done[i] = done; scsi_debug_intr_handle(i); /* No timer - do this one right away */ } restore_flags(flags); @@ -490,24 +526,6 @@ return 0; } -volatile static int internal_done_flag = 0; -volatile static int internal_done_errcode = 0; -static void internal_done(Scsi_Cmnd * SCpnt) -{ - internal_done_errcode = SCpnt->result; - ++internal_done_flag; -} - -int scsi_debug_command(Scsi_Cmnd * SCpnt) -{ - DEB(printk("scsi_debug_command: ..calling scsi_debug_queuecommand\n")); - scsi_debug_queuecommand(SCpnt, internal_done); - - while (!internal_done_flag); - internal_done_flag = 0; - return internal_done_errcode; -} - /* A "high" level interrupt handler. 
This should be called once per jiffy * to simulate a regular scsi disk. We use a timer to do this. */ @@ -589,7 +607,7 @@ int size = disk->capacity; info[0] = N_HEAD; info[1] = N_SECTOR; - info[2] = (size + 2047) >> 11; + info[2] = N_CYLINDER; if (info[2] >= 1024) info[2] = 1024; return 0; @@ -683,6 +701,21 @@ return (len); } + +#ifdef CONFIG_USER_DEBUG +/* + * This is a hack for the user space emulator. It allows us to + * "insert" arbitrary numbers of additional drivers. + */ +void *scsi_debug_get_handle(void) +{ + static Scsi_Host_Template driver_copy = SCSI_DEBUG; + void *rtn; + rtn = kmalloc(sizeof(driver_copy), GFP_ATOMIC); + memcpy(rtn, (void *) &driver_copy, sizeof(driver_copy)); + return rtn; +} +#endif #ifdef MODULE /* Eventually this will go into an include file, but this will be later */ diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_debug.h linux/drivers/scsi/scsi_debug.h --- v2.3.31/linux/drivers/scsi/scsi_debug.h Tue Sep 7 12:14:06 1999 +++ linux/drivers/scsi/scsi_debug.h Sun Dec 12 23:04:20 1999 @@ -27,16 +27,15 @@ #define SCSI_DEBUG {proc_info: scsi_debug_proc_info, \ name: "SCSI DEBUG", \ detect: scsi_debug_detect, \ - command: scsi_debug_command, \ queuecommand: scsi_debug_queuecommand, \ abort: scsi_debug_abort, \ reset: scsi_debug_reset, \ bios_param: scsi_debug_biosparam, \ can_queue: SCSI_DEBUG_CANQUEUE, \ this_id: 7, \ - sg_tablesize: SG_ALL, \ + sg_tablesize: 16, \ cmd_per_lun: 3, \ - unchecked_isa_dma: 1, \ + unchecked_isa_dma: 0, \ use_clustering: ENABLE_CLUSTERING, \ use_new_eh_code: 1, \ } diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_error.c linux/drivers/scsi/scsi_error.c --- v2.3.31/linux/drivers/scsi/scsi_error.c Fri Oct 15 15:25:14 1999 +++ linux/drivers/scsi/scsi_error.c Sun Dec 12 23:04:20 1999 @@ -35,11 +35,13 @@ #include "hosts.h" #include "constants.h" -#ifdef MODULE +/* + * We must always allow SHUTDOWN_SIGS. 
Even if we are not a module, + * the host drivers that we are using may be loaded as modules, and + * when we unload these, we need to ensure that the error handler thread + * can be shut down. + */ #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM)) -#else -#define SHUTDOWN_SIGS (0UL) -#endif #ifdef DEBUG #define SENSE_TIMEOUT SCSI_TIMEOUT @@ -128,7 +130,9 @@ * * Arguments: SCset - command that we are canceling timer for. * - * Returns: Amount of time remaining before command would have timed out. + * Returns: 1 if we were able to detach the timer. 0 if we + * blew it, and the timer function has already started + * to run. * * Notes: This should be turned into an inline function. */ @@ -136,8 +140,7 @@ { int rtn; - rtn = jiffies - SCset->eh_timeout.expires; - del_timer(&SCset->eh_timeout); + rtn = del_timer(&SCset->eh_timeout); SCSI_LOG_ERROR_RECOVERY(5, printk("Clearing timer for command %p\n", SCset)); @@ -415,6 +418,7 @@ {REQUEST_SENSE, 0, 0, 0, 255, 0}; unsigned char scsi_result0[256], *scsi_result = NULL; + ASSERT_LOCK(&io_request_lock, 1); memcpy((void *) SCpnt->cmnd, (void *) generic_sense, sizeof(generic_sense)); @@ -563,10 +567,7 @@ add_timer(&timer); - spin_unlock_irq(&io_request_lock); down(&sem); - spin_lock_irq(&io_request_lock); - del_timer(&timer); } @@ -583,6 +584,8 @@ { struct Scsi_Host *host; + ASSERT_LOCK(&io_request_lock, 1); + host = SCpnt->host; retry: @@ -811,7 +814,9 @@ * If we had a successful bus reset, mark the command blocks to expect * a condition code of unit attention. */ + spin_unlock_irq(&io_request_lock); scsi_sleep(BUS_RESET_SETTLE_TIME); + spin_lock_irq(&io_request_lock); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { @@ -854,7 +859,9 @@ * If we had a successful host reset, mark the command blocks to expect * a condition code of unit attention. 
*/ + spin_unlock_irq(&io_request_lock); scsi_sleep(HOST_RESET_SETTLE_TIME); + spin_lock_irq(&io_request_lock); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { @@ -1164,6 +1171,8 @@ * * Arguments: host - host that we are restarting * + * Lock status: Assumed that locks are not held upon entry. + * * Returns: Nothing * * Notes: When we entered the error handler, we blocked all further @@ -1172,6 +1181,9 @@ STATIC void scsi_restart_operations(struct Scsi_Host *host) { Scsi_Device *SDpnt; + unsigned long flags; + + ASSERT_LOCK(&io_request_lock, 0); /* * Next free up anything directly waiting upon the host. This will be @@ -1183,18 +1195,23 @@ wake_up(&host->host_wait); /* - * Finally, block devices need an extra kick in the pants. This is because - * the request queueing mechanism may have queued lots of pending requests - * and there won't be a process waiting in a place where we can simply wake - * it up. Thus we simply go through and call the request function to goose - * the various top level drivers and get things moving again. + * Finally we need to re-initiate requests that may be pending. We will + * have had everything blocked while error handling is taking place, and + * now that error recovery is done, we will need to ensure that these + * requests are started. 
*/ + spin_lock_irqsave(&io_request_lock, flags); for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) { - SCSI_LOG_ERROR_RECOVERY(5, printk("Calling request function to restart things...\n")); - - if (SDpnt->scsi_request_fn != NULL) - (*SDpnt->scsi_request_fn) (); + request_queue_t *q; + if ((host->can_queue > 0 && (host->host_busy >= host->can_queue)) + || (host->host_blocked) + || (SDpnt->device_blocked)) { + break; + } + q = &SDpnt->request_queue; + q->request_fn(q); } + spin_unlock_irqrestore(&io_request_lock, flags); } /* @@ -1241,6 +1258,8 @@ Scsi_Cmnd *SCdone; int timed_out; + ASSERT_LOCK(&io_request_lock, 1); + SCdone = NULL; /* @@ -1524,7 +1543,9 @@ * Due to the spinlock, we will never get out of this * loop without a proper wait (DB) */ + spin_unlock_irq(&io_request_lock); scsi_sleep(1 * HZ); + spin_lock_irq(&io_request_lock); goto next_device; } @@ -1617,7 +1638,9 @@ * Due to the spinlock, we will never get out of this * loop without a proper wait. (DB) */ + spin_unlock_irq(&io_request_lock); scsi_sleep(1 * HZ); + spin_lock_irq(&io_request_lock); goto next_device2; } @@ -1768,11 +1791,11 @@ lock_kernel(); /* - * Flush resources + * Flush resources */ - + daemonize(); - + /* * Set the name of this process. */ @@ -1821,6 +1844,9 @@ host->eh_active = 0; + /* The spinlock is really needed up to this point. (DB) */ + spin_unlock_irqrestore(&io_request_lock, flags); + /* * Note - if the above fails completely, the action is to take * individual devices offline and flush the queue of any @@ -1830,8 +1856,6 @@ */ scsi_restart_operations(host); - /* The spinlock is really needed up to this point. 
(DB) */ - spin_unlock_irqrestore(&io_request_lock, flags); } SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler exiting\n")); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_ioctl.c linux/drivers/scsi/scsi_ioctl.c --- v2.3.31/linux/drivers/scsi/scsi_ioctl.c Thu Nov 11 20:11:48 1999 +++ linux/drivers/scsi/scsi_ioctl.c Sun Dec 12 23:04:20 1999 @@ -19,7 +19,7 @@ #include #define NORMAL_RETRIES 5 -#define NORMAL_TIMEOUT (10 * HZ) +#define IOCTL_NORMAL_TIMEOUT (10 * HZ) #define FORMAT_UNIT_TIMEOUT (2 * 60 * 60 * HZ) #define START_STOP_TIMEOUT (60 * HZ) #define MOVE_MEDIUM_TIMEOUT (5 * 60 * HZ) @@ -69,7 +69,7 @@ /* * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host. - * The NORMAL_TIMEOUT and NORMAL_RETRIES variables are used. + * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used. * * dev is the SCSI device struct ptr, *(int *) arg is the length of the * input data, if any, not including the command string & counts, @@ -105,22 +105,18 @@ static int ioctl_internal_command(Scsi_Device * dev, char *cmd, int timeout, int retries) { - unsigned long flags; int result; Scsi_Cmnd *SCpnt; Scsi_Device *SDpnt; - spin_lock_irqsave(&io_request_lock, flags); SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", cmd[0])); - SCpnt = scsi_allocate_device(NULL, dev, 1); + SCpnt = scsi_allocate_device(dev, 1); { DECLARE_MUTEX_LOCKED(sem); SCpnt->request.sem = &sem; scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); down(&sem); - spin_lock_irqsave(&io_request_lock, flags); SCpnt->request.sem = NULL; } @@ -167,11 +163,8 @@ scsi_release_command(SCpnt); SCpnt = NULL; - if (!SDpnt->was_reset && SDpnt->scsi_request_fn) - (*SDpnt->scsi_request_fn) (); wake_up(&SDpnt->device_wait); - spin_unlock_irqrestore(&io_request_lock, flags); return result; } @@ -183,34 +176,33 @@ * The structure that we are passed should look like: * * struct sdata { - * unsigned int inlen; [i] 
Length of data to be written to device + * unsigned int inlen; [i] Length of data to be written to device * unsigned int outlen; [i] Length of data to be read from device * unsigned char cmd[x]; [i] SCSI command (6 <= x <= 12). - * [o] Data read from device starts here. - * [o] On error, sense buffer starts here. + * [o] Data read from device starts here. + * [o] On error, sense buffer starts here. * unsigned char wdata[y]; [i] Data written to device starts here. * }; * Notes: - * - The SCSI command length is determined by examining the 1st byte - * of the given command. There is no way to override this. - * - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha). - * - The length (x + y) must be at least OMAX_SB_LEN bytes long to - * accomodate the sense buffer when an error occurs. - * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that - * old code will not be surprised. - * - If a Unix error occurs (e.g. ENOMEM) then the user will receive - * a negative return and the Unix error code in 'errno'. - * If the SCSI command succeeds then 0 is returned. - * Positive numbers returned are the compacted SCSI error codes (4 - * bytes in one int) where the lowest byte is the SCSI status. - * See the drivers/scsi/scsi.h file for more information on this. + * - The SCSI command length is determined by examining the 1st byte + * of the given command. There is no way to override this. + * - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha). + * - The length (x + y) must be at least OMAX_SB_LEN bytes long to + * accomodate the sense buffer when an error occurs. + * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that + * old code will not be surprised. + * - If a Unix error occurs (e.g. ENOMEM) then the user will receive + * a negative return and the Unix error code in 'errno'. + * If the SCSI command succeeds then 0 is returned. 
+ * Positive numbers returned are the compacted SCSI error codes (4 + * bytes in one int) where the lowest byte is the SCSI status. + * See the drivers/scsi/scsi.h file for more information on this. * */ -#define OMAX_SB_LEN 16 /* Old sense buffer length */ +#define OMAX_SB_LEN 16 /* Old sense buffer length */ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) { - unsigned long flags; char *buf; unsigned char cmd[12]; char *cmd_in; @@ -251,9 +243,7 @@ buf_needed = (buf_needed + 511) & ~511; if (buf_needed > MAX_BUF) buf_needed = MAX_BUF; - spin_lock_irqsave(&io_request_lock, flags); buf = (char *) scsi_malloc(buf_needed); - spin_unlock_irqrestore(&io_request_lock, flags); if (!buf) return -ENOMEM; memset(buf, 0, buf_needed); @@ -299,23 +289,21 @@ retries = NORMAL_RETRIES; break; default: - timeout = NORMAL_TIMEOUT; + timeout = IOCTL_NORMAL_TIMEOUT; retries = NORMAL_RETRIES; break; } #ifndef DEBUG_NO_CMD - spin_lock_irqsave(&io_request_lock, flags); - SCpnt = scsi_allocate_device(NULL, dev, 1); + SCpnt = scsi_allocate_device(dev, 1); { DECLARE_MUTEX_LOCKED(sem); SCpnt->request.sem = &sem; scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); down(&sem); SCpnt->request.sem = NULL; } @@ -339,7 +327,6 @@ } result = SCpnt->result; - spin_lock_irqsave(&io_request_lock, flags); wake_up(&SCpnt->device->device_wait); SDpnt = SCpnt->device; @@ -349,10 +336,7 @@ if (buf) scsi_free(buf, buf_needed); - if (SDpnt->scsi_request_fn) - (*SDpnt->scsi_request_fn) (); - spin_unlock_irqrestore(&io_request_lock, flags); return result; #else { @@ -445,7 +429,7 @@ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = SCSI_REMOVAL_PREVENT; return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd, - NORMAL_TIMEOUT, NORMAL_RETRIES); + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); break; case SCSI_IOCTL_DOORUNLOCK: if (!dev->removable || !dev->lockable) @@ -455,14 +439,14 @@ scsi_cmd[2] = scsi_cmd[3] 
= scsi_cmd[5] = 0; scsi_cmd[4] = SCSI_REMOVAL_ALLOW; return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd, - NORMAL_TIMEOUT, NORMAL_RETRIES); + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); case SCSI_IOCTL_TEST_UNIT_READY: scsi_cmd[0] = TEST_UNIT_READY; scsi_cmd[1] = dev->lun << 5; scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = 0; return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd, - NORMAL_TIMEOUT, NORMAL_RETRIES); + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); break; case SCSI_IOCTL_START_UNIT: scsi_cmd[0] = START_STOP; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c --- v2.3.31/linux/drivers/scsi/scsi_lib.c Wed Dec 31 16:00:00 1969 +++ linux/drivers/scsi/scsi_lib.c Sun Dec 12 23:04:20 1999 @@ -0,0 +1,781 @@ +/* + * scsi_lib.c Copyright (C) 1999 Eric Youngdale + * + * SCSI queueing library. + * Initial versions: Eric Youngdale (eric@andante.org). + * Based upon conversations with large numbers + * of people at Linux Expo. + */ + +/* + * The fundamental purpose of this file is to contain a library of utility + * routines that can be used by low-level drivers. Ultimately the idea + * is that there should be a sufficiently rich number of functions that it + * would be possible for a driver author to fashion a queueing function for + * a low-level driver if they wished. Note however that this file also + * contains the "default" versions of these functions, as we don't want to + * go through and retrofit queueing functions into all 30 some-odd drivers. + */ + +#define __NO_VERSION__ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define __KERNEL_SYSCALLS__ + +#include + +#include +#include +#include + +#include "scsi.h" +#include "hosts.h" +#include "constants.h" +#include + +/* + * This entire source file deals with the new queueing code. 
+ */ + +/* + * Function: scsi_insert_special_cmd() + * + * Purpose: Insert pre-formed command into request queue. + * + * Arguments: SCpnt - command that is ready to be queued. + * at_head - boolean. True if we should insert at head + * of queue, false if we should insert at tail. + * + * Lock status: Assumed that lock is not held upon entry. + * + * Returns: Nothing + * + * Notes: This function is called from character device and from + * ioctl types of functions where the caller knows exactly + * what SCSI command needs to be issued. The idea is that + * we merely inject the command into the queue (at the head + * for now), and then call the queue request function to actually + * process it. + */ +int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head) +{ + unsigned long flags; + request_queue_t *q; + + ASSERT_LOCK(&io_request_lock, 0); + + /* + * The SCpnt already contains a request structure - we will doctor the + * thing up with the appropriate values and use that in the actual + * request queue. + */ + q = &SCpnt->device->request_queue; + SCpnt->request.cmd = SPECIAL; + SCpnt->request.special = (void *) SCpnt; + + /* + * For the moment, we insert at the head of the queue. This may turn + * out to be a bad idea, but we will see about that when we get there. + */ + spin_lock_irqsave(&io_request_lock, flags); + + if (at_head) { + SCpnt->request.next = q->current_request; + q->current_request = &SCpnt->request; + } else { + /* + * FIXME(eric) - we always insert at the tail of the list. Otherwise + * ioctl commands would always take precedence over normal I/O. + */ + SCpnt->request.next = NULL; + if (q->current_request == NULL) { + q->current_request = &SCpnt->request; + } else { + struct request *req; + + for (req = q->current_request; req; req = req->next) { + if (req->next == NULL) { + req->next = &SCpnt->request; + } + } + } + } + + /* + * Now hit the requeue function for the queue. If the host is already + * busy, so be it - we have nothing special to do. 
If the host can queue + * it, then send it off. + */ + q->request_fn(q); + spin_unlock_irqrestore(&io_request_lock, flags); + return 0; +} + +/* + * Function: scsi_init_cmd_errh() + * + * Purpose: Initialize SCpnt fields related to error handling. + * + * Arguments: SCpnt - command that is ready to be queued. + * + * Returns: Nothing + * + * Notes: This function has the job of initializing a number of + * fields related to error handling. Typically this will + * be called once for each command, as required. + */ +int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt) +{ + ASSERT_LOCK(&io_request_lock, 0); + + SCpnt->owner = SCSI_OWNER_MIDLEVEL; + SCpnt->reset_chain = NULL; + SCpnt->serial_number = 0; + SCpnt->serial_number_at_timeout = 0; + SCpnt->flags = 0; + SCpnt->retries = 0; + + SCpnt->abort_reason = 0; + + memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer); + + if (SCpnt->cmd_len == 0) + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + + /* + * We need saved copies of a number of fields - this is because + * error handling may need to overwrite these with different values + * to run different commands, and once error handling is complete, + * we will need to restore these values prior to running the actual + * command. + */ + SCpnt->old_use_sg = SCpnt->use_sg; + SCpnt->old_cmd_len = SCpnt->cmd_len; + memcpy((void *) SCpnt->data_cmnd, + (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd)); + SCpnt->buffer = SCpnt->request_buffer; + SCpnt->bufflen = SCpnt->request_bufflen; + + SCpnt->reset_chain = NULL; + + SCpnt->internal_timeout = NORMAL_TIMEOUT; + SCpnt->abort_reason = 0; + + return 1; +} + +/* + * Function: scsi_queue_next_request() + * + * Purpose: Handle post-processing of completed commands. + * + * Arguments: SCpnt - command that may need to be requeued. 
+ * + * Returns: Nothing + * + * Notes: After command completion, there may be blocks left + * over which weren't finished by the previous command + * this can be for a number of reasons - the main one is + * that a medium error occurred, and the sectors after + * the bad block need to be re-read. + * + * If SCpnt is NULL, it means that the previous command + * was completely finished, and we should simply start + * a new command, if possible. + */ +void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) +{ + int all_clear; + unsigned long flags; + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + + ASSERT_LOCK(&io_request_lock, 0); + + spin_lock_irqsave(&io_request_lock, flags); + if (SCpnt != NULL) { + + /* + * For some reason, we are not done with this request. + * This happens for I/O errors in the middle of the request, + * in which case we need to request the blocks that come after + * the bad sector. + */ + SCpnt->request.next = q->current_request; + q->current_request = &SCpnt->request; + SCpnt->request.special = (void *) SCpnt; + } + /* + * Just hit the requeue function for the queue. + * FIXME - if this queue is empty, check to see if we might need to + * start requests for other devices attached to the same host. + */ + q->request_fn(q); + + /* + * Now see whether there are other devices on the bus which + * might be starved. If so, hit the request function. If we + * don't find any, then it is safe to reset the flag. If we + * find any device that it is starved, it isn't safe to reset the + * flag as the queue function releases the lock and thus some + * other device might have become starved along the way. 
+ */ + SDpnt = (Scsi_Device *) q->queuedata; + SHpnt = SDpnt->host; + all_clear = 1; + if (SHpnt->some_device_starved) { + for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) { + request_queue_t *q; + if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) + || (SHpnt->host_blocked)) { + break; + } + if (SDpnt->device_blocked || !SDpnt->starved) { + continue; + } + q = &SDpnt->request_queue; + q->request_fn(q); + all_clear = 0; + } + if (SDpnt == NULL && all_clear) { + SHpnt->some_device_starved = 0; + } + } + spin_unlock_irqrestore(&io_request_lock, flags); +} + +/* + * Function: scsi_end_request() + * + * Purpose: Post-processing of completed commands called from interrupt + * handler. + * + * Arguments: SCpnt - command that is complete. + * uptodate - 1 if I/O indicates success, 0 for I/O error. + * sectors - number of sectors we want to mark. + * + * Lock status: Assumed that lock is not held upon entry. + * + * Returns: Nothing + * + * Notes: This is called for block device requests in order to + * mark some number of sectors as complete. 
+ */ +Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors) +{ + struct request *req; + struct buffer_head *bh; + + ASSERT_LOCK(&io_request_lock, 0); + + req = &SCpnt->request; + req->errors = 0; + if (!uptodate) { + printk(" I/O error: dev %s, sector %lu\n", + kdevname(req->rq_dev), req->sector); + } + do { + if ((bh = req->bh) != NULL) { + req->bh = bh->b_reqnext; + req->nr_sectors -= bh->b_size >> 9; + req->sector += bh->b_size >> 9; + bh->b_reqnext = NULL; + sectors -= bh->b_size >> 9; + bh->b_end_io(bh, uptodate); + if ((bh = req->bh) != NULL) { + req->current_nr_sectors = bh->b_size >> 9; + if (req->nr_sectors < req->current_nr_sectors) { + req->nr_sectors = req->current_nr_sectors; + printk("scsi_end_request: buffer-list destroyed\n"); + } + } + } + } while (sectors && bh); + + /* + * If there are blocks left over at the end, set up the command + * to queue the remainder of them. + */ + if (req->bh) { + req->buffer = bh->b_data; + return SCpnt; + } + /* + * This request is done. If there is someone blocked waiting for this + * request, wake them up. Typically used to wake up processes trying + * to swap a page into memory. + */ + if (req->sem != NULL) { + up(req->sem); + } + add_blkdev_randomness(MAJOR(req->rq_dev)); + scsi_release_command(SCpnt); + return NULL; +} + +/* + * Function: scsi_io_completion() + * + * Purpose: Completion processing for block device I/O requests. + * + * Arguments: SCpnt - command that is finished. + * + * Lock status: Assumed that no lock is held upon entry. + * + * Returns: Nothing + * + * Notes: This function is matched in terms of capabilities to + * the function that created the scatter-gather list. + * In other words, if there are no bounce buffers + * (the normal case for most drivers), we don't need + * the logic to deal with cleaning up afterwards. 
+ */ +void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, + int block_sectors) +{ + int result = SCpnt->result; + int this_count = SCpnt->bufflen >> 9; + request_queue_t *q = &SCpnt->device->request_queue; + + ASSERT_LOCK(&io_request_lock, 0); + + /* + * Free up any indirection buffers we allocated for DMA purposes. + * For the case of a READ, we need to copy the data out of the + * bounce buffer and into the real buffer. + */ + if (SCpnt->use_sg) { + struct scatterlist *sgpnt; + int i; + + sgpnt = (struct scatterlist *) SCpnt->buffer; + + for (i = 0; i < SCpnt->use_sg; i++) { + if (sgpnt[i].alt_address) { + if (SCpnt->request.cmd == READ) { + memcpy(sgpnt[i].alt_address, + sgpnt[i].address, + sgpnt[i].length); + } + scsi_free(sgpnt[i].address, sgpnt[i].length); + } + } + scsi_free(SCpnt->buffer, SCpnt->sglist_len); + } else { + if (SCpnt->buffer != SCpnt->request.buffer) { + if (SCpnt->request.cmd == READ) { + memcpy(SCpnt->request.buffer, SCpnt->buffer, + SCpnt->bufflen); + } + scsi_free(SCpnt->buffer, SCpnt->bufflen); + } + } + /* + * Next deal with any sectors which we were able to correctly + * handle. + */ + if (good_sectors > 0) { + SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n", + SCpnt->request.nr_sectors, + good_sectors)); + SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg)); + + SCpnt->request.errors = 0; + /* + * If multiple sectors are requested in one buffer, then + * they will have been finished off by the first command. + * If not, then we have a multi-buffer command. + */ + SCpnt = scsi_end_request(SCpnt, 1, good_sectors); + + /* + * If the command completed without error, then either finish off the + * rest of the command, or start a new one. + */ + if (result == 0) { + scsi_queue_next_request(q, SCpnt); + return; + } + } + /* + * Now, if we were good little boys and girls, Santa left us a request + * sense buffer. We can extract information from this, so we + * can choose a block to remap, etc. 
+ */ + if (driver_byte(result) != 0) { + if (suggestion(result) == SUGGEST_REMAP) { +#ifdef REMAP + /* + * Not yet implemented. A read will fail after being remapped, + * a write will call the strategy routine again. + */ + if (SCpnt->device->remap) { + result = 0; + } +#endif + } + if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70 + && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { + if (SCpnt->device->removable) { + /* detected disc change. set a bit and quietly refuse + * further access. + */ + SCpnt->device->changed = 1; + SCpnt = scsi_end_request(SCpnt, 0, this_count); + scsi_queue_next_request(q, SCpnt); + return; + } else { + /* + * Must have been a power glitch, or a bus reset. + * Could not have been a media change, so we just retry + * the request and see what happens. + */ + scsi_queue_next_request(q, SCpnt); + return; + } + } + /* If we had an ILLEGAL REQUEST returned, then we may have + * performed an unsupported command. The only thing this should be + * would be a ten byte read where only a six byte read was supported. + * Also, on a system where READ CAPACITY failed, we have read + * past the end of the disk. 
+ */ + + switch (SCpnt->sense_buffer[2]) { + case ILLEGAL_REQUEST: + if (SCpnt->device->ten) { + SCpnt->device->ten = 0; + scsi_queue_next_request(q, SCpnt); + result = 0; + } else { + SCpnt = scsi_end_request(SCpnt, 0, this_count); + scsi_queue_next_request(q, SCpnt); + return; + } + break; + case NOT_READY: + printk(KERN_INFO "Device %x not ready.\n", + SCpnt->request.rq_dev); + SCpnt = scsi_end_request(SCpnt, 0, this_count); + scsi_queue_next_request(q, SCpnt); + return; + break; + case MEDIUM_ERROR: + case VOLUME_OVERFLOW: + printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ", + SCpnt->host->host_no, (int) SCpnt->channel, + (int) SCpnt->target, (int) SCpnt->lun); + print_command(SCpnt->cmnd); + print_sense("sd", SCpnt); + SCpnt = scsi_end_request(SCpnt, 0, block_sectors); + scsi_queue_next_request(q, SCpnt); + return; + default: + break; + } + } /* driver byte != 0 */ + if (result) { + printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n", + SCpnt->device->host->host_no, + SCpnt->device->channel, + SCpnt->device->id, + SCpnt->device->lun, result); + + if (driver_byte(result) & DRIVER_SENSE) + print_sense("sd", SCpnt); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors); + scsi_queue_next_request(q, SCpnt); + return; + } +} + +/* + * Function: scsi_get_request_dev() + * + * Purpose: Find the upper-level driver that is responsible for this + * request + * + * Arguments: request - I/O request we are preparing to queue. + * + * Lock status: No locks assumed to be held, but as it happens the + * io_request_lock is held when this is called. + * + * Returns: Nothing + * + * Notes: The requests in the request queue may have originated + * from any block device driver. We need to find out which + * one so that we can later form the appropriate command. 
+ */ +struct Scsi_Device_Template *scsi_get_request_dev(struct request *req) +{ + struct Scsi_Device_Template *spnt; + kdev_t dev = req->rq_dev; + int major = MAJOR(dev); + + ASSERT_LOCK(&io_request_lock, 1); + + for (spnt = scsi_devicelist; spnt; spnt = spnt->next) { + /* + * Search for a block device driver that supports this + * major. + */ + if (spnt->blk && spnt->major == major) { + return spnt; + } + } + return NULL; +} + +/* + * Function: scsi_request_fn() + * + * Purpose: Generic version of request function for SCSI hosts. + * + * Arguments: q - Pointer to actual queue. + * + * Returns: Nothing + * + * Lock status: IO request lock assumed to be held when called. + * + * Notes: The theory is that this function is something which individual + * drivers could also supply if they wished to. The problem + * is that we have 30 some odd low-level drivers in the kernel + * tree already, and it would be most difficult to retrofit + * this crap into all of them. Thus this function has the job + * of acting as a generic queue manager for all of those existing + * drivers. + */ +void scsi_request_fn(request_queue_t * q) +{ + struct request *req; + Scsi_Cmnd *SCpnt; + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + struct Scsi_Device_Template *STpnt; + + ASSERT_LOCK(&io_request_lock, 1); + + SDpnt = (Scsi_Device *) q->queuedata; + if (!SDpnt) { + panic("Missing device"); + } + SHpnt = SDpnt->host; + + /* + * If the host for this device is in error recovery mode, don't + * do anything at all here. When the host leaves error recovery + * mode, it will automatically restart things and start queueing + * commands again. Same goes if the queue is actually plugged, + * if the device itself is blocked, or if the host is fully + * occupied. + */ + if (SHpnt->in_recovery + || q->plugged) { + return; + } + /* + * To start with, we keep looping until the queue is empty, or until + * the host is no longer able to accept any more requests. 
+ */ + while (1 == 1) { + /* + * If the host cannot accept another request, then quit. + */ + if (SDpnt->device_blocked) { + break; + } + if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) + || (SHpnt->host_blocked)) { + /* + * If we are unable to process any commands at all for this + * device, then we consider it to be starved. What this means + * is that there are no outstanding commands for this device + * and hence we need a little help getting it started again + * once the host isn't quite so busy. + */ + if (SDpnt->device_busy == 0) { + SDpnt->starved = 1; + SHpnt->some_device_starved = 1; + } + break; + } else { + SDpnt->starved = 0; + } + /* + * Loop through all of the requests in this queue, and find + * one that is queueable. + */ + req = q->current_request; + + /* + * If we couldn't find a request that could be queued, then we + * can also quit. + */ + if (!req) { + break; + } + /* + * Find the actual device driver associated with this command. + * The SPECIAL requests are things like character device or + * ioctls, which did not originate from ll_rw_blk. + */ + if (req->special != NULL) { + STpnt = NULL; + SCpnt = (Scsi_Cmnd *) req->special; + } else { + STpnt = scsi_get_request_dev(req); + if (!STpnt) { + panic("Unable to find device associated with request"); + } + /* + * Now try and find a command block that we can use. + */ + SCpnt = scsi_allocate_device(SDpnt, FALSE); + /* + * If so, we are ready to do something. Bump the count + * while the queue is locked and then break out of the loop. + * Otherwise loop around and try another request. + */ + if (!SCpnt) { + break; + } + SHpnt->host_busy++; + SDpnt->device_busy++; + } + + /* + * FIXME(eric) + * I am not sure where the best place to do this is. We need + * to hook in a place where we are likely to come if in user + * space. Technically the error handling thread should be + * doing this crap, but the error handler isn't used by + * most hosts. 
+ */ + if (SDpnt->was_reset) { + /* + * We need to relock the door, but we might + * be in an interrupt handler. Only do this + * from user space, since we do not want to + * sleep from an interrupt. + */ + if (SDpnt->removable && !in_interrupt()) { + spin_unlock_irq(&io_request_lock); + scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0); + SDpnt->was_reset = 0; + spin_lock_irq(&io_request_lock); + continue; + } + SDpnt->was_reset = 0; + } + /* + * Finally, before we release the lock, we copy the + * request to the command block, and remove the + * request from the request list. Note that we always + * operate on the queue head - there is absolutely no + * reason to search the list, because all of the commands + * in this queue are for the same device. + */ + q->current_request = req->next; + + if (req->special == NULL) { + memcpy(&SCpnt->request, req, sizeof(struct request)); + + /* + * We have copied the data out of the request block - it is now in + * a field in SCpnt. Release the request block. + */ + req->next = NULL; + req->rq_status = RQ_INACTIVE; + wake_up(&wait_for_request); + } + /* + * Now it is finally safe to release the lock. We are not going + * to noodle the request list until this request has been queued + * and we loop back to queue another. + */ + spin_unlock_irq(&io_request_lock); + + if (req->special == NULL) { + /* + * This will do a couple of things: + * 1) Fill in the actual SCSI command. + * 2) Fill in any other upper-level specific fields (timeout). + * + * If this returns 0, it means that the request failed (reading + * past end of disk, reading offline device, etc). This won't + * actually talk to the device, but some kinds of consistency + * checking may cause the request to be rejected immediately. + */ + if (STpnt == NULL) { + STpnt = scsi_get_request_dev(req); + } + /* + * This sets up the scatter-gather table (allocating if + * required). Hosts that need bounce buffers will also + * get those allocated here. 
+ */ + if (!SDpnt->scsi_init_io_fn(SCpnt)) { + continue; + } + /* + * Initialize the actual SCSI command for this request. + */ + if (!STpnt->init_command(SCpnt)) { + continue; + } + } + /* + * Finally, initialize any error handling parameters, and set up + * the timers for timeouts. + */ + scsi_init_cmd_errh(SCpnt); + + /* + * Dispatch the command to the low-level driver. + */ + scsi_dispatch_cmd(SCpnt); + + /* + * Now we need to grab the lock again. We are about to mess with + * the request queue and try to find another command. + */ + spin_lock_irq(&io_request_lock); + } + + /* + * If this is a single-lun device, and we are currently finished + * with this device, then see if we need to get another device + * started. + */ + if (SDpnt->single_lun + && q->current_request == NULL + && SDpnt->device_busy == 0) { + request_queue_t *q; + + for (SDpnt = SHpnt->host_queue; + SDpnt; + SDpnt = SDpnt->next) { + if (((SHpnt->can_queue > 0) + && (SHpnt->host_busy >= SHpnt->can_queue)) + || (SHpnt->host_blocked) + || (SDpnt->device_blocked)) { + break; + } + q = &SDpnt->request_queue; + q->request_fn(q); + } + } +} diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c --- v2.3.31/linux/drivers/scsi/scsi_merge.c Wed Dec 31 16:00:00 1969 +++ linux/drivers/scsi/scsi_merge.c Mon Dec 13 14:10:09 1999 @@ -0,0 +1,771 @@ +/* + * scsi_merge.c Copyright (C) 1999 Eric Youngdale + * + * SCSI queueing library. + * Initial versions: Eric Youngdale (eric@andante.org). + * Based upon conversations with large numbers + * of people at Linux Expo. + */ + +/* + * This file contains queue management functions that are used by SCSI. + * Typically this is used for several purposes. First, we need to ensure + * that commands do not grow so large that they cannot be handled all at + * once by a host adapter. The various flavors of merge functions included + * here serve this purpose. 
+ * + * Note that it would be quite trivial to allow the low-level driver the + * flexibility to define it's own queue handling functions. For the time + * being, the hooks are not present. Right now we are just using the + * data in the host template as an indicator of how we should be handling + * queues, and we select routines that are optimized for that purpose. + * + * Some hosts do not impose any restrictions on the size of a request. + * In such cases none of the merge functions in this file are called, + * and we allow ll_rw_blk to merge requests in the default manner. + * This isn't guaranteed to be optimal, but it should be pretty darned + * good. If someone comes up with ideas of better ways of managing queues + * to improve on the default behavior, then certainly fit it into this + * scheme in whatever manner makes the most sense. Please note that + * since each device has it's own queue, we have considerable flexibility + * in queue management. + */ + +#define __NO_VERSION__ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define __KERNEL_SYSCALLS__ + +#include + +#include +#include +#include + +#include "scsi.h" +#include "hosts.h" +#include "constants.h" +#include + +#ifdef CONFIG_SCSI_DEBUG_QUEUES +/* + * Enable a bunch of additional consistency checking. Turn this off + * if you are benchmarking. + */ + +static int dump_stats(struct request *req, + int use_clustering, + int dma_host, + int segments) +{ + struct buffer_head *bh; + + /* + * Dump the information that we have. We know we have an + * inconsistency. 
+ */ + printk("nr_segments is %lx\n", req->nr_segments); + printk("counted segments is %x\n", segments); + printk("Flags %d %d\n", use_clustering, dma_host); + for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) + { + printk("Segment 0x%p, blocks %d, addr 0x%lx\n", + bh, + bh->b_size >> 9, + virt_to_phys(bh->b_data - 1)); + } + panic("Ththththaats all folks. Too dangerous to continue.\n"); +} + + +/* + * Simple sanity check that we will use for the first go around + * in order to ensure that we are doing the counting correctly. + * This can be removed for optimization. + */ +#define SANITY_CHECK(req, _CLUSTER, _DMA) \ + if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA) ) \ + { \ + __label__ here; \ +here: \ + printk("Incorrect segment count at 0x%p", &&here); \ + dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \ + } +#else +#define SANITY_CHECK(req, _CLUSTER, _DMA) +#endif + +/* + * FIXME(eric) - the original disk code disabled clustering for MOD + * devices. I have no idea why we thought this was a good idea - my + * guess is that it was an attempt to limit the size of requests to MOD + * devices. + */ +#define CLUSTERABLE_DEVICE(SH,SD) (SH->use_clustering && \ + SD->type != TYPE_MOD) + +/* + * This entire source file deals with the new queueing code. + */ + +/* + * Function: __count_segments() + * + * Purpose: Prototype for queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: Count of the number of SG segments for the request. + * + * Lock status: + * + * Notes: This is only used for diagnostic purposes. 
+ */ +__inline static int __count_segments(struct request *req, + int use_clustering, + int dma_host) +{ + int ret = 1; + struct buffer_head *bh; + + for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) { + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) { + ret++; + } else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) { + /* + * This one is OK. Let it go. + */ + continue; + } + ret++; + } else { + ret++; + } + } + return ret; +} + +/* + * Function: __scsi_merge_fn() + * + * Purpose: Prototype for queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * bh - Block which we may wish to merge into request + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: 1 if it is OK to merge the block into the request. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Some drivers have limited scatter-gather table sizes, and + * thus they cannot queue an infinitely large command. This + * function is called from ll_rw_blk before it attempts to merge + * a new block into a request to make sure that the request will + * not become too large. + * + * This function is not designed to be directly called. Instead + * it should be referenced from other functions where the + * use_clustering and dma_host parameters should be integer + * constants. The compiler should thus be able to properly + * optimize the code, eliminating stuff that is irrelevant. 
+ * It is more maintainable to do this way with a single function + * than to have 4 separate functions all doing roughly the + * same thing. + */ +__inline static int __scsi_merge_fn(request_queue_t * q, + struct request *req, + struct buffer_head *bh, + int use_clustering, + int dma_host) +{ + unsigned int sector, count; + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + + SDpnt = (Scsi_Device *) q->queuedata; + SHpnt = SDpnt->host; + + count = bh->b_size >> 9; + sector = bh->b_rsector; + + /* + * We come in here in one of two cases. The first is that we + * are checking to see if we can add the buffer to the end of the + * request, the other is to see if we should add the request to the + * start. + */ + if (req->sector + req->nr_sectors == sector) { + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) { + goto new_segment; + } + if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) { + /* + * This one is OK. Let it go. + */ + return 1; + } + } + goto new_segment; + } else if (req->sector - count == sector) { + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) { + goto new_segment; + } + if (CONTIGUOUS_BUFFERS(bh, req->bh)) { + /* + * This one is OK. Let it go. + */ + return 1; + } + } + goto new_segment; + } else { + panic("Attempt to merge sector that doesn't belong"); + } + new_segment: + if (req->nr_segments < SHpnt->sg_tablesize) { + /* + * This will form the start of a new segment. Bump the + * counter. 
+ */ + req->nr_segments++; + return 1; + } else { + return 0; + } +} + +/* + * Function: scsi_merge_fn_() + * + * Purpose: queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * bh - Block which we may wish to merge into request + * + * Returns: 1 if it is OK to merge the block into the request. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Optimized for different cases depending upon whether + * ISA DMA is in use and whether clustering should be used. + */ +#define MERGEFCT(_FUNCTION, _CLUSTER, _DMA) \ +static int _FUNCTION(request_queue_t * q, \ + struct request * req, \ + struct buffer_head * bh) \ +{ \ + int ret; \ + SANITY_CHECK(req, _CLUSTER, _DMA); \ + ret = __scsi_merge_fn(q, req, bh, _CLUSTER, _DMA); \ + return ret; \ +} + +MERGEFCT(scsi_merge_fn_, 0, 0) +MERGEFCT(scsi_merge_fn_d, 0, 1) +MERGEFCT(scsi_merge_fn_c, 1, 0) +MERGEFCT(scsi_merge_fn_dc, 1, 1) +/* + * Function: __scsi_merge_requests_fn() + * + * Purpose: Prototype for queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * next - 2nd request that we might want to combine with req + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: 1 if it is OK to merge the two requests. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Some drivers have limited scatter-gather table sizes, and + * thus they cannot queue an infinitely large command. This + * function is called from ll_rw_blk before it attempts to merge + * a new block into a request to make sure that the request will + * not become too large. + * + * This function is not designed to be directly called. 
Instead + * it should be referenced from other functions where the + * use_clustering and dma_host parameters should be integer + * constants. The compiler should thus be able to properly + * optimize the code, eliminating stuff that is irrelevant. + * It is more maintainable to do this way with a single function + * than to have 4 separate functions all doing roughly the + * same thing. + */ +__inline static int __scsi_merge_requests_fn(request_queue_t * q, + struct request *req, + struct request *next, + int use_clustering, + int dma_host) +{ + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + + SDpnt = (Scsi_Device *) q->queuedata; + SHpnt = SDpnt->host; + + /* + * If the two requests together are too large (even assuming that we + * can merge the boundary requests into one segment, then don't + * allow the merge. + */ + if (req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) { + return 0; + } + /* + * The main question is whether the two segments at the boundaries + * would be considered one or two. + */ + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) { + goto dont_combine; + } + if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) { + /* + * This one is OK. Let it go. + */ + req->nr_segments += next->nr_segments - 1; + return 1; + } + } + dont_combine: + /* + * We know that the two requests at the boundary should not be combined. + * Make sure we can fix something that is the sum of the two. + * A slightly stricter test than we had above. + */ + if (req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) { + return 0; + } else { + /* + * This will form the start of a new segment. Bump the + * counter. 
+ */ + req->nr_segments += next->nr_segments; + return 1; + } +} + +/* + * Function: scsi_merge_requests_fn_() + * + * Purpose: queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * bh - Block which we may wish to merge into request + * + * Returns: 1 if it is OK to merge the block into the request. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Optimized for different cases depending upon whether + * ISA DMA is in use and whether clustering should be used. + */ +#define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA) \ +static int _FUNCTION(request_queue_t * q, \ + struct request * req, \ + struct request * next) \ +{ \ + int ret; \ + SANITY_CHECK(req, _CLUSTER, _DMA); \ + ret = __scsi_merge_requests_fn(q, req, next, _CLUSTER, _DMA); \ + return ret; \ +} + +MERGEREQFCT(scsi_merge_requests_fn_, 0, 0) +MERGEREQFCT(scsi_merge_requests_fn_d, 0, 1) +MERGEREQFCT(scsi_merge_requests_fn_c, 1, 0) +MERGEREQFCT(scsi_merge_requests_fn_dc, 1, 1) +/* + * Function: __init_io() + * + * Purpose: Prototype for io initialize function. + * + * Arguments: SCpnt - Command descriptor we wish to initialize + * sg_count_valid - 1 if the sg count in the req is valid. + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: 1 on success. + * + * Lock status: + * + * Notes: Only the SCpnt argument should be a non-constant variable. + * This function is designed in such a way that it will be + * invoked from a series of small stubs, each of which would + * be optimized for specific circumstances. + * + * The advantage of this is that hosts that don't do DMA + * get versions of the function that essentially don't have + * any of the DMA code. 
Same goes for clustering - in the + * case of hosts with no need for clustering, there is no point + * in a whole bunch of overhead. + * + * Finally, in the event that a host has set can_queue to SG_ALL + * implying that there is no limit to the length of a scatter + * gather list, the sg count in the request won't be valid + * (mainly because we don't need queue management functions + * which keep the tally uptodate. + */ +__inline static int __init_io(Scsi_Cmnd * SCpnt, + int sg_count_valid, + int use_clustering, + int dma_host) +{ + struct buffer_head *bh; + struct buffer_head *bhprev; + char *buff; + int count; + int i; + struct request *req; + struct scatterlist *sgpnt; + int this_count; + + /* + * FIXME(eric) - don't inline this - it doesn't depend on the + * integer flags. Come to think of it, I don't think this is even + * needed any more. Need to play with it and see if we hit the + * panic. If not, then don't bother. + */ + if (!SCpnt->request.bh) { + /* + * Case of page request (i.e. raw device), or unlinked buffer + * Typically used for swapping, but this isn't how we do + * swapping any more. + */ + panic("I believe this is dead code. If we hit this, I was wrong"); +#if 0 + SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9; + SCpnt->request_buffer = SCpnt->request.buffer; + SCpnt->use_sg = 0; + /* + * FIXME(eric) - need to handle DMA here. + */ +#endif + return 1; + } + req = &SCpnt->request; + /* + * First we need to know how many scatter gather segments are needed. + */ + if (!sg_count_valid) { + count = __count_segments(req, use_clustering, dma_host); + } else { + count = req->nr_segments; + } + + /* + * If the dma pool is nearly empty, then queue a minimal request + * with a single segment. Typically this will satisfy a single + * buffer. + */ + if (dma_host && scsi_dma_free_sectors <= 10) { + this_count = SCpnt->request.current_nr_sectors; + goto single_segment; + } + /* + * Don't bother with scatter-gather if there is only one segment. 
+ */ + if (count == 1) { + this_count = SCpnt->request.nr_sectors; + goto single_segment; + } + SCpnt->use_sg = count; + + /* + * Allocate the actual scatter-gather table itself. + * scsi_malloc can only allocate in chunks of 512 bytes + */ + SCpnt->sglist_len = (SCpnt->use_sg + * sizeof(struct scatterlist) + 511) & ~511; + + sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len); + + /* + * Now fill the scatter-gather table. + */ + if (!sgpnt) { + /* + * If we cannot allocate the scatter-gather table, then + * simply write the first buffer all by itself. + */ + printk("Warning - running *really* short on DMA buffers\n"); + this_count = SCpnt->request.current_nr_sectors; + goto single_segment; + } + /* + * Next, walk the list, and fill in the addresses and sizes of + * each segment. + */ + memset(sgpnt, 0, SCpnt->sglist_len); + SCpnt->request_buffer = (char *) sgpnt; + SCpnt->request_bufflen = 0; + bhprev = NULL; + + for (count = 0, bh = SCpnt->request.bh; + bh; bh = bh->b_reqnext) { + if (use_clustering && bhprev != NULL) { + if (dma_host && + virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) { + /* Nothing - fall through */ + } else if (CONTIGUOUS_BUFFERS(bhprev, bh)) { + /* + * This one is OK. Let it go. + */ + sgpnt[count - 1].length += bh->b_size; + if (!dma_host) { + SCpnt->request_bufflen += bh->b_size; + } + bhprev = bh; + continue; + } + } + count++; + sgpnt[count - 1].address = bh->b_data; + sgpnt[count - 1].length += bh->b_size; + if (!dma_host) { + SCpnt->request_bufflen += bh->b_size; + } + bhprev = bh; + } + + /* + * Verify that the count is correct. + */ + if (count != SCpnt->use_sg) { + panic("Incorrect sg segment count"); + } + if (!dma_host) { + return 1; + } + /* + * Now allocate bounce buffers, if needed. 
+ */ + SCpnt->request_bufflen = 0; + for (i = 0; i < count; i++) { + SCpnt->request_bufflen += sgpnt[i].length; + if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 > + ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) { + sgpnt[i].alt_address = sgpnt[i].address; + sgpnt[i].address = + (char *) scsi_malloc(sgpnt[i].length); + /* + * If we cannot allocate memory for this DMA bounce + * buffer, then queue just what we have done so far. + */ + if (sgpnt[i].address == NULL) { + printk("Warning - running low on DMA memory\n"); + SCpnt->request_bufflen -= sgpnt[i].length; + SCpnt->use_sg = i; + if (i == 0) { + panic("DMA pool exhausted"); + } + break; + } + if (SCpnt->request.cmd == WRITE) { + memcpy(sgpnt[i].address, sgpnt[i].alt_address, + sgpnt[i].length); + } + } + } + return 1; + + single_segment: + /* + * Come here if for any reason we choose to do this as a single + * segment. Possibly the entire request, or possibly a small + * chunk of the entire request. + */ + bh = SCpnt->request.bh; + buff = SCpnt->request.buffer; + + if (dma_host) { + /* + * Allocate a DMA bounce buffer. If the allocation fails, fall + * back and allocate a really small one - enough to satisfy + * the first buffer. 
+ */ + if (virt_to_phys(SCpnt->request.bh->b_data) + + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) { + buff = (char *) scsi_malloc(this_count << 9); + if (!buff) { + printk("Warning - running low on DMA memory\n"); + this_count = SCpnt->request.current_nr_sectors; + buff = (char *) scsi_malloc(this_count << 9); + if (!buff) { + panic("Unable to allocate DMA buffer\n"); + } + } + if (SCpnt->request.cmd == WRITE) + memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9); + } + } + SCpnt->request_bufflen = this_count << 9; + SCpnt->request_buffer = buff; + SCpnt->use_sg = 0; + return 1; +} + +#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA) \ +static int _FUNCTION(Scsi_Cmnd * SCpnt) \ +{ \ + return __init_io(SCpnt, _VALID, _CLUSTER, _DMA); \ +} + +/* + * ll_rw_blk.c now keeps track of the number of segments in + * a request. Thus we don't have to do it any more here. + * We always force "_VALID" to 1. Eventually clean this up + * and get rid of the extra argument. + */ +#if 0 +/* Old definitions */ +INITIO(scsi_init_io_, 0, 0, 0) +INITIO(scsi_init_io_d, 0, 0, 1) +INITIO(scsi_init_io_c, 0, 1, 0) +INITIO(scsi_init_io_dc, 0, 1, 1) + +/* Newer redundant definitions. */ +INITIO(scsi_init_io_, 1, 0, 0) +INITIO(scsi_init_io_d, 1, 0, 1) +INITIO(scsi_init_io_c, 1, 1, 0) +INITIO(scsi_init_io_dc, 1, 1, 1) +#endif + +INITIO(scsi_init_io_v, 1, 0, 0) +INITIO(scsi_init_io_vd, 1, 0, 1) +INITIO(scsi_init_io_vc, 1, 1, 0) +INITIO(scsi_init_io_vdc, 1, 1, 1) +/* + * Function: initialize_merge_fn() + * + * Purpose: Initialize merge function for a host + * + * Arguments: SHpnt - Host descriptor. + * + * Returns: Nothing. + * + * Lock status: + * + * Notes: + */ +void initialize_merge_fn(Scsi_Device * SDpnt) +{ + request_queue_t *q; + struct Scsi_Host *SHpnt; + SHpnt = SDpnt->host; + + q = &SDpnt->request_queue; + + /* + * If the host has already selected a merge manager, then don't + * pick a new one. 
+ */ + if (q->merge_fn != NULL) { + return; + } + /* + * If this host has an unlimited tablesize, then don't bother with a + * merge manager. The whole point of the operation is to make sure + * that requests don't grow too large, and this host isn't picky. + */ + if (SHpnt->sg_tablesize == SG_ALL) { + if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_v; + } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_vd; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_vc; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_vdc; + } + return; + } + /* + * Now pick out the correct function. + */ + if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + q->merge_fn = scsi_merge_fn_; + q->merge_requests_fn = scsi_merge_requests_fn_; + SDpnt->scsi_init_io_fn = scsi_init_io_v; + } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + q->merge_fn = scsi_merge_fn_d; + q->merge_requests_fn = scsi_merge_requests_fn_d; + SDpnt->scsi_init_io_fn = scsi_init_io_vd; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + q->merge_fn = scsi_merge_fn_c; + q->merge_requests_fn = scsi_merge_requests_fn_c; + SDpnt->scsi_init_io_fn = scsi_init_io_vc; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + q->merge_fn = scsi_merge_fn_dc; + q->merge_requests_fn = scsi_merge_requests_fn_dc; + SDpnt->scsi_init_io_fn = scsi_init_io_vdc; + } +} diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_obsolete.c linux/drivers/scsi/scsi_obsolete.c --- v2.3.31/linux/drivers/scsi/scsi_obsolete.c Tue Sep 7 12:14:06 1999 +++ linux/drivers/scsi/scsi_obsolete.c Sun Dec 12 23:04:20 1999 @@ -13,7 +13,7 @@ * Tommy Thorn * Thomas 
Wuensche * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. * @@ -84,13 +84,15 @@ extern void scsi_old_done(Scsi_Cmnd * SCpnt); int update_timeout(Scsi_Cmnd *, int); extern void scsi_old_times_out(Scsi_Cmnd * SCpnt); -extern void internal_cmnd(Scsi_Cmnd * SCpnt); + +extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt); extern volatile struct Scsi_Host *host_active; #define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \ || (HOST->can_queue && HOST->host_busy >= HOST->can_queue)) -static unsigned char generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 255, 0}; +static unsigned char generic_sense[6] = +{REQUEST_SENSE, 0, 0, 0, 255, 0}; /* * This is the number of clock ticks we should wait before we time out @@ -232,7 +234,13 @@ SCpnt->use_sg = 0; SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); SCpnt->result = 0; - internal_cmnd(SCpnt); + /* + * Ugly, ugly. The newer interfaces all assume that the lock + * isn't held. Mustn't disappoint, or we deadlock the system. + */ + spin_unlock_irq(&io_request_lock); + scsi_dispatch_cmd(SCpnt); + spin_lock_irq(&io_request_lock); } @@ -443,7 +451,7 @@ __LINE__); } } - /* end WAS_SENSE */ + /* end WAS_SENSE */ else { #ifdef DEBUG printk("COMMAND COMPLETE message returned, " @@ -628,7 +636,14 @@ SCpnt->use_sg = SCpnt->old_use_sg; SCpnt->cmd_len = SCpnt->old_cmd_len; SCpnt->result = 0; - internal_cmnd(SCpnt); + /* + * Ugly, ugly. The newer interfaces all + * assume that the lock isn't held. Mustn't + * disappoint, or we deadlock the system. 
+ */ + spin_unlock_irq(&io_request_lock); + scsi_dispatch_cmd(SCpnt); + spin_lock_irq(&io_request_lock); } break; default: @@ -641,22 +656,18 @@ #endif host->host_busy--; /* Indicate that we are free */ - if (host->block && host->host_busy == 0) { - host_active = NULL; - - /* For block devices "wake_up" is done in end_scsi_request */ - if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) { - struct Scsi_Host *next; - - for (next = host->block; next != host; next = next->block) - wake_up(&next->host_wait); - } - } - wake_up(&host->host_wait); SCpnt->result = result | ((exit & 0xff) << 24); SCpnt->use_sg = SCpnt->old_use_sg; SCpnt->cmd_len = SCpnt->old_cmd_len; + /* + * The upper layers assume the lock isn't held. We mustn't + * disappoint them. When the new error handling code is in + * use, the upper code is run from a bottom half handler, so + * it isn't an issue. + */ + spin_unlock_irq(&io_request_lock); SCpnt->done(SCpnt); + spin_lock_irq(&io_request_lock); } #undef CMD_FINISHED #undef REDO @@ -925,8 +936,7 @@ if (host->last_reset - jiffies > 20UL * HZ) host->last_reset = jiffies; } else { - if (!host->block) - host->host_busy++; + host->host_busy++; host->last_reset = jiffies; host->resetting = 1; SCpnt->flags |= (WAS_RESET | IS_RESETTING); @@ -939,8 +949,7 @@ if (time_before(host->last_reset, jiffies) || (time_after(host->last_reset, jiffies + 20 * HZ))) host->last_reset = jiffies; - if (!host->block) - host->host_busy--; + host->host_busy--; } if (reset_flags & SCSI_RESET_SYNCHRONOUS) SCpnt->flags &= ~SYNC_RESET; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_queue.c linux/drivers/scsi/scsi_queue.c --- v2.3.31/linux/drivers/scsi/scsi_queue.c Tue Sep 7 12:14:06 1999 +++ linux/drivers/scsi/scsi_queue.c Sun Dec 12 23:04:20 1999 @@ -56,14 +56,6 @@ static const char RCSid[] = "$Header: /mnt/ide/home/eric/CVSROOT/linux/drivers/scsi/scsi_queue.c,v 1.1 1997/10/21 11:16:38 eric Exp $"; -/* - * Lock used to prevent more than one process from frobbing the 
list at the - * same time. FIXME(eric) - there should be separate spinlocks for each host. - * This will reduce contention. - */ - -spinlock_t scsi_mlqueue_lock = SPIN_LOCK_UNLOCKED; -spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED; /* * Function: scsi_mlqueue_insert() @@ -73,6 +65,8 @@ * Arguments: cmd - command that we are adding to queue. * reason - why we are inserting command to queue. * + * Lock status: Assumed that lock is not held upon entry. + * * Returns: Nothing. * * Notes: We do this for one of two cases. Either the host is busy @@ -84,8 +78,6 @@ */ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason) { - Scsi_Cmnd *cpnt; - unsigned long flags; struct Scsi_Host *host; SCSI_LOG_MLQUEUE(1, printk("Inserting command %p into mlqueue\n", cmd)); @@ -126,12 +118,12 @@ * If a host is inactive and cannot queue any commands, I don't see * how things could possibly work anyways. */ - if (cmd->device->device_busy == 0) { + if (cmd->device->device_blocked == 0) { if (scsi_retry_command(cmd) == 0) { return 0; } } - cmd->device->device_busy = TRUE; + cmd->device->device_blocked = TRUE; cmd->device_wait = TRUE; } @@ -143,142 +135,9 @@ cmd->bh_next = NULL; /* - * As a performance enhancement, look to see whether the list is - * empty. If it is, then we can just atomicly insert the command - * in the list and return without locking. + * Insert this command at the head of the queue for it's device. + * It will go before all other commands that are already in the queue. */ - if (host->pending_commands == NULL) { - cpnt = xchg(&host->pending_commands, cmd); - if (cpnt == NULL) { - return 0; - } - /* - * Rats. Something slipped in while we were exchanging. - * Swap it back and fall through to do it the hard way. - */ - cmd = xchg(&host->pending_commands, cpnt); - - } - /* - * Next append the command to the list of pending commands. 
- */ - spin_lock_irqsave(&scsi_mlqueue_lock, flags); - for (cpnt = host->pending_commands; cpnt && cpnt->bh_next; - cpnt = cpnt->bh_next) { - continue; - } - if (cpnt != NULL) { - cpnt->bh_next = cmd; - } else { - host->pending_commands = cmd; - } - - spin_unlock_irqrestore(&scsi_mlqueue_lock, flags); - return 0; -} - -/* - * Function: scsi_mlqueue_finish() - * - * Purpose: Try and queue commands from the midlevel queue. - * - * Arguments: host - host that just finished a command. - * device - device that just finished a command. - * - * Returns: Nothing. - * - * Notes: This could be called either from an interrupt context or a - * normal process context. - */ -int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device) -{ - Scsi_Cmnd *cpnt; - unsigned long flags; - Scsi_Cmnd *next; - Scsi_Cmnd *prev; - int reason = 0; - int rtn; - - SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish starting\n")); - /* - * First, clear the flag for the host/device. We will then start - * pushing commands through until either something else blocks, or - * the queue is empty. - */ - if (host->host_blocked) { - reason = SCSI_MLQUEUE_HOST_BUSY; - host->host_blocked = FALSE; - } - if (device->device_busy) { - reason = SCSI_MLQUEUE_DEVICE_BUSY; - device->device_busy = FALSE; - } - /* - * Walk the list of commands to see if there is anything we can - * queue. This probably needs to be optimized for performance at - * some point. - */ - prev = NULL; - spin_lock_irqsave(&scsi_mlqueue_remove_lock, flags); - for (cpnt = host->pending_commands; cpnt; cpnt = next) { - next = cpnt->bh_next; - /* - * First, see if this command is suitable for being retried now. - */ - if (reason == SCSI_MLQUEUE_HOST_BUSY) { - /* - * The host was busy, but isn't any more. Thus we may be - * able to queue the command now, but we were waiting for - * the device, then we should keep waiting. Similarily, if - * the device is now busy, we should also keep waiting. 
- */ - if ((cpnt->host_wait == FALSE) - || (device->device_busy == TRUE)) { - prev = cpnt; - continue; - } - } - if (reason == SCSI_MLQUEUE_DEVICE_BUSY) { - /* - * The device was busy, but isn't any more. Thus we may be - * able to queue the command now, but we were waiting for - * the host, then we should keep waiting. Similarily, if - * the host is now busy, we should also keep waiting. - */ - if ((cpnt->device_wait == FALSE) - || (host->host_blocked == TRUE)) { - prev = cpnt; - continue; - } - } - /* - * First, remove the command from the list. - */ - if (prev == NULL) { - host->pending_commands = next; - } else { - prev->bh_next = next; - } - cpnt->bh_next = NULL; - - rtn = scsi_retry_command(cpnt); - - /* - * If we got a non-zero return value, it means that the host rejected - * the command. The internal_cmnd function will have added the - * command back to the end of the list, so we don't have anything - * more to do here except return. - */ - if (rtn) { - spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags); - SCSI_LOG_MLQUEUE(1, printk("Unable to remove command %p from mlqueue\n", cpnt)); - goto finish; - } - SCSI_LOG_MLQUEUE(1, printk("Removed command %p from mlqueue\n", cpnt)); - } - - spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags); -finish: - SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish returning\n")); + scsi_insert_special_cmd(cmd, 1); return 0; } diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/scsi_syms.c linux/drivers/scsi/scsi_syms.c --- v2.3.31/linux/drivers/scsi/scsi_syms.c Thu Nov 11 20:11:48 1999 +++ linux/drivers/scsi/scsi_syms.c Sun Dec 12 23:04:20 1999 @@ -33,8 +33,8 @@ * modules. 
*/ -extern void print_command (unsigned char *command); -extern void print_sense(const char * devclass, Scsi_Cmnd * SCpnt); +extern void print_command(unsigned char *command); +extern void print_sense(const char *devclass, Scsi_Cmnd * SCpnt); extern const char *const scsi_device_types[]; @@ -60,13 +60,12 @@ EXPORT_SYMBOL(scsi_dma_free_sectors); EXPORT_SYMBOL(kernel_scsi_ioctl); EXPORT_SYMBOL(scsi_need_isa_buffer); -EXPORT_SYMBOL(scsi_request_queueable); EXPORT_SYMBOL(scsi_release_command); EXPORT_SYMBOL(print_Scsi_Cmnd); EXPORT_SYMBOL(scsi_block_when_processing_errors); EXPORT_SYMBOL(scsi_mark_host_reset); EXPORT_SYMBOL(scsi_ioctl_send_command); -#if defined(CONFIG_SCSI_LOGGING) /* { */ +#if defined(CONFIG_SCSI_LOGGING) /* { */ EXPORT_SYMBOL(scsi_logging_level); #endif @@ -75,6 +74,9 @@ EXPORT_SYMBOL(proc_print_scsidevice); EXPORT_SYMBOL(proc_scsi); +EXPORT_SYMBOL(scsi_io_completion); +EXPORT_SYMBOL(scsi_end_request); + /* * These are here only while I debug the rest of the scsi stuff. */ @@ -83,5 +85,4 @@ EXPORT_SYMBOL(scsi_devicelist); EXPORT_SYMBOL(scsi_device_types); - -#endif /* CONFIG_MODULES */ +#endif /* CONFIG_MODULES */ diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sd.c linux/drivers/scsi/sd.c --- v2.3.31/linux/drivers/scsi/sd.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/scsi/sd.c Mon Dec 13 14:08:40 1999 @@ -1,6 +1,6 @@ /* * sd.c Copyright (C) 1992 Drew Eckhardt - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * * Linux scsi disk driver * Initial versions: Drew Eckhardt @@ -8,11 +8,11 @@ * * * - * Modified by Eric Youngdale ericy@cais.com to + * Modified by Eric Youngdale ericy@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. * - * Modified by Eric Youngdale eric@aib.com to support loadable + * Modified by Eric Youngdale eric@andante.org to support loadable * low-level scsi drivers. 
* * Modified by Jirka Hanika geo@ff.cuni.cz to support more @@ -96,13 +96,15 @@ static int sd_init_onedisk(int); -static void requeue_sd_request(Scsi_Cmnd * SCpnt); static int sd_init(void); static void sd_finish(void); static int sd_attach(Scsi_Device *); static int sd_detect(Scsi_Device *); static void sd_detach(Scsi_Device *); +static void rw_intr(Scsi_Cmnd * SCpnt); + +static int sd_init_command(Scsi_Cmnd *); static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg) { @@ -196,12 +198,170 @@ } struct Scsi_Device_Template sd_template = { - NULL, "disk", "sd", NULL, TYPE_DISK, - SCSI_DISK0_MAJOR, 0, 0, 0, 1, - sd_detect, sd_init, - sd_finish, sd_attach, sd_detach + name:"disk", + tag:"sd", + scsi_type:TYPE_DISK, + major:SCSI_DISK0_MAJOR, + blk:1, + detect:sd_detect, + init:sd_init, + finish:sd_finish, + attach:sd_attach, + detach:sd_detach, + init_command:sd_init_command, }; +static request_queue_t *sd_find_queue(kdev_t dev) +{ + Scsi_Disk *dpnt; + int target; + target = DEVICE_NR(dev); + + dpnt = &rscsi_disks[target]; + if (!dpnt) + return NULL; /* No such device */ + return &dpnt->device->request_queue; +} + +static int sd_init_command(Scsi_Cmnd * SCpnt) +{ + int dev, devm, block, this_count; + Scsi_Disk *dpnt; + char nbuff[6]; + + devm = MINOR(SCpnt->request.rq_dev); + dev = DEVICE_NR(SCpnt->request.rq_dev); + + block = SCpnt->request.sector; + this_count = SCpnt->request_bufflen >> 9; + + SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block)); + + dpnt = &rscsi_disks[dev]; + if (devm >= (sd_template.dev_max << 4) || + !dpnt || + !dpnt->device->online || + block + SCpnt->request.nr_sectors > sd[devm].nr_sects) { + SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); + return 0; + } + block += sd[devm].start_sect; + if 
(dpnt->device->changed) { + /* + * quietly refuse to do anything to a changed disc until the changed + * bit has been reset + */ + /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + SCSI_LOG_HLQUEUE(2, sd_devname(devm, nbuff)); + SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n", + nbuff, dev, block)); + + /* + * If we have a 1K hardware sectorsize, prevent access to single + * 512 byte sectors. In theory we could handle this - in fact + * the scsi cdrom driver must be able to handle this because + * we typically use 1K blocksizes, and cdroms typically have + * 2K hardware sectorsizes. Of course, things are simpler + * with the cdrom, since it is read-only. For performance + * reasons, the filesystems should be able to handle this + * and not force the scsi disk driver to use bounce buffers + * for this. + */ + if (dpnt->device->sector_size == 1024) { + if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { + printk("sd.c:Bad block number requested"); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 1; + this_count = this_count >> 1; + } + } + if (dpnt->device->sector_size == 2048) { + if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { + printk("sd.c:Bad block number requested"); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 2; + this_count = this_count >> 2; + } + } + switch (SCpnt->request.cmd) { + case WRITE: + if (!dpnt->device->writeable) { + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + SCpnt->cmnd[0] = WRITE_6; + break; + case READ: + SCpnt->cmnd[0] = READ_6; + break; + default: + panic("Unknown sd command %d\n", SCpnt->request.cmd); + } + + SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", + nbuff, + (SCpnt->request.cmd == WRITE) ? 
"writing" : "reading", + this_count, SCpnt->request.nr_sectors)); + + SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0; + + if (((this_count > 0xff) || (block > 0x1fffff)) || SCpnt->device->ten) { + if (this_count > 0xffff) + this_count = 0xffff; + + SCpnt->cmnd[0] += READ_10 - READ_6; + SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; + SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; + SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; + SCpnt->cmnd[5] = (unsigned char) block & 0xff; + SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; + SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; + SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; + } else { + if (this_count > 0xff) + this_count = 0xff; + + SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); + SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); + SCpnt->cmnd[3] = (unsigned char) block & 0xff; + SCpnt->cmnd[4] = (unsigned char) this_count; + SCpnt->cmnd[5] = 0; + } + + /* + * We shouldn't disconnect in the middle of a sector, so with a dumb + * host adapter, it's safe to assume that we can at least transfer + * this many bytes between each connect / disconnect. + */ + SCpnt->transfersize = dpnt->device->sector_size; + SCpnt->underflow = this_count << 9; + + SCpnt->allowed = MAX_RETRIES; + SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ? + SD_TIMEOUT : SD_MOD_TIMEOUT); + + /* + * This is the completion routine we use. This is matched in terms + * of capability to this function. + */ + SCpnt->done = rw_intr; + + /* + * This indicates that the command is ready from our end to be + * queued. + */ + return 1; +} + static int sd_open(struct inode *inode, struct file *filp) { int target; @@ -245,8 +405,9 @@ return -EROFS; } /* - * It is possible that the disk changing stuff resulted in the device being taken - * offline. If this is the case, report this to the user, and don't pretend that + * It is possible that the disk changing stuff resulted in the device + * being taken offline. 
If this is the case, report this to the user, + * and don't pretend that * the open actually succeeded. */ if (!rscsi_disks[target].device->online) { @@ -359,7 +520,7 @@ int good_sectors = (result == 0 ? this_count : 0); int block_sectors = 1; - sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff); + SCSI_LOG_HLCOMPLETE(1, sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff)); SCSI_LOG_HLCOMPLETE(1, printk("%s : rw_intr(%d, %x [%x %x])\n", nbuff, SCpnt->host->host_no, @@ -369,213 +530,57 @@ /* Handle MEDIUM ERRORs that indicate partial success. Since this is a - relatively rare error condition, no care is taken to avoid unnecessary - additional work such as memcpy's that could be avoided. - */ - - if (driver_byte(result) != 0 && /* An error occurred */ - SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */ - SCpnt->sense_buffer[2] == MEDIUM_ERROR) { - long error_sector = (SCpnt->sense_buffer[3] << 24) | - (SCpnt->sense_buffer[4] << 16) | - (SCpnt->sense_buffer[5] << 8) | - SCpnt->sense_buffer[6]; - int sector_size = - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size; - if (SCpnt->request.bh != NULL) - block_sectors = SCpnt->request.bh->b_size >> 9; - if (sector_size == 1024) { - error_sector <<= 1; - if (block_sectors < 2) - block_sectors = 2; - } else if (sector_size == 2048) { - error_sector <<= 2; - if (block_sectors < 4) - block_sectors = 4; - } else if (sector_size == 256) - error_sector >>= 1; - error_sector -= sd[SD_PARTITION(SCpnt->request.rq_dev)].start_sect; - error_sector &= ~(block_sectors - 1); - good_sectors = error_sector - SCpnt->request.sector; - if (good_sectors < 0 || good_sectors >= this_count) - good_sectors = 0; - } - /* - * First case : we assume that the command succeeded. One of two things - * will happen here. Either we will be finished, or there will be more - * sectors that we were unable to read last time. 
- */ - - if (good_sectors > 0) { - - SCSI_LOG_HLCOMPLETE(1, printk("%s : %ld sectors remain.\n", nbuff, - SCpnt->request.nr_sectors)); - SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg)); - - if (SCpnt->use_sg) { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - -#if 0 - SCSI_LOG_HLCOMPLETE(3, printk(":%p %p %d\n", sgpnt[i].alt_address, sgpnt[i].address, - sgpnt[i].length)); -#endif - - if (sgpnt[i].alt_address) { - if (SCpnt->request.cmd == READ) - memcpy(sgpnt[i].alt_address, sgpnt[i].address, - sgpnt[i].length); - scsi_free(sgpnt[i].address, sgpnt[i].length); - } - } - - /* Free list of scatter-gather pointers */ - scsi_free(SCpnt->buffer, SCpnt->sglist_len); - } else { - if (SCpnt->buffer != SCpnt->request.buffer) { - SCSI_LOG_HLCOMPLETE(3, printk("nosg: %p %p %d\n", - SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen)); - - if (SCpnt->request.cmd == READ) - memcpy(SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen); - scsi_free(SCpnt->buffer, SCpnt->bufflen); - } - } - /* - * If multiple sectors are requested in one buffer, then - * they will have been finished off by the first command. - * If not, then we have a multi-buffer command. - */ - if (SCpnt->request.nr_sectors > this_count) { - SCpnt->request.errors = 0; - - if (!SCpnt->request.bh) { - SCSI_LOG_HLCOMPLETE(2, printk("%s : handling page request, no buffer\n", - nbuff)); - - /* - * The SCpnt->request.nr_sectors field is always done in - * 512 byte sectors, even if this really isn't the case. - */ - panic("sd.c: linked page request (%lx %x)", - SCpnt->request.sector, this_count); - } - } - SCpnt = end_scsi_request(SCpnt, 1, good_sectors); - if (result == 0) { - requeue_sd_request(SCpnt); - return; - } - } - if (good_sectors == 0) { - - /* Free up any indirection buffers we allocated for DMA purposes. 
*/ - if (SCpnt->use_sg) { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - SCSI_LOG_HLCOMPLETE(3, printk("err: %p %p %d\n", - SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen)); - if (sgpnt[i].alt_address) { - scsi_free(sgpnt[i].address, sgpnt[i].length); - } - } - scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ - } else { - SCSI_LOG_HLCOMPLETE(2, printk("nosgerr: %p %p %d\n", - SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen)); - if (SCpnt->buffer != SCpnt->request.buffer) - scsi_free(SCpnt->buffer, SCpnt->bufflen); - } - } - /* - * Now, if we were good little boys and girls, Santa left us a request - * sense buffer. We can extract information from this, so we - * can choose a block to remap, etc. + relatively rare error condition, no care is taken to avoid + unnecessary additional work such as memcpy's that could be avoided. */ + /* An error occurred */ if (driver_byte(result) != 0) { - if (suggestion(result) == SUGGEST_REMAP) { -#ifdef REMAP - /* - * Not yet implemented. A read will fail after being remapped, - * a write will call the strategy routine again. - */ - if rscsi_disks - [DEVICE_NR(SCpnt->request.rq_dev)].remap - { - result = 0; - } -#endif - } - if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) { - if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { - if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) { - /* detected disc change. set a bit and quietly refuse - * further access. - */ - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1; - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sd_request(SCpnt); - return; - } else { - /* - * Must have been a power glitch, or a bus reset. - * Could not have been a media change, so we just retry - * the request and see what happens. 
- */ - requeue_sd_request(SCpnt); - return; - } + /* Sense data is valid */ + if (SCpnt->sense_buffer[0] == 0xF0 && SCpnt->sense_buffer[2] == MEDIUM_ERROR) { + long error_sector = (SCpnt->sense_buffer[3] << 24) | + (SCpnt->sense_buffer[4] << 16) | + (SCpnt->sense_buffer[5] << 8) | + SCpnt->sense_buffer[6]; + if (SCpnt->request.bh != NULL) + block_sectors = SCpnt->request.bh->b_size >> 9; + switch (SCpnt->device->sector_size) { + case 1024: + error_sector <<= 1; + if (block_sectors < 2) + block_sectors = 2; + break; + case 2048: + error_sector <<= 2; + if (block_sectors < 4) + block_sectors = 4; + break; + case 256: + error_sector >>= 1; + break; + default: + break; } + error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect; + error_sector &= ~(block_sectors - 1); + good_sectors = error_sector - SCpnt->request.sector; + if (good_sectors < 0 || good_sectors >= this_count) + good_sectors = 0; } - /* If we had an ILLEGAL REQUEST returned, then we may have - * performed an unsupported command. The only thing this should be - * would be a ten byte read where only a six byte read was supported. - * Also, on a system where READ CAPACITY failed, we have have read - * past the end of the disk. - */ - if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) { - if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) { - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0; - requeue_sd_request(SCpnt); - result = 0; - } else { - /* ???? 
*/ + if (SCpnt->device->ten == 1) { + if (SCpnt->cmnd[0] == READ_10 || + SCpnt->cmnd[0] == WRITE_10) + SCpnt->device->ten = 0; } } - if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) { - printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ", - SCpnt->host->host_no, (int) SCpnt->channel, - (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); - print_sense("sd", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, block_sectors); - requeue_sd_request(SCpnt); - return; - } - } /* driver byte != 0 */ - if (result) { - printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n", - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no, - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel, - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id, - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result); - - if (driver_byte(result) & DRIVER_SENSE) - print_sense("sd", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors); - requeue_sd_request(SCpnt); - return; } + /* + * This calls the generic completion function, now that we know + * how many actual sectors finished, and how many sectors we need + * to say have failed. + */ + scsi_io_completion(SCpnt, good_sectors, block_sectors); } /* * requeue_sd_request() is the request handler function for the sd driver. @@ -583,532 +588,6 @@ * them to SCSI commands. */ -static void do_sd_request(void) -{ - Scsi_Cmnd *SCpnt = NULL; - Scsi_Device *SDev; - struct request *req = NULL; - int flag = 0; - - while (1 == 1) { - if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) { - return; - } - INIT_SCSI_REQUEST; - SDev = rscsi_disks[CURRENT_DEV].device; - - /* - * If the host for this device is in error recovery mode, don't - * do anything at all here. When the host leaves error recovery - * mode, it will automatically restart things and start queueing - * commands again. 
- */ - if (SDev->host->in_recovery) { - return; - } - /* - * I am not sure where the best place to do this is. We need - * to hook in a place where we are likely to come if in user - * space. - */ - if (SDev->was_reset) { - /* - * We need to relock the door, but we might - * be in an interrupt handler. Only do this - * from user space, since we do not want to - * sleep from an interrupt. FIXME(eric) - do this - * from the kernel error handling thred. - */ - if (SDev->removable && !in_interrupt()) { - spin_unlock_irq(&io_request_lock); /* FIXME!!!! */ - scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0); - /* scsi_ioctl may allow CURRENT to change, so start over. */ - SDev->was_reset = 0; - spin_lock_irq(&io_request_lock); /* FIXME!!!! */ - continue; - } - SDev->was_reset = 0; - } - /* We have to be careful here. scsi_allocate_device will get a free pointer, - * but there is no guarantee that it is queueable. In normal usage, - * we want to call this, because other types of devices may have the - * host all tied up, and we want to make sure that we have at least - * one request pending for this type of device. We can also come - * through here while servicing an interrupt, because of the need to - * start another command. If we call scsi_allocate_device more than once, - * then the system can wedge if the command is not queueable. The - * scsi_request_queueable function is safe because it checks to make sure - * that the host is able to take another command before it returns - * a pointer. - */ - - if (flag++ == 0) - SCpnt = scsi_allocate_device(&CURRENT, - rscsi_disks[CURRENT_DEV].device, 0); - else - SCpnt = NULL; - - /* - * The following restore_flags leads to latency problems. FIXME. - * Using a "sti()" gets rid of the latency problems but causes - * race conditions and crashes. - */ - - /* This is a performance enhancement. We dig down into the request - * list and try to find a queueable request (i.e. device not busy, - * and host able to accept another command. 
If we find one, then we - * queue it. This can make a big difference on systems with more than - * one disk drive. We want to have the interrupts off when monkeying - * with the request list, because otherwise the kernel might try to - * slip in a request in between somewhere. - * - * FIXME(eric) - this doesn't belong at this level. The device code in - * ll_rw_blk.c should know how to dig down into the device queue to - * figure out what it can deal with, and what it can't. Consider - * possibility of pulling entire queue down into scsi layer. - */ - if (!SCpnt && sd_template.nr_dev > 1) { - struct request *req1; - req1 = NULL; - req = CURRENT; - while (req) { - SCpnt = scsi_request_queueable(req, - rscsi_disks[DEVICE_NR(req->rq_dev)].device); - if (SCpnt) - break; - req1 = req; - req = req->next; - } - if (SCpnt && req->rq_status == RQ_INACTIVE) { - if (req == CURRENT) - CURRENT = CURRENT->next; - else - req1->next = req->next; - } - } - if (!SCpnt) - return; /* Could not find anything to do */ - - /* Queue command */ - requeue_sd_request(SCpnt); - } /* While */ -} - -static void requeue_sd_request(Scsi_Cmnd * SCpnt) -{ - int dev, devm, block, this_count; - unsigned char cmd[10]; - char nbuff[6]; - int bounce_size, contiguous; - int max_sg; - struct buffer_head *bh, *bhp; - char *buff, *bounce_buffer; - -repeat: - - if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) { - do_sd_request(); - return; - } - devm = SD_PARTITION(SCpnt->request.rq_dev); - dev = DEVICE_NR(SCpnt->request.rq_dev); - - block = SCpnt->request.sector; - this_count = 0; - - SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block)); - - if (devm >= (sd_template.dev_max << 4) || - !rscsi_disks[dev].device || - !rscsi_disks[dev].device->online || - block + SCpnt->request.nr_sectors > sd[devm].nr_sects) { - SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - 
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); - goto repeat; - } - block += sd[devm].start_sect; - - if (rscsi_disks[dev].device->changed) { - /* - * quietly refuse to do anything to a changed disc until the changed - * bit has been reset - */ - /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - sd_devname(devm >> 4, nbuff); - SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n", - nbuff, dev, block)); - - /* - * If we have a 1K hardware sectorsize, prevent access to single - * 512 byte sectors. In theory we could handle this - in fact - * the scsi cdrom driver must be able to handle this because - * we typically use 1K blocksizes, and cdroms typically have - * 2K hardware sectorsizes. Of course, things are simpler - * with the cdrom, since it is read-only. For performance - * reasons, the filesystems should be able to handle this - * and not force the scsi disk driver to use bounce buffers - * for this. 
- */ - if (rscsi_disks[dev].sector_size == 1024) - if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { - printk("sd.c:Bad block number/count requested"); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - if (rscsi_disks[dev].sector_size == 2048) - if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { - printk("sd.c:Bad block number/count requested"); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - if (rscsi_disks[dev].sector_size == 4096) - if ((block & 7) || (SCpnt->request.nr_sectors & 7)) { - printk("sd.cBad block number/count requested"); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - switch (SCpnt->request.cmd) { - case WRITE: - if (!rscsi_disks[dev].device->writeable) { - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - cmd[0] = WRITE_6; - break; - case READ: - cmd[0] = READ_6; - break; - default: - panic("Unknown sd command %d\n", SCpnt->request.cmd); - } - - SCpnt->this_count = 0; - - /* If the host adapter can deal with very large scatter-gather - * requests, it is a waste of time to cluster - */ - contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 : 1); - bounce_buffer = NULL; - bounce_size = (SCpnt->request.nr_sectors << 9); - - /* First see if we need a bounce buffer for this request. If we do, make - * sure that we can allocate a buffer. 
Do not waste space by allocating - * a bounce buffer if we are straddling the 16Mb line - */ - if (contiguous && SCpnt->request.bh && - virt_to_phys(SCpnt->request.bh->b_data) - + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD - && SCpnt->host->unchecked_isa_dma) { - if (virt_to_phys(SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD) - bounce_buffer = (char *) scsi_malloc(bounce_size); - if (!bounce_buffer) - contiguous = 0; - } - if (contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext) - for (bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, - bhp = bhp->b_reqnext) { - if (!CONTIGUOUS_BUFFERS(bh, bhp)) { - if (bounce_buffer) - scsi_free(bounce_buffer, bounce_size); - contiguous = 0; - break; - } - } - if (!SCpnt->request.bh || contiguous) { - - /* case of page request (i.e. raw device), or unlinked buffer */ - this_count = SCpnt->request.nr_sectors; - buff = SCpnt->request.buffer; - SCpnt->use_sg = 0; - - } else if (SCpnt->host->sg_tablesize == 0 || - (scsi_need_isa_buffer && scsi_dma_free_sectors <= 10)) { - - /* Case of host adapter that cannot scatter-gather. We also - * come here if we are running low on DMA buffer memory. We set - * a threshold higher than that we would need for this request so - * we leave room for other requests. Even though we would not need - * it all, we need to be conservative, because if we run low enough - * we have no choice but to panic. - */ - if (SCpnt->host->sg_tablesize != 0 && - scsi_need_isa_buffer && - scsi_dma_free_sectors <= 10) - printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n"); - - this_count = SCpnt->request.current_nr_sectors; - buff = SCpnt->request.buffer; - SCpnt->use_sg = 0; - - } else { - - /* Scatter-gather capable host adapter */ - struct scatterlist *sgpnt; - int count, this_count_max; - int counted; - - bh = SCpnt->request.bh; - this_count = 0; - this_count_max = (rscsi_disks[dev].ten ? 
0xffff : 0xff); - count = 0; - bhp = NULL; - while (bh) { - if ((this_count + (bh->b_size >> 9)) > this_count_max) - break; - if (!bhp || !CONTIGUOUS_BUFFERS(bhp, bh) || - !CLUSTERABLE_DEVICE(SCpnt) || - (SCpnt->host->unchecked_isa_dma && - virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD)) { - if (count < SCpnt->host->sg_tablesize) - count++; - else - break; - } - this_count += (bh->b_size >> 9); - bhp = bh; - bh = bh->b_reqnext; - } -#if 0 - if (SCpnt->host->unchecked_isa_dma && - virt_to_phys(SCpnt->request.bh->b_data - 1) == ISA_DMA_THRESHOLD) - count--; -#endif - SCpnt->use_sg = count; /* Number of chains */ - /* scsi_malloc can only allocate in chunks of 512 bytes */ - count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511; - - SCpnt->sglist_len = count; - max_sg = count / sizeof(struct scatterlist); - if (SCpnt->host->sg_tablesize < max_sg) - max_sg = SCpnt->host->sg_tablesize; - sgpnt = (struct scatterlist *) scsi_malloc(count); - if (!sgpnt) { - printk("Warning - running *really* short on DMA buffers\n"); - SCpnt->use_sg = 0; /* No memory left - bail out */ - this_count = SCpnt->request.current_nr_sectors; - buff = SCpnt->request.buffer; - } else { - memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only - * if memory is available - */ - buff = (char *) sgpnt; - counted = 0; - for (count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext; - count < SCpnt->use_sg && bh; - count++, bh = bhp) { - - bhp = bh->b_reqnext; - - if (!sgpnt[count].address) - sgpnt[count].address = bh->b_data; - sgpnt[count].length += bh->b_size; - counted += bh->b_size >> 9; - - if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 > - ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) && - !sgpnt[count].alt_address) { - sgpnt[count].alt_address = sgpnt[count].address; - /* We try to avoid exhausting the DMA pool, since it is - * easier to control usage here. 
In other places we might - * have a more pressing need, and we would be screwed if - * we ran out */ - if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 10) { - sgpnt[count].address = NULL; - } else { - sgpnt[count].address = - (char *) scsi_malloc(sgpnt[count].length); - } - /* If we start running low on DMA buffers, we abort the - * scatter-gather operation, and free all of the memory - * we have allocated. We want to ensure that all scsi - * operations are able to do at least a non-scatter/gather - * operation */ - if (sgpnt[count].address == NULL) { /* Out of dma memory */ -#if 0 - printk("Warning: Running low on SCSI DMA buffers"); - /* Try switching back to a non s-g operation. */ - while (--count >= 0) { - if (sgpnt[count].alt_address) - scsi_free(sgpnt[count].address, - sgpnt[count].length); - } - this_count = SCpnt->request.current_nr_sectors; - buff = SCpnt->request.buffer; - SCpnt->use_sg = 0; - scsi_free(sgpnt, SCpnt->sglist_len); -#endif - SCpnt->use_sg = count; - this_count = counted -= bh->b_size >> 9; - break; - } - } - /* Only cluster buffers if we know that we can supply DMA - * buffers large enough to satisfy the request. Do not cluster - * a new request if this would mean that we suddenly need to - * start using DMA bounce buffers */ - if (bhp && CONTIGUOUS_BUFFERS(bh, bhp) - && CLUSTERABLE_DEVICE(SCpnt)) { - char *tmp; - - if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length + - bhp->b_size - 1 > ISA_DMA_THRESHOLD && - (SCpnt->host->unchecked_isa_dma) && - !sgpnt[count].alt_address) - continue; - - if (!sgpnt[count].alt_address) { - count--; - continue; - } - if (scsi_dma_free_sectors > 10) - tmp = (char *) scsi_malloc(sgpnt[count].length - + bhp->b_size); - else { - tmp = NULL; - max_sg = SCpnt->use_sg; - } - if (tmp) { - scsi_free(sgpnt[count].address, sgpnt[count].length); - sgpnt[count].address = tmp; - count--; - continue; - } - /* If we are allowed another sg chain, then increment - * counter so we can insert it. 
Otherwise we will end - up truncating */ - - if (SCpnt->use_sg < max_sg) - SCpnt->use_sg++; - } /* contiguous buffers */ - } /* for loop */ - - /* This is actually how many we are going to transfer */ - this_count = counted; - - if (count < SCpnt->use_sg || SCpnt->use_sg - > SCpnt->host->sg_tablesize) { - bh = SCpnt->request.bh; - printk("Use sg, count %d %x %d\n", - SCpnt->use_sg, count, scsi_dma_free_sectors); - printk("maxsg = %x, counted = %d this_count = %d\n", - max_sg, counted, this_count); - while (bh) { - printk("[%p %x] ", bh->b_data, bh->b_size); - bh = bh->b_reqnext; - } - if (SCpnt->use_sg < 16) - for (count = 0; count < SCpnt->use_sg; count++) - printk("{%d:%p %p %d} ", count, - sgpnt[count].address, - sgpnt[count].alt_address, - sgpnt[count].length); - panic("Ooops"); - } - if (SCpnt->request.cmd == WRITE) - for (count = 0; count < SCpnt->use_sg; count++) - if (sgpnt[count].alt_address) - memcpy(sgpnt[count].address, sgpnt[count].alt_address, - sgpnt[count].length); - } /* Able to malloc sgpnt */ - } /* Host adapter capable of scatter-gather */ - - /* Now handle the possibility of DMA to addresses > 16Mb */ - - if (SCpnt->use_sg == 0) { - if (virt_to_phys(buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && - (SCpnt->host->unchecked_isa_dma)) { - if (bounce_buffer) - buff = bounce_buffer; - else - buff = (char *) scsi_malloc(this_count << 9); - if (buff == NULL) { /* Try backing off a bit if we are low on mem */ - this_count = SCpnt->request.current_nr_sectors; - buff = (char *) scsi_malloc(this_count << 9); - if (!buff) - panic("Ran out of DMA buffers."); - } - if (SCpnt->request.cmd == WRITE) - memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9); - } - } - SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", - nbuff, - (SCpnt->request.cmd == WRITE) ? 
"writing" : "reading", - this_count, SCpnt->request.nr_sectors)); - - cmd[1] = (SCpnt->lun << 5) & 0xe0; - - if (rscsi_disks[dev].sector_size == 4096) { - if (block & 7) - panic("sd.c:Bad block number requested"); - if (this_count & 7) - panic("sd.c:Bad block number requested"); - block = block >> 3; - this_count = block >> 3; - } - if (rscsi_disks[dev].sector_size == 2048) { - if (block & 3) - panic("sd.c:Bad block number requested"); - if (this_count & 3) - panic("sd.c:Bad block number requested"); - block = block >> 2; - this_count = this_count >> 2; - } - if (rscsi_disks[dev].sector_size == 1024) { - if (block & 1) - panic("sd.c:Bad block number requested"); - if (this_count & 1) - panic("sd.c:Bad block number requested"); - block = block >> 1; - this_count = this_count >> 1; - } - if (rscsi_disks[dev].sector_size == 256) { - block = block << 1; - this_count = this_count << 1; - } - if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten) { - if (this_count > 0xffff) - this_count = 0xffff; - - cmd[0] += READ_10 - READ_6; - cmd[2] = (unsigned char) (block >> 24) & 0xff; - cmd[3] = (unsigned char) (block >> 16) & 0xff; - cmd[4] = (unsigned char) (block >> 8) & 0xff; - cmd[5] = (unsigned char) block & 0xff; - cmd[6] = cmd[9] = 0; - cmd[7] = (unsigned char) (this_count >> 8) & 0xff; - cmd[8] = (unsigned char) this_count & 0xff; - } else { - if (this_count > 0xff) - this_count = 0xff; - - cmd[1] |= (unsigned char) ((block >> 16) & 0x1f); - cmd[2] = (unsigned char) ((block >> 8) & 0xff); - cmd[3] = (unsigned char) block & 0xff; - cmd[4] = (unsigned char) this_count; - cmd[5] = 0; - } - - /* - * We shouldn't disconnect in the middle of a sector, so with a dumb - * host adapter, it's safe to assume that we can at least transfer - * this many bytes between each connect / disconnect. 
- */ - - SCpnt->transfersize = rscsi_disks[dev].sector_size; - SCpnt->underflow = this_count << 9; - SCpnt->cmd_len = 0; - scsi_do_cmd(SCpnt, (void *) cmd, buff, - this_count * rscsi_disks[dev].sector_size, - rw_intr, - (SCpnt->device->type == TYPE_DISK ? - SD_TIMEOUT : SD_MOD_TIMEOUT), - MAX_RETRIES); -} static int check_scsidisk_media_change(kdev_t full_dev) { @@ -1128,10 +607,10 @@ return 0; /* - * If the device is offline, don't send any commands - just pretend as if - * the command failed. If the device ever comes back online, we can deal with - * it then. It is only because of unrecoverable errors that we would ever - * take a device offline in the first place. + * If the device is offline, don't send any commands - just pretend as + * if the command failed. If the device ever comes back online, we + * can deal with it then. It is only because of unrecoverable errors + * that we would ever take a device offline in the first place. */ if (rscsi_disks[target].device->online == FALSE) { rscsi_disks[target].ready = 0; @@ -1149,10 +628,11 @@ */ retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_START_UNIT, 0); - if (retval) { /* Unable to test, unit probably not ready. This usually - * means there is no disc in the drive. Mark as changed, - * and we will figure it out later once the drive is - * available again. */ + if (retval) { /* Unable to test, unit probably not ready. + * This usually means there is no disc in the + * drive. Mark as changed, and we will figure + * it out later once the drive is available + * again. 
*/ rscsi_disks[target].ready = 0; rscsi_disks[target].device->changed = 1; @@ -1173,19 +653,17 @@ return retval; } -static void sd_wait_cmd (Scsi_Cmnd * SCpnt, const void *cmnd , - void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *), - int timeout, int retries) +static void sd_wait_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, + void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *), + int timeout, int retries) { DECLARE_MUTEX_LOCKED(sem); - + SCpnt->request.sem = &sem; SCpnt->request.rq_status = RQ_SCSI_BUSY; - scsi_do_cmd (SCpnt, (void *) cmnd, - buffer, bufflen, done, timeout, retries); - spin_unlock_irq(&io_request_lock); - down (&sem); - spin_lock_irq(&io_request_lock); + scsi_do_cmd(SCpnt, (void *) cmnd, + buffer, bufflen, done, timeout, retries); + down(&sem); SCpnt->request.sem = NULL; } @@ -1207,6 +685,7 @@ unsigned char *buffer; unsigned long spintime_value = 0; int the_result, retries, spintime; + int sector_size; Scsi_Cmnd *SCpnt; /* @@ -1221,14 +700,13 @@ if (rscsi_disks[i].device->online == FALSE) { return i; } - spin_lock_irq(&io_request_lock); - /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is * considered a fatal error, and many devices report such an error * just after a scsi bus reset. */ - SCpnt = scsi_allocate_device(NULL, rscsi_disks[i].device, 1); + SCpnt = scsi_allocate_device(rscsi_disks[i].device, 1); + buffer = (unsigned char *) scsi_malloc(512); spintime = 0; @@ -1237,7 +715,7 @@ /* Spinup needs to be done for module loads too. */ do { retries = 0; - + while (retries < 3) { cmd[0] = TEST_UNIT_READY; cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0; @@ -1259,11 +737,9 @@ /* Look for non-removable devices that return NOT_READY. * Issue command to spin up drive for these cases. 
*/ if (the_result && !rscsi_disks[i].device->removable && - SCpnt->sense_buffer[2] == NOT_READY) - { + SCpnt->sense_buffer[2] == NOT_READY) { unsigned long time1; - if (!spintime) - { + if (!spintime) { printk("%s: Spinning up disk...", nbuff); cmd[0] = START_STOP; cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0; @@ -1275,19 +751,19 @@ SCpnt->sense_buffer[2] = 0; sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, - 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); + 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); } - spintime = 1; spintime_value = jiffies; - time1 = jiffies + HZ; - spin_unlock_irq(&io_request_lock); - while(time_before(jiffies, time1)); /* Wait 1 second for next try */ + time1 = HZ; + /* Wait 1 second for next try */ + do { + current->state = TASK_UNINTERRUPTIBLE; + time1 = schedule_timeout(time1); + } while(time1); printk("."); - spin_lock_irq(&io_request_lock); } - } while(the_result && spintime && time_after(spintime_value+100*HZ, jiffies)); - + } while (the_result && spintime && time_after(spintime_value + 100 * HZ, jiffies)); if (spintime) { if (the_result) printk("not responding...\n"); @@ -1305,7 +781,7 @@ SCpnt->sense_buffer[2] = 0; sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, - 8, sd_init_done, SD_TIMEOUT, MAX_RETRIES); + 8, sd_init_done, SD_TIMEOUT, MAX_RETRIES); the_result = SCpnt->result; retries--; @@ -1344,7 +820,7 @@ printk("%s : block size assumed to be 512 bytes, disk size 1GB. \n", nbuff); rscsi_disks[i].capacity = 0x1fffff; - rscsi_disks[i].sector_size = 512; + sector_size = 512; /* Set dirty bit for removable devices if not ready - sometimes drives * will not report this properly. 
*/ @@ -1363,38 +839,29 @@ (buffer[2] << 8) | buffer[3]); - rscsi_disks[i].sector_size = (buffer[4] << 24) | + sector_size = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; - if (rscsi_disks[i].sector_size == 0) { - rscsi_disks[i].sector_size = 512; + if (sector_size == 0) { + sector_size = 512; printk("%s : sector size 0 reported, assuming 512.\n", nbuff); } - if (rscsi_disks[i].sector_size != 512 && - rscsi_disks[i].sector_size != 1024 && - rscsi_disks[i].sector_size != 2048 && - rscsi_disks[i].sector_size != 4096 && - rscsi_disks[i].sector_size != 256) { + if (sector_size != 512 && + sector_size != 1024 && + sector_size != 2048 && + sector_size != 4096 && + sector_size != 256) { printk("%s : unsupported sector size %d.\n", - nbuff, rscsi_disks[i].sector_size); - if (rscsi_disks[i].device->removable) { - rscsi_disks[i].capacity = 0; - } else { - printk("scsi : deleting disk entry.\n"); - sd_detach(rscsi_disks[i].device); - rscsi_disks[i].device = NULL; - - /* Wake up a process waiting for device */ - wake_up(&SCpnt->device->device_wait); - scsi_release_command(SCpnt); - SCpnt = NULL; - scsi_free(buffer, 512); - spin_unlock_irq(&io_request_lock); - - return i; - } + nbuff, sector_size); + /* + * The user might want to re-format the drive with + * a supported sectorsize. Once this happens, it + * would be relatively trivial to set the thing up. + * For this reason, we leave the thing in the table. 
+ */ + rscsi_disks[i].capacity = 0; } - if (rscsi_disks[i].sector_size == 2048) { + if (sector_size == 2048) { int m; /* @@ -1414,7 +881,7 @@ */ int m, mb; int sz_quot, sz_rem; - int hard_sector = rscsi_disks[i].sector_size; + int hard_sector = sector_size; /* There are 16 minors allocated for each major device */ for (m = i << 4; m < ((i + 1) << 4); m++) { sd_hardsizes[m] = hard_sector; @@ -1429,13 +896,13 @@ nbuff, hard_sector, rscsi_disks[i].capacity, mb, sz_quot, sz_rem); } - if (rscsi_disks[i].sector_size == 4096) + if (sector_size == 4096) rscsi_disks[i].capacity <<= 3; - if (rscsi_disks[i].sector_size == 2048) + if (sector_size == 2048) rscsi_disks[i].capacity <<= 2; /* Change into 512 byte sectors */ - if (rscsi_disks[i].sector_size == 1024) + if (sector_size == 1024) rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */ - if (rscsi_disks[i].sector_size == 256) + if (sector_size == 256) rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */ } @@ -1465,7 +932,7 @@ /* same code as READCAPA !! */ sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, - 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); + 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); the_result = SCpnt->result; @@ -1479,15 +946,15 @@ } } /* check for write protect */ + SCpnt->device->ten = 1; + SCpnt->device->remap = 1; + SCpnt->device->sector_size = sector_size; /* Wake up a process waiting for device */ wake_up(&SCpnt->device->device_wait); scsi_release_command(SCpnt); SCpnt = NULL; - rscsi_disks[i].ten = 1; - rscsi_disks[i].remap = 1; scsi_free(buffer, 512); - spin_unlock_irq(&io_request_lock); return i; } @@ -1572,23 +1039,14 @@ return 0; } -/* - * sd_get_queue() returns the queue which corresponds to a given device. 
- */ -static struct request **sd_get_queue(kdev_t dev) -{ - return &blk_dev[MAJOR_NR].current_request; -} + static void sd_finish() { struct gendisk *gendisk; int i; for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) { - /* FIXME: After 2.2 we should implement multiple sd queues */ - blk_dev[SD_MAJOR(i)].request_fn = DEVICE_REQUEST; - if (i) - blk_dev[SD_MAJOR(i)].queue = sd_get_queue; + blk_dev[SD_MAJOR(i)].queue = sd_find_queue; } for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next) if (gendisk == sd_gendisks) @@ -1658,7 +1116,6 @@ if (i >= sd_template.dev_max) panic("scsi_devices corrupt (sd)"); - SDp->scsi_request_fn = do_sd_request; rscsi_disks[i].device = SDp; rscsi_disks[i].has_part_table = 0; sd_template.nr_dev++; @@ -1713,7 +1170,7 @@ * to make sure that everything remains consistent. */ sd_blocksizes[index] = 1024; - if (rscsi_disks[target].sector_size == 2048) + if (rscsi_disks[target].device->sector_size == 2048) sd_blocksizes[index] = 2048; else sd_blocksizes[index] = 1024; @@ -1824,7 +1281,7 @@ } for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) { - blk_dev[SD_MAJOR(i)].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(SD_MAJOR(i))); blk_size[SD_MAJOR(i)] = NULL; hardsect_size[SD_MAJOR(i)] = NULL; read_ahead[SD_MAJOR(i)] = 0; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sd.h linux/drivers/scsi/sd.h --- v2.3.31/linux/drivers/scsi/sd.h Tue Sep 7 12:14:06 1999 +++ linux/drivers/scsi/sd.h Tue Dec 14 00:56:51 1999 @@ -5,7 +5,7 @@ * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. 
*/ @@ -27,14 +27,11 @@ typedef struct scsi_disk { unsigned capacity; /* size in blocks */ - unsigned sector_size; /* size in bytes */ Scsi_Device *device; unsigned char ready; /* flag ready for FLOPTICAL */ unsigned char write_prot; /* flag write_protect for rmvable dev */ unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ - unsigned ten:1; /* support ten byte read / write */ - unsigned remap:1; /* support remapping */ unsigned has_part_table:1; /* has partition table */ } Scsi_Disk; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sg.c linux/drivers/scsi/sg.c --- v2.3.31/linux/drivers/scsi/sg.c Mon Oct 11 15:38:15 1999 +++ linux/drivers/scsi/sg.c Sun Dec 12 23:04:20 1999 @@ -355,7 +355,6 @@ static ssize_t sg_write(struct file * filp, const char * buf, size_t count, loff_t *ppos) { - unsigned long flags; int mxsize, cmd_size, k; unsigned char cmnd[MAX_COMMAND_SIZE]; int input_size; @@ -432,8 +431,9 @@ return k; /* probably out of space --> ENOMEM */ } /* SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */ - if (! (SCpnt = scsi_allocate_device(NULL, sdp->device, - !(filp->f_flags & O_NONBLOCK)))) { + if (! 
(SCpnt = scsi_allocate_device(sdp->device, + !(filp->f_flags & O_NONBLOCK)))) + { sg_finish_rem_req(srp, NULL, 0); return -EAGAIN; /* No available command blocks at the moment */ } @@ -448,7 +448,6 @@ cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5); /* SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */ - spin_lock_irqsave(&io_request_lock, flags); SCpnt->use_sg = srp->data.use_sg; SCpnt->sglist_len = srp->data.sglist_len; SCpnt->bufflen = srp->data.bufflen; @@ -467,7 +466,6 @@ (void *)SCpnt->buffer, mxsize, sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES); /* 'mxsize' overwrites SCpnt->bufflen, hence need for b_malloc_len */ - spin_unlock_irqrestore(&io_request_lock, flags); /* SCSI_LOG_TIMEOUT(6, printk("sg_write: sent scsi cmd to mid-level\n")); */ return count; } @@ -1116,7 +1114,9 @@ scsi_add_timer(scpnt, scpnt->timeout_per_command, scsi_old_times_out); #else + spin_unlock_irq(&io_request_lock); scsi_sleep(HZ); /* just sleep 1 second and hope ... */ + spin_lock_irq(&io_request_lock); #endif } diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sim710_d.h linux/drivers/scsi/sim710_d.h --- v2.3.31/linux/drivers/scsi/sim710_d.h Mon Nov 1 13:56:26 1999 +++ linux/drivers/scsi/sim710_d.h Wed Dec 8 15:17:55 1999 @@ -1,3 +1,4 @@ +/* DO NOT EDIT - Generated automatically by script_asm.pl */ static u32 SCRIPT[] = { /* diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sr.c linux/drivers/scsi/sr.c --- v2.3.31/linux/drivers/scsi/sr.c Sun Nov 7 16:37:34 1999 +++ linux/drivers/scsi/sr.c Mon Dec 13 14:08:40 1999 @@ -1,17 +1,17 @@ /* * sr.c Copyright (C) 1992 David Giller - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * * adapted from: * sd.c Copyright (C) 1992 Drew Eckhardt * Linux scsi disk driver by * Drew Eckhardt * - * Modified by Eric Youngdale ericy@cais.com to + * Modified by Eric Youngdale ericy@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. 
* - * Modified by Eric Youngdale eric@aib.com to support loadable + * Modified by Eric Youngdale eric@andante.org to support loadable * low-level scsi drivers. * * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to @@ -60,17 +60,28 @@ static int sr_detect(Scsi_Device *); static void sr_detach(Scsi_Device *); -struct Scsi_Device_Template sr_template = { - NULL, "cdrom", "sr", NULL, TYPE_ROM, - SCSI_CDROM_MAJOR, 0, 0, 0, 1, - sr_detect, sr_init, - sr_finish, sr_attach, sr_detach +static int sr_init_command(Scsi_Cmnd *); + +struct Scsi_Device_Template sr_template = +{ + name:"cdrom", + tag:"sr", + scsi_type:TYPE_ROM, + major:SCSI_CDROM_MAJOR, + blk:1, + detect:sr_detect, + init:sr_init, + finish:sr_finish, + attach:sr_attach, + detach:sr_detach, + init_command:sr_init_command }; Scsi_CD *scsi_CDs = NULL; static int *sr_sizes = NULL; static int *sr_blocksizes = NULL; +static int *sr_hardsizes = NULL; static int sr_open(struct cdrom_device_info *, int); void get_sectorsize(int); @@ -82,7 +93,7 @@ static void sr_release(struct cdrom_device_info *cdi) { - if (scsi_CDs[MINOR(cdi->dev)].sector_size > 2048) + if (scsi_CDs[MINOR(cdi->dev)].device->sector_size > 2048) sr_set_blocklength(MINOR(cdi->dev), 2048); sync_dev(cdi->dev); scsi_CDs[MINOR(cdi->dev)].device->access_count--; @@ -108,7 +119,7 @@ sr_audio_ioctl, /* audio ioctl */ sr_dev_ioctl, /* device-specific ioctl */ CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | - CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | + CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET, @@ -165,7 +176,7 @@ */ scsi_CDs[MINOR(cdi->dev)].needs_sector_size = 1; - scsi_CDs[MINOR(cdi->dev)].sector_size = 2048; + scsi_CDs[MINOR(cdi->dev)].device->sector_size = 2048; } return retval; } @@ -178,7 +189,7 @@ static void rw_intr(Scsi_Cmnd * SCpnt) { int 
result = SCpnt->result; - int this_count = SCpnt->this_count; + int this_count = SCpnt->bufflen >> 9; int good_sectors = (result == 0 ? this_count : 0); int block_sectors = 0; @@ -191,6 +202,7 @@ avoid unnecessary additional work such as memcpy's that could be avoided. */ + if (driver_byte(result) != 0 && /* An error occurred */ SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */ (SCpnt->sense_buffer[2] == MEDIUM_ERROR || @@ -205,177 +217,169 @@ block_sectors = SCpnt->request.bh->b_size >> 9; if (block_sectors < 4) block_sectors = 4; - if (scsi_CDs[device_nr].sector_size == 2048) + if (scsi_CDs[device_nr].device->sector_size == 2048) error_sector <<= 2; error_sector &= ~(block_sectors - 1); good_sectors = error_sector - SCpnt->request.sector; if (good_sectors < 0 || good_sectors >= this_count) good_sectors = 0; /* - The SCSI specification allows for the value returned by READ - CAPACITY to be up to 75 2K sectors past the last readable - block. Therefore, if we hit a medium error within the last - 75 2K sectors, we decrease the saved size value. + * The SCSI specification allows for the value returned by READ + * CAPACITY to be up to 75 2K sectors past the last readable + * block. Therefore, if we hit a medium error within the last + * 75 2K sectors, we decrease the saved size value. */ if ((error_sector >> 1) < sr_sizes[device_nr] && scsi_CDs[device_nr].capacity - error_sector < 4 * 75) sr_sizes[device_nr] = error_sector >> 1; } - if (good_sectors > 0) { /* Some sectors were read successfully. */ - if (SCpnt->use_sg == 0) { - if (SCpnt->buffer != SCpnt->request.buffer) { - int offset; - offset = (SCpnt->request.sector % 4) << 9; - memcpy((char *) SCpnt->request.buffer, - (char *) SCpnt->buffer + offset, - good_sectors << 9); - /* Even though we are not using scatter-gather, we look - * ahead and see if there is a linked request for the - * other half of this buffer. If there is, then satisfy - * it. 
*/ - if ((offset == 0) && good_sectors == 2 && - SCpnt->request.nr_sectors > good_sectors && - SCpnt->request.bh && - SCpnt->request.bh->b_reqnext && - SCpnt->request.bh->b_reqnext->b_size == 1024) { - memcpy((char *) SCpnt->request.bh->b_reqnext->b_data, - (char *) SCpnt->buffer + 1024, - 1024); - good_sectors += 2; - }; + /* + * This calls the generic completion function, now that we know + * how many actual sectors finished, and how many sectors we need + * to say have failed. + */ + scsi_io_completion(SCpnt, good_sectors, block_sectors); +} - scsi_free(SCpnt->buffer, 2048); - } - } else { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - if (sgpnt[i].alt_address) { - if (sgpnt[i].alt_address != sgpnt[i].address) { - memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length); - }; - scsi_free(sgpnt[i].address, sgpnt[i].length); - }; - }; - scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ - if (SCpnt->request.sector % 4) - good_sectors -= 2; - /* See if there is a padding record at the end that needs to be removed */ - if (good_sectors > SCpnt->request.nr_sectors) - good_sectors -= 2; - }; -#ifdef DEBUG - printk("(%x %x %x) ", SCpnt->request.bh, SCpnt->request.nr_sectors, - good_sectors); -#endif - if (SCpnt->request.nr_sectors > this_count) { - SCpnt->request.errors = 0; - if (!SCpnt->request.bh) - panic("sr.c: linked page request (%lx %x)", - SCpnt->request.sector, this_count); - } - SCpnt = end_scsi_request(SCpnt, 1, good_sectors); /* All done */ - if (result == 0) { - requeue_sr_request(SCpnt); - return; - } - } - if (good_sectors == 0) { - /* We only come through here if no sectors were read successfully. 
*/ +static request_queue_t *sr_find_queue(kdev_t dev) +{ + /* + * No such device + */ + if (MINOR(dev) >= sr_template.dev_max || !scsi_CDs[MINOR(dev)].device) + return NULL; - /* Free up any indirection buffers we allocated for DMA purposes. */ - if (SCpnt->use_sg) { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - if (sgpnt[i].alt_address) { - scsi_free(sgpnt[i].address, sgpnt[i].length); - } - } - scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ - } else { - if (SCpnt->buffer != SCpnt->request.buffer) - scsi_free(SCpnt->buffer, SCpnt->bufflen); - } + return &scsi_CDs[MINOR(dev)].device->request_queue; +} +static int sr_init_command(Scsi_Cmnd * SCpnt) +{ + int dev, devm, block, this_count; + + devm = MINOR(SCpnt->request.rq_dev); + dev = DEVICE_NR(SCpnt->request.rq_dev); + + block = SCpnt->request.sector; + this_count = SCpnt->request_bufflen >> 9; + + if (!SCpnt->request.bh) { + /* + * Umm, yeah, right. Swapping to a cdrom. Nice try. + */ + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; } - if (driver_byte(result) != 0) { - if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) { - if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { - /* detected disc change. set a bit and quietly refuse - * further access. 
*/ - - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1; - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sr_request(SCpnt); - return; - } - } - if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) { - printk("sr%d: CD-ROM error: ", - DEVICE_NR(SCpnt->request.rq_dev)); - print_sense("sr", SCpnt); - printk("command was: "); - print_command(SCpnt->cmnd); - if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) { - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0; - requeue_sr_request(SCpnt); - result = 0; - return; - } else { - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sr_request(SCpnt); /* Do next request */ - return; - } + SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %d, block = %d\n", devm, block)); + if (dev >= sr_template.nr_dev || + !scsi_CDs[dev].device || + !scsi_CDs[dev].device->online) { + SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); + return 0; + } + if (scsi_CDs[dev].device->changed) { + /* + * quietly refuse to do anything to a changed disc until the changed + * bit has been reset + */ + /* printk("SCSI disk has been changed. 
Prohibiting further I/O.\n"); */ + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + /* + * we do lazy blocksize switching (when reading XA sectors, + * see CDROMREADMODE2 ioctl) + */ + if (scsi_CDs[dev].device->sector_size > 2048) { + if (!in_interrupt()) + sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048); + else + printk("sr: can't switch blocksize: in interrupt\n"); + } + if (SCpnt->request.cmd == WRITE) { + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + if (scsi_CDs[dev].device->sector_size == 1024) { + if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { + printk("sr.c:Bad 1K block number requested (%d %ld)", + block, SCpnt->request.nr_sectors); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 1; + this_count = this_count >> 1; } - if (SCpnt->sense_buffer[2] == NOT_READY) { - printk(KERN_INFO "sr%d: CD-ROM not ready. Make sure you have a disc in the drive.\n", - DEVICE_NR(SCpnt->request.rq_dev)); - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sr_request(SCpnt); /* Do next request */ - return; - } - if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) { - printk("scsi%d: MEDIUM ERROR on " - "channel %d, id %d, lun %d, CDB: ", - SCpnt->host->host_no, (int) SCpnt->channel, - (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); - print_sense("sr", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, block_sectors); - requeue_sr_request(SCpnt); - return; + } + if (scsi_CDs[dev].device->sector_size == 2048) { + if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { + printk("sr.c:Bad 2K block number requested (%d %ld)", + block, SCpnt->request.nr_sectors); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 2; + this_count = this_count >> 2; } - if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) { - printk("scsi%d: VOLUME OVERFLOW on " - "channel %d, id %d, lun %d, CDB: ", - 
SCpnt->host->host_no, (int) SCpnt->channel, - (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); - print_sense("sr", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, block_sectors); - requeue_sr_request(SCpnt); - return; + } + switch (SCpnt->request.cmd) { + case WRITE: + if (!scsi_CDs[dev].device->writeable) { + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; } + SCpnt->cmnd[0] = WRITE_10; + break; + case READ: + SCpnt->cmnd[0] = READ_10; + break; + default: + panic("Unknown sr command %d\n", SCpnt->request.cmd); } - /* We only get this far if we have an error we have not recognized */ - if (result) { - printk("SCSI CD error : host %d id %d lun %d return code = %03x\n", - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no, - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id, - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, - result); - if (status_byte(result) == CHECK_CONDITION) - print_sense("sr", SCpnt); + SCSI_LOG_HLQUEUE(2, printk("sr%d : %s %d/%ld 512 byte blocks.\n", + devm, + (SCpnt->request.cmd == WRITE) ? "writing" : "reading", + this_count, SCpnt->request.nr_sectors)); + + SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0; + + if (this_count > 0xffff) + this_count = 0xffff; + + SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; + SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; + SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; + SCpnt->cmnd[5] = (unsigned char) block & 0xff; + SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; + SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; + SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors); - requeue_sr_request(SCpnt); - } + /* + * We shouldn't disconnect in the middle of a sector, so with a dumb + * host adapter, it's safe to assume that we can at least transfer + * this many bytes between each connect / disconnect. 
+ */ + SCpnt->transfersize = scsi_CDs[dev].device->sector_size; + SCpnt->underflow = this_count << 9; + + SCpnt->allowed = MAX_RETRIES; + SCpnt->timeout_per_command = SR_TIMEOUT; + + /* + * This is the completion routine we use. This is matched in terms + * of capability to this function. + */ + SCpnt->done = rw_intr; + + /* + * This indicates that the command is ready from our end to be + * queued. + */ + return 1; } static int sr_open(struct cdrom_device_info *cdi, int purpose) @@ -416,390 +420,6 @@ * translate them to SCSI commands. */ -static void do_sr_request(void) -{ - Scsi_Cmnd *SCpnt = NULL; - struct request *req = NULL; - Scsi_Device *SDev; - int flag = 0; - - while (1 == 1) { - if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) { - return; - }; - - INIT_SCSI_REQUEST; - - SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device; - - /* - * If the host for this device is in error recovery mode, don't - * do anything at all here. When the host leaves error recovery - * mode, it will automatically restart things and start queueing - * commands again. - */ - if (SDev->host->in_recovery) { - return; - } - /* - * I am not sure where the best place to do this is. We need - * to hook in a place where we are likely to come if in user - * space. - */ - if (SDev->was_reset) { - /* - * We need to relock the door, but we might - * be in an interrupt handler. Only do this - * from user space, since we do not want to - * sleep from an interrupt. - */ - if (SDev->removable && !in_interrupt()) { - spin_unlock_irq(&io_request_lock); /* FIXME!!!! */ - scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0); - spin_lock_irq(&io_request_lock); /* FIXME!!!! */ - /* scsi_ioctl may allow CURRENT to change, so start over. 
*/ - SDev->was_reset = 0; - continue; - } - SDev->was_reset = 0; - } - /* we do lazy blocksize switching (when reading XA sectors, - * see CDROMREADMODE2 ioctl) */ - if (scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].sector_size > 2048) { - if (!in_interrupt()) - sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048); -#if 1 - else - printk("sr: can't switch blocksize: in interrupt\n"); -#endif - } - if (flag++ == 0) - SCpnt = scsi_allocate_device(&CURRENT, - scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0); - else - SCpnt = NULL; - - /* This is a performance enhancement. We dig down into the request list and - * try to find a queueable request (i.e. device not busy, and host able to - * accept another command. If we find one, then we queue it. This can - * make a big difference on systems with more than one disk drive. We want - * to have the interrupts off when monkeying with the request list, because - * otherwise the kernel might try to slip in a request in between somewhere. */ - - if (!SCpnt && sr_template.nr_dev > 1) { - struct request *req1; - req1 = NULL; - req = CURRENT; - while (req) { - SCpnt = scsi_request_queueable(req, - scsi_CDs[DEVICE_NR(req->rq_dev)].device); - if (SCpnt) - break; - req1 = req; - req = req->next; - } - if (SCpnt && req->rq_status == RQ_INACTIVE) { - if (req == CURRENT) - CURRENT = CURRENT->next; - else - req1->next = req->next; - } - } - if (!SCpnt) - return; /* Could not find anything to do */ - - wake_up(&wait_for_request); - - /* Queue command */ - requeue_sr_request(SCpnt); - } /* While */ -} - -void requeue_sr_request(Scsi_Cmnd * SCpnt) -{ - unsigned int dev, block, realcount; - unsigned char cmd[10], *buffer, tries; - int this_count, start, end_rec; - - tries = 2; - -repeat: - if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) { - do_sr_request(); - return; - } - dev = MINOR(SCpnt->request.rq_dev); - block = SCpnt->request.sector; - buffer = NULL; - this_count = 0; - - if (dev >= sr_template.nr_dev) { - /* printk("CD-ROM request 
error: invalid device.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - if (!scsi_CDs[dev].use) { - /* printk("CD-ROM request error: device marked not in use.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - if (!scsi_CDs[dev].device->online) { - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - if (scsi_CDs[dev].device->changed) { - /* - * quietly refuse to do anything to a changed disc - * until the changed bit has been reset - */ - /* printk("CD-ROM has been changed. Prohibiting further I/O.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - switch (SCpnt->request.cmd) { - case WRITE: - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - break; - case READ: - cmd[0] = READ_6; - break; - default: - panic("Unknown sr command %d\n", SCpnt->request.cmd); - } - - cmd[1] = (SCpnt->lun << 5) & 0xe0; - - /* - * Now do the grungy work of figuring out which sectors we need, and - * where in memory we are going to put them. - * - * The variables we need are: - * - * this_count= number of 512 byte sectors being read - * block = starting cdrom sector to read. - * realcount = # of cdrom sectors to read - * - * The major difference between a scsi disk and a scsi cdrom - * is that we will always use scatter-gather if we can, because we can - * work around the fact that the buffer cache has a block size of 1024, - * and we have 2048 byte sectors. This code should work for buffers that - * are any multiple of 512 bytes long. - */ - - SCpnt->use_sg = 0; - - if (SCpnt->host->sg_tablesize > 0 && - (!scsi_need_isa_buffer || - scsi_dma_free_sectors >= 10)) { - struct buffer_head *bh; - struct scatterlist *sgpnt; - int count, this_count_max; - bh = SCpnt->request.bh; - this_count = 0; - count = 0; - this_count_max = (scsi_CDs[dev].ten ? 
0xffff : 0xff) << 4; - /* Calculate how many links we can use. First see if we need - * a padding record at the start */ - this_count = SCpnt->request.sector % 4; - if (this_count) - count++; - while (bh && count < SCpnt->host->sg_tablesize) { - if ((this_count + (bh->b_size >> 9)) > this_count_max) - break; - this_count += (bh->b_size >> 9); - count++; - bh = bh->b_reqnext; - }; - /* Fix up in case of an odd record at the end */ - end_rec = 0; - if (this_count % 4) { - if (count < SCpnt->host->sg_tablesize) { - count++; - end_rec = (4 - (this_count % 4)) << 9; - this_count += 4 - (this_count % 4); - } else { - count--; - this_count -= (this_count % 4); - }; - }; - SCpnt->use_sg = count; /* Number of chains */ - /* scsi_malloc can only allocate in chunks of 512 bytes */ - count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511; - - SCpnt->sglist_len = count; - sgpnt = (struct scatterlist *) scsi_malloc(count); - if (!sgpnt) { - printk("Warning - running *really* short on DMA buffers\n"); - SCpnt->use_sg = 0; /* No memory left - bail out */ - } else { - buffer = (unsigned char *) sgpnt; - count = 0; - bh = SCpnt->request.bh; - if (SCpnt->request.sector % 4) { - sgpnt[count].length = (SCpnt->request.sector % 4) << 9; - sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length); - if (!sgpnt[count].address) - panic("SCSI DMA pool exhausted."); - sgpnt[count].alt_address = sgpnt[count].address; /* Flag to delete - if needed */ - count++; - }; - for (bh = SCpnt->request.bh; count < SCpnt->use_sg; - count++, bh = bh->b_reqnext) { - if (bh) { /* Need a placeholder at the end of the record? 
*/ - sgpnt[count].address = bh->b_data; - sgpnt[count].length = bh->b_size; - sgpnt[count].alt_address = NULL; - } else { - sgpnt[count].address = (char *) scsi_malloc(end_rec); - if (!sgpnt[count].address) - panic("SCSI DMA pool exhausted."); - sgpnt[count].length = end_rec; - sgpnt[count].alt_address = sgpnt[count].address; - if (count + 1 != SCpnt->use_sg) - panic("Bad sr request list"); - break; - }; - if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 > - ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) { - sgpnt[count].alt_address = sgpnt[count].address; - /* We try to avoid exhausting the DMA pool, since it is easier - * to control usage here. In other places we might have a more - * pressing need, and we would be screwed if we ran out */ - if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 5) { - sgpnt[count].address = NULL; - } else { - sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length); - }; - /* If we start running low on DMA buffers, we abort the scatter-gather - * operation, and free all of the memory we have allocated. We want to - * ensure that all scsi operations are able to do at least a non-scatter/gather - * operation */ - if (sgpnt[count].address == NULL) { /* Out of dma memory */ - printk("Warning: Running low on SCSI DMA buffers\n"); - /* Try switching back to a non scatter-gather operation. 
*/ - while (--count >= 0) { - if (sgpnt[count].alt_address) - scsi_free(sgpnt[count].address, sgpnt[count].length); - }; - SCpnt->use_sg = 0; - scsi_free(buffer, SCpnt->sglist_len); - break; - }; /* if address == NULL */ - }; /* if need DMA fixup */ - }; /* for loop to fill list */ -#ifdef DEBUG - printk("SR: %d %d %d %d %d *** ", SCpnt->use_sg, SCpnt->request.sector, - this_count, - SCpnt->request.current_nr_sectors, - SCpnt->request.nr_sectors); - for (count = 0; count < SCpnt->use_sg; count++) - printk("SGlist: %d %x %x %x\n", count, - sgpnt[count].address, - sgpnt[count].alt_address, - sgpnt[count].length); -#endif - }; /* Able to allocate scatter-gather list */ - }; - - if (SCpnt->use_sg == 0) { - /* We cannot use scatter-gather. Do this the old fashion way */ - if (!SCpnt->request.bh) - this_count = SCpnt->request.nr_sectors; - else - this_count = (SCpnt->request.bh->b_size >> 9); - - start = block % 4; - if (start) { - this_count = ((this_count > 4 - start) ? - (4 - start) : (this_count)); - buffer = (unsigned char *) scsi_malloc(2048); - } else if (this_count < 4) { - buffer = (unsigned char *) scsi_malloc(2048); - } else { - this_count -= this_count % 4; - buffer = (unsigned char *) SCpnt->request.buffer; - if (virt_to_phys(buffer) + (this_count << 9) > ISA_DMA_THRESHOLD && - SCpnt->host->unchecked_isa_dma) - buffer = (unsigned char *) scsi_malloc(this_count << 9); - } - }; - - if (scsi_CDs[dev].sector_size == 2048) - block = block >> 2; /* These are the sectors that the cdrom uses */ - else - block = block & 0xfffffffc; - - realcount = (this_count + 3) / 4; - - if (scsi_CDs[dev].sector_size == 512) - realcount = realcount << 2; - - /* - * Note: The scsi standard says that READ_6 is *optional*, while - * READ_10 is mandatory. Thus there is no point in using - * READ_6. 
- */ - if (scsi_CDs[dev].ten) { - if (realcount > 0xffff) { - realcount = 0xffff; - this_count = realcount * (scsi_CDs[dev].sector_size >> 9); - } - cmd[0] += READ_10 - READ_6; - cmd[2] = (unsigned char) (block >> 24) & 0xff; - cmd[3] = (unsigned char) (block >> 16) & 0xff; - cmd[4] = (unsigned char) (block >> 8) & 0xff; - cmd[5] = (unsigned char) block & 0xff; - cmd[6] = cmd[9] = 0; - cmd[7] = (unsigned char) (realcount >> 8) & 0xff; - cmd[8] = (unsigned char) realcount & 0xff; - } else { - if (realcount > 0xff) { - realcount = 0xff; - this_count = realcount * (scsi_CDs[dev].sector_size >> 9); - } - cmd[1] |= (unsigned char) ((block >> 16) & 0x1f); - cmd[2] = (unsigned char) ((block >> 8) & 0xff); - cmd[3] = (unsigned char) block & 0xff; - cmd[4] = (unsigned char) realcount; - cmd[5] = 0; - } - -#ifdef DEBUG - { - int i; - printk("ReadCD: %d %d %d %d\n", block, realcount, buffer, this_count); - printk("Use sg: %d\n", SCpnt->use_sg); - printk("Dumping command: "); - for (i = 0; i < 12; i++) - printk("%2.2x ", cmd[i]); - printk("\n"); - }; -#endif - - /* Some dumb host adapters can speed transfers by knowing the - * minimum transfersize in advance. - * - * We shouldn't disconnect in the middle of a sector, but the cdrom - * sector size can be larger than the size of a buffer and the - * transfer may be split to the size of a buffer. So it's safe to - * assume that we can at least transfer the minimum of the buffer - * size (1024) and the sector size between each connect / disconnect. - */ - - SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ? 
- 1024 : scsi_CDs[dev].sector_size; - - SCpnt->this_count = this_count; - scsi_do_cmd(SCpnt, (void *) cmd, buffer, - realcount * scsi_CDs[dev].sector_size, - rw_intr, SR_TIMEOUT, MAX_RETRIES); -} static int sr_detect(Scsi_Device * SDp) { @@ -833,7 +453,7 @@ if (i >= sr_template.dev_max) panic("scsi_devices corrupt (sr)"); - SDp->scsi_request_fn = do_sr_request; + scsi_CDs[i].device = SDp; sr_template.nr_dev++; @@ -860,12 +480,13 @@ unsigned char cmd[10]; unsigned char *buffer; int the_result, retries; + int sector_size; Scsi_Cmnd *SCpnt; - spin_lock_irq(&io_request_lock); buffer = (unsigned char *) scsi_malloc(512); - SCpnt = scsi_allocate_device(NULL, scsi_CDs[i].device, 1); - spin_unlock_irq(&io_request_lock); + + + SCpnt = scsi_allocate_device(scsi_CDs[i].device, 1); retries = 3; do { @@ -879,8 +500,8 @@ /* Do the command and wait.. */ - scsi_wait_cmd (SCpnt, (void *) cmd, (void *) buffer, - 512, sr_init_done, SR_TIMEOUT, MAX_RETRIES); + scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, + 512, sr_init_done, SR_TIMEOUT, MAX_RETRIES); the_result = SCpnt->result; retries--; @@ -894,7 +515,7 @@ if (the_result) { scsi_CDs[i].capacity = 0x1fffff; - scsi_CDs[i].sector_size = 2048; /* A guess, just in case */ + sector_size = 2048; /* A guess, just in case */ scsi_CDs[i].needs_sector_size = 1; } else { #if 0 @@ -905,9 +526,9 @@ (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]); - scsi_CDs[i].sector_size = (buffer[4] << 24) | + sector_size = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; - switch (scsi_CDs[i].sector_size) { + switch (sector_size) { /* * HP 4020i CD-Recorder reports 2340 byte sectors * Philips CD-Writers report 2352 byte sectors @@ -917,7 +538,7 @@ case 0: case 2340: case 2352: - scsi_CDs[i].sector_size = 2048; + sector_size = 2048; /* fall through */ case 2048: scsi_CDs[i].capacity *= 4; @@ -926,11 +547,13 @@ break; default: printk("sr%d: unsupported sector size %d.\n", - i, scsi_CDs[i].sector_size); + i, sector_size); 
scsi_CDs[i].capacity = 0; scsi_CDs[i].needs_sector_size = 1; } + scsi_CDs[i].device->sector_size = sector_size; + /* * Add this so that we have the ability to correctly gauge * what the device is capable of. @@ -959,9 +582,7 @@ "" }; - spin_lock_irq(&io_request_lock); buffer = (unsigned char *) scsi_malloc(512); - spin_unlock_irq(&io_request_lock); cmd[0] = MODE_SENSE; cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0; cmd[2] = 0x2a; @@ -1008,19 +629,19 @@ if ((buffer[n + 3] & 0x1) == 0) /* can't write CD-R media */ scsi_CDs[i].cdi.mask |= CDC_CD_R; - if ((buffer[n+6] & 0x8) == 0) + if ((buffer[n + 6] & 0x8) == 0) /* can't eject */ scsi_CDs[i].cdi.mask |= CDC_OPEN_TRAY; - if ((buffer[n+6] >> 5) == mechtype_individual_changer || - (buffer[n+6] >> 5) == mechtype_cartridge_changer) - scsi_CDs[i].cdi.capacity = - cdrom_number_of_slots(&(scsi_CDs[i].cdi)); + if ((buffer[n + 6] >> 5) == mechtype_individual_changer || + (buffer[n + 6] >> 5) == mechtype_cartridge_changer) + scsi_CDs[i].cdi.capacity = + cdrom_number_of_slots(&(scsi_CDs[i].cdi)); if (scsi_CDs[i].cdi.capacity <= 1) - /* not a changer */ + /* not a changer */ scsi_CDs[i].cdi.mask |= CDC_SELECT_DISC; /*else I don't think it can close its tray - scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */ + scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */ scsi_free(buffer, 512); @@ -1036,27 +657,23 @@ Scsi_Device *device = scsi_CDs[MINOR(cdi->dev)].device; unsigned char *buffer = cgc->buffer; int buflen; - int stat; /* get the device */ - SCpnt = scsi_allocate_device(NULL, device, 1); + SCpnt = scsi_allocate_device(device, 1); if (SCpnt == NULL) return -ENODEV; /* this just doesn't seem right /axboe */ /* use buffer for ISA DMA */ buflen = (cgc->buflen + 511) & ~511; if (cgc->buffer && SCpnt->host->unchecked_isa_dma && - (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) { - spin_lock_irq(&io_request_lock); + (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) { buffer = scsi_malloc(buflen); - 
spin_unlock_irq(&io_request_lock); if (buffer == NULL) { printk("sr: SCSI DMA pool exhausted."); return -ENOMEM; } memcpy(buffer, cgc->buffer, cgc->buflen); } - /* set the LUN */ cgc->cmd[1] |= device->lun << 5; @@ -1065,10 +682,11 @@ /* scsi_do_cmd sets the command length */ SCpnt->cmd_len = 0; - scsi_wait_cmd (SCpnt, (void *)cgc->cmd, (void *)buffer, cgc->buflen, - sr_init_done, SR_TIMEOUT, MAX_RETRIES); + scsi_wait_cmd(SCpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen, + sr_init_done, SR_TIMEOUT, MAX_RETRIES); - stat = SCpnt->result; + if ((cgc->stat = SCpnt->result)) + cgc->sense = (struct request_sense *) SCpnt->sense_buffer; /* release */ SCpnt->request.rq_dev = MKDEV(0, 0); @@ -1081,7 +699,8 @@ scsi_free(buffer, buflen); } - return stat; + + return cgc->stat; } static int sr_registered = 0; @@ -1113,12 +732,18 @@ sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC); + sr_hardsizes = (int *) scsi_init_malloc(sr_template.dev_max * + sizeof(int), GFP_ATOMIC); /* * These are good guesses for the time being. 
*/ for (i = 0; i < sr_template.dev_max; i++) + { sr_blocksizes[i] = 2048; + sr_hardsizes[i] = 2048; + } blksize_size[MAJOR_NR] = sr_blocksizes; + hardsect_size[MAJOR_NR] = sr_hardsizes; return 0; } @@ -1127,7 +752,7 @@ int i; char name[6]; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_dev[MAJOR_NR].queue = sr_find_queue; blk_size[MAJOR_NR] = sr_sizes; for (i = 0; i < sr_template.nr_dev; ++i) { @@ -1136,7 +761,7 @@ if (scsi_CDs[i].capacity) continue; scsi_CDs[i].capacity = 0x1fffff; - scsi_CDs[i].sector_size = 2048; /* A guess, just in case */ + scsi_CDs[i].device->sector_size = 2048; /* A guess, just in case */ scsi_CDs[i].needs_sector_size = 1; scsi_CDs[i].device->changed = 1; /* force recheck CD type */ #if 0 @@ -1145,8 +770,9 @@ printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size); #endif scsi_CDs[i].use = 1; - scsi_CDs[i].ten = 1; - scsi_CDs[i].remap = 1; + + scsi_CDs[i].device->ten = 1; + scsi_CDs[i].device->remap = 1; scsi_CDs[i].readcd_known = 0; scsi_CDs[i].readcd_cdda = 0; sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9); @@ -1234,9 +860,12 @@ scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int)); sr_blocksizes = NULL; + scsi_init_free((char *) sr_hardsizes, sr_template.dev_max * sizeof(int)); + sr_hardsizes = NULL; } blksize_size[MAJOR_NR] = NULL; - blk_dev[MAJOR_NR].request_fn = NULL; + hardsect_size[MAJOR_NR] = sr_hardsizes; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); blk_size[MAJOR_NR] = NULL; read_ahead[MAJOR_NR] = 0; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sr.h linux/drivers/scsi/sr.h --- v2.3.31/linux/drivers/scsi/sr.h Tue Sep 7 12:14:06 1999 +++ linux/drivers/scsi/sr.h Sun Dec 12 23:04:20 1999 @@ -9,7 +9,7 @@ * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. 
*/ @@ -21,15 +21,12 @@ typedef struct { unsigned capacity; /* size in blocks */ - unsigned sector_size; /* size in bytes */ Scsi_Device *device; unsigned int vendor; /* vendor code, see sr_vendor.c */ unsigned long ms_offset; /* for reading multisession-CD's */ unsigned char sector_bit_size; /* sector size = 2^sector_bit_size */ unsigned char sector_bit_shift; /* sectors/FS block = 2^sector_bit_shift */ unsigned needs_sector_size:1; /* needs to get sector size */ - unsigned ten:1; /* support ten byte commands */ - unsigned remap:1; /* support remapping */ unsigned use:1; /* is this device still supportable */ unsigned xa_flag:1; /* CD has XA sectors ? */ unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sr_ioctl.c linux/drivers/scsi/sr_ioctl.c --- v2.3.31/linux/drivers/scsi/sr_ioctl.c Sun Nov 7 16:37:34 1999 +++ linux/drivers/scsi/sr_ioctl.c Mon Dec 13 14:08:40 1999 @@ -16,7 +16,7 @@ #include "sr.h" #if 0 -# define DEBUG +#define DEBUG #endif /* The sr_is_xa() seems to trigger firmware bugs with some drives :-( @@ -32,134 +32,120 @@ static void sr_ioctl_done(Scsi_Cmnd * SCpnt) { - struct request * req; - - req = &SCpnt->request; - req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ - - if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) { - memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen); - scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511); - SCpnt->buffer = req->buffer; - } - - if (req->sem != NULL) { - up(req->sem); - } + struct request *req; + + req = &SCpnt->request; + req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ + + if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) { + memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen); + scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511); + SCpnt->buffer = req->buffer; + } + if (req->sem != NULL) { + up(req->sem); + } } /* We do our own retries because we want to know what 
the specific error code is. Normally the UNIT_ATTENTION code will automatically clear after one error */ -int sr_do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength, int quiet) +int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet) { - Scsi_Cmnd * SCpnt; - Scsi_Device * SDev; - int result, err = 0, retries = 0; - unsigned long flags; - char * bounce_buffer; - - spin_lock_irqsave(&io_request_lock, flags); - SDev = scsi_CDs[target].device; - SCpnt = scsi_allocate_device(NULL, scsi_CDs[target].device, 1); - spin_unlock_irqrestore(&io_request_lock, flags); - - /* use ISA DMA buffer if necessary */ - SCpnt->request.buffer=buffer; - if (buffer && SCpnt->host->unchecked_isa_dma && - (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) { - bounce_buffer = (char *)scsi_malloc((buflength + 511) & ~511); - if (bounce_buffer == NULL) { - printk("SCSI DMA pool exhausted."); - return -ENOMEM; - } - memcpy(bounce_buffer, (char *)buffer, buflength); - buffer = bounce_buffer; - } - -retry: - if( !scsi_block_when_processing_errors(SDev) ) - return -ENODEV; - - scsi_wait_cmd(SCpnt, (void *)sr_cmd, (void *)buffer, buflength, - sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES); - - result = SCpnt->result; - - /* Minimal error checking. Ignore cases we know about, and report the rest. 
*/ - if(driver_byte(result) != 0) { - switch(SCpnt->sense_buffer[2] & 0xf) { - case UNIT_ATTENTION: - scsi_CDs[target].device->changed = 1; - if (!quiet) - printk(KERN_INFO "sr%d: disc change detected.\n", target); - if (retries++ < 10) - goto retry; - err = -ENOMEDIUM; - break; - case NOT_READY: /* This happens if there is no disc in drive */ - if (SCpnt->sense_buffer[12] == 0x04 && - SCpnt->sense_buffer[13] == 0x01) { - /* sense: Logical unit is in process of becoming ready */ - if (!quiet) - printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target); - if (retries++ < 10) { - /* sleep 2 sec and try again */ - /* - * The spinlock is silly - we should really lock more of this - * function, but the minimal locking required to not lock up - * is around this - scsi_sleep() assumes we hold the spinlock. - */ - spin_lock_irqsave(&io_request_lock, flags); - scsi_sleep(2*HZ); - spin_unlock_irqrestore(&io_request_lock, flags); - goto retry; - } else { - /* 20 secs are enough? */ - err = -ENOMEDIUM; - break; + Scsi_Cmnd *SCpnt; + Scsi_Device *SDev; + int result, err = 0, retries = 0; + char *bounce_buffer; + + SDev = scsi_CDs[target].device; + SCpnt = scsi_allocate_device(scsi_CDs[target].device, 1); + + /* use ISA DMA buffer if necessary */ + SCpnt->request.buffer = buffer; + if (buffer && SCpnt->host->unchecked_isa_dma && + (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) { + bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511); + if (bounce_buffer == NULL) { + printk("SCSI DMA pool exhausted."); + return -ENOMEM; } - } - if (!quiet) - printk(KERN_INFO "sr%d: CDROM not ready. 
Make sure there is a disc in the drive.\n",target); + memcpy(bounce_buffer, (char *) buffer, buflength); + buffer = bounce_buffer; + } + retry: + if (!scsi_block_when_processing_errors(SDev)) + return -ENODEV; + + + scsi_wait_cmd(SCpnt, (void *) sr_cmd, (void *) buffer, buflength, + sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES); + + result = SCpnt->result; + + /* Minimal error checking. Ignore cases we know about, and report the rest. */ + if (driver_byte(result) != 0) { + switch (SCpnt->sense_buffer[2] & 0xf) { + case UNIT_ATTENTION: + scsi_CDs[target].device->changed = 1; + if (!quiet) + printk(KERN_INFO "sr%d: disc change detected.\n", target); + if (retries++ < 10) + goto retry; + err = -ENOMEDIUM; + break; + case NOT_READY: /* This happens if there is no disc in drive */ + if (SCpnt->sense_buffer[12] == 0x04 && + SCpnt->sense_buffer[13] == 0x01) { + /* sense: Logical unit is in process of becoming ready */ + if (!quiet) + printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target); + if (retries++ < 10) { + /* sleep 2 sec and try again */ + scsi_sleep(2 * HZ); + goto retry; + } else { + /* 20 secs are enough? */ + err = -ENOMEDIUM; + break; + } + } + if (!quiet) + printk(KERN_INFO "sr%d: CDROM not ready. 
Make sure there is a disc in the drive.\n", target); #ifdef DEBUG - print_sense("sr", SCpnt); + print_sense("sr", SCpnt); #endif - err = -ENOMEDIUM; - break; - case ILLEGAL_REQUEST: - if (!quiet) - printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL " - "REQUEST.\n", target); - if (SCpnt->sense_buffer[12] == 0x20 && - SCpnt->sense_buffer[13] == 0x00) { - /* sense: Invalid command operation code */ - err = -EDRIVE_CANT_DO_THIS; - } else { - err = -EINVAL; - } + err = -ENOMEDIUM; + break; + case ILLEGAL_REQUEST: + if (!quiet) + printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL " + "REQUEST.\n", target); + if (SCpnt->sense_buffer[12] == 0x20 && + SCpnt->sense_buffer[13] == 0x00) { + /* sense: Invalid command operation code */ + err = -EDRIVE_CANT_DO_THIS; + } else { + err = -EINVAL; + } #ifdef DEBUG - print_command(sr_cmd); - print_sense("sr", SCpnt); + print_command(sr_cmd); + print_sense("sr", SCpnt); #endif - break; - default: - printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target); - print_command(sr_cmd); - print_sense("sr", SCpnt); - err = -EIO; - } - } - - spin_lock_irqsave(&io_request_lock, flags); - result = SCpnt->result; - /* Wake up a process waiting for device*/ - wake_up(&SCpnt->device->device_wait); - scsi_release_command(SCpnt); - SCpnt = NULL; - spin_unlock_irqrestore(&io_request_lock, flags); - return err; + break; + default: + printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target); + print_command(sr_cmd); + print_sense("sr", SCpnt); + err = -EIO; + } + } + result = SCpnt->result; + /* Wake up a process waiting for device */ + wake_up(&SCpnt->device->device_wait); + scsi_release_command(SCpnt); + SCpnt = NULL; + return err; } /* ---------------------------------------------------------------------- */ @@ -167,95 +153,94 @@ static int test_unit_ready(int minor) { - u_char sr_cmd[10]; + u_char sr_cmd[10]; - sr_cmd[0] = GPCMD_TEST_UNIT_READY; - sr_cmd[1] = ((scsi_CDs[minor].device -> lun) << 5); - sr_cmd[2] = sr_cmd[3] = 
sr_cmd[4] = sr_cmd[5] = 0; - return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1); + sr_cmd[0] = GPCMD_TEST_UNIT_READY; + sr_cmd[1] = ((scsi_CDs[minor].device->lun) << 5); + sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; + return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1); } int sr_tray_move(struct cdrom_device_info *cdi, int pos) { - u_char sr_cmd[10]; + u_char sr_cmd[10]; - sr_cmd[0] = GPCMD_START_STOP_UNIT; - sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device -> lun) << 5); - sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0; - sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */; - - return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0); + sr_cmd[0] = GPCMD_START_STOP_UNIT; + sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5); + sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0; + sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ; + + return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0); } int sr_lock_door(struct cdrom_device_info *cdi, int lock) { - return scsi_ioctl (scsi_CDs[MINOR(cdi->dev)].device, - lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK, - 0); + return scsi_ioctl(scsi_CDs[MINOR(cdi->dev)].device, + lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK, + 0); } int sr_drive_status(struct cdrom_device_info *cdi, int slot) { - if (CDSL_CURRENT != slot) { - /* we have no changer support */ - return -EINVAL; - } - - if (0 == test_unit_ready(MINOR(cdi->dev))) - return CDS_DISC_OK; + if (CDSL_CURRENT != slot) { + /* we have no changer support */ + return -EINVAL; + } + if (0 == test_unit_ready(MINOR(cdi->dev))) + return CDS_DISC_OK; - return CDS_TRAY_OPEN; + return CDS_TRAY_OPEN; } int sr_disk_status(struct cdrom_device_info *cdi) { - struct cdrom_tochdr toc_h; - struct cdrom_tocentry toc_e; - int i,rc,have_datatracks = 0; - - /* look for data tracks */ - if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h))) - return (rc == -ENOMEDIUM) ? 
CDS_NO_DISC : CDS_NO_INFO; - - for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { - toc_e.cdte_track = i; - toc_e.cdte_format = CDROM_LBA; - if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e)) - return CDS_NO_INFO; - if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { - have_datatracks = 1; - break; - } - } - if (!have_datatracks) - return CDS_AUDIO; - - if (scsi_CDs[MINOR(cdi->dev)].xa_flag) - return CDS_XA_2_1; - else - return CDS_DATA_1; + struct cdrom_tochdr toc_h; + struct cdrom_tocentry toc_e; + int i, rc, have_datatracks = 0; + + /* look for data tracks */ + if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h))) + return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO; + + for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { + toc_e.cdte_track = i; + toc_e.cdte_format = CDROM_LBA; + if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e)) + return CDS_NO_INFO; + if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { + have_datatracks = 1; + break; + } + } + if (!have_datatracks) + return CDS_AUDIO; + + if (scsi_CDs[MINOR(cdi->dev)].xa_flag) + return CDS_XA_2_1; + else + return CDS_DATA_1; } int sr_get_last_session(struct cdrom_device_info *cdi, - struct cdrom_multisession* ms_info) + struct cdrom_multisession *ms_info) { - ms_info->addr.lba=scsi_CDs[MINOR(cdi->dev)].ms_offset; - ms_info->xa_flag=scsi_CDs[MINOR(cdi->dev)].xa_flag || - (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0); + ms_info->addr.lba = scsi_CDs[MINOR(cdi->dev)].ms_offset; + ms_info->xa_flag = scsi_CDs[MINOR(cdi->dev)].xa_flag || + (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0); return 0; } -int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn) +int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) { - u_char sr_cmd[10]; + u_char sr_cmd[10]; char buffer[32]; - int result; - + int result; + sr_cmd[0] = GPCMD_READ_SUBCHANNEL; sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5); - sr_cmd[2] = 0x40; /* I do want the subchannel info */ - sr_cmd[3] = 0x02; /* Give me medium catalog number 
info */ + sr_cmd[2] = 0x40; /* I do want the subchannel info */ + sr_cmd[3] = 0x02; /* Give me medium catalog number info */ sr_cmd[4] = sr_cmd[5] = 0; sr_cmd[6] = 0; sr_cmd[7] = 0; @@ -263,9 +248,9 @@ sr_cmd[9] = 0; result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0); - - memcpy (mcn->medium_catalog_number, buffer + 9, 13); - mcn->medium_catalog_number[13] = 0; + + memcpy(mcn->medium_catalog_number, buffer + 9, 13); + mcn->medium_catalog_number[13] = 0; return result; } @@ -273,26 +258,26 @@ int sr_reset(struct cdrom_device_info *cdi) { invalidate_buffers(cdi->dev); - return 0; + return 0; } int sr_select_speed(struct cdrom_device_info *cdi, int speed) { - u_char sr_cmd[12]; + u_char sr_cmd[12]; - if (speed == 0) - speed = 0xffff; /* set to max */ - else - speed *= 177; /* Nx to kbyte/s */ - - memset(sr_cmd,0,12); - sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ + if (speed == 0) + speed = 0xffff; /* set to max */ + else + speed *= 177; /* Nx to kbyte/s */ + + memset(sr_cmd, 0, 12); + sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ sr_cmd[1] = (scsi_CDs[MINOR(cdi->dev)].device->lun) << 5; - sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ - sr_cmd[3] = speed & 0xff; /* LSB */ + sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ + sr_cmd[3] = speed & 0xff; /* LSB */ - if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0)) - return -EIO; + if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0)) + return -EIO; return 0; } @@ -302,73 +287,72 @@ /* only cdromreadtochdr and cdromreadtocentry are left - for use with the */ /* sr_disk_status interface for the generic cdrom driver. 
*/ -int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void* arg) +int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { - u_char sr_cmd[10]; - int result, target = MINOR(cdi->dev); - unsigned char buffer[32]; - - switch (cmd) - { - case CDROMREADTOCHDR: - { - struct cdrom_tochdr* tochdr = (struct cdrom_tochdr*)arg; - - sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; - sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5); - sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; - sr_cmd[6] = 0; - sr_cmd[7] = 0; /* MSB of length (12) */ - sr_cmd[8] = 12; /* LSB of length */ - sr_cmd[9] = 0; - - result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1); - - tochdr->cdth_trk0 = buffer[2]; - tochdr->cdth_trk1 = buffer[3]; - - break; - } - - case CDROMREADTOCENTRY: - { - struct cdrom_tocentry* tocentry = (struct cdrom_tocentry*)arg; - - sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; - sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | - (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0); - sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; - sr_cmd[6] = tocentry->cdte_track; - sr_cmd[7] = 0; /* MSB of length (12) */ - sr_cmd[8] = 12; /* LSB of length */ - sr_cmd[9] = 0; - - result = sr_do_ioctl (target, sr_cmd, buffer, 12, 0); - - tocentry->cdte_ctrl = buffer[5] & 0xf; - tocentry->cdte_adr = buffer[5] >> 4; - tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 
1 : 0; - if (tocentry->cdte_format == CDROM_MSF) { - tocentry->cdte_addr.msf.minute = buffer[9]; - tocentry->cdte_addr.msf.second = buffer[10]; - tocentry->cdte_addr.msf.frame = buffer[11]; - } else - tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) - + buffer[10]) << 8) + buffer[11]; - - break; - } - - default: - return -EINVAL; - } + u_char sr_cmd[10]; + int result, target = MINOR(cdi->dev); + unsigned char buffer[32]; + + switch (cmd) { + case CDROMREADTOCHDR: + { + struct cdrom_tochdr *tochdr = (struct cdrom_tochdr *) arg; + + sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5); + sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; + sr_cmd[6] = 0; + sr_cmd[7] = 0; /* MSB of length (12) */ + sr_cmd[8] = 12; /* LSB of length */ + sr_cmd[9] = 0; + + result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1); + + tochdr->cdth_trk0 = buffer[2]; + tochdr->cdth_trk1 = buffer[3]; + + break; + } + + case CDROMREADTOCENTRY: + { + struct cdrom_tocentry *tocentry = (struct cdrom_tocentry *) arg; + + sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | + (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0); + sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; + sr_cmd[6] = tocentry->cdte_track; + sr_cmd[7] = 0; /* MSB of length (12) */ + sr_cmd[8] = 12; /* LSB of length */ + sr_cmd[9] = 0; + + result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0); + + tocentry->cdte_ctrl = buffer[5] & 0xf; + tocentry->cdte_adr = buffer[5] >> 4; + tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 
1 : 0; + if (tocentry->cdte_format == CDROM_MSF) { + tocentry->cdte_addr.msf.minute = buffer[9]; + tocentry->cdte_addr.msf.second = buffer[10]; + tocentry->cdte_addr.msf.frame = buffer[11]; + } else + tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + + buffer[10]) << 8) + buffer[11]; + + break; + } + + default: + return -EINVAL; + } #if 0 - if (result) - printk("DEBUG: sr_audio: result for ioctl %x: %x\n",cmd,result); + if (result) + printk("DEBUG: sr_audio: result for ioctl %x: %x\n", cmd, result); #endif - - return result; + + return result; } /* ----------------------------------------------------------------------- @@ -385,73 +369,78 @@ * blksize: 2048 | 2336 | 2340 | 2352 */ -int -sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize) +int sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize) { - unsigned char cmd[12]; + unsigned char cmd[12]; #ifdef DEBUG - printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n", - minor,lba,format,blksize); + printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n", + minor, lba, format, blksize); #endif - memset(cmd,0,12); - cmd[0] = GPCMD_READ_CD; /* READ_CD */ - cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2); - cmd[2] = (unsigned char)(lba >> 24) & 0xff; - cmd[3] = (unsigned char)(lba >> 16) & 0xff; - cmd[4] = (unsigned char)(lba >> 8) & 0xff; - cmd[5] = (unsigned char) lba & 0xff; - cmd[8] = 1; - switch (blksize) { - case 2336: cmd[9] = 0x58; break; - case 2340: cmd[9] = 0x78; break; - case 2352: cmd[9] = 0xf8; break; - default: cmd[9] = 0x10; break; - } - return sr_do_ioctl(minor, cmd, dest, blksize, 0); + memset(cmd, 0, 12); + cmd[0] = GPCMD_READ_CD; /* READ_CD */ + cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2); + cmd[2] = (unsigned char) (lba >> 24) & 0xff; + cmd[3] = (unsigned char) (lba >> 16) & 0xff; + cmd[4] = (unsigned char) (lba >> 8) & 0xff; + cmd[5] = (unsigned char) lba & 0xff; + cmd[8] = 1; + switch (blksize) 
{ + case 2336: + cmd[9] = 0x58; + break; + case 2340: + cmd[9] = 0x78; + break; + case 2352: + cmd[9] = 0xf8; + break; + default: + cmd[9] = 0x10; + break; + } + return sr_do_ioctl(minor, cmd, dest, blksize, 0); } /* * read sectors with blocksizes other than 2048 */ -int -sr_read_sector(int minor, int lba, int blksize, unsigned char *dest) +int sr_read_sector(int minor, int lba, int blksize, unsigned char *dest) { - unsigned char cmd[12]; /* the scsi-command */ - int rc; - - /* we try the READ CD command first... */ - if (scsi_CDs[minor].readcd_known) { - rc = sr_read_cd(minor, dest, lba, 0, blksize); - if (-EDRIVE_CANT_DO_THIS != rc) - return rc; - scsi_CDs[minor].readcd_known = 0; - printk("CDROM does'nt support READ CD (0xbe) command\n"); - /* fall & retry the other way */ - } - - /* ... if this fails, we switch the blocksize using MODE SELECT */ - if (blksize != scsi_CDs[minor].sector_size) - if (0 != (rc = sr_set_blocklength(minor, blksize))) - return rc; + unsigned char cmd[12]; /* the scsi-command */ + int rc; + /* we try the READ CD command first... */ + if (scsi_CDs[minor].readcd_known) { + rc = sr_read_cd(minor, dest, lba, 0, blksize); + if (-EDRIVE_CANT_DO_THIS != rc) + return rc; + scsi_CDs[minor].readcd_known = 0; + printk("CDROM does'nt support READ CD (0xbe) command\n"); + /* fall & retry the other way */ + } + /* ... 
if this fails, we switch the blocksize using MODE SELECT */ + if (blksize != scsi_CDs[minor].device->sector_size) { + if (0 != (rc = sr_set_blocklength(minor, blksize))) + return rc; + } #ifdef DEBUG - printk("sr%d: sr_read_sector lba=%d blksize=%d\n",minor,lba,blksize); + printk("sr%d: sr_read_sector lba=%d blksize=%d\n", minor, lba, blksize); #endif - - memset(cmd,0,12); - cmd[0] = GPCMD_READ_10; - cmd[1] = (scsi_CDs[minor].device->lun << 5); - cmd[2] = (unsigned char)(lba >> 24) & 0xff; - cmd[3] = (unsigned char)(lba >> 16) & 0xff; - cmd[4] = (unsigned char)(lba >> 8) & 0xff; - cmd[5] = (unsigned char) lba & 0xff; - cmd[8] = 1; - rc = sr_do_ioctl(minor, cmd, dest, blksize, 0); - - return rc; + + memset(cmd, 0, 12); + cmd[0] = GPCMD_READ_10; + cmd[1] = (scsi_CDs[minor].device->lun << 5); + cmd[2] = (unsigned char) (lba >> 24) & 0xff; + cmd[3] = (unsigned char) (lba >> 16) & 0xff; + cmd[4] = (unsigned char) (lba >> 8) & 0xff; + cmd[5] = (unsigned char) lba & 0xff; + cmd[8] = 1; + rc = sr_do_ioctl(minor, cmd, dest, blksize, 0); + + return rc; } /* @@ -459,55 +448,50 @@ * ret: 1 == mode2 (XA), 0 == mode1, <0 == error */ -int -sr_is_xa(int minor) +int sr_is_xa(int minor) { - unsigned char *raw_sector; - int is_xa; - unsigned long flags; - - if (!xa_test) - return 0; - - spin_lock_irqsave(&io_request_lock, flags); - raw_sector = (unsigned char *) scsi_malloc(2048+512); - spin_unlock_irqrestore(&io_request_lock, flags); - if (!raw_sector) return -ENOMEM; - if (0 == sr_read_sector(minor,scsi_CDs[minor].ms_offset+16, - CD_FRAMESIZE_RAW1,raw_sector)) { - is_xa = (raw_sector[3] == 0x02) ? 1 : 0; - } else { - /* read a raw sector failed for some reason. 
*/ - is_xa = -1; - } - spin_lock_irqsave(&io_request_lock, flags); - scsi_free(raw_sector, 2048+512); - spin_unlock_irqrestore(&io_request_lock, flags); + unsigned char *raw_sector; + int is_xa; + + if (!xa_test) + return 0; + + raw_sector = (unsigned char *) scsi_malloc(2048 + 512); + if (!raw_sector) + return -ENOMEM; + if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16, + CD_FRAMESIZE_RAW1, raw_sector)) { + is_xa = (raw_sector[3] == 0x02) ? 1 : 0; + } else { + /* read a raw sector failed for some reason. */ + is_xa = -1; + } + scsi_free(raw_sector, 2048 + 512); #ifdef DEBUG - printk("sr%d: sr_is_xa: %d\n",minor,is_xa); + printk("sr%d: sr_is_xa: %d\n", minor, is_xa); #endif - return is_xa; + return is_xa; } int sr_dev_ioctl(struct cdrom_device_info *cdi, - unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg) { - int target; - - target = MINOR(cdi->dev); - - switch (cmd) { - case BLKROSET: - case BLKROGET: - case BLKRASET: - case BLKRAGET: - case BLKFLSBUF: - case BLKSSZGET: - return blk_ioctl(cdi->dev, cmd, arg); - - default: - return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg); - } + int target; + + target = MINOR(cdi->dev); + + switch (cmd) { + case BLKROSET: + case BLKROGET: + case BLKRASET: + case BLKRAGET: + case BLKFLSBUF: + case BLKSSZGET: + return blk_ioctl(cdi->dev, cmd, arg); + + default: + return scsi_ioctl(scsi_CDs[target].device, cmd, (void *) arg); + } } /* diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/sr_vendor.c linux/drivers/scsi/sr_vendor.c --- v2.3.31/linux/drivers/scsi/sr_vendor.c Sun Nov 7 16:37:34 1999 +++ linux/drivers/scsi/sr_vendor.c Sun Dec 12 23:04:20 1999 @@ -1,5 +1,5 @@ /* -*-linux-c-*- - * + * vendor-specific code for SCSI CD-ROM's goes here. * * This is needed becauce most of the new features (multisession and @@ -23,15 +23,15 @@ * - TOSHIBA: Detection and support of multisession CD's. * Some XA-Sector tweaking, required for older drives. 
* - * - SONY: Detection and support of multisession CD's. + * - SONY: Detection and support of multisession CD's. * added by Thomas Quinot * * - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to * work with SONY (SCSI3 now) code. * - * - HP: Much like SONY, but a little different... (Thomas) + * - HP: Much like SONY, but a little different... (Thomas) * HP-Writers only ??? Maybe other CD-Writers work with this too ? - * HP 6020 writers now supported. + * HP 6020 writers now supported. */ #include @@ -47,16 +47,16 @@ #include "sr.h" #if 0 -# define DEBUG +#define DEBUG #endif /* here are some constants to sort the vendors into groups */ -#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */ +#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */ #define VENDOR_NEC 2 #define VENDOR_TOSHIBA 3 -#define VENDOR_WRITER 4 /* pre-scsi3 writers */ +#define VENDOR_WRITER 4 /* pre-scsi3 writers */ #define VENDOR_ID (scsi_CDs[minor].vendor) @@ -66,7 +66,7 @@ VENDOR_ID = VENDOR_SCSI3; #else char *vendor = scsi_CDs[minor].device->vendor; - char *model = scsi_CDs[minor].device->model; + char *model = scsi_CDs[minor].device->model; /* default */ VENDOR_ID = VENDOR_SCSI3; @@ -77,24 +77,24 @@ if (scsi_CDs[minor].device->type == TYPE_WORM) { VENDOR_ID = VENDOR_WRITER; - } else if (!strncmp (vendor, "NEC", 3)) { + } else if (!strncmp(vendor, "NEC", 3)) { VENDOR_ID = VENDOR_NEC; - if (!strncmp (model,"CD-ROM DRIVE:25", 15) || - !strncmp (model,"CD-ROM DRIVE:36", 15) || - !strncmp (model,"CD-ROM DRIVE:83", 15) || - !strncmp (model,"CD-ROM DRIVE:84 ",16) + if (!strncmp(model, "CD-ROM DRIVE:25", 15) || + !strncmp(model, "CD-ROM DRIVE:36", 15) || + !strncmp(model, "CD-ROM DRIVE:83", 15) || + !strncmp(model, "CD-ROM DRIVE:84 ", 16) #if 0 - /* my NEC 3x returns the read-raw data if a read-raw - is followed by a read for the same sector - aeb */ - || !strncmp (model,"CD-ROM DRIVE:500",16) + /* my NEC 3x returns the read-raw data if a read-raw + is followed by a read for the same sector - aeb 
*/ + || !strncmp(model, "CD-ROM DRIVE:500", 16) #endif - ) + ) /* these can't handle multisession, may hang */ scsi_CDs[minor].cdi.mask |= CDC_MULTI_SESSION; - } else if (!strncmp (vendor, "TOSHIBA", 7)) { + } else if (!strncmp(vendor, "TOSHIBA", 7)) { VENDOR_ID = VENDOR_TOSHIBA; - + } #endif } @@ -105,10 +105,10 @@ int sr_set_blocklength(int minor, int blocklength) { - unsigned char *buffer; /* the buffer for the ioctl */ - unsigned char cmd[12]; /* the scsi-command */ - struct ccs_modesel_head *modesel; - int rc,density = 0; + unsigned char *buffer; /* the buffer for the ioctl */ + unsigned char cmd[12]; /* the scsi-command */ + struct ccs_modesel_head *modesel; + int rc, density = 0; #ifdef CONFIG_BLK_DEV_SR_VENDOR if (VENDOR_ID == VENDOR_TOSHIBA) @@ -116,27 +116,29 @@ #endif buffer = (unsigned char *) scsi_malloc(512); - if (!buffer) return -ENOMEM; + if (!buffer) + return -ENOMEM; #ifdef DEBUG - printk("sr%d: MODE SELECT 0x%x/%d\n",minor,density,blocklength); + printk("sr%d: MODE SELECT 0x%x/%d\n", minor, density, blocklength); #endif - memset(cmd,0,12); + memset(cmd, 0, 12); cmd[0] = MODE_SELECT; cmd[1] = (scsi_CDs[minor].device->lun << 5) | (1 << 4); cmd[4] = 12; - modesel = (struct ccs_modesel_head*)buffer; - memset(modesel,0,sizeof(*modesel)); + modesel = (struct ccs_modesel_head *) buffer; + memset(modesel, 0, sizeof(*modesel)); modesel->block_desc_length = 0x08; - modesel->density = density; - modesel->block_length_med = (blocklength >> 8 ) & 0xff; - modesel->block_length_lo = blocklength & 0xff; - if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) - scsi_CDs[minor].sector_size = blocklength; + modesel->density = density; + modesel->block_length_med = (blocklength >> 8) & 0xff; + modesel->block_length_lo = blocklength & 0xff; + if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) { + scsi_CDs[minor].device->sector_size = blocklength; + } #ifdef DEBUG else printk("sr%d: switching blocklength to %d bytes failed\n", - 
minor,blocklength); + minor, blocklength); #endif scsi_free(buffer, 512); return rc; @@ -149,28 +151,27 @@ int sr_cd_check(struct cdrom_device_info *cdi) { - unsigned long sector; - unsigned char *buffer; /* the buffer for the ioctl */ - unsigned char cmd[12]; /* the scsi-command */ - int rc,no_multi,minor; + unsigned long sector; + unsigned char *buffer; /* the buffer for the ioctl */ + unsigned char cmd[12]; /* the scsi-command */ + int rc, no_multi, minor; minor = MINOR(cdi->dev); if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION) return 0; - - spin_lock_irq(&io_request_lock); + buffer = (unsigned char *) scsi_malloc(512); - spin_unlock_irq(&io_request_lock); - if(!buffer) return -ENOMEM; - - sector = 0; /* the multisession sector offset goes here */ - no_multi = 0; /* flag: the drive can't handle multisession */ - rc = 0; - - switch(VENDOR_ID) { - + if (!buffer) + return -ENOMEM; + + sector = 0; /* the multisession sector offset goes here */ + no_multi = 0; /* flag: the drive can't handle multisession */ + rc = 0; + + switch (VENDOR_ID) { + case VENDOR_SCSI3: - memset(cmd,0,12); + memset(cmd, 0, 12); cmd[0] = READ_TOC; cmd[1] = (scsi_CDs[minor].device->lun << 5); cmd[8] = 12; @@ -180,70 +181,70 @@ break; if ((buffer[0] << 8) + buffer[1] < 0x0a) { printk(KERN_INFO "sr%d: Hmm, seems the drive " - "doesn't support multisession CD's\n",minor); + "doesn't support multisession CD's\n", minor); no_multi = 1; break; } sector = buffer[11] + (buffer[10] << 8) + - (buffer[9] << 16) + (buffer[8] << 24); + (buffer[9] << 16) + (buffer[8] << 24); if (buffer[6] <= 1) { /* ignore sector offsets from first track */ sector = 0; } break; - + #ifdef CONFIG_BLK_DEV_SR_VENDOR - case VENDOR_NEC: { - unsigned long min,sec,frame; - memset(cmd,0,12); - cmd[0] = 0xde; - cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03; - cmd[2] = 0xb0; - rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1); - if (rc != 0) - break; - if (buffer[14] != 0 && buffer[14] != 0xb0) { - printk(KERN_INFO "sr%d: Hmm, 
seems the cdrom " - "doesn't support multisession CD's\n",minor); - no_multi = 1; + case VENDOR_NEC:{ + unsigned long min, sec, frame; + memset(cmd, 0, 12); + cmd[0] = 0xde; + cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03; + cmd[2] = 0xb0; + rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1); + if (rc != 0) + break; + if (buffer[14] != 0 && buffer[14] != 0xb0) { + printk(KERN_INFO "sr%d: Hmm, seems the cdrom " + "doesn't support multisession CD's\n", minor); + no_multi = 1; + break; + } + min = BCD_TO_BIN(buffer[15]); + sec = BCD_TO_BIN(buffer[16]); + frame = BCD_TO_BIN(buffer[17]); + sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; break; } - min = BCD_TO_BIN(buffer[15]); - sec = BCD_TO_BIN(buffer[16]); - frame = BCD_TO_BIN(buffer[17]); - sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame; - break; - } - case VENDOR_TOSHIBA: { - unsigned long min,sec,frame; + case VENDOR_TOSHIBA:{ + unsigned long min, sec, frame; - /* we request some disc information (is it a XA-CD ?, - * where starts the last session ?) */ - memset(cmd,0,12); - cmd[0] = 0xc7; - cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3; - rc = sr_do_ioctl(minor, cmd, buffer, 4, 1); - if (rc == -EINVAL) { - printk(KERN_INFO "sr%d: Hmm, seems the drive " - "doesn't support multisession CD's\n",minor); - no_multi = 1; + /* we request some disc information (is it a XA-CD ?, + * where starts the last session ?) 
*/ + memset(cmd, 0, 12); + cmd[0] = 0xc7; + cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3; + rc = sr_do_ioctl(minor, cmd, buffer, 4, 1); + if (rc == -EINVAL) { + printk(KERN_INFO "sr%d: Hmm, seems the drive " + "doesn't support multisession CD's\n", minor); + no_multi = 1; + break; + } + if (rc != 0) + break; + min = BCD_TO_BIN(buffer[1]); + sec = BCD_TO_BIN(buffer[2]); + frame = BCD_TO_BIN(buffer[3]); + sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; + if (sector) + sector -= CD_MSF_OFFSET; + sr_set_blocklength(minor, 2048); break; } - if (rc != 0) - break; - min = BCD_TO_BIN(buffer[1]); - sec = BCD_TO_BIN(buffer[2]); - frame = BCD_TO_BIN(buffer[3]); - sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame; - if (sector) - sector -= CD_MSF_OFFSET; - sr_set_blocklength(minor,2048); - break; - } case VENDOR_WRITER: - memset(cmd,0,12); + memset(cmd, 0, 12); cmd[0] = READ_TOC; cmd[1] = (scsi_CDs[minor].device->lun << 5); cmd[8] = 0x04; @@ -253,31 +254,29 @@ break; } if ((rc = buffer[2]) == 0) { - printk (KERN_WARNING - "sr%d: No finished session\n",minor); + printk(KERN_WARNING + "sr%d: No finished session\n", minor); break; } - - cmd[0] = READ_TOC; /* Read TOC */ + cmd[0] = READ_TOC; /* Read TOC */ cmd[1] = (scsi_CDs[minor].device->lun << 5); - cmd[6] = rc & 0x7f; /* number of last session */ + cmd[6] = rc & 0x7f; /* number of last session */ cmd[8] = 0x0c; cmd[9] = 0x40; - rc = sr_do_ioctl(minor, cmd, buffer, 12, 1); + rc = sr_do_ioctl(minor, cmd, buffer, 12, 1); if (rc != 0) { break; } - sector = buffer[11] + (buffer[10] << 8) + - (buffer[9] << 16) + (buffer[8] << 24); + (buffer[9] << 16) + (buffer[8] << 24); break; -#endif /* CONFIG_BLK_DEV_SR_VENDOR */ +#endif /* CONFIG_BLK_DEV_SR_VENDOR */ default: /* should not happen */ printk(KERN_WARNING - "sr%d: unknown vendor code (%i), not initialized ?\n", - minor,VENDOR_ID); + "sr%d: unknown vendor code (%i), not initialized ?\n", + minor, VENDOR_ID); sector = 0; no_multi = 1; break; @@ -286,16 +285,17 
@@ scsi_CDs[minor].xa_flag = 0; if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(minor)) scsi_CDs[minor].xa_flag = 1; - - if (2048 != scsi_CDs[minor].sector_size) - sr_set_blocklength(minor,2048); + + if (2048 != scsi_CDs[minor].device->sector_size) { + sr_set_blocklength(minor, 2048); + } if (no_multi) cdi->mask |= CDC_MULTI_SESSION; #ifdef DEBUG if (sector) printk(KERN_DEBUG "sr%d: multisession offset=%lu\n", - minor,sector); + minor, sector); #endif scsi_free(buffer, 512); return rc; diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/st.c linux/drivers/scsi/st.c --- v2.3.31/linux/drivers/scsi/st.c Fri Oct 22 13:21:50 1999 +++ linux/drivers/scsi/st.c Sun Dec 12 23:04:20 1999 @@ -286,15 +286,13 @@ st_do_scsi(Scsi_Cmnd * SCpnt, Scsi_Tape * STp, unsigned char *cmd, int bytes, int timeout, int retries, int do_wait) { - unsigned long flags; unsigned char *bp; - spin_lock_irqsave(&io_request_lock, flags); if (SCpnt == NULL) - if ((SCpnt = scsi_allocate_device(NULL, STp->device, 1)) == NULL) { + SCpnt = scsi_allocate_device(STp->device, 1); + if (SCpnt == NULL) { printk(KERN_ERR "st%d: Can't get SCSI request.\n", TAPE_NR(STp->devt)); - spin_unlock_irqrestore(&io_request_lock, flags); return NULL; } @@ -315,7 +313,6 @@ scsi_do_cmd(SCpnt, (void *) cmd, bp, bytes, st_sleep_done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); if (do_wait) { down(SCpnt->request.sem); diff -u --recursive --new-file v2.3.31/linux/drivers/scsi/u14-34f.c linux/drivers/scsi/u14-34f.c --- v2.3.31/linux/drivers/scsi/u14-34f.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/scsi/u14-34f.c Sun Dec 12 23:04:20 1999 @@ -826,7 +826,7 @@ } else { unsigned long flags; - sh[j]->wish_block = TRUE; +//FIXME// sh[j]->wish_block = TRUE; sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); diff -u --recursive --new-file v2.3.31/linux/drivers/sound/mad16.c linux/drivers/sound/mad16.c --- v2.3.31/linux/drivers/sound/mad16.c Thu Aug 26 13:05:39 1999 +++ linux/drivers/sound/mad16.c 
Wed Dec 8 15:17:55 1999 @@ -14,6 +14,7 @@ * * OPTi 82C928 MAD16 (replaced by C929) * OAK OTI-601D Mozart + * OAK OTI-605 Mozart (later version with MPU401 Midi) * OPTi 82C929 MAD16 Pro * OPTi 82C930 * OPTi 82C924 @@ -22,8 +23,9 @@ * connect some other components (OPL-[234] and a WSS compatible codec) * to the PC bus and perform I/O, DMA and IRQ address decoding. There is * also a UART for the MPU-401 mode (not 82C928/Mozart). - * The Mozart chip appears to be compatible with the 82C928 (can anybody - * confirm this?). + * The Mozart chip appears to be compatible with the 82C928, although later + * issues of the card, using the OTI-605 chip, have an MPU-401 compatable Midi + * port. This port is configured differently to that of the OPTi audio chips. * * NOTE! If you want to set CD-ROM address and/or joystick enable, define * MAD16_CONF in local.h as combination of the following bits: @@ -65,8 +67,11 @@ * Improved debugging support. 16-May-1998 * Fixed bug. 16-Jun-1998 * - * Torsten Duwe Made Opti924 PnP support non-destructive - * 1998-12-23 + * Torsten Duwe Made Opti924 PnP support non-destructive + * 23-Dec-1998 + * + * Paul Grayson Added support for Midi on later Mozart cards. + * 25-Nov-1999 */ #include "sound_config.h" @@ -719,29 +724,24 @@ void attach_mad16_mpu(struct address_info *hw_config) { - if (board_type < C929) /* Early chip. No MPU support. 
Just SB MIDI */ - { #if defined(CONFIG_MIDI) && defined(CONFIG_MAD16_OLDCARD) - if (mad_read(MC1_PORT) & 0x20) - hw_config->io_base = 0x240; - else - hw_config->io_base = 0x220; + if (mad_read(MC1_PORT) & 0x20) + hw_config->io_base = 0x240; + else + hw_config->io_base = 0x220; - hw_config->name = "Mad16/Mozart"; - sb_dsp_init(hw_config); + hw_config->name = "Mad16/Mozart"; + sb_dsp_init(hw_config); + return; #endif - return; - } -#if defined(CONFIG_UART401) && defined(CONFIG_MIDI) if (!already_initialized) return; hw_config->driver_use_1 = SB_MIDI_ONLY; hw_config->name = "Mad16/Mozart"; attach_uart401(hw_config); -#endif } int probe_mad16_mpu(struct address_info *hw_config) @@ -802,7 +802,60 @@ hw_config->driver_use_1 = SB_MIDI_ONLY; return sb_dsp_detect(hw_config, 0, 0); #else - return 0; + /* assuming all later Mozart cards are identified as + * either 82C928 or Mozart. If so, following code attempts + * to set MPU register. TODO - add probing + */ + + + unsigned char tmp; + + tmp = mad_read(MC8_PORT); + + switch (hw_config->irq) + { + case 5: + tmp |= 0x08; + break; + case 7: + tmp |= 0x10; + break; + case 9: + tmp |= 0x18; + break; + case 10: + tmp |= 0x20; + break; + case 11: + tmp |= 0x28; + break; + default: + printk(KERN_ERR "mad16/MOZART: invalid mpu_irq\n"); + return 0; + } + + switch (hw_config->io_base) + { + case 0x300: + tmp |= 0x01; + break; + case 0x310: + tmp |= 0x03; + break; + case 0x320: + tmp |= 0x05; + break; + case 0x330: + tmp |= 0x07; + break; + default: + printk(KERN_ERR "mad16/MOZART: invalid mpu_io\n"); + return 0; + } + + mad_write(MC8_PORT, tmp); /* write MPU port parameters */ + + return probe_uart401(hw_config); #endif } tmp = mad_read(MC6_PORT) & 0x83; diff -u --recursive --new-file v2.3.31/linux/drivers/sound/sb_card.c linux/drivers/sound/sb_card.c --- v2.3.31/linux/drivers/sound/sb_card.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/sound/sb_card.c Sun Dec 12 22:55:54 1999 @@ -233,38 +233,42 @@ if (mad16 == 0 && trix == 0 && pas2 == 
0 && support == 0) { #ifdef CONFIG_ISAPNP - if (sb_probe_isapnp(&config, &config_mpu)<0) + if (isapnp == 1 && sb_probe_isapnp(&config, &config_mpu)<0) { printk(KERN_ERR "sb_card: No ISAPnP cards found\n"); return -EINVAL; } + else + { +#endif + if (io == -1 || dma == -1 || irq == -1) + { + printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n"); + return -EINVAL; + } + config.io_base = io; + config.irq = irq; + config.dma = dma; + config.dma2 = dma16; + config.card_subtype = type; +#ifdef CONFIG_ISAPNP + } #endif - } - if (io == -1 || dma == -1 || irq == -1) - { - printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n"); - return -EINVAL; - } - config.io_base = io; - config.irq = irq; - config.dma = dma; - config.dma2 = dma16; - config.card_subtype = type; - - if (!probe_sb(&config)) - return -ENODEV; - attach_sb_card(&config); + if (!probe_sb(&config)) + return -ENODEV; + attach_sb_card(&config); - if(config.slots[0]==-1) - return -ENODEV; + if(config.slots[0]==-1) + return -ENODEV; #ifdef CONFIG_MIDI - if (isapnp == 0) - config_mpu.io_base = mpu_io; - if (probe_sbmpu(&config_mpu)) - sbmpu = 1; - if (sbmpu) - attach_sbmpu(&config_mpu); + if (isapnp == 0) + config_mpu.io_base = mpu_io; + if (probe_sbmpu(&config_mpu)) + sbmpu = 1; + if (sbmpu) + attach_sbmpu(&config_mpu); #endif + } SOUND_LOCK; return 0; } diff -u --recursive --new-file v2.3.31/linux/drivers/sound/sb_ess.c linux/drivers/sound/sb_ess.c --- v2.3.31/linux/drivers/sound/sb_ess.c Mon Oct 4 15:49:30 1999 +++ linux/drivers/sound/sb_ess.c Wed Dec 8 15:17:55 1999 @@ -10,29 +10,29 @@ * * History: * - * Rolf Fokkens (Dec 20 1998): ES188x recording level support on a per + * Rolf Fokkens (Dec 20 1998): ES188x recording level support on a per * fokkensr@vertis.nl input basis. - * (Dec 24 1998): Recognition of ES1788, ES1887, ES1888, + * (Dec 24 1998): Recognition of ES1788, ES1887, ES1888, * ES1868, ES1869 and ES1878. Could be used for * specific handling in the future. 
All except * ES1887 and ES1888 and ES688 are handled like * ES1688. - * (Dec 27 1998): RECLEV for all (?) ES1688+ chips. ES188x now + * (Dec 27 1998): RECLEV for all (?) ES1688+ chips. ES188x now * have the "Dec 20" support + RECLEV - * (Jan 2 1999): Preparation for Full Duplex. This means + * (Jan 2 1999): Preparation for Full Duplex. This means * Audio 2 is now used for playback when dma16 * is specified. The next step would be to use * Audio 1 and Audio 2 at the same time. - * (Jan 9 1999): Put all ESS stuff into sb_ess.[ch], this + * (Jan 9 1999): Put all ESS stuff into sb_ess.[ch], this * includes both the ESS stuff that has been in * sb_*[ch] before I touched it and the ESS support * I added later - * (Jan 23 1999): Full Duplex seems to work. I wrote a small + * (Jan 23 1999): Full Duplex seems to work. I wrote a small * test proggy which works OK. Haven't found * any applications to test it though. So why did * I bother to create it anyway?? :) Just for * fun. - * (May 2 1999): I tried to be too smart by "introducing" + * (May 2 1999): I tried to be too smart by "introducing" * ess_calc_best_speed (). The idea was that two * dividers could be used to setup a samplerate, * ess_calc_best_speed () would choose the best. @@ -40,10 +40,12 @@ * recording problems for high samplerates. I * fixed this by removing ess_calc_best_speed () * and just doing what the documentation says. - * Andy Sloane (June 4 1999): Stole some code from ALSA to fix the playback + * Andy Sloane (Jun 4 1999): Stole some code from ALSA to fix the playback * andy@guildsoftware.com speed on ES1869, ES1879, ES1887, and ES1888. * 1879's were previously ignored by this driver; * added (untested) support for those. + * Cvetan Ivanov (Oct 27 1999): Fixed ess_dsp_init to call ess_set_dma_hw for + * zezo@inet.bg _ALL_ ESS models, not only ES1887 * * This files contains ESS chip specifics. It's based on the existing ESS * handling as it resided in sb_common.c, sb_mixer.c and sb_audio.c. 
This @@ -52,7 +54,7 @@ * - RECLEV support for ES1688 and later * - 6 bits playback level support chips later than ES1688 * - Recording level support on a per-device basis for ES1887 - * - Full-Duplex for ES1887 (under development) + * - Full-Duplex for ES1887 * * Full duplex is enabled by specifying dma16. While the normal dma must * be one of 0, 1 or 3, dma16 can be one of 0, 1, 3 or 5. DMA 5 is a 16 bit @@ -100,7 +102,7 @@ * of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves * into ES1888 mode. This means that it claims IRQ 11, which happens to be my * ISDN adapter. Needless to say it no longer worked. I now understand why - * after rebooting 0x7f already was 0x05, the value of my choise: the BIOS + * after rebooting 0x7f already was 0x05, the value of my choice: the BIOS * did it. * * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is decribed @@ -1200,10 +1202,10 @@ /* AAS: info stolen from ALSA: these boards have different clocks */ switch(devc->submodel) { -/* APPARENTLY NOT 1869 +/* APPARENTLY NOT 1869 AND 1887 case SUBMDL_ES1869: -*/ case SUBMDL_ES1887: +*/ case SUBMDL_ES1888: devc->caps |= SB_CAP_ES18XX_RATE; break; @@ -1305,6 +1307,13 @@ int ess_dsp_init (sb_devc *devc, struct address_info *hw_config) { /* + * Caller also checks this, but anyway + */ + if (devc->model != MDL_ESS) { + printk (KERN_INFO "ess_dsp_init for non ESS chip\n"); + return 1; + } + /* * This for ES1887 to run Full Duplex. Actually ES1888 * is allowed to do so too. I have no idea yet if this * will work for ES1888 however. 
@@ -1324,15 +1333,12 @@ if (devc->dma8 != devc->dma16 && devc->dma16 != -1) { devc->duplex = 1; } - - if (!ess_set_dma_hw (devc)) { - free_irq(devc->irq, devc); - return 0; - } - return 1; - } else { - return -1; } + if (!ess_set_dma_hw (devc)) { + free_irq(devc->irq, devc); + return 0; + } + return 1; } /**************************************************************************** diff -u --recursive --new-file v2.3.31/linux/drivers/sound/waveartist.c linux/drivers/sound/waveartist.c --- v2.3.31/linux/drivers/sound/waveartist.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/sound/waveartist.c Mon Dec 13 16:26:27 1999 @@ -199,7 +199,7 @@ if (res == 0x55aa) break; } - } while (timeout--); + } while (--timeout); if (timeout == 0) { printk(KERN_WARNING "WaveArtist: reset timeout "); @@ -1200,7 +1200,8 @@ char rev[3], dev_name[64]; int my_dev; - waveartist_reset(devc); + if (waveartist_reset(devc)) + return -ENODEV; sprintf(dev_name, "%s (%s", devc->hw.name, devc->chip_name); @@ -1765,14 +1766,14 @@ MODULE_PARM(dma, "i"); /* DMA */ MODULE_PARM(dma2, "i"); /* DMA2 */ -int io = CONFIG_WAVEARTIST_BASE; -int irq = CONFIG_WAVEARTIST_IRQ; -int dma = CONFIG_WAVEARTIST_DMA; -int dma2 = CONFIG_WAVEARTIST_DMA2; +static int io = CONFIG_WAVEARTIST_BASE; +static int irq = CONFIG_WAVEARTIST_IRQ; +static int dma = CONFIG_WAVEARTIST_DMA; +static int dma2 = CONFIG_WAVEARTIST_DMA2; static int attached; -struct address_info hw_config; +static struct address_info hw_config; int init_module(void) { diff -u --recursive --new-file v2.3.31/linux/drivers/video/Makefile linux/drivers/video/Makefile --- v2.3.31/linux/drivers/video/Makefile Tue Nov 23 22:42:21 1999 +++ linux/drivers/video/Makefile Mon Dec 13 14:11:32 1999 @@ -7,6 +7,12 @@ MOD_IN_SUB_DIRS := ALL_SUB_DIRS := +O_TARGET := video.o +O_OBJS := +M_OBJS := +# This is a nice idea but needs depmod altering +# MOD_LIST_NAME := VIDEO_MODULES + # All of the (potential) objects that export symbols. 
# This list comes from 'grep -l EXPORT_SYMBOL *.[hc]'. @@ -96,26 +102,34 @@ obj-$(CONFIG_FBCON_MFB) += fbcon-mfb.o obj-$(CONFIG_FBCON_VGA) += fbcon-vga.o +# Extract lists of the multi-part drivers. +# The 'int-*' lists are the intermediate files used to build the multi's. + +multi-y := $(filter $(list-multi), $(obj-y)) +multi-m := $(filter $(list-multi), $(obj-m)) +int-y := $(sort $(foreach m, $(multi-y), $($(basename $(m))-objs))) +int-m := $(sort $(foreach m, $(multi-m), $($(basename $(m))-objs))) + # Files that are both resident and modular: remove from modular. obj-m := $(filter-out $(obj-y), $(obj-m)) +int-m := $(filter-out $(int-y), $(int-m)) # Take multi-part drivers out of obj-y and put components in. -obj-y := $(filter-out $(list-multi), $(obj-y)) +obj-y := $(filter-out $(list-multi), $(obj-y)) $(int-y) # Translate to Rules.make lists. -L_TARGET := video.a -# This is a nice idea but needs depmod altering -#MOD_LIST_NAME := VIDEO_MODULES - -L_OBJS := $(sort $(filter-out $(export-objs), $(obj-y))) -LX_OBJS := $(sort $(filter $(export-objs), $(obj-y))) +O_OBJS := $(filter-out $(export-objs), $(obj-y)) +OX_OBJS := $(filter $(export-objs), $(obj-y)) M_OBJS := $(sort $(filter-out $(export-objs), $(obj-m))) -MX_OBJS := $(sort $(filter $(export-objs), $(obj-m))) +MX_OBJS := $(sort $(filter $(export-objs), $(obj-m))) include $(TOPDIR)/Rules.make + +clean: + rm -f core *.o *.a *.s promcon_tbl.c: prom.uni ../char/conmakehash ../char/conmakehash prom.uni | \ diff -u --recursive --new-file v2.3.31/linux/drivers/video/cyber2000fb.c linux/drivers/video/cyber2000fb.c --- v2.3.31/linux/drivers/video/cyber2000fb.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/video/cyber2000fb.c Mon Dec 13 16:26:27 1999 @@ -344,6 +344,7 @@ unsigned char crtc[19]; unsigned int width; unsigned int pitch; + unsigned int fetch; /* * Other @@ -359,7 +360,7 @@ static void cyber2000fb_set_timing(struct par_info *hw) { - unsigned int fetchrow, i; + unsigned int i; /* * Blank palette @@ -412,6 +413,7 @@ 
/* PLL registers */ cyber2000_grphw(0xb0, hw->clock_mult); cyber2000_grphw(0xb1, hw->clock_div); + cyber2000_grphw(0xb2, 0xdb); cyber2000_grphw(0xb3, 0x54); /* MCLK: 75MHz */ cyber2000_grphw(0x90, 0x01); @@ -427,12 +429,10 @@ cyber2000_outb(0x20, 0x3c0); cyber2000_outb(0xff, 0x3c6); - fetchrow = hw->pitch + 1; - cyber2000_grphw(0x14, fetchrow); - /* FIXME: is this the right way round? */ - cyber2000_grphw(0x15, ((fetchrow >> 4) & 0xf0) | ((hw->pitch >> 8) & 0x0f)); + cyber2000_grphw(0x14, hw->fetch); + cyber2000_grphw(0x15, ((hw->fetch >> 8) & 0x03) | ((hw->pitch >> 4) & 0x30)); cyber2000_grphw(0x77, hw->visualid); - cyber2000_grphw(0x33, 0x1c); + cyber2000_grphw(0x33, 0x0c); /* * Set up accelerator registers @@ -616,8 +616,8 @@ * mult = reg0xb0.7:0 * div1 = (reg0xb1.5:0 + 1) * div2 = 2^(reg0xb1.7:6) - * fpll should be between 150 and 220 MHz - * (6667ps and 4545ps) + * fpll should be between 115 and 257 MHz + * (8696ps and 3891ps) */ static int cyber2000fb_decode_clock(struct par_info *hw, struct fb_var_screeninfo *var) @@ -670,25 +670,95 @@ break; } #else + /* + * 1600x1200 1280x1024 1152x864 1024x768 800x600 640x480 + * 5051 5051 yes 76* + * 5814 5814 no 66 + * 6411 6411 no 60 + * 7408 7408 yes 75* + * 74* + * 7937 7937 yes 70* + * 9091 4545 yes 80* + * 75* 100* + * 9260 4630 yes 60* + * 10000 5000 no 70 90 + * 12500 6250 yes 47-lace* 60* + * 43-lace* + * 12699 6349 yes 75* + * 13334 6667 no 72 + * 70 + * 14815 7407 yes 100* + * 15385 7692 yes 47-lace* 60* + * 43-lace* + * 17656 4414 no 90 + * 20000 5000 no 72 + * 20203 5050 yes 75* + * 22272 5568 yes 43-lace* 70* 100* + * 25000 6250 yes 60* + * 25057 6264 no 90 + * 27778 6944 yes 56* + * 48-lace* + * 31747 7936 yes 75* + * 32052 8013 no 72 + * 39722 /6 6620 no + * 39722 /8 4965 yes 60* + */ /* /1 /2 /4 /6 /8 */ /* (2010) (2000) */ - if (pll_ps == 4630) { /* 216.0, 108.0, 54.00, 36.000 27.000 */ - mult = 181; /* 4630 9260 18520 27780 37040 */ - div1 = 12; - } else if (pll_ps == 4965) { /* 201.0, 100.5, 50.25, 
33.500 25.125 */ - mult = 211; /* 4965 9930 19860 29790 39720 */ - div1 = 15; - } else if (pll_ps == 5050) { /* 198.0, 99.0, 49.50, 33.000 24.750 */ - mult = 83; /* 5050 10100 20200 30300 40400 */ - div1 = 6; - } else if (pll_ps == 6349) { /* 158.0, 79.0, 39.50, 26.333 19.750 */ - mult = 209; /* 6349 12698 25396 38094 50792 */ - div1 = 19; - } else if (pll_ps == 6422) { /* 156.0, 78.0, 39.00, 26.000 19.500 */ - mult = 190; /* 6422 12844 25688 38532 51376 */ - div1 = 17; + if (pll_ps >= 4543 && pll_ps <= 4549) { + mult = 169; /*u220.0 110.0 54.99 36.663 27.497 */ + div1 = 11; /* 4546 9092 18184 27276 36367 */ + } else if (pll_ps >= 4596 && pll_ps <= 4602) { + mult = 243; /* 217.5 108.7 54.36 36.243 27.181 */ + div1 = 16; /* 4599 9197 18395 27592 36789 */ + } else if (pll_ps >= 4627 && pll_ps <= 4633) { + mult = 181; /*u216.0, 108.0, 54.00, 36.000 27.000 */ + div1 = 12; /* 4630 9260 18520 27780 37040 */ + } else if (pll_ps >= 4962 && pll_ps <= 4968) { + mult = 211; /*u201.0, 100.5, 50.25, 33.500 25.125 */ + div1 = 15; /* 4965 9930 19860 29790 39720 */ + } else if (pll_ps >= 5005 && pll_ps <= 5011) { + mult = 251; /* 200.0 99.8 49.92 33.280 24.960 */ + div1 = 18; /* 5008 10016 20032 30048 40064 */ + } else if (pll_ps >= 5047 && pll_ps <= 5053) { + mult = 83; /*u198.0, 99.0, 49.50, 33.000 24.750 */ + div1 = 6; /* 5050 10100 20200 30300 40400 */ + } else if (pll_ps >= 5490 && pll_ps <= 5496) { + mult = 89; /* 182.0 91.0 45.51 30.342 22.756 */ + div1 = 7; /* 5493 10986 21972 32958 43944 */ + } else if (pll_ps >= 5567 && pll_ps <= 5573) { + mult = 163; /*u179.5 89.8 44.88 29.921 22.441 */ + div1 = 13; /* 5570 11140 22281 33421 44562 */ + } else if (pll_ps >= 6246 && pll_ps <= 6252) { + mult = 190; /*u160.0, 80.0, 40.00, 26.671 20.003 */ + div1 = 17; /* 6249 12498 24996 37494 49992 */ + } else if (pll_ps >= 6346 && pll_ps <= 6352) { + mult = 209; /*u158.0, 79.0, 39.50, 26.333 19.750 */ + div1 = 19; /* 6349 12698 25396 38094 50792 */ + } else if (pll_ps >= 6648 && pll_ps <= 
6655) { + mult = 210; /*u150.3 75.2 37.58 25.057 18.792 */ + div1 = 20; /* 6652 13303 26606 39909 53213 */ + } else if (pll_ps >= 6943 && pll_ps <= 6949) { + mult = 181; /*u144.0 72.0 36.00 23.996 17.997 */ + div1 = 18; /* 6946 13891 27782 41674 55565 */ + } else if (pll_ps >= 7404 && pll_ps <= 7410) { + mult = 198; /*u134.0 67.5 33.75 22.500 16.875 */ + div1 = 21; /* 7407 14815 29630 44445 59260 */ + } else if (pll_ps >= 7689 && pll_ps <= 7695) { + mult = 227; /*u130.0 65.0 32.50 21.667 16.251 */ + div1 = 25; /* 7692 15384 30768 46152 61536 */ + } else if (pll_ps >= 7808 && pll_ps <= 7814) { + mult = 152; /* 128.0 64.0 32.00 21.337 16.003 */ + div1 = 17; /* 7811 15623 31245 46868 62490 */ + } else if (pll_ps >= 7934 && pll_ps <= 7940) { + mult = 44; /*u126.0 63.0 31.498 20.999 15.749 */ + div1 = 5; /* 7937 15874 31748 47622 63494 */ } else return -EINVAL; + /* 187 13 -> 4855 */ + /* 181 18 -> 6946 */ + /* 163 13 -> 5570 */ + /* 169 11 -> 4545 */ #endif /* * Step 3: @@ -768,7 +838,11 @@ debug_printf("%02X ", hw->crtc[i]); debug_printf("%02X\n", hw->crtc_ofl); } - hw->width -= 1; + hw->width -= 1; + hw->fetch = hw->pitch; + if (current_par.bus_64bit == 0) + hw->fetch <<= 1; + hw->fetch += 1; return 0; } @@ -1243,12 +1317,12 @@ static char igs_regs[] __initdata = { 0x10, 0x10, 0x12, 0x00, 0x13, 0x00, -/* 0x30, 0x21,*/ 0x31, 0x00, 0x32, 0x00, 0x33, 0x01, + 0x31, 0x00, 0x32, 0x00, 0x33, 0x01, 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x01, 0x58, 0x00, 0x59, 0x00, 0x5a, 0x00, - 0x70, 0x0b,/* 0x71, 0x10, 0x72, 0x45,*/ 0x73, 0x30, - 0x74, 0x1b, 0x75, 0x1e, 0x76, 0x00, 0x7a, 0xc8 + 0x70, 0x0b, 0x73, 0x30, + 0x74, 0x0b, 0x75, 0x17, 0x76, 0x00, 0x7a, 0xc8 }; static void __init cyber2000fb_hw_init(void) @@ -1259,6 +1333,12 @@ cyber2000_grphw(igs_regs[i], igs_regs[i+1]); } +static unsigned short device_ids[] __initdata = { + PCI_DEVICE_ID_INTERG_2000, + PCI_DEVICE_ID_INTERG_2010, + PCI_DEVICE_ID_INTERG_5000 +}; + /* * 
Initialization */ @@ -1267,14 +1347,14 @@ struct pci_dev *dev; u_int h_sync, v_sync; u_long mmio_base, smem_base, smem_size; - int err = 0; - - dev = pci_find_device(PCI_VENDOR_ID_INTERG, - PCI_DEVICE_ID_INTERG_2000, NULL); + int err = 0, i; - if (!dev) + for (i = 0; i < sizeof(device_ids) / sizeof(device_ids[0]); i++) { dev = pci_find_device(PCI_VENDOR_ID_INTERG, - PCI_DEVICE_ID_INTERG_2010, NULL); + device_ids[i], NULL); + if (dev) + break; + } if (!dev) return -ENXIO; @@ -1307,12 +1387,15 @@ cyber2000_outb(0x08, 0x46e8); /* - * get the video RAM size from the VGA register. + * get the video RAM size and width from the VGA register. * This should have been already initialised by the BIOS, * but if it's garbage, claim default 1MB VRAM (woody) */ cyber2000_outb(0x72, 0x3ce); - switch (cyber2000_inb(0x3cf) & 3) { + i = cyber2000_inb(0x3cf); + current_par.bus_64bit = i & 4; + + switch (i & 3) { case 2: smem_size = 0x00400000; break; case 1: smem_size = 0x00200000; break; default: smem_size = 0x00100000; break; @@ -1335,7 +1418,7 @@ err = -ENOMEM; goto release_smem_resource; } -current_par.screen_base += IO_FUDGE_FACTOR; + current_par.screen_size = smem_size; current_par.screen_base_p = smem_base + 0x80000000; current_par.regs_base_p = mmio_base + 0x80000000; @@ -1410,7 +1493,6 @@ /* Not reached because the usecount will never be decremented to zero */ unregister_framebuffer(&fb_info); - /* TODO: clean up ... 
*/ iounmap(current_par.screen_base); iounmap(CyberRegs); diff -u --recursive --new-file v2.3.31/linux/drivers/video/cyber2000fb.h linux/drivers/video/cyber2000fb.h --- v2.3.31/linux/drivers/video/cyber2000fb.h Tue Dec 7 09:32:46 1999 +++ linux/drivers/video/cyber2000fb.h Mon Dec 13 16:26:27 1999 @@ -52,6 +52,7 @@ char dev_name[32]; unsigned int initialised; unsigned int dev_id; + unsigned int bus_64bit:1; /* * palette @@ -82,6 +83,120 @@ #define VISUALID_64K 2 #define VISUALID_16M 4 #define VISUALID_32K 6 + +#define K_CAP_X2_CTL1 0x49 + +#define CAP_X_START 0x60 +#define CAP_X_END 0x62 +#define CAP_Y_START 0x64 +#define CAP_Y_END 0x66 +#define CAP_DDA_X_INIT 0x68 +#define CAP_DDA_X_INC 0x6a +#define CAP_DDA_Y_INIT 0x6c +#define CAP_DDA_Y_INC 0x6e + +#define EXT_FIFO_CTL 0x74 + +#define CAP_PIP_X_START 0x80 +#define CAP_PIP_X_END 0x82 +#define CAP_PIP_Y_START 0x84 +#define CAP_PIP_Y_END 0x86 + +#define CAP_NEW_CTL1 0x88 + +#define CAP_NEW_CTL2 0x89 + +#define CAP_MODE1 0xa4 +#define CAP_MODE1_8BIT 0x01 /* enable 8bit capture mode */ +#define CAP_MODE1_CCIR656 0x02 /* CCIR656 mode */ +#define CAP_MODE1_IGNOREVGT 0x04 /* ignore VGT */ +#define CAP_MODE1_ALTFIFO 0x10 /* use alternate FIFO for capture */ +#define CAP_MODE1_SWAPUV 0x20 /* swap UV bytes */ +#define CAP_MODE1_MIRRORY 0x40 /* mirror vertically */ +#define CAP_MODE1_MIRRORX 0x80 /* mirror horizontally */ + +#define CAP_MODE2 0xa5 + +#define Y_TV_CTL 0xae + +#define EXT_MEM_START 0xc0 /* ext start address 21 bits */ +#define HOR_PHASE_SHIFT 0xc2 /* high 3 bits */ +#define EXT_SRC_WIDTH 0xc3 /* ext offset phase 10 bits */ +#define EXT_SRC_HEIGHT 0xc4 /* high 6 bits */ +#define EXT_X_START 0xc5 /* ext->screen, 16 bits */ +#define EXT_X_END 0xc7 /* ext->screen, 16 bits */ +#define EXT_Y_START 0xc9 /* ext->screen, 16 bits */ +#define EXT_Y_END 0xcb /* ext->screen, 16 bits */ +#define EXT_SRC_WIN_WIDTH 0xcd /* 8 bits */ +#define EXT_COLOUR_COMPARE 0xce /* 24 bits */ +#define EXT_DDA_X_INIT 0xd1 /* ext->screen 16 
bits */ +#define EXT_DDA_X_INC 0xd3 /* ext->screen 16 bits */ +#define EXT_DDA_Y_INIT 0xd5 /* ext->screen 16 bits */ +#define EXT_DDA_Y_INC 0xd7 /* ext->screen 16 bits */ + +#define VID_FIFO_CTL 0xd9 + +#define VID_CAP_VFC 0xdb +#define VID_CAP_VFC_YUV422 0x00 /* formats - does this cause conversion? */ +#define VID_CAP_VFC_RGB555 0x01 +#define VID_CAP_VFC_RGB565 0x02 +#define VID_CAP_VFC_RGB888_24 0x03 +#define VID_CAP_VFC_RGB888_32 0x04 +#define VID_CAP_VFC_DUP_PIX_ZOON 0x08 /* duplicate pixel zoom */ +#define VID_CAP_VFC_MOD_3RD_PIX 0x20 /* modify 3rd duplicated pixel */ +#define VID_CAP_VFC_DBL_H_PIX 0x40 /* double horiz pixels */ +#define VID_CAP_VFC_UV128 0x80 /* UV data offset by 128 */ + +#define VID_DISP_CTL1 0xdc +#define VID_DISP_CTL1_INTRAM 0x01 /* video pixels go to internal RAM */ +#define VID_DISP_CTL1_IGNORE_CCOMP 0x02 /* ignore colour compare registers */ +#define VID_DISP_CTL1_NOCLIP 0x04 /* do not clip to 16235,16240 */ +#define VID_DISP_CTL1_UV_AVG 0x08 /* U/V data is averaged */ +#define VID_DISP_CTL1_Y128 0x10 /* Y data offset by 128 */ +#define VID_DISP_CTL1_VINTERPOL_OFF 0x20 /* vertical interpolation off */ +#define VID_DISP_CTL1_VID_OUT_WIN_FULL 0x40 /* video out window full */ +#define VID_DISP_CTL1_ENABLE_VID_WINDOW 0x80 /* enable video window */ + +#define VID_FIFO_CTL1 0xdd + +#define VFAC_CTL1 0xe8 +#define VFAC_CTL1_CAPTURE 0x01 /* capture enable */ +#define VFAC_CTL1_VFAC_ENABLE 0x02 /* vfac enable */ +#define VFAC_CTL1_FREEZE_CAPTURE 0x04 /* freeze capture */ +#define VFAC_CTL1_FREEZE_CAPTURE_SYNC 0x08 /* sync freeze capture */ +#define VFAC_CTL1_VALIDFRAME_SRC 0x10 /* select valid frame source */ +#define VFAC_CTL1_PHILIPS 0x40 /* select Philips mode */ +#define VFAC_CTL1_MODVINTERPOLCLK 0x80 /* modify vertical interpolation clocl */ + +#define VFAC_CTL2 0xe9 +#define VFAC_CTL2_INVERT_VIDDATAVALID 0x01 /* invert video data valid */ +#define VFAC_CTL2_INVERT_GRAPHREADY 0x02 /* invert graphic ready output sig */ +#define 
VFAC_CTL2_INVERT_DATACLK 0x04 /* invert data clock signal */ +#define VFAC_CTL2_INVERT_HSYNC 0x08 /* invert hsync input */ +#define VFAC_CTL2_INVERT_VSYNC 0x10 /* invert vsync input */ +#define VFAC_CTL2_INVERT_FRAME 0x20 /* invert frame odd/even input */ +#define VFAC_CTL2_INVERT_BLANK 0x40 /* invert blank output */ +#define VFAC_CTL2_INVERT_OVSYNC 0x80 /* invert other vsync input */ + +#define VFAC_CTL3 0xea +#define VFAC_CTL3_CAP_IRQ 0x40 /* enable capture interrupt */ + +#define CAP_MEM_START 0xeb /* 18 bits */ +#define CAP_MAP_WIDTH 0xed /* high 6 bits */ +#define CAP_PITCH 0xee /* 8 bits */ + +#define CAP_CTL_MISC 0xef +#define CAP_CTL_MISC_HDIV 0x01 +#define CAP_CTL_MISC_HDIV4 0x02 +#define CAP_CTL_MISC_ODDEVEN 0x04 +#define CAP_CTL_MISC_HSYNCDIV2 0x08 +#define CAP_CTL_MISC_SYNCTZHIGH 0x10 +#define CAP_CTL_MISC_SYNCTZOR 0x20 +#define CAP_CTL_MISC_DISPUSED 0x80 + +#define REG_BANK 0xfa +#define REG_BANK_Y 0x01 +#define REG_BANK_K 0x05 #define K_CAP_X2_CTL1 0x49 diff -u --recursive --new-file v2.3.31/linux/drivers/video/fbgen.c linux/drivers/video/fbgen.c --- v2.3.31/linux/drivers/video/fbgen.c Thu Feb 25 10:02:11 1999 +++ linux/drivers/video/fbgen.c Thu Dec 9 13:05:08 1999 @@ -17,6 +17,7 @@ #include #include +#include