diff -ru --new-file /usr/src/linux/MAGIC linux/MAGIC
--- /usr/src/linux/MAGIC	Mon Nov 27 09:27:28 1995
+++ linux/MAGIC	Tue Nov 28 08:33:12 1995
@@ -71,6 +71,7 @@
 0x03	linux/hdreg.h
 0x04	linux/umsdos_fs.h
 0x06	linux/lp.h
+0x09	linux/md.h
 0x12	linux/fs.h
 0x20	linux/cm206.h
 'C'	linux/soundcard.h
diff -ru --new-file /usr/src/linux/arch/i386/defconfig linux/arch/i386/defconfig
--- /usr/src/linux/arch/i386/defconfig	Sun Nov 19 12:25:59 1995
+++ linux/arch/i386/defconfig	Tue Nov 28 08:33:12 1995
@@ -34,7 +34,7 @@
 CONFIG_BLK_DEV_IDECD=y
 # CONFIG_BLK_DEV_TRITON is not set
 # CONFIG_BLK_DEV_XD is not set
-
+# CONFIG_BLK_DEV_MD is not set
 #
 # Networking options
 #
diff -ru --new-file /usr/src/linux/drivers/block/Config.in linux/drivers/block/Config.in
--- /usr/src/linux/drivers/block/Config.in	Sun Nov 19 12:22:50 1995
+++ linux/drivers/block/Config.in	Tue Nov 28 08:33:12 1995
@@ -23,3 +23,16 @@
 fi
 
 bool 'XT harddisk support' CONFIG_BLK_DEV_XD
+bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD
+if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then
+  dep_tristate '   Linear (append) mode' CONFIG_MD_LINEAR
+  dep_tristate '   RAID-0 (striping) mode' CONFIG_MD_STRIPED
+#  bool '   Kernel support for RAID-1' CONFIG_MD_SUPPORT_RAID1
+#  if [ "$CONFIG_MD_SUPPORT_RAID1" = "y" ]; then
+#    dep_tristate '       RAID-1 mode (very ALPHA)' CONFIG_MD_RAID1
+#  fi
+#  bool '   Kernel support for RAID-5' CONFIG_MD_SUPPORT_RAID5
+#  if [ "$CONFIG_MD_SUPPORT_RAID5" = "y" ]; then
+#    dep_tristate '       RAID-5 mode (NOT usable - debug only ;-)' CONFIG_MD_RAID5
+#  fi
+fi
diff -ru --new-file /usr/src/linux/drivers/block/Makefile linux/drivers/block/Makefile
--- /usr/src/linux/drivers/block/Makefile	Mon Oct 30 09:38:27 1995
+++ linux/drivers/block/Makefile	Tue Nov 28 08:33:12 1995
@@ -19,6 +19,7 @@
 L_OBJS   := ll_rw_blk.o ramdisk.o genhd.o 
 M_OBJS   :=
 MOD_LIST_NAME := BLOCK_MODULES
+LX_OBJS :=
 
 ifeq ($(CONFIG_BLK_DEV_FD),y)
 L_OBJS += floppy.o
@@ -46,6 +47,47 @@
 
 ifeq ($(CONFIG_BLK_DEV_XD),y)
 L_OBJS += xd.o
+endif
+
+ifeq ($(CONFIG_BLK_DEV_MD),y)
+LX_OBJS += md.o
+
+ifeq ($(CONFIG_MD_LINEAR),y)
+L_OBJS += linear.o
+else
+  ifeq ($(CONFIG_MD_LINEAR),m)
+  M_OBJS += linear.o
+  endif
+endif
+
+ifeq ($(CONFIG_MD_STRIPED),y)
+L_OBJS += raid0.o
+else
+  ifeq ($(CONFIG_MD_STRIPED),m)
+  M_OBJS += raid0.o
+  endif
+endif
+
+#ifeq ($(CONFIG_MD_RAID1),y)
+#L_OBJS += raid1.o
+#else
+#  ifeq ($(CONFIG_MD_SUPPORT_RAID1),y)
+#    ifeq ($(CONFIG_MD_RAID1),m)
+#    M_OBJS += raid1.o
+#    endif
+#  endif
+#endif
+#
+#ifeq ($(CONFIG_MD_RAID5),y)
+#L_OBJS += raid5.o
+#else
+#  ifeq ($(CONFIG_MD_SUPPORT_RAID5),y)
+#    ifeq ($(CONFIG_MD_RAID5),m)
+#    M_OBJS += raid5.o
+#    endif
+#  endif
+#endif
+
 endif
 
 include $(TOPDIR)/Rules.make
diff -ru --new-file /usr/src/linux/drivers/block/linear.c linux/drivers/block/linear.c
--- /usr/src/linux/drivers/block/linear.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/block/linear.c	Tue Nov 28 08:48:29 1995
@@ -0,0 +1,269 @@
+
+/*
+   linear.c : Multiple Devices driver for Linux
+              Copyright (C) 1994, 1995 Marc ZYNGIER
+	      <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	      <maz@gloups.fdn.fr>
+
+   Linear mode management functions.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#include <linux/module.h>
+
+#include <linux/md.h>
+#include <linux/linear.h>
+#include <linux/malloc.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+#define MD_PERSONALITY
+
+#include <linux/blk.h>
+
+static int linear_run (int minor, struct md_dev *mddev)
+{
+  int cur=0, i, size, dev0_size, nb_zone;
+  struct linear_data *data;
+
+  MOD_INC_USE_COUNT;
+  
+  mddev->private=kmalloc (sizeof (struct linear_data), GFP_KERNEL);
+  data=(struct linear_data *) mddev->private;
+
+  /*
+     Find out the smallest device. This was previously done
+     at registry time, but since it violates modularity,
+     I moved it here... Any comment ? ;-)
+   */
+
+  data->smallest=devices[minor];
+  for (i=1; i<mddev->nb_dev; i++)
+    if (data->smallest->size > devices[minor][i].size)
+      data->smallest=devices[minor]+i;
+  
+  nb_zone=data->nr_zones=
+    md_size[minor]/data->smallest->size +
+    (md_size[minor]%data->smallest->size ? 1 : 0);
+  
+  data->hash_table=kmalloc (sizeof (struct linear_hash)*nb_zone, GFP_KERNEL);
+
+  size=devices[minor][cur].size;
+
+  i=0;
+  while (cur<mddev->nb_dev)
+  {
+    data->hash_table[i].dev0=devices[minor]+cur;
+
+    if (size>=data->smallest->size) /* If we completely fill the slot */
+    {
+      data->hash_table[i++].dev1=NULL;
+      size-=data->smallest->size;
+
+      if (!size)
+      {
+	if (++cur==mddev->nb_dev) continue;
+	size=devices[minor][cur].size;
+      }
+
+      continue;
+    }
+
+    if (++cur==mddev->nb_dev) /* Last dev, set dev1 as NULL */
+    {
+      data->hash_table[i].dev1=NULL;
+      continue;
+    }
+
+    dev0_size=size;		/* Here, we use a 2nd dev to fill the slot */
+    size=devices[minor][cur].size;
+    data->hash_table[i++].dev1=devices[minor]+cur;
+    size-=(data->smallest->size - dev0_size);
+  }
+
+  return 0;
+}
+
+static int linear_stop (int minor, struct md_dev *mddev)
+{
+  struct linear_data *data=(struct linear_data *) mddev->private;
+  
+  kfree (data->hash_table);
+  kfree (data);
+
+  MOD_DEC_USE_COUNT;
+
+  return 0;
+}
+
+
+static int linear_map (int minor, struct md_dev *mddev, struct request *req)
+{
+  struct linear_data *data=(struct linear_data *) mddev->private;
+  struct linear_hash *hash;
+  struct real_dev *tmp_dev;
+  long block, rblock;
+  struct buffer_head *bh, *bh2;
+  int queue, nreq;
+  static struct request pending[MAX_REAL]={{0, }, };
+
+  while (req->bh || req->sem)
+  {
+    block=req->sector >> 1;
+    hash=data->hash_table+(block/data->smallest->size);
+    
+    if (block >= (hash->dev0->size + hash->dev0->offset))
+    {
+      if (!hash->dev1)
+	printk ("linear_map : hash->dev1==NULL for block %ld\n", block);
+      tmp_dev=hash->dev1;
+    }
+    else
+      tmp_dev=hash->dev0;
+    
+    if (block >= (tmp_dev->size + tmp_dev->offset) || block < tmp_dev->offset)
+      printk ("Block %ld out of bounds on dev %04x size %d offset %d\n", block, tmp_dev->dev, tmp_dev->size, tmp_dev->offset);
+    
+    rblock=(block-(tmp_dev->offset));
+    
+    if (req->sem)				/* This is a paging request */
+    {
+      req->rq_dev=tmp_dev->dev;
+      req->sector=rblock << 1;
+      add_request (blk_dev+MAJOR (tmp_dev->dev), req);
+      
+      return REDIRECTED_REQ;
+    }
+
+    if (block + (req->nr_sectors>>1) < (tmp_dev->size + tmp_dev->offset))
+    {
+      /* This request fits on a single disk, send it all... */
+      pending[0].rq_dev=tmp_dev->dev;
+      pending[0].cmd=req->cmd;
+      pending[0].sector=rblock << 1;
+      pending[0].nr_sectors=req->nr_sectors;
+      pending[0].bh=req->bh;
+      pending[0].bhtail=req->bhtail;
+
+      for (bh=req->bh; bh; bh=bh->b_reqnext)
+      {
+	if (!buffer_locked(bh))
+	  printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);
+	
+	bh->b_rdev=tmp_dev->dev;
+      }
+      
+      req->rq_status=RQ_INACTIVE;
+      wake_up (&wait_for_request);
+      make_md_request (pending, 1);
+      return REDIRECTED_REQ;
+    }
+
+    /* This is the worst case : the request is across several disks,
+       we must divide it... */
+
+    queue=tmp_dev - devices[minor];
+    
+    for (nreq=0, bh=bh2=req->bh;
+	 bh && bh->b_blocknr < (tmp_dev->size+tmp_dev->offset);
+	 nreq++, bh2=bh, bh=bh->b_reqnext)
+    {
+      if (!buffer_locked(bh))
+	printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);
+      
+      bh->b_rdev=tmp_dev->dev;
+    }
+    
+    pending[queue].rq_dev=tmp_dev->dev;
+    pending[queue].cmd=req->cmd;
+    pending[queue].sector=rblock << 1;
+    pending[queue].nr_sectors=nreq << 1;
+    pending[queue].bh=req->bh;
+    pending[queue].bhtail=bh2;
+
+    req->bh=bh;
+    if (!bh)
+      continue;
+    
+    req->sector=bh->b_blocknr << 1;
+    req->nr_sectors-=nreq << 1;
+  }
+
+  req->rq_status=RQ_INACTIVE;
+  wake_up (&wait_for_request);
+  make_md_request (pending, mddev->nb_dev);
+  return REDIRECTED_REQ;
+}
+
+
+static int linear_status (char *page, int minor, struct md_dev *mddev)
+{
+  int sz=0;
+
+#undef MD_DEBUG
+#ifdef MD_DEBUG
+  int j;
+  struct linear_data *data=(struct linear_data *) mddev->private;
+  
+  sz+=sprintf (page+sz, "      ");
+  for (j=0; j<data->nr_zones; j++)
+  {
+    sz+=sprintf (page+sz, "[%s",
+		 partition_name (data->hash_table[j].dev0->dev));
+
+    if (data->hash_table[j].dev1)
+      sz+=sprintf (page+sz, "/%s] ",
+		   partition_name(data->hash_table[j].dev1->dev));
+    else
+      sz+=sprintf (page+sz, "] ");
+  }
+
+  sz+=sprintf (page+sz, "\n");
+#endif
+  return sz;
+}
+
+
+static struct md_personality linear_personality=
+{
+  "linear",
+  linear_map,
+  linear_run,
+  linear_stop,
+  linear_status,
+  NULL,				/* no ioctls */
+  0
+};
+
+
+#ifndef MODULE
+
+void linear_init (void)
+{
+  register_md_personality (LINEAR, &linear_personality);
+}
+
+#else
+
+int init_module (void)
+{
+  return (register_md_personality (LINEAR, &linear_personality));
+}
+
+void cleanup_module (void)
+{
+  if (MOD_IN_USE)
+    printk ("md linear : module still busy...\n");
+  else
+    unregister_md_personality (LINEAR);
+}
+
+#endif
diff -ru --new-file /usr/src/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- /usr/src/linux/drivers/block/ll_rw_blk.c	Tue Nov 28 08:54:59 1995
+++ linux/drivers/block/ll_rw_blk.c	Tue Nov 28 08:35:01 1995
@@ -50,7 +50,7 @@
 	{ NULL, NULL },		/* 6 dev lp */
 	{ NULL, NULL },		/* 7 dev pipes */
 	{ NULL, NULL },		/* 8 dev sd */
-	{ NULL, NULL },		/* 9 dev st */
+	{ NULL, NULL },		/* 9 dev st / dev md */
 	{ NULL, NULL },		/* 10 */
 	{ NULL, NULL },		/* 11 */
 	{ NULL, NULL },		/* 12 */
@@ -247,7 +247,7 @@
  * It disables interrupts so that it can muck with the
  * request-lists in peace.
  */
-static void add_request(struct blk_dev_struct * dev, struct request * req)
+void add_request(struct blk_dev_struct * dev, struct request * req)
 {
 	struct request * tmp;
 	short		 disk_index;
@@ -290,7 +290,7 @@
 	tmp->next = req;
 
 /* for SCSI devices, call request_fn unconditionally */
-	if (scsi_major(MAJOR(req->rq_dev)))
+	if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
 		(dev->request_fn)();
 
 	sti();
@@ -344,12 +344,14 @@
 /* look for a free request. */
 	cli();
 
-/* The scsi disk drivers and the IDE driver completely remove the request
- * from the queue when they start processing an entry.  For this reason
- * it is safe to continue to add links to the top entry for those devices.
+/* The scsi disk drivers, the IDE driver and the MD driver completely
+ * remove the request from the queue when they start processing an
+ * entry.  For this reason it is safe to continue to add links to the
+ * top entry for those devices.
  */
 	if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
 	     || major == IDE1_MAJOR
+	     || major == MD_MAJOR
 	     || major == FLOPPY_MAJOR
 	     || major == SCSI_DISK_MAJOR
 	     || major == SCSI_CDROM_MAJOR
@@ -426,6 +428,100 @@
 	add_request(major+blk_dev,req);
 }
 
+#ifdef CONFIG_BLK_DEV_MD
+
+struct request *get_md_request (int max_req, kdev_t dev, char *callable_driver,
+				int *req_major0, int *req_major1, int n)
+{
+  int j;
+  struct request *req;
+  
+  /* find an unused request. */
+  if (!(req = get_request (max_req, dev)))
+  {
+    /* Ouch ! We have to sleep... We flush all inserted requests
+       by calling the drivers' request_fn, and wait for a free
+       request to come by... */
+    
+    printk ("get_md_request : No more requests, flushing...\n");
+    for (j=0; j<n; j++)
+    {
+      if (req_major0[j]==-1)
+	continue;
+      
+      if (callable_driver[req_major0[j]])
+      {
+	blk_dev[req_major0[j]].request_fn();
+	callable_driver[req_major0[j]]=0;
+      }
+
+      if (req_major1 && callable_driver[req_major1[j]])
+      {
+	blk_dev[req_major1[j]].request_fn();
+	callable_driver[req_major1[j]]=0;
+      }
+    }
+    
+    sti();
+    printk ("get_md_request : Now waiting for free request\n");
+    req = __get_request_wait (max_req, dev);
+    printk ("get_md_request : Got free request\n");
+  }
+
+  return (req);
+}
+
+void add_md_request (struct request *req, char *callable_driver)
+{
+  short disk_index;
+  struct request *tmp;
+
+  /* same as add_request, without mark_buffer_clean */
+  switch (MAJOR(req->rq_dev))
+  {
+    case SCSI_DISK_MAJOR:
+    disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
+    if (disk_index < 4)
+      drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
+    break;
+    
+    case IDE0_MAJOR:	/* same as HD_MAJOR */
+    case XT_DISK_MAJOR:
+    disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
+    drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
+    break;
+    
+    case IDE1_MAJOR:
+    disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
+    drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
+    
+    default:
+    break;
+  }
+  
+  cli();
+  if (!(tmp = (blk_dev[MAJOR(req->rq_dev)].current_request)))
+  {
+    blk_dev[MAJOR(req->rq_dev)].current_request = req;
+    callable_driver[MAJOR(req->rq_dev)] = 1;
+    return;
+  }
+  
+  for ( ; tmp->next ; tmp = tmp->next)
+  {
+    if ((IN_ORDER(tmp,req) || !IN_ORDER(tmp,tmp->next)) &&
+	IN_ORDER(req,tmp->next))
+      break;
+  }
+  req->next = tmp->next;
+  tmp->next = req;
+  
+  /* for SCSI devices, call request_fn unconditionally */
+  if (scsi_major(MAJOR(req->rq_dev)))
+    callable_driver[MAJOR(req->rq_dev)] = 1;
+}
+#endif
+
 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
 {
 	struct request * req;
@@ -523,6 +619,10 @@
 	for (i = 0; i < nr; i++) {
 		if (bh[i]) {
 			set_bit(BH_Req, &bh[i]->b_state);
+
+			/* Md needs this for error recovery */
+			bh[i]->b_rdev = bh[i]->b_dev;
+
 			make_request(major, rw, bh[i]);
 			if (rw == READ || rw == READA)
 				kstat.pgpgin++;
@@ -653,5 +753,8 @@
 #ifdef CONFIG_SJCD
 	sjcd_init();
 #endif CONFIG_SJCD
+#ifdef CONFIG_BLK_DEV_MD
+	md_init();
+#endif CONFIG_BLK_DEV_MD
 	return 0;
 }
diff -ru --new-file /usr/src/linux/drivers/block/md.c linux/drivers/block/md.c
--- /usr/src/linux/drivers/block/md.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/block/md.c	Tue Nov 28 08:49:19 1995
@@ -0,0 +1,818 @@
+
+/*
+   md.c : Multiple Devices driver for Linux
+          Copyright (C) 1994, 1995 Marc ZYNGIER
+	  <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	  <maz@gloups.fdn.fr>
+
+   A lot of inspiration came from hd.c ...
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/md.h>
+#include <linux/hdreg.h>
+#include <linux/stat.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <errno.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+
+#include <linux/blk.h>
+
+#ifdef CONFIG_MD_SUPPORT_RAID5
+int support_for_raid5; /* So raid-5 module won't be inserted if support
+			  was not set in the kernel */
+#endif
+
+#ifdef CONFIG_MD_SUPPORT_RAID1
+int support_for_raid1; /* So raid-1 module won't be inserted if support
+			  was not set in the kernel */
+#endif
+
+static struct hd_struct md_hd_struct[MAX_MD_DEV];
+static int md_blocksizes[MAX_MD_DEV];
+
+int md_size[MAX_MD_DEV]={0, };
+
+static void md_geninit (struct gendisk *);
+
+static struct gendisk md_gendisk=
+{
+  MD_MAJOR,
+  "md",
+  0,
+  1,
+  MAX_MD_DEV,
+  md_geninit,
+  md_hd_struct,
+  md_size,
+  MAX_MD_DEV,
+  NULL,
+  NULL
+};
+
+static struct md_personality *pers[MAX_PERSONALITY]={NULL, };
+
+struct real_dev devices[MAX_MD_DEV][MAX_REAL];
+struct md_dev md_dev[MAX_MD_DEV];
+
+static struct gendisk *find_gendisk (kdev_t dev)
+{
+  struct gendisk *tmp=gendisk_head;
+
+  while (tmp != NULL)
+  {
+    if (tmp->major==MAJOR(dev))
+      return (tmp);
+    
+    tmp=tmp->next;
+  }
+
+  return (NULL);
+}
+
+
+/* Picked up from genhd.c */
+char *partition_name (kdev_t dev)
+{
+  static char name[10];		/* This should be long
+				   enough for a device name ! */
+  struct gendisk *hd=find_gendisk (dev);
+  char base_name;
+  int minor=MINOR(dev);
+
+  if (!hd)
+  {
+    printk ("No gendisk entry for dev %04x\n", dev);
+    sprintf (name, "dev %04x", dev);
+    return (name);
+  }
+
+  base_name = (hd->major == IDE1_MAJOR) ? 'c' : 'a';
+  sprintf(name, "%s%c%d",
+	  hd->major_name,
+	  base_name + (minor >> hd->minor_shift),
+	  minor & ((1 << hd->minor_shift) - 1));
+  return (name);
+}
+
+
+static void set_ra (void)
+{
+  int i, j, minra=INT_MAX;
+
+  for (i=0; i<MAX_MD_DEV; i++)
+  {
+    if (!md_dev[i].pers)
+      continue;
+    
+    for (j=0; j<md_dev[i].nb_dev; j++)
+      if (read_ahead[MAJOR(devices[i][j].dev)]<minra)
+	minra=read_ahead[MAJOR(devices[i][j].dev)];
+  }
+  
+  read_ahead[MD_MAJOR]=minra;
+}
+
+
+static int md_ioctl (struct inode *inode, struct file *file,
+                     unsigned int cmd, unsigned long arg)
+{
+  int minor, index, err, current_ra;
+  struct gendisk *gen_real;
+  struct hd_geometry *loc = (struct hd_geometry *) arg;
+  kdev_t dev;
+
+  if (!suser())
+    return -EACCES;
+
+  if (((minor=MINOR(inode->i_rdev)) & 0x80) &&
+      (minor & 0x7f) < MAX_PERSONALITY &&
+      pers[minor & 0x7f] &&
+      pers[minor & 0x7f]->ioctl)
+    return (pers[minor & 0x7f]->ioctl (inode, file, cmd, arg));
+  
+  if (minor >= MAX_MD_DEV)
+    return -EINVAL;
+
+  switch (cmd)
+  {
+    case REGISTER_DEV:
+    dev=to_kdev_t ((dev_t) arg);
+    if (MAJOR(dev)==MD_MAJOR || md_dev[minor].nb_dev==MAX_REAL)
+      return -EINVAL;
+
+    if (!fs_may_mount (dev) || md_dev[minor].pers)
+      return -EBUSY;
+
+    if (!(gen_real=find_gendisk (dev)))
+      return -ENOENT;
+
+    index=md_dev[minor].nb_dev++;
+    devices[minor][index].dev=dev;
+
+    /* Lock the device by inserting a dummy inode. This doesn't
+       smell very good, but I need to be consistent with the
+       mount stuff, especially with fs_may_mount. If someone has
+       a better idea, please help ! */
+    
+    devices[minor][index].inode=get_empty_inode ();
+    devices[minor][index].inode->i_dev=dev; /* don't care about
+					       other fields */
+    insert_inode_hash (devices[minor][index].inode);
+    
+    /* Devices sizes are rounded to a multiple of page (needed for
+       paging). This is NOT done by fdisk when partitioning,
+       but that's a DOS thing anyway... */
+    
+    devices[minor][index].size=gen_real->sizes[MINOR(dev)] & (PAGE_MASK>>10);
+    devices[minor][index].offset=index ?
+      (devices[minor][index-1].offset + devices[minor][index-1].size) : 0;
+
+    if (!index)
+      md_size[minor]=devices[minor][index].size;
+    else
+      md_size[minor]+=devices[minor][index].size;
+
+    printk("REGISTER_DEV %s to md%x done\n", partition_name(dev), minor);
+    break;
+
+    case START_MD:
+    if (!md_dev[minor].nb_dev)
+      return -EINVAL;
+
+    if (md_dev[minor].pers)
+      return -EBUSY;
+
+    md_dev[minor].repartition=(int) arg;
+    
+    if ((index=PERSONALITY(md_dev+minor) >> (PERSONALITY_SHIFT))
+	>= MAX_PERSONALITY ||
+	!pers[index])
+      return -EINVAL;
+
+    md_dev[minor].pers=pers[index];
+
+    if ((err=md_dev[minor].pers->run (minor, md_dev+minor)))
+    {
+      md_dev[minor].pers=NULL;
+      return (err);
+    }
+
+    /* FIXME : We assume here we have blocks
+       that are twice as large as sectors.
+       THIS MAY NOT BE TRUE !!! */
+    md_hd_struct[minor].start_sect=0;
+    md_hd_struct[minor].nr_sects=md_size[minor]<<1;
+
+    /* It would be better to have a per-md-dev read_ahead. Currently,
+       we only use the smallest read_ahead among md-attached devices */
+
+    current_ra=read_ahead[MD_MAJOR];
+    
+    for (index=0; index<md_dev[minor].nb_dev; index++)
+    {
+      if (current_ra>read_ahead[MAJOR(devices[minor][index].dev)])
+	current_ra=read_ahead[MAJOR(devices[minor][index].dev)];
+
+      devices[minor][index].fault_count=0;
+      devices[minor][index].invalid=VALID;
+    }
+
+    read_ahead[MD_MAJOR]=current_ra;
+
+    printk ("START_DEV md%x %s\n", minor, md_dev[minor].pers->name);
+    break;
+
+    case STOP_MD:
+    if (inode->i_count>1 || md_dev[minor].busy>1) /* ioctl : one open channel */
+    {
+      printk ("STOP_MD md%x failed : i_count=%d, busy=%d\n", minor, inode->i_count, md_dev[minor].busy);
+      return -EBUSY;
+    }
+
+    if (md_dev[minor].pers)
+    {
+      /*  The device won't exist anymore -> flush it now */
+      fsync_dev (inode->i_rdev);
+      invalidate_buffers (inode->i_rdev);
+      md_dev[minor].pers->stop (minor, md_dev+minor);
+    }
+
+    /* Remove locks. */
+    for (index=0; index<md_dev[minor].nb_dev; index++)
+      clear_inode (devices[minor][index].inode);
+
+    md_dev[minor].nb_dev=md_size[minor]=0;
+    md_dev[minor].pers=NULL;
+
+    set_ra ();			/* calculate new read_ahead */
+    
+    printk ("STOP_DEV md%x\n", minor);
+    break;
+
+#if defined(CONFIG_MD_SUPPORT_RAID1) || defined(CONFIG_MD_SUPPORT_RAID5)
+    case MD_INVALID:
+    dev=to_kdev_t ((dev_t) arg);
+    if (!(err=md_valid_device (minor, dev, INVALID_ALWAYS)))
+      printk ("md%d : %s disabled\n", minor, partition_name (dev));
+
+    return (err);
+
+    case MD_VALID:
+    dev=to_kdev_t ((dev_t) arg);
+    if (!(err=md_valid_device (minor, dev, VALID)))
+      printk ("md%d : %s enabled\n", minor, partition_name (dev));
+
+    return (err);
+#endif
+    
+    case BLKGETSIZE:   /* Return device size */
+    if  (!arg)  return -EINVAL;
+    err=verify_area (VERIFY_WRITE, (long *) arg, sizeof(long));
+    if (err)
+      return err;
+    put_user (md_hd_struct[MINOR(inode->i_rdev)].nr_sects, (long *) arg);
+    break;
+
+    case BLKFLSBUF:
+    fsync_dev (inode->i_rdev);
+    invalidate_buffers (inode->i_rdev);
+    break;
+
+    case BLKRASET:
+    if (arg > 0xff)
+      return -EINVAL;
+    read_ahead[MAJOR(inode->i_rdev)] = arg;
+    return 0;
+    
+    case BLKRAGET:
+    if  (!arg)  return -EINVAL;
+    err=verify_area (VERIFY_WRITE, (long *) arg, sizeof(long));
+    if (err)
+      return err;
+    put_user (read_ahead[MAJOR(inode->i_rdev)], (long *) arg);
+    break;
+
+    case HDIO_GETGEO:
+    if (!loc)  return -EINVAL;
+    err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+    if (err)
+      return err;
+    put_user (2, (char *) &loc->heads);
+    put_user (4, (char *) &loc->sectors);
+    put_user (md_hd_struct[minor].nr_sects/8, (short *) &loc->cylinders);
+    put_user (md_hd_struct[MINOR(inode->i_rdev)].start_sect,
+		(long *) &loc->start);
+    break;
+    
+    RO_IOCTLS(inode->i_rdev,arg);
+    
+    default:
+    printk ("Unknown md_ioctl %d\n", cmd);
+    return -EINVAL;
+  }
+
+  return (0);
+}
+
+
+static int md_open (struct inode *inode, struct file *file)
+{
+  int minor=MINOR(inode->i_rdev);
+
+  md_dev[minor].busy++;
+  return (0);			/* Always succeed */
+}
+
+
+static void md_release (struct inode *inode, struct file *file)
+{
+  int minor=MINOR(inode->i_rdev);
+
+  sync_dev (inode->i_rdev);
+  md_dev[minor].busy--;
+}
+
+
+static struct file_operations md_fops=
+{
+  NULL,
+  block_read,
+  block_write,
+  NULL,
+  NULL,
+  md_ioctl,
+  NULL,
+  md_open,
+  md_release,
+  block_fsync
+};
+
+
+static inline int remap_request (int minor, struct request *req)
+{
+  if (!md_dev[minor].pers)
+  {
+    printk ("Oops ! md%d not running, giving up !\n", minor);
+    return -1;
+  }
+
+  return (md_dev[minor].pers->map(minor, md_dev+minor, req));
+}
+
+static void do_md_request (void)
+{
+  int minor;
+  struct request *req;
+
+  while (1)
+  {
+#ifdef MD_COUNT_SIZE
+    int reqsize, chunksize;
+#endif
+    
+    cli ();
+    req = blk_dev[MD_MAJOR].current_request;
+    if (!req || (req->rq_status == RQ_INACTIVE))
+    {
+      sti ();
+      return;
+    }
+    
+#ifdef MD_COUNT_SIZE
+    reqsize=req->nr_sectors>>2;
+    chunksize=1 << FACTOR_SHIFT(FACTOR(md_dev+MINOR(req->rq_dev)));
+    if (reqsize==chunksize) (md_dev+MINOR(req->rq_dev))->equal_count++;
+    if (reqsize<chunksize) (md_dev+MINOR(req->rq_dev))->smallest_count++;
+    if (reqsize>chunksize) (md_dev+MINOR(req->rq_dev))->biggest_count++;
+#endif
+    
+    blk_dev[MD_MAJOR].current_request = req->next;
+    sti ();
+
+    minor = MINOR(req->rq_dev);
+    if ((MAJOR(req->rq_dev) != MD_MAJOR) || (minor >= MAX_REAL))
+    {
+      printk("md: bad device number: 0x%04x\n", req->rq_dev);
+      end_request(0, req);
+      continue;
+    }
+
+    switch (remap_request (minor, req))
+    {
+      case REDIRECTED_BHREQ:	/* All right, redirection was successful */
+      req->rq_status=RQ_INACTIVE;
+      wake_up (&wait_for_request);
+      break;
+
+      case REDIRECTED_REQ:
+      break;			/* Redirected whole request (for swapping) */
+      
+      case REDIRECT_FAILED:	/* Swap redirection failed in RAID-[15] */
+      end_request (0, req);
+      break;
+      
+      default:
+      printk ("remap_request returned strange value !\n");
+    }
+  }
+}
+
+
+void make_md_request (struct request *pending, int n)
+{
+  int i, j, max_req, major=0, rw, found;
+  kdev_t dev;
+  struct buffer_head *bh;
+  struct request *req;
+  static int req_major[MAX_REAL];
+  static char callable_driver[MAX_BLKDEV]={0, };
+
+  for (i=0; i<n; i++)
+  {
+    if (!pending[i].bh)
+    {
+      req_major[i]=-1;
+      continue;
+    }
+
+    cli();
+    
+    found=0;
+    rw=pending[i].cmd;
+    bh=pending[i].bh;
+    req_major[i]=major=MAJOR(dev=pending[i].rq_dev);
+    max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
+ 
+    if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
+	 || major == IDE1_MAJOR
+	 || major == SCSI_DISK_MAJOR
+	 || major == IDE2_MAJOR
+	 || major == IDE3_MAJOR)
+	&& (req = blk_dev[major].current_request))
+    {
+#ifdef CONFIG_BLK_DEV_HD
+      if (major == HD_MAJOR)
+	req = req->next;
+#endif CONFIG_BLK_DEV_HD
+      
+      while (req && !found)
+      {
+	if (req->rq_dev == dev &&
+	    !req->sem &&
+	    req->cmd == rw &&
+	    req->sector + req->nr_sectors == pending[i].sector &&
+	    (req->nr_sectors + pending[i].nr_sectors) < 245)
+	{
+	  req->bhtail->b_reqnext = bh;
+	  req->bhtail = pending[i].bhtail;
+	  req->nr_sectors += pending[i].nr_sectors;
+	  found=1;
+	  continue;
+	}
+	
+	if (!found &&
+	    req->rq_dev == dev &&
+	    !req->sem &&
+	    req->cmd == rw &&
+	    req->sector - pending[i].nr_sectors == pending[i].sector &&
+	    (req->nr_sectors + pending[i].nr_sectors) < 245)
+	{
+	  req->nr_sectors += pending[i].nr_sectors;
+	  bh->b_reqnext = req->bh;
+	  req->buffer = bh->b_data;
+	  req->current_nr_sectors = bh->b_size >> 9;
+	  req->sector = pending[i].sector;
+	  req->bh = bh;
+	  found=1;
+	  continue;
+	}    
+
+	req = req->next;
+      }
+    }
+
+    if (found)
+      continue;
+  
+    req=get_md_request (max_req, dev, callable_driver, req_major, NULL, i);
+    
+    /* Build it up... */
+    req->cmd = rw;
+    req->errors = 0;
+#if defined (CONFIG_MD_SUPPORT_RAID1)
+    req->shared_count = 0;
+#endif
+    req->sector = pending[i].sector;
+    req->nr_sectors = pending[i].nr_sectors;
+    req->current_nr_sectors = bh->b_size >> 9;
+    req->buffer = bh->b_data;
+    req->sem = NULL;
+    req->bh = bh;
+    req->bhtail = pending[i].bhtail;
+    req->next = NULL;
+
+    add_md_request (req, callable_driver);
+  }
+
+  for (j=0; j<n; j++)
+  {
+    static int m;
+
+    if (!pending[j].bh)
+      continue;
+    
+    m=MAJOR(pending[j].rq_dev);
+    pending[j].bh=NULL;
+    
+    if (callable_driver[m])
+    {
+      blk_dev[m].request_fn();
+      callable_driver[m]=0;
+    }
+  }
+ 
+  sti ();
+}
+
+
+static struct symbol_table md_symbol_table=
+{
+#include <linux/symtab_begin.h>
+
+  X(devices),
+  X(md_size),
+  X(add_request),
+  X(make_md_request),
+
+#ifdef CONFIG_MD_SUPPORT_RAID1
+  X(support_for_raid1),
+#endif
+
+#ifdef CONFIG_MD_SUPPORT_RAID5
+  X(support_for_raid5),
+#endif
+
+  X(register_md_personality),
+  X(unregister_md_personality),
+  X(partition_name),
+
+#if defined(CONFIG_MD_SUPPORT_RAID1) || defined(CONFIG_MD_SUPPORT_RAID5)
+  X(md_valid_device),
+  X(md_can_reemit),
+#endif
+
+#include <linux/symtab_end.h>
+};
+
+
+static void md_geninit (struct gendisk *gdisk)
+{
+  int i;
+  
+  for(i=0;i<MAX_MD_DEV;i++)
+  {
+    md_blocksizes[i] = 1024;
+    md_gendisk.part[i].start_sect=-1;
+    md_dev[i].pers=NULL;
+#ifdef MD_COUNT_SIZES
+    md_dev[i].smallest_count=md_dev[i].biggest_count=md_dev[i].equal_count=0;
+#endif
+  }
+
+  blksize_size[MAJOR_NR] = md_blocksizes;
+  register_symtab (&md_symbol_table);
+
+  proc_register(&proc_root,
+		&(struct proc_dir_entry)
+	      {
+		PROC_MD, 6, "mdstat",
+		S_IFREG | S_IRUGO, 1, 0, 0,
+	      });
+}
+
+
+int get_md_status (char *page)
+{
+  int sz=0, i, j;
+
+  sz+=sprintf( page+sz, "Personalities : ");
+  for (i=0; i<MAX_PERSONALITY; i++)
+    if (pers[i])
+      sz+=sprintf (page+sz, "[%d %s] ", i, pers[i]->name);
+
+  page[sz-1]='\n';
+
+  sz+=sprintf (page+sz, "read_ahead ");
+  if (read_ahead[MD_MAJOR]==INT_MAX)
+    sz+=sprintf (page+sz, "not set\n");
+  else
+    sz+=sprintf (page+sz, "%d sectors\n", read_ahead[MD_MAJOR]);
+  
+  for (i=0; i<MAX_MD_DEV; i++)
+  {
+    sz+=sprintf (page+sz, "md%d : %sactive", i, md_dev[i].pers ? "" : "in");
+
+    if (md_dev[i].pers)
+      sz+=sprintf (page+sz, " %s", md_dev[i].pers->name);
+
+    for (j=0; j<md_dev[i].nb_dev; j++)
+      sz+=sprintf (page+sz, " %s%s%s",
+		   (devices[i][j].invalid==VALID) ? "" : "(",
+		   partition_name(devices[i][j].dev),
+		   (devices[i][j].invalid==VALID) ? "" : ")");
+    
+    if (md_dev[i].nb_dev)
+      sz+=sprintf (page+sz, " %d blocks", md_size[i]);
+
+    if (!md_dev[i].pers)
+    {
+      sz+=sprintf (page+sz, "\n");
+      continue;
+    }
+
+    if (md_dev[i].pers->max_invalid_dev)
+      sz+=sprintf (page+sz, " maxfault=%ld", MAX_FAULT(md_dev+i));
+
+    if (md_dev[i].pers != pers[(LINEAR>>PERSONALITY_SHIFT)])
+    {
+      sz+=sprintf (page+sz, " %dk chunks", 1<<FACTOR_SHIFT(FACTOR(md_dev+i)));
+#ifdef MD_COUNT_SIZES
+      sz+=sprintf (page+sz, " (%d/%d/%d)",
+		   md_dev[i].smallest_count,
+		   md_dev[i].equal_count,
+		   md_dev[i].biggest_count);
+#endif
+    }
+    sz+=sprintf (page+sz, "\n");
+    sz+=md_dev[i].pers->status (page+sz, i, md_dev+i);
+  }
+  
+  return (sz);
+}
+
+#if defined(CONFIG_MD_SUPPORT_RAID1) || defined(CONFIG_MD_SUPPORT_RAID5)
+
+int md_valid_device (int minor, kdev_t dev, int mode)
+{
+  int i;
+
+  for (i=0; i<md_dev[minor].nb_dev; i++)
+    if (devices[minor][i].dev==dev)
+      break;
+
+  if (i>md_dev[minor].nb_dev)
+  {
+    printk ("Oops, dev %04x not found in md_valid_device\n", dev);
+    return -EINVAL;
+  }
+
+  switch (mode)
+  {
+    case VALID:
+    /* Don't consider INVALID_NEXT as a real invalidation.
+       Maybe that's not the good way to treat such a thing,
+       we'll see. */
+    if (devices[minor][i].invalid==INVALID_ALWAYS)
+    {
+      devices[minor][i].fault_count=0; /* reset fault count */
+      if (md_dev[minor].invalid_dev_count)
+	md_dev[minor].invalid_dev_count--;
+    }
+    break;
+
+    case INVALID:
+    if (devices[minor][i].invalid != VALID )
+      return 0;			/* Don't invalidate twice */
+    
+    if (++devices[minor][i].fault_count > MAX_FAULT(md_dev+minor) &&
+	MAX_FAULT(md_dev+minor)!=0xFF)
+    {
+      /* We cannot tolerate this fault.
+	 So sing a song, and say GoodBye to this device... */
+      
+      mode=INVALID_ALWAYS;
+      md_dev[minor].invalid_dev_count++;
+    }
+    else
+      /* FIXME :
+	 If we reached the max_invalid_dev count, doing one
+	 more invalidation will kill the md_dev. So we choose
+	 not to invalid the physical dev in such a case. But
+	 next access will probably fail... */
+      if (md_dev[minor].invalid_dev_count<=md_dev[minor].pers->max_invalid_dev)
+	mode=INVALID_NEXT;
+      else
+	mode=VALID;
+    break;
+
+    case INVALID_ALWAYS:	/* Only used via MD_INVALID ioctl */
+    md_dev[minor].invalid_dev_count++;
+  }
+  
+  devices[minor][i].invalid=mode;
+  return 0;
+}
+
+
+int md_can_reemit (int minor)
+{
+  /* FIXME :
+     If the device is raid-1 (md_dev[minor].pers->max_invalid_dev=-1),
+     always pretend that we can reemit the request.
+     Problem : if the 2 devices in the pair are dead, will loop
+     forever. Maybe having a per-personality can_reemit function would
+     help. */
+
+  if (!md_dev[minor].pers)
+    return (0);
+  
+  return(md_dev[minor].pers->max_invalid_dev &&
+	 ((md_dev[minor].pers->max_invalid_dev==-1) ?
+	 1 :
+	 md_dev[minor].invalid_dev_count<=md_dev[minor].pers->max_invalid_dev));
+}
+
+#endif
+
+int register_md_personality (int p_num, struct md_personality *p)
+{
+  int i=(p_num >> PERSONALITY_SHIFT);
+
+  if (i >= MAX_PERSONALITY)
+    return -EINVAL;
+
+  if (pers[i])
+    return -EBUSY;
+  
+  pers[i]=p;
+  printk ("%s personality registered\n", p->name);
+  return 0;
+}
+
+int unregister_md_personality (int p_num)
+{
+  int i=(p_num >> PERSONALITY_SHIFT);
+
+  if (i >= MAX_PERSONALITY)
+    return -EINVAL;
+
+  printk ("%s personality unregistered\n", pers[i]->name);
+  pers[i]=NULL;
+  return 0;
+} 
+
+void linear_init (void);
+void raid0_init (void);
+void raid1_init (void);
+void raid5_init (void);
+
+int md_init (void)
+{
+  printk ("md driver %s MAX_MD_DEV=%d, MAX_REAL=%d\n", MD_VERSION, MAX_MD_DEV, MAX_REAL);
+
+  if (register_blkdev (MD_MAJOR, "md", &md_fops))
+  {
+    printk ("Unable to get major %d for md\n", MD_MAJOR);
+    return (-1);
+  }
+
+  blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST;
+  blk_dev[MD_MAJOR].current_request=NULL;
+  read_ahead[MD_MAJOR]=INT_MAX;
+  md_gendisk.next=gendisk_head;
+
+  gendisk_head=&md_gendisk;
+
+#ifdef CONFIG_MD_LINEAR
+  linear_init ();
+#endif
+#ifdef CONFIG_MD_STRIPED
+  raid0_init ();
+#endif
+#ifdef CONFIG_MD_RAID1
+  raid1_init ();
+#endif
+#ifdef CONFIG_MD_RAID5
+  raid5_init ();
+#endif
+  
+  return (0);
+}
diff -ru --new-file /usr/src/linux/drivers/block/raid0.c linux/drivers/block/raid0.c
--- /usr/src/linux/drivers/block/raid0.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/block/raid0.c	Tue Nov 28 08:46:25 1995
@@ -0,0 +1,332 @@
+
+/*
+   raid0.c : Multiple Devices driver for Linux
+             Copyright (C) 1994, 1995 Marc ZYNGIER
+	     <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	     <maz@gloups.fdn.fr>
+
+   RAID-0 management functions.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/md.h>
+#include <linux/raid0.h>
+#include <linux/malloc.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+#define MD_PERSONALITY
+
+#include <linux/blk.h>
+
+/* Build the strip_zone array for one md device.  Member devices of
+   equal size end up in the same zone; each zone stripes over every
+   device that still has blocks left past the zone's start offset.
+   Also records the zone with the smallest size in data->smallest
+   (raid0_run uses it to size the hash table).
+   NOTE(review): the kmalloc() result is not checked -- on failure
+   the loop below writes through a NULL pointer.  Cannot be fixed
+   here without changing this helper's void interface. */
+static void create_strip_zones (int minor, struct md_dev *mddev)
+{
+  int i, j, c=0;
+  int current_offset=0;
+  struct real_dev *smallest_by_zone;
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+  
+  /* One zone per distinct device size : count the distinct sizes */
+  data->nr_strip_zones=1;
+  
+  for (i=1; i<mddev->nb_dev; i++)
+  {
+    for (j=0; j<i; j++)
+      if (devices[minor][i].size==devices[minor][j].size)
+      {
+	c=1;
+	break;
+      }
+
+    if (!c)
+      data->nr_strip_zones++;
+
+    c=0;
+  }
+
+  data->strip_zone=kmalloc (sizeof(struct strip_zone)*data->nr_strip_zones,
+			      GFP_KERNEL);
+
+  data->smallest=NULL;
+  
+  for (i=0; i<data->nr_strip_zones; i++)
+  {
+    data->strip_zone[i].dev_offset=current_offset;
+    smallest_by_zone=NULL;
+    c=0;
+
+    /* A zone contains every device still extending past current_offset */
+    for (j=0; j<mddev->nb_dev; j++)
+      if (devices[minor][j].size>current_offset)
+      {
+	data->strip_zone[i].dev[c++]=devices[minor]+j;
+	if (!smallest_by_zone ||
+	    smallest_by_zone->size > devices[minor][j].size)
+	  smallest_by_zone=devices[minor]+j;
+      }
+
+    data->strip_zone[i].nb_dev=c;
+    /* The zone ends where its smallest member ends, striped over c devs */
+    data->strip_zone[i].size=(smallest_by_zone->size-current_offset)*c;
+
+    if (!data->smallest ||
+	data->smallest->size > data->strip_zone[i].size)
+      data->smallest=data->strip_zone+i;
+
+    /* Zones are stacked contiguously in the md device's block space */
+    data->strip_zone[i].zone_offset=i ? (data->strip_zone[i-1].zone_offset+
+					   data->strip_zone[i-1].size) : 0;
+    current_offset=smallest_by_zone->size;
+  }
+}
+
+/* Start a raid0 array : validate the chunk size against each member
+   device, round member sizes down to a chunk multiple, build the
+   strip zones and the hash table mapping md blocks to zones.
+   Returns 0 on success, -EINVAL when a member is smaller than one
+   chunk, -ENOMEM when an allocation fails (the original code used
+   both kmalloc() results unchecked). */
+static int raid0_run (int minor, struct md_dev *mddev)
+{
+  int cur=0, i=0, size, zone0_size, nb_zone, min;
+  struct raid0_data *data;
+
+  min=1 << FACTOR_SHIFT(FACTOR(mddev));	/* chunk size, in blocks */
+
+  for (i=0; i<mddev->nb_dev; i++)
+    if (devices[minor][i].size<min)
+    {
+      printk ("Cannot use %dk chunks on dev %s\n", min,
+	      partition_name (devices[minor][i].dev));
+      return -EINVAL;
+    }
+  
+  MOD_INC_USE_COUNT;
+  
+  /* Resize devices according to the factor */
+  md_size[minor]=0;
+  
+  for (i=0; i<mddev->nb_dev; i++)
+  {
+    devices[minor][i].size &= ~((1 << FACTOR_SHIFT(FACTOR(mddev))) - 1);
+    md_size[minor] += devices[minor][i].size;
+  }
+
+  mddev->private=kmalloc (sizeof (struct raid0_data), GFP_KERNEL);
+  if (!mddev->private)		/* don't build zones on a NULL pointer */
+  {
+    MOD_DEC_USE_COUNT;
+    return -ENOMEM;
+  }
+  data=(struct raid0_data *) mddev->private;
+  
+  /* NOTE(review): create_strip_zones() also kmallocs without checking
+     the result; fixing that needs a change to its void interface. */
+  create_strip_zones (minor, mddev);
+
+  nb_zone=data->nr_zones=
+    md_size[minor]/data->smallest->size +
+    (md_size[minor]%data->smallest->size ? 1 : 0);
+  
+  data->hash_table=kmalloc (sizeof (struct raid0_hash)*nb_zone, GFP_KERNEL);
+  if (!data->hash_table)
+  {
+    kfree (data->strip_zone);
+    kfree (data);
+    mddev->private=NULL;
+    MOD_DEC_USE_COUNT;
+    return -ENOMEM;
+  }
+
+  /* Fill the hash table : each slot covers `smallest zone size'
+     blocks and records the zone (or pair of zones) it maps to */
+  size=data->strip_zone[cur].size;
+
+  i=0;
+  while (cur<data->nr_strip_zones)
+  {
+    data->hash_table[i].zone0=data->strip_zone+cur;
+
+    if (size>=data->smallest->size)/* If we completely fill the slot */
+    {
+      data->hash_table[i++].zone1=NULL;
+      size-=data->smallest->size;
+
+      if (!size)
+      {
+	if (++cur==data->nr_strip_zones) continue;
+	size=data->strip_zone[cur].size;
+      }
+
+      continue;
+    }
+
+    if (++cur==data->nr_strip_zones) /* Last dev, set unit1 as NULL */
+    {
+      data->hash_table[i].zone1=NULL;
+      continue;
+    }
+
+    zone0_size=size;		/* Here, we use a 2nd dev to fill the slot */
+    size=data->strip_zone[cur].size;
+    data->hash_table[i++].zone1=data->strip_zone+cur;
+    size-=(data->smallest->size - zone0_size);
+  }
+
+  return (0);
+}
+
+
+/* Stop a raid0 array : release the hash table and zone structures
+   built by raid0_run() and drop the module reference. */
+static int raid0_stop (int minor, struct md_dev *mddev)
+{
+  struct raid0_data *data;
+
+  data=(struct raid0_data *) mddev->private;
+  kfree (data->hash_table);
+  kfree (data->strip_zone);
+  kfree (data);
+
+  MOD_DEC_USE_COUNT;
+  return 0;
+}
+
+
+/* Remap one md request onto the real devices of a raid0 array.
+   Paging requests (req->sem set) are redirected whole to the real
+   device; buffer requests are split buffer by buffer, each chunk's
+   buffers being batched into the per-device `pending' queues and
+   issued through make_md_request().  Returns REDIRECTED_REQ in both
+   cases (the request is requeued or freed here).
+   NOTE(review): `hash' and `pending[]' are static, so this path is
+   not reentrant -- presumably acceptable in this era's kernel, but
+   verify before reusing the pattern. */
+static int raid0_map (int minor, struct md_dev *mddev, struct request *req)
+{
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+  static struct raid0_hash *hash;
+  struct strip_zone *zone;
+  struct real_dev *tmp_dev;
+  int i, queue, blk_in_chunk, factor, chunk;
+  long block, rblock;
+  struct buffer_head *bh;
+  static struct request pending[MAX_REAL]={{0, }, };
+
+  factor=FACTOR(mddev);
+
+  while (req->bh || req->sem)
+  {
+    block=req->sector >> 1;	/* 512-byte sectors -> blocks */
+    hash=data->hash_table+(block/data->smallest->size);
+    
+    /* A hash slot spans at most two zones : pick the right one */
+    if (block >= (hash->zone0->size +
+		  hash->zone0->zone_offset))
+    {
+      if (!hash->zone1)
+	printk ("raid0_map : hash->zone1==NULL for block %ld\n", block);
+      zone=hash->zone1;
+    }
+    else
+      zone=hash->zone0;
+    
+    /* Locate block inside its chunk, the chunk inside the zone, and
+       the real device the chunk lives on */
+    blk_in_chunk=block & ((1UL << FACTOR_SHIFT(factor)) - 1);
+    chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
+    tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
+    rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
+
+    if (req->sem)		/* This is a paging request */
+    {
+      req->rq_dev=tmp_dev->dev;
+      req->sector=rblock << 1;	/* back to 512-byte sectors */
+      add_request (blk_dev+MAJOR (tmp_dev->dev), req);
+
+      return REDIRECTED_REQ;
+    }
+
+    queue=tmp_dev - devices[minor];
+    
+				/* This is a buffer request */
+    for (i=blk_in_chunk; i<(1UL << FACTOR_SHIFT(factor)) && req->bh; i++)
+    {
+      bh=req->bh;
+      if (!buffer_locked(bh))
+	printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);
+
+      bh->b_rdev=tmp_dev->dev;
+#if defined (CONFIG_MD_SUPPORT_RAID1)
+      bh->b_reqshared=NULL;
+      bh->b_sister_req=NULL;
+#endif
+      
+      if (!pending[queue].bh)	/* queue empty : start a new request */
+      {
+	pending[queue].rq_dev=tmp_dev->dev;
+	pending[queue].bhtail=pending[queue].bh=bh;
+	pending[queue].sector=rblock*(bh->b_size >> 9);
+	pending[queue].cmd=req->cmd;
+	pending[queue].nr_sectors=bh->b_size >> 9;
+      }
+      else			/* append to the pending request */
+      {
+	pending[queue].bhtail->b_reqnext=bh;
+	pending[queue].bhtail=bh;
+	pending[queue].nr_sectors+=bh->b_size >> 9;
+      }
+
+      end_redirect (req);	/* Separate bh from the request */
+    }
+  }
+  
+  req->rq_status=RQ_INACTIVE;	/* request fully consumed : free it */
+  wake_up (&wait_for_request);
+  make_md_request (pending, mddev->nb_dev);
+  return REDIRECTED_REQ;	/* Since we already set the request free */
+}
+
+
+/* Contribute the raid0 part of the /proc/mdstat output.  The detailed
+   hash-table / zone dump is compiled out unless MD_DEBUG is defined
+   (it is #undef'd right below), so as shipped this returns 0 bytes. */
+static int raid0_status (char *page, int minor, struct md_dev *mddev)
+{
+  int sz=0;
+#undef MD_DEBUG
+#ifdef MD_DEBUG
+  int j, k;
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+  
+  /* Hash table : one [z0/z1] pair per slot */
+  sz+=sprintf (page+sz, "      ");
+  for (j=0; j<data->nr_zones; j++)
+  {
+    sz+=sprintf (page+sz, "[z%d",
+		 data->hash_table[j].zone0-data->strip_zone);
+    if (data->hash_table[j].zone1)
+      sz+=sprintf (page+sz, "/z%d] ",
+		   data->hash_table[j].zone1-data->strip_zone);
+    else
+      sz+=sprintf (page+sz, "] ");
+  }
+  
+  sz+=sprintf (page+sz, "\n");
+  
+  /* Zone table : member devices plus zone/dev offsets and size */
+  for (j=0; j<data->nr_strip_zones; j++)
+  {
+    sz+=sprintf (page+sz, "      z%d=[", j);
+    for (k=0; k<data->strip_zone[j].nb_dev; k++)
+      sz+=sprintf (page+sz, "%s/",
+		   partition_name(data->strip_zone[j].dev[k]->dev));
+    sz--;			/* overwrite the trailing '/' */
+    sz+=sprintf (page+sz, "] zo=%d do=%d s=%d\n",
+		 data->strip_zone[j].zone_offset,
+		 data->strip_zone[j].dev_offset,
+		 data->strip_zone[j].size);
+  }
+#endif
+  return sz;
+}
+
+
+/* Personality operations table registered under the RAID0 slot */
+static struct md_personality raid0_personality=
+{
+  "raid0",			/* name */
+  raid0_map,			/* map */
+  raid0_run,			/* run */
+  raid0_stop,			/* stop */
+  raid0_status,			/* status */
+  NULL,				/* no ioctls */
+  0				/* max_invalid_dev : no fault tolerance */
+};
+
+
+#ifndef MODULE
+
+/* Built-in case : called from md_init() at boot */
+void raid0_init (void)
+{
+  register_md_personality (RAID0, &raid0_personality);
+}
+
+#else
+
+/* Module case : registration fails if the RAID0 slot is taken */
+int init_module (void)
+{
+  return (register_md_personality (RAID0, &raid0_personality));
+}
+
+void cleanup_module (void)
+{
+  if (MOD_IN_USE)		/* still pinned by a running array */
+    printk ("md raid0 : module still busy...\n");
+  else
+    unregister_md_personality (RAID0);
+}
+
+#endif
diff -ru --new-file /usr/src/linux/fs/proc/array.c linux/fs/proc/array.c
--- /usr/src/linux/fs/proc/array.c	Sun Nov 26 20:28:11 1995
+++ linux/fs/proc/array.c	Tue Nov 28 08:33:13 1995
@@ -768,6 +768,7 @@
 extern int get_dma_list(char *);
 extern int get_cpuinfo(char *);
 extern int get_pci_list(char*);
+extern int get_md_status (char *);
 
 static int get_root_array(char * page, int type, char **start, off_t offset, int length)
 {
@@ -820,6 +821,11 @@
 
 		case PROC_IOPORTS:
 			return get_ioport_list(page);
+
+#ifdef CONFIG_BLK_DEV_MD
+		case PROC_MD:
+			return get_md_status(page);
+#endif
 	}
 	return -EBADF;
 }
diff -ru --new-file /usr/src/linux/include/linux/blk.h linux/include/linux/blk.h
--- /usr/src/linux/include/linux/blk.h	Tue Nov 28 08:55:28 1995
+++ linux/include/linux/blk.h	Tue Nov 28 08:58:10 1995
@@ -26,7 +26,7 @@
  * These will have to be changed to be aware of different buffer
  * sizes etc.. It actually needs a major cleanup.
  */
-#ifdef IDE_DRIVER
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
 #define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
 #else
 #define SECTOR_MASK (blksize_size[MAJOR_NR] &&     \
@@ -76,6 +76,9 @@
 #ifdef CONFIG_BLK_DEV_XD
 extern int xd_init(void);
 #endif
+#ifdef CONFIG_BLK_DEV_MD
+extern int md_init(void);
+#endif /* CONFIG_BLK_DEV_MD */
 
 extern void set_device_ro(kdev_t dev,int flag);
 
@@ -144,6 +147,19 @@
 #define DEVICE_ON(device)
 #define DEVICE_OFF(device)
 
+/* Kludge to use the same number for both char and block major numbers */
+#elif  (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#ifndef MD_PERSONALITY
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif
+
 #elif (MAJOR_NR == SCSI_TAPE_MAJOR)
 
 #define DEVICE_NAME "scsitape"
@@ -277,7 +293,7 @@
 
 #endif /* MAJOR_NR == whatever */
 
-#if (MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER)
+#if ((MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER) && !defined(MD_DRIVER))
 
 #ifndef CURRENT
 #define CURRENT (blk_dev[MAJOR_NR].current_request)
@@ -309,8 +325,10 @@
 
 #endif /* DEVICE_TIMEOUT */
 
+#ifndef MD_PERSONALITY
 static void (DEVICE_REQUEST)(void);
-
+#endif
+  
 #ifdef DEVICE_INTR
 #define CLEAR_INTR SET_INTR(NULL)
 #else
@@ -334,7 +352,7 @@
 /* end_request() - SCSI devices have their own version */
 /*               - IDE drivers have their own copy too */
 
-#if ! SCSI_MAJOR(MAJOR_NR)
+#if ! SCSI_MAJOR(MAJOR_NR) || (defined(MD_DRIVER) && !defined(MD_PERSONALITY))
 
 #if defined(_IDE_CD_C) || defined(_TRITON_C) /* shares copy with ide.c */
 void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
@@ -343,6 +361,8 @@
 #ifdef IDE_DRIVER
 void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
 	struct request *req = hwgroup->rq;
+#elif defined(MD_DRIVER)
+static void end_request (int uptodate, struct request * req) {
 #else
 static void end_request(int uptodate) {
 	struct request *req = CURRENT;
@@ -376,7 +396,7 @@
 	}
 #ifdef IDE_DRIVER
 	hwgroup->rq = NULL;
-#else
+#elif !defined(MD_DRIVER)
 	DEVICE_OFF(req->rq_dev);
 	CURRENT = req->next;
 #endif /* IDE_DRIVER */
@@ -387,6 +407,36 @@
 }
 #endif /* ndef _IDE_CD_C */
 #endif /* ! SCSI_MAJOR(MAJOR_NR) */
+
+#ifdef MD_PERSONALITY
+/* Detach the head buffer from a redirected request and advance the
+   request bookkeeping (sector, current_nr_sectors, buffer pointer)
+   to the next buffer, without completing the buffer itself -- the
+   personalities peel buffers off this way and requeue them on the
+   real devices. */
+extern inline void end_redirect (struct request *req)
+{
+  struct buffer_head * bh;
+
+  req->errors = 0;
+  
+  if ((bh = req->bh) != NULL)
+  {
+    req->bh = bh->b_reqnext;	/* unlink the head buffer */
+    bh->b_reqnext = NULL;
+    
+    if ((bh = req->bh) != NULL)	/* more buffers : advance the request */
+    {
+      req->sector += req->current_nr_sectors;
+      req->current_nr_sectors = bh->b_size >> 9;
+      
+      if (req->nr_sectors < req->current_nr_sectors)
+      {
+	req->nr_sectors = req->current_nr_sectors;
+	printk("end_redirect : buffer-list destroyed\n");
+      }
+      
+      req->buffer = bh->b_data;
+      return;
+    }
+  }
+}
+#endif /* MD_PERSONALITY */
 
 #endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
 
diff -ru --new-file /usr/src/linux/include/linux/blkdev.h linux/include/linux/blkdev.h
--- /usr/src/linux/include/linux/blkdev.h	Thu Oct  5 18:31:40 1995
+++ linux/include/linux/blkdev.h	Tue Nov 28 08:56:14 1995
@@ -47,6 +47,14 @@
 extern struct wait_queue * wait_for_request;
 extern void resetup_one_dev(struct gendisk *dev, int drive);
 
+/* md needs those functions to requeue requests */
+extern void add_request(struct blk_dev_struct * dev, struct request * req);
+extern void add_md_request (struct request *req, char *callable_driver);
+extern struct request *get_md_request (int max_req, kdev_t dev,
+				       char *callable_driver,
+				       int *req_major0,
+				       int *req_major1, int n);
+
 extern int * blk_size[MAX_BLKDEV];
 
 extern int * blksize_size[MAX_BLKDEV];
diff -ru --new-file /usr/src/linux/include/linux/fs.h linux/include/linux/fs.h
--- /usr/src/linux/include/linux/fs.h	Tue Nov 28 08:55:28 1995
+++ linux/include/linux/fs.h	Tue Nov 28 08:36:33 1995
@@ -130,6 +130,7 @@
 	unsigned long b_size;		/* block size */
 	unsigned long b_blocknr;	/* block number */
 	kdev_t b_dev;			/* device (B_FREE = free) */
+	kdev_t b_rdev;		        /* Real device */
 	unsigned long b_state;		/* buffer state bitmap (see above) */
 	unsigned int b_count;		/* users using this block */
 	unsigned int b_list;		/* List that this buffer appears */
diff -ru --new-file /usr/src/linux/include/linux/linear.h linux/include/linux/linear.h
--- /usr/src/linux/include/linux/linear.h	Thu Jan  1 01:00:00 1970
+++ linux/include/linux/linear.h	Tue Nov 28 08:33:13 1995
@@ -0,0 +1,17 @@
+
+#ifndef _LINEAR_H
+#define _LINEAR_H
+
+/* One hash slot covers a fixed-size slice of the md device; a slice
+   can span at most two member devices (dev1 NULL otherwise --
+   cf. the analogous raid0_hash).  TODO confirm against linear.c. */
+struct linear_hash
+{
+  struct real_dev *dev0, *dev1;
+};
+
+struct linear_data
+{
+  struct linear_hash *hash_table; /* Dynamically allocated */
+  struct real_dev *smallest;	/* presumably sizes the hash step -- cf. raid0_data */
+  int nr_zones;			/* number of hash table entries */
+};
+
+#endif
diff -ru --new-file /usr/src/linux/include/linux/major.h linux/include/linux/major.h
--- /usr/src/linux/include/linux/major.h	Sun Nov 19 12:26:22 1995
+++ linux/include/linux/major.h	Tue Nov 28 08:33:13 1995
@@ -26,7 +26,7 @@
  *  6 - lp
  *  7 - /dev/vcs*
  *  8 -                        scsi disk
- *  9 - scsi tape
+ *  9 - scsi tape              multiple devices driver
  * 10 - mice
  * 11 -                        scsi cdrom
  * 12 - qic02 tape
@@ -66,6 +66,7 @@
 #define VCS_MAJOR	7
 #define SCSI_DISK_MAJOR	8
 #define SCSI_TAPE_MAJOR	9
+#define MD_MAJOR        9
 #define MOUSE_MAJOR	10
 #define SCSI_CDROM_MAJOR 11
 #define QIC02_TAPE_MAJOR 12
diff -ru --new-file /usr/src/linux/include/linux/md.h linux/include/linux/md.h
--- /usr/src/linux/include/linux/md.h	Thu Jan  1 01:00:00 1970
+++ linux/include/linux/md.h	Tue Nov 28 08:58:10 1995
@@ -0,0 +1,150 @@
+
+/*
+   md.h : Multiple Devices driver for Linux
+          Copyright (C) 1994, 1995 Marc ZYNGIER
+	  <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	  <maz@gloups.fdn.fr>
+	  
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#ifndef _MD_H
+#define _MD_H
+
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/ioctl.h>
+
+#define MD_VERSION "0.32"
+
+/* ioctls */
+#define REGISTER_DEV _IO (MD_MAJOR, 1)
+#define START_MD     _IO (MD_MAJOR, 2)
+#define STOP_MD      _IO (MD_MAJOR, 3)
+#define MD_INVALID   _IO (MD_MAJOR, 4)
+#define MD_VALID     _IO (MD_MAJOR, 5)
+
+/*
+   personalities :
+   Byte 0 : Chunk size factor
+   Byte 1 : Fault tolerance count for each physical device
+            (   0 means no fault tolerance,
+             0xFF means always tolerate faults)
+   Byte 2 : Personality
+   Byte 3 : Reserved.
+ */
+
+#define FAULT_SHIFT       8
+#define PERSONALITY_SHIFT 16
+
+#define FACTOR_MASK       0xFFUL
+#define FAULT_MASK        0xFF00UL
+#define PERSONALITY_MASK  0xFF0000UL
+
+#define MD_RESERVED       0	/* Not used by now */
+#define LINEAR            (1UL << PERSONALITY_SHIFT)
+#define STRIPED           (2UL << PERSONALITY_SHIFT)
+#define STRIPPED          STRIPED /* Long lasting spelling mistake... */
+#define RAID0             STRIPED
+#define RAID1             (3UL << PERSONALITY_SHIFT)
+#define RAID5             (4UL << PERSONALITY_SHIFT)
+#define MAX_PERSONALITY   5
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <sys/types.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+
+#undef MD_COUNT_SIZE		/* Define this to have stats about
+				   chunk size in /proc/mdstat */
+#define MAX_REAL     8		/* Max number of physical dev per md dev */
+#define MAX_MD_DEV   4		/* Max number of md dev */
+
+#define FACTOR(a)         ((a)->repartition & FACTOR_MASK)
+#define MAX_FAULT(a)      (((a)->repartition & FAULT_MASK)>>8)
+#define PERSONALITY(a)    ((a)->repartition & PERSONALITY_MASK)
+
+#define FACTOR_SHIFT(a) (PAGE_SHIFT + (a) - 10)
+
+/* Invalidation modes */
+#define VALID          0
+#define INVALID_NEXT   1
+#define INVALID_ALWAYS 2
+#define INVALID        3	/* Only useful to md_valid_device */
+
+/* Return values from personalities to md driver */
+#define REDIRECTED_BHREQ 0 /* Redirected individual buffers
+			      (shouldn't be used anymore since 0.31) */
+#define REDIRECTED_REQ   1 /* Redirected whole request */
+#define REDIRECT_FAILED -1 /* For RAID-1 */
+
+struct real_dev
+{
+  kdev_t dev;			/* Device number */
+  int size;			/* Device size (in blocks) */
+  int offset;			/* Real device offset (in blocks) in md dev
+				   (only used in linear mode) */
+  struct inode *inode;		/* Lock inode */
+  int fault_count;		/* Fault counter for invalidation */
+  int invalid;			/* Indicate if the device is disabled :
+				   VALID          - valid
+				   INVALID_NEXT   - disabled for next access
+				   INVALID_ALWAYS - permanently disabled
+				   (for redundancy modes only) */
+};
+
+struct md_dev;
+
+/* Operations vector one per RAID level; registered in pers[] via
+   register_md_personality() */
+struct md_personality
+{
+  char *name;			/* printed in register/unregister messages */
+  int (*map)(int minor, struct md_dev *md_dev, struct request *req); /* remap a request onto real devs */
+  int (*run)(int minor, struct md_dev *md_dev);	/* start an array */
+  int (*stop)(int minor, struct md_dev *md_dev); /* stop an array */
+  int (*status)(char *page, int minor, struct md_dev *md_dev); /* /proc/mdstat text */
+  int (*ioctl)(struct inode *inode, struct file *file,
+	       unsigned int cmd, unsigned long arg); /* personality ioctls, may be NULL */
+  int max_invalid_dev;		/* max tolerated dead devs; -1 = always tolerate (raid-1) */
+};
+
+/* Per-minor state of one md device */
+struct md_dev
+{
+  struct md_personality *pers;	/* NULL while the array is not running */
+  int repartition;		/* factor/fault/personality word (see masks above) */
+  int invalid_dev_count;	/* number of currently disabled members */
+  int busy;
+  int nb_dev;			/* number of real devices in the array */
+  void *private;		/* personality state (e.g. struct raid0_data) */
+#ifdef MD_COUNT_SIZE
+  /* chunk-size statistics shown in /proc/mdstat */
+  unsigned int smallest_count;
+  unsigned int biggest_count;
+  unsigned int equal_count;
+#endif
+};
+
+extern struct real_dev devices[MAX_MD_DEV][MAX_REAL];
+extern struct md_dev md_dev[MAX_MD_DEV];
+extern int md_size[MAX_MD_DEV];
+
+extern void make_md_request(struct request *pending, int n);
+extern char *partition_name (kdev_t dev);
+
+#if defined(CONFIG_MD_SUPPORT_RAID1) || defined(CONFIG_MD_SUPPORT_RAID5)
+extern int md_valid_device (int minor, kdev_t dev, int mode);
+extern int md_can_reemit (int minor);
+#endif
+
+extern int register_md_personality (int p_num, struct md_personality *p);
+extern int unregister_md_personality (int p_num);
+
+#endif /* __KERNEL__ */
+#endif /* _MD_H */
diff -ru --new-file /usr/src/linux/include/linux/proc_fs.h linux/include/linux/proc_fs.h
--- /usr/src/linux/include/linux/proc_fs.h	Sun Nov 19 12:26:23 1995
+++ linux/include/linux/proc_fs.h	Tue Nov 28 08:56:14 1995
@@ -34,7 +34,8 @@
 	PROC_KSYMS,
 	PROC_DMA,	
 	PROC_IOPORTS,
-	PROC_PROFILE /* whether enabled or not */
+	PROC_PROFILE, /* whether enabled or not */
+	PROC_MD
 };
 
 enum pid_directory_inos {
diff -ru --new-file /usr/src/linux/include/linux/raid0.h linux/include/linux/raid0.h
--- /usr/src/linux/include/linux/raid0.h	Thu Jan  1 01:00:00 1970
+++ linux/include/linux/raid0.h	Tue Nov 28 08:33:13 1995
@@ -0,0 +1,28 @@
+
+#ifndef _RAID0_H
+#define _RAID0_H
+
+struct strip_zone
+{
+  int zone_offset;		/* Zone offset in md_dev */
+  int dev_offset;		/* Zone offset in real dev */
+  int size;			/* Zone size */
+  int nb_dev;			/* Number of devices attached to the zone */
+  struct real_dev *dev[MAX_REAL]; /* Devices attached to the zone */
+};
+
+/* One entry per smallest-zone-sized slice of the md device; a slice
+   crosses at most one zone boundary, hence the two zone pointers
+   (zone1 is NULL when the slice lies entirely inside zone0). */
+struct raid0_hash
+{
+  struct strip_zone *zone0, *zone1;
+};
+
+struct raid0_data
+{
+  struct raid0_hash *hash_table; /* Dynamically allocated */
+  struct strip_zone *strip_zone; /* This one too */
+  int nr_strip_zones;		/* Entries in strip_zone[] */
+  struct strip_zone *smallest;	/* Zone with the smallest size */
+  int nr_zones;			/* Entries in hash_table[] */
+};
+
+#endif
