
[commit] Integrate Altivec and e500 support into sim/ppc


Hello,

This patch drops in Altivec and e500 support for the PPC simulator.

It was developed under contract from Motorola by matthew green <mrg@redhat.com>, with fixes from Aldy Hernandez <aldyh@redhat.com>, Jim Wilson <wilson@redhat.com>, and Nick Clifton <nickc@redhat.com>.

Since, at present, the PPC simulator isn't multi-sim (it needs to switch to sim/igen/ for that), the ISA being simulated is selected by the target tuple: powerpc-eabi, powerpc-eabispe (e500), or powerpc-eabialtivec. Alternatively, --enable-sim-fpu={altivec,e500} can be used (I think :-)

Please note that powerpc-eabispe-gdb currently panics (I've posted a fix).

committed,
Andrew
2003-06-22  Andrew Cagney  <cagney@redhat.com>

	Written by matthew green <mrg@redhat.com>, with fixes from Aldy
	Hernandez <aldyh@redhat.com>, Jim Wilson <wilson@redhat.com>, and
	Nick Clifton <nickc@redhat.com>.
	
	* ppc-instructions: Include altivec.igen and e500.igen.
	(model_busy, model_data): Add vr_busy and vscr_busy.
	(model_trace_release): Trace vr_busy and vscr_busy.
	(model_new_cycle): Update vr_busy and vscr_busy.
	(model_make_busy): Update vr_busy and vscr_busy.
	* registers.c (register_description): Add Altivec and e500
	registers.
	* psim.c (psim_read_register, psim_write_register): Handle Altivec
	and e500 registers.
	* ppc-spr-table (SPEFSCR): Add VRSAVE and SPEFSCR registers.
	* configure.in (sim_filter): When *altivec* add "av".  When *spe*
	or *simd* add e500.
	(sim_float): When *altivec* define WITH_ALTIVEC.  When *spe* add
	WITH_E500.
	* configure: Re-generate.
	* e500.igen, altivec.igen: New files.
	* e500_expression.h, altivec_expression.h: New files.
	* idecode_expression.h: Update copyright.  Include
	"e500_expression.h" and "altivec_expression.h".
	* e500_registers.h, altivec_registers.h: New files.
	* registers.h: Update copyright.  Include "e500_registers.h" and
	"altivec_registers.h".
	(registers): Add Altivec and e500 specific registers.
	* Makefile.in (IDECODE_H): Add "idecode_e500.h" and
	"idecode_altivec.h".
	(REGISTERS_H): Add "e500_registers.h" and "altivec_registers.h".
	(tmp-igen): Add dependencies on altivec.igen and e500.igen.

Index: Makefile.in
===================================================================
RCS file: /cvs/src/src/sim/ppc/Makefile.in,v
retrieving revision 1.9
diff -u -r1.9 Makefile.in
--- Makefile.in	20 Jun 2003 03:59:33 -0000	1.9
+++ Makefile.in	22 Jun 2003 16:23:44 -0000
@@ -187,13 +187,15 @@
 
 IDECODE_H = \
 	idecode.h \
-	idecode_expression.h \
+	idecode_expression.h e500_expression.h altivec_expression.h \
 	idecode_branch.h \
 	idecode_fields.h \
 	icache.h
 
 REGISTERS_H = \
 	registers.h \
+	e500_registers.h \
+	altivec_registers.h \
 	spreg.h
 
 CPU_H = \
@@ -459,8 +461,7 @@
 	$(SHELL) $(srcdir)/../../move-if-change tmp-spreg.c spreg.c
 	touch tmp-dgen
 
-
-tmp-igen: igen ppc-instructions $(IGEN_OPCODE_RULES) $(srcdir)/../../move-if-change tmp-ld-decode tmp-ld-cache tmp-ld-insn tmp-filter
+tmp-igen: igen $(srcdir)/ppc-instructions $(srcdir)/altivec.igen $(srcdir)/e500.igen $(IGEN_OPCODE_RULES) $(srcdir)/../../move-if-change tmp-ld-decode tmp-ld-cache tmp-ld-insn tmp-filter
 	./igen	$(IGEN_FLAGS) \
 		-o $(srcdir)/$(IGEN_OPCODE_RULES) \
 		-I $(srcdir) -i $(srcdir)/ppc-instructions \
Index: altivec.igen
===================================================================
RCS file: altivec.igen
diff -N altivec.igen
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ altivec.igen	22 Jun 2003 16:23:46 -0000
@@ -0,0 +1,2356 @@
+# Altivec instruction set, for PSIM, the PowerPC simulator.
+
+# Copyright 2003 Free Software Foundation, Inc.
+
+# Contributed by Red Hat Inc; developed under contract from Motorola.
+# Written by matthew green <mrg@redhat.com>.
+
+# This file is part of GDB.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.  */
+
+
+#
+# Motorola AltiVec instructions.
+#
+
+:cache:av:::VS:VS:
+:cache:av::vreg *:vS:VS:(cpu_registers(processor)->altivec.vr + VS)
+:cache:av::unsigned32:VS_BITMASK:VS:(1 << VS)
+:cache:av:::VA:VA:
+:cache:av::vreg *:vA:VA:(cpu_registers(processor)->altivec.vr + VA)
+:cache:av::unsigned32:VA_BITMASK:VA:(1 << VA)
+:cache:av:::VB:VB:
+:cache:av::vreg *:vB:VB:(cpu_registers(processor)->altivec.vr + VB)
+:cache:av::unsigned32:VB_BITMASK:VB:(1 << VB)
+:cache:av:::VC:VC:
+:cache:av::vreg *:vC:VC:(cpu_registers(processor)->altivec.vr + VC)
+:cache:av::unsigned32:VC_BITMASK:VC:(1 << VC)
+
+# Flags for model.h
+::model-macro:::
+	#define PPC_INSN_INT_VR(OUT_MASK, IN_MASK, OUT_VMASK, IN_VMASK) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_int_vr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, OUT_VMASK, IN_VMASK); \
+		} while (0)
+
+	#define PPC_INSN_VR(OUT_VMASK, IN_VMASK) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_vr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK); \
+		} while (0)
+
+	#define PPC_INSN_VR_CR(OUT_VMASK, IN_VMASK, CR_MASK) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_vr_cr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK, CR_MASK); \
+		} while (0)
+
+	#define PPC_INSN_VR_VSCR(OUT_VMASK, IN_VMASK) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_vr_vscr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK); \
+		} while (0)
+
+	#define PPC_INSN_FROM_VSCR(VR_MASK) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_from_vscr(MY_INDEX, cpu_model(processor), VR_MASK); \
+		} while (0)
+
+	#define PPC_INSN_TO_VSCR(VR_MASK) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_to_vscr(MY_INDEX, cpu_model(processor), VR_MASK); \
+		} while (0)
+
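+# Example (illustration only, not generated code): a semantic body that
+# writes vD and reads vA and vB reports its register usage to the timing
+# model with
+#
+#   PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+#
+# where the *_BITMASK values are the single-bit masks computed by the
+# :cache: entries above.
+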
+# Trace waiting for AltiVec registers to become available
+void::model-static::model_trace_altivec_busy_p:model_data *model_ptr, unsigned32 vr_busy
+	int i;
+	if (vr_busy) {
+	  vr_busy &= model_ptr->vr_busy;
+	  for(i = 0; i < 32; i++) {
+	    if (((1 << i) & vr_busy) != 0) {
+	      TRACE(trace_model, ("Waiting for register v%d.\n", i));
+	    }
+	  }
+	}
+	if (model_ptr->vscr_busy)
+	  TRACE(trace_model, ("Waiting for VSCR\n"));
+
+# Trace making AltiVec registers busy
+void::model-static::model_trace_altivec_make_busy:model_data *model_ptr, unsigned32 vr_mask, unsigned32 cr_mask
+	int i;
+	if (vr_mask) {
+	  for(i = 0; i < 32; i++) {
+	    if (((1 << i) & vr_mask) != 0) {
+	      TRACE(trace_model, ("Register v%d is now busy.\n", i));
+	    }
+	  }
+	}
+	if (cr_mask) {
+	  for(i = 0; i < 8; i++) {
+	    if (((1 << i) & cr_mask) != 0) {
+	      TRACE(trace_model, ("Register cr%d is now busy.\n", i));
+	    }
+	  }
+	}
+
+# Schedule an AltiVec instruction that takes integer input registers and produces output registers
+void::model-function::ppc_insn_int_vr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned32 out_vmask, const unsigned32 in_vmask
+	const unsigned32 int_mask = out_mask | in_mask;
+	const unsigned32 vr_mask = out_vmask | in_vmask;
+	model_busy *busy_ptr;
+
+	if ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
+	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+
+	  while ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
+	    if (WITH_TRACE && ppc_trace[trace_model]) {
+	      model_trace_busy_p(model_ptr, int_mask, 0, 0, PPC_NO_SPR);
+	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+	    }
+
+	    model_ptr->nr_stalls_data++;
+	    model_new_cycle(model_ptr);
+	  }
+	}
+
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	model_ptr->int_busy |= out_mask;
+	busy_ptr->int_busy |= out_mask;
+	model_ptr->vr_busy |= out_vmask;
+	busy_ptr->vr_busy |= out_vmask;
+
+	if (out_mask)
+	  busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 1 : 2;
+
+	if (out_vmask)
+	  busy_ptr->nr_writebacks += (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
+
+	if (WITH_TRACE && ppc_trace[trace_model]) {
+	  model_trace_make_busy(model_ptr, out_mask, 0, 0);
+	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+	}
+
+# Schedule an AltiVec instruction that takes vector input registers and produces vector output registers
+void::model-function::ppc_insn_vr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask
+	const unsigned32 vr_mask = out_vmask | in_vmask;
+	model_busy *busy_ptr;
+
+	if (model_ptr->vr_busy & vr_mask) {
+	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+
+	  while (model_ptr->vr_busy & vr_mask) {
+	    if (WITH_TRACE && ppc_trace[trace_model]) {
+	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+	    }
+
+	    model_ptr->nr_stalls_data++;
+	    model_new_cycle(model_ptr);
+	  }
+	}
+
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	model_ptr->vr_busy |= out_vmask;
+	busy_ptr->vr_busy |= out_vmask;
+	if (out_vmask)
+	  busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
+
+	if (WITH_TRACE && ppc_trace[trace_model]) {
+	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+	}
+
+# Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches CR
+void::model-function::ppc_insn_vr_cr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask, const unsigned32 cr_mask
+	const unsigned32 vr_mask = out_vmask | in_vmask;
+	model_busy *busy_ptr;
+
+	if ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
+	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+
+	  while ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
+	    if (WITH_TRACE && ppc_trace[trace_model]) {
+	      model_trace_busy_p(model_ptr, 0, 0, cr_mask, PPC_NO_SPR);
+	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+	    }
+
+	    model_ptr->nr_stalls_data++;
+	    model_new_cycle(model_ptr);
+	  }
+	}
+
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	model_ptr->cr_fpscr_busy |= cr_mask;
+	busy_ptr->cr_fpscr_busy |= cr_mask;
+	model_ptr->vr_busy |= out_vmask;
+	busy_ptr->vr_busy |= out_vmask;
+
+	if (out_vmask)
+	  busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
+
+	if (cr_mask)
+	  busy_ptr->nr_writebacks++;
+
+	if (WITH_TRACE && ppc_trace[trace_model])
+	  model_trace_altivec_make_busy(model_ptr, vr_mask, cr_mask);
+
+# Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches VSCR
+void::model-function::ppc_insn_vr_vscr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask
+	const unsigned32 vr_mask = out_vmask | in_vmask;
+	model_busy *busy_ptr;
+
+	if ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+
+	  while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+	    if (WITH_TRACE && ppc_trace[trace_model])
+	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+
+	    model_ptr->nr_stalls_data++;
+	    model_new_cycle(model_ptr);
+	  }
+	}
+
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	model_ptr->vr_busy |= out_vmask;
+	busy_ptr->vr_busy |= out_vmask;
+	model_ptr->vscr_busy = 1;
+	busy_ptr->vscr_busy = 1;
+
+	if (out_vmask)
+	  busy_ptr->nr_writebacks = 1 + ((PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2);
+
+	if (WITH_TRACE && ppc_trace[trace_model])
+	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+
+# Schedule an MFVSCR instruction that takes the VSCR as input and produces an AltiVec output register
+void::model-function::ppc_insn_from_vscr:itable_index index, model_data *model_ptr, const unsigned32 vr_mask
+	model_busy *busy_ptr;
+
+	while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+	  if (WITH_TRACE && ppc_trace[trace_model])
+	    model_trace_altivec_busy_p(model_ptr, vr_mask);
+
+	  model_ptr->nr_stalls_data++;
+	  model_new_cycle(model_ptr);
+	}
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	model_ptr->cr_fpscr_busy |= vr_mask;
+	busy_ptr->cr_fpscr_busy |= vr_mask;
+
+	if (vr_mask)
+	  busy_ptr->nr_writebacks = 1;
+
+	model_ptr->vr_busy |= vr_mask;
+	if (WITH_TRACE && ppc_trace[trace_model])
+	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+
+# Schedule an MTVSCR instruction that takes one AltiVec input register and produces a VSCR output
+void::model-function::ppc_insn_to_vscr:itable_index index, model_data *model_ptr, const unsigned32 vr_mask
+	model_busy *busy_ptr;
+
+	while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+	  if (WITH_TRACE && ppc_trace[trace_model])
+	    model_trace_altivec_busy_p(model_ptr, vr_mask);
+
+	  model_ptr->nr_stalls_data++;
+	  model_new_cycle(model_ptr);
+	}
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	busy_ptr->vscr_busy = 1;
+	model_ptr->vscr_busy = 1;
+	busy_ptr->nr_writebacks = 1;
+
+	TRACE(trace_model,("Making VSCR busy.\n"));
+
+# The following are the AltiVec saturate helpers
+
+signed8::model-function::altivec_signed_saturate_8:signed16 val, int *sat
+	  signed8 rv;
+	  if (val > 127) {
+	    rv = 127;
+	    *sat = 1;
+	  } else if (val < -128) {
+	    rv = -128;
+	    *sat = 1;
+	  } else {
+	    rv = val;
+	    *sat = 0;
+	  }
+	  return rv;
+
+signed16::model-function::altivec_signed_saturate_16:signed32 val, int *sat
+	  signed16 rv;
+	  if (val > 32767) {
+	    rv = 32767;
+	    *sat = 1;
+	  } else if (val < -32768) {
+	    rv = -32768;
+	    *sat = 1;
+	  } else {
+	    rv = val;
+	    *sat = 0;
+	  }
+	  return rv;
+
+signed32::model-function::altivec_signed_saturate_32:signed64 val, int *sat
+	  signed32 rv;
+	  if (val > 2147483647) {
+	    rv = 2147483647;
+	    *sat = 1;
+	  } else if (val < -2147483648LL) {
+	    rv = -2147483648LL;
+	    *sat = 1;
+	  } else {
+	    rv = val;
+	    *sat = 0;
+	  }
+	  return rv;
+
+unsigned8::model-function::altivec_unsigned_saturate_8:signed16 val, int *sat
+	  unsigned8 rv;
+	  if (val > 255) {
+	    rv = 255;
+	    *sat = 1;
+	  } else if (val < 0) {
+	    rv = 0;
+	    *sat = 1;
+	  } else {
+	    rv = val;
+	    *sat = 0;
+	  }
+	  return rv;
+
+unsigned16::model-function::altivec_unsigned_saturate_16:signed32 val, int *sat
+	  unsigned16 rv;
+	  if (val > 65535) {
+	    rv = 65535;
+	    *sat = 1;
+	  } else if (val < 0) {
+	    rv = 0;
+	    *sat = 1;
+	  } else {
+	    rv = val;
+	    *sat = 0;
+	  }
+	  return rv;
+
+unsigned32::model-function::altivec_unsigned_saturate_32:signed64 val, int *sat
+	  unsigned32 rv;
+	  if (val > 4294967295LL) {
+	    rv = 4294967295LL;
+	    *sat = 1;
+	  } else if (val < 0) {
+	    rv = 0;
+	    *sat = 1;
+	  } else {
+	    rv = val;
+	    *sat = 0;
+	  }
+	  return rv;
+
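+# Worked example (illustration only): the helpers clamp a wide intermediate
+# result into the target width and report whether clamping occurred, e.g.
+#
+#   int sat;
+#   signed8 r = altivec_signed_saturate_8(200, &sat);  /* r = 127, sat = 1 */
+#   r = altivec_signed_saturate_8(-5, &sat);           /* r = -5,  sat = 0 */
+#
+# The accumulated sat flags feed ALTIVEC_SET_SAT to update VSCR[SAT].
+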
+#
+# Load instructions, 6-14 ... 6-22.
+#
+
+0.31,6.VS,11.RA,16.RB,21.7,31.0:X:av:lvebx %VD, %RA, %RB:Load Vector Element Byte Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned_word eb;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	eb = EA & 0xf;
+	(*vS).b[AV_BINDEX(eb)] = MEM(unsigned, EA, 1);
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.39,31.0:X:av:lvehx %VD, %RA, %RB:Load Vector Element Half Word Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned_word eb;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~1;
+	eb = EA & 0xf;
+	(*vS).h[AV_HINDEX(eb/2)] = MEM(unsigned, EA, 2);
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.71,31.0:X:av:lvewx %VD, %RA, %RB:Load Vector Element Word Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned_word eb;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~3;
+	eb = EA & 0xf;
+	(*vS).w[eb/4] = MEM(unsigned, EA, 4);
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+
+0.31,6.VS,11.RA,16.RB,21.6,31.0:X:av:lvsl %VD, %RA, %RB:Load Vector for Shift Left
+	unsigned_word b;
+	unsigned_word addr;
+	int i, j;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	addr = b + *rB;
+	j = addr & 0xf;
+	for (i = 0; i < 16; i++)
+	  if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	    (*vS).b[AV_BINDEX(i)] = j++;
+	  else
+	    (*vS).b[AV_BINDEX(15 - i)] = j++;
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.38,31.0:X:av:lvsr %VD, %RA, %RB:Load Vector for Shift Right
+	unsigned_word b;
+	unsigned_word addr;
+	int i, j;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	addr = b + *rB;
+	j = 0x10 - (addr & 0xf);
+	for (i = 0; i < 16; i++)
+	  if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	    (*vS).b[AV_BINDEX(i)] = j++;
+	  else
+	    (*vS).b[AV_BINDEX(15 - i)] = j++;
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
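+# Example: for an effective address with (addr & 0xf) == 3, lvsl fills vD
+# with the bytes 0x03, 0x04, ... 0x12, and lvsr with 0x0d, 0x0e, ... 0x1c.
+# These are the permute-control vectors conventionally fed to vperm to
+# extract unaligned data from two aligned lvx loads.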
+
+0.31,6.VS,11.RA,16.RB,21.103,31.0:X:av:lvx %VD, %RA, %RB:Load Vector Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+	  (*vS).w[0] = MEM(unsigned, EA + 0, 4);
+	  (*vS).w[1] = MEM(unsigned, EA + 4, 4);
+	  (*vS).w[2] = MEM(unsigned, EA + 8, 4);
+	  (*vS).w[3] = MEM(unsigned, EA + 12, 4);
+	} else {
+	  (*vS).w[0] = MEM(unsigned, EA + 12, 4);
+	  (*vS).w[1] = MEM(unsigned, EA + 8, 4);
+	  (*vS).w[2] = MEM(unsigned, EA + 4, 4);
+	  (*vS).w[3] = MEM(unsigned, EA + 0, 4);
+	}
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.359,31.0:X:av:lvxl %VD, %RA, %RB:Load Vector Indexed LRU
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+	  (*vS).w[0] = MEM(unsigned, EA + 0, 4);
+	  (*vS).w[1] = MEM(unsigned, EA + 4, 4);
+	  (*vS).w[2] = MEM(unsigned, EA + 8, 4);
+	  (*vS).w[3] = MEM(unsigned, EA + 12, 4);
+	} else {
+	  (*vS).w[0] = MEM(unsigned, EA + 12, 4);
+	  (*vS).w[1] = MEM(unsigned, EA + 8, 4);
+	  (*vS).w[2] = MEM(unsigned, EA + 4, 4);
+	  (*vS).w[3] = MEM(unsigned, EA + 0, 4);
+	}
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
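+# Note that EA is masked with ~0xf, so lvx/lvxl always load the aligned
+# quadword containing the requested address (e.g. EA = 0x1007 loads the
+# 16 bytes at 0x1000); unaligned access is synthesised with lvsl/lvsr
+# and vperm.
+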
+#
+# Move to/from VSCR instructions, 6-23 & 6-24.
+#
+
+0.4,6.VS,11.0,16.0,21.1540:VX:av:mfvscr %VS:Move from Vector Status and Control Register
+	(*vS).w[0] = 0;
+	(*vS).w[1] = 0;
+	(*vS).w[2] = 0;
+	(*vS).w[3] = VSCR;
+	PPC_INSN_FROM_VSCR(VS_BITMASK);
+
+0.4,6.0,11.0,16.VB,21.1604:VX:av:mtvscr %VB:Move to Vector Status and Control Register
+	VSCR = (*vB).w[3];
+	PPC_INSN_TO_VSCR(VB_BITMASK);
+
+#
+# Store instructions, 6-25 ... 6-29.
+#
+
+0.31,6.VS,11.RA,16.RB,21.135,31.0:X:av:stvebx %VD, %RA, %RB:Store Vector Element Byte Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned_word eb;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	eb = EA & 0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	  STORE(EA, 1, (*vS).b[eb]);
+	else
+	  STORE(EA, 1, (*vS).b[15-eb]);
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.167,31.0:X:av:stvehx %VD, %RA, %RB:Store Vector Element Half Word Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned_word eb;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~1;
+	eb = EA & 0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	  STORE(EA, 2, (*vS).h[eb/2]);
+	else
+	  STORE(EA, 2, (*vS).h[7-eb]);
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.199,31.0:X:av:stvewx %VD, %RA, %RB:Store Vector Element Word Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned_word eb;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~3;
+	eb = EA & 0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	  STORE(EA, 4, (*vS).w[eb/4]);
+	else
+	  STORE(EA, 4, (*vS).w[3-(eb/4)]);
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.231,31.0:X:av:stvx %VD, %RA, %RB:Store Vector Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+	  STORE(EA + 0, 4, (*vS).w[0]);
+	  STORE(EA + 4, 4, (*vS).w[1]);
+	  STORE(EA + 8, 4, (*vS).w[2]);
+	  STORE(EA + 12, 4, (*vS).w[3]);
+	} else {
+	  STORE(EA + 12, 4, (*vS).w[0]);
+	  STORE(EA + 8, 4, (*vS).w[1]);
+	  STORE(EA + 4, 4, (*vS).w[2]);
+	  STORE(EA + 0, 4, (*vS).w[3]);
+	}
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+0.31,6.VS,11.RA,16.RB,21.487,31.0:X:av:stvxl %VD, %RA, %RB:Store Vector Indexed LRU
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = (b + *rB) & ~0xf;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+	  STORE(EA + 0, 4, (*vS).w[0]);
+	  STORE(EA + 4, 4, (*vS).w[1]);
+	  STORE(EA + 8, 4, (*vS).w[2]);
+	  STORE(EA + 12, 4, (*vS).w[3]);
+	} else {
+	  STORE(EA + 12, 4, (*vS).w[0]);
+	  STORE(EA + 8, 4, (*vS).w[1]);
+	  STORE(EA + 4, 4, (*vS).w[2]);
+	  STORE(EA + 0, 4, (*vS).w[3]);
+	}
+	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+
+#
+# Vector Add instructions, 6-30 ... 6-40.
+#
+
+0.4,6.VS,11.VA,16.VB,21.384:VX:av:vaddcuw %VD, %VA, %VB:Vector Add Carryout Unsigned Word
+	unsigned64 temp;
+	int i;
+	for (i = 0; i < 4; i++) {
+	  temp = (unsigned64)(*vA).w[i] + (unsigned64)(*vB).w[i];
+	  (*vS).w[i] = temp >> 32;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
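+# Example: for word elements 0xffffffff + 0x00000001 the 64-bit sum is
+# 0x100000000, so (temp >> 32) writes a carry-out of 1; 0x7fffffff + 1
+# produces no carry and writes 0.
+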
+0.4,6.VS,11.VA,16.VB,21.10:VX:av:vaddfp %VD, %VA, %VB:Vector Add Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu a, b, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_add (&d, &a, &b);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+	
+0.4,6.VS,11.VA,16.VB,21.768:VX:av:vaddsbs %VD, %VA, %VB:Vector Add Signed Byte Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  temp = (signed16)(signed8)(*vA).b[i] + (signed16)(signed8)(*vB).b[i];
+	  (*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.832:VX:av:vaddshs %VD, %VA, %VB:Vector Add Signed Half Word Saturate
+	int i, sat, tempsat;
+	signed32 temp, a, b;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  a = (signed32)(signed16)(*vA).h[i];
+	  b = (signed32)(signed16)(*vB).h[i];
+	  temp = a + b;
+	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.896:VX:av:vaddsws %VD, %VA, %VB:Vector Add Signed Word Saturate
+	int i, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  temp = (signed64)(signed32)(*vA).w[i] + (signed64)(signed32)(*vB).w[i];
+	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.0:VX:av:vaddubm %VD, %VA, %VB:Vector Add Unsigned Byte Modulo
+	int i;
+	for (i = 0; i < 16; i++)
+	  (*vS).b[i] = ((*vA).b[i] + (*vB).b[i]) & 0xff;
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.512:VX:av:vaddubs %VD, %VA, %VB:Vector Add Unsigned Byte Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  temp = (signed16)(unsigned8)(*vA).b[i] + (signed16)(unsigned8)(*vB).b[i];
+	  (*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.64:VX:av:vadduhm %VD, %VA, %VB:Vector Add Unsigned Half Word Modulo
+	int i;
+	for (i = 0; i < 8; i++)
+	  (*vS).h[i] = ((*vA).h[i] + (*vB).h[i]) & 0xffff;
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.576:VX:av:vadduhs %VD, %VA, %VB:Vector Add Unsigned Half Word Saturate
+	int i, sat, tempsat;
+	signed32 temp;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  temp = (signed32)(unsigned16)(*vA).h[i] + (signed32)(unsigned16)(*vB).h[i];
+	  (*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.128:VX:av:vadduwm %VD, %VA, %VB:Vector Add Unsigned Word Modulo
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (*vA).w[i] + (*vB).w[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.640:VX:av:vadduws %VD, %VA, %VB:Vector Add Unsigned Word Saturate
+	int i, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  temp = (signed64)(unsigned32)(*vA).w[i] + (signed64)(unsigned32)(*vB).w[i];
+	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+#
+# Vector AND instructions, 6-41, 6-42
+#
+
+0.4,6.VS,11.VA,16.VB,21.1028:VX:av:vand %VD, %VA, %VB:Vector Logical AND
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (*vA).w[i] & (*vB).w[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1092:VX:av:vandc %VD, %VA, %VB:Vector Logical AND with Complement
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (*vA).w[i] & ~((*vB).w[i]);
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Average instructions, 6-43, 6-48
+#
+
+0.4,6.VS,11.VA,16.VB,21.1282:VX:av:vavgsb %VD, %VA, %VB:Vector Average Signed Byte
+	int i;
+	signed16 temp, a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (signed16)(signed8)(*vA).b[i];
+	  b = (signed16)(signed8)(*vB).b[i];
+	  temp = a + b + 1;
+	  (*vS).b[i] = (temp >> 1) & 0xff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1346:VX:av:vavgsh %VD, %VA, %VB:Vector Average Signed Half Word
+	int i;
+	signed32 temp, a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (signed32)(signed16)(*vA).h[i];
+	  b = (signed32)(signed16)(*vB).h[i];
+	  temp = a + b + 1;
+	  (*vS).h[i] = (temp >> 1) & 0xffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1410:VX:av:vavgsw %VD, %VA, %VB:Vector Average Signed Word
+	int i;
+	signed64 temp, a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (signed64)(signed32)(*vA).w[i];
+	  b = (signed64)(signed32)(*vB).w[i];
+	  temp = a + b + 1;
+	  (*vS).w[i] = (temp >> 1) & 0xffffffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1026:VX:av:vavgub %VD, %VA, %VB:Vector Average Unsigned Byte
+	int i;
+	unsigned16 temp, a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  temp = a + b + 1;
+	  (*vS).b[i] = (temp >> 1) & 0xff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1090:VX:av:vavguh %VD, %VA, %VB:Vector Average Unsigned Half Word
+	int i;
+	unsigned32 temp, a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  temp = a + b + 1;
+	  (*vS).h[i] = (temp >> 1) & 0xffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1154:VX:av:vavguw %VD, %VA, %VB:Vector Average Unsigned Word
+	int i;
+	unsigned64 temp, a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  temp = a + b + 1;
+	  (*vS).w[i] = (temp >> 1) & 0xffffffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
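+# Note: the "+ 1" before the shift makes each average round up, e.g. for
+# vavgub the bytes 1 and 2 average to (1 + 2 + 1) >> 1 = 2.
+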
+#
+# Vector Fixed Point Convert instructions, 6-49, 6-50
+#
+
+0.4,6.VS,11.UIMM,16.VB,21.842:VX:av:vcfsx %VD, %VB, %UIMM:Vector Convert From Signed Fixed-Point Word
+	int i;
+	unsigned32 f;
+	sim_fpu b, div, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_u32to (&div, 1 << UIMM, sim_fpu_round_default);	/* divisor = 2^UIMM */
+	  sim_fpu_div (&d, &b, &div);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.UIMM,16.VB,21.778:VX:av:vcfux %VD, %VB, %UIMM:Vector Convert From Unsigned Fixed-Point Word
+	int i;
+	unsigned32 f;
+	sim_fpu b, d, div;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_u32to (&div, 1 << UIMM, sim_fpu_round_default);	/* divisor = 2^UIMM */
+	  sim_fpu_div (&d, &b, &div);
+	  sim_fpu_to32u (&f, &d, sim_fpu_round_default);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
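+# Example: the divisor is 2^UIMM, so with UIMM = 2 the integer word 16
+# converts to the floating-point value 4.0; UIMM = 0 leaves the value
+# unscaled.
+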
+#
+# Vector Compare instructions, 6-51 ... 6-64
+#
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.966:VXR:av:vcmpbfpx %VD, %VA, %VB:Vector Compare Bounds Floating Point
+	int i, le, ge;
+	sim_fpu a, b, nb;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_neg (&nb, &b);
+	  le = sim_fpu_is_le(&a, &b);
+	  ge = sim_fpu_is_ge(&a, &nb);	/* in bounds iff -b <= a <= b */
+	  (*vS).w[i] = (le ? 0 : 1 << 31) | (ge ? 0 : 1 << 30);
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 0);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.198:VXR:av:vcmpeqfpx %VD, %VA, %VB:Vector Compare Equal-to Floating Point
+	int i;
+	sim_fpu a, b;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  if (sim_fpu_is_eq(&a, &b))
+	    (*vS).w[i] = 0xffffffff;
+	  else
+	    (*vS).w[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.6:VXR:av:vcmpequbx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Byte
+	int i;
+	for (i = 0; i < 16; i++)
+	  if ((*vA).b[i] == (*vB).b[i])
+	    (*vS).b[i] = 0xff;
+	  else
+	    (*vS).b[i] = 0;
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.70:VXR:av:vcmpequhx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Half Word
+	int i;
+	for (i = 0; i < 8; i++)
+	  if ((*vA).h[i] == (*vB).h[i])
+	    (*vS).h[i] = 0xffff;
+	  else
+	    (*vS).h[i] = 0;
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.134:VXR:av:vcmpequwx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Word
+	int i;
+	for (i = 0; i < 4; i++)
+	  if ((*vA).w[i] == (*vB).w[i])
+	    (*vS).w[i] = 0xffffffff;
+	  else
+	    (*vS).w[i] = 0;
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.454:VXR:av:vcmpgefpx %VD, %VA, %VB:Vector Compare Greater-Than-or-Equal-to Floating Point
+	int i;
+	sim_fpu a, b;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  if (sim_fpu_is_ge(&a, &b))
+	    (*vS).w[i] = 0xffffffff;
+	  else
+	    (*vS).w[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.710:VXR:av:vcmpgtfpx %VD, %VA, %VB:Vector Compare Greater-Than Floating Point
+	int i;
+	sim_fpu a, b;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  if (sim_fpu_is_gt(&a, &b))
+	    (*vS).w[i] = 0xffffffff;
+	  else
+	    (*vS).w[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.774:VXR:av:vcmpgtsbx %VD, %VA, %VB:Vector Compare Greater-Than Signed Byte
+	int i;
+	signed8 a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  if (a > b)
+	    (*vS).b[i] = 0xff;
+	  else
+	    (*vS).b[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.838:VXR:av:vcmpgtshx %VD, %VA, %VB:Vector Compare Greater-Than Signed Half Word
+	int i;
+	signed16 a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  if (a > b)
+	    (*vS).h[i] = 0xffff;
+	  else
+	    (*vS).h[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.902:VXR:av:vcmpgtswx %VD, %VA, %VB:Vector Compare Greater-Than Signed Word
+	int i;
+	signed32 a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  if (a > b)
+	    (*vS).w[i] = 0xffffffff;
+	  else
+	    (*vS).w[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.518:VXR:av:vcmpgtubx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Byte
+	int i;
+	unsigned8 a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  if (a > b)
+	    (*vS).b[i] = 0xff;
+	  else
+	    (*vS).b[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.582:VXR:av:vcmpgtuhx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Half Word
+	int i;
+	unsigned16 a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  if (a > b)
+	    (*vS).h[i] = 0xffff;
+	  else
+	    (*vS).h[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
+0.4,6.VS,11.VA,16.VB,21.RC,22.646:VXR:av:vcmpgtuwx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Word
+	int i;
+	unsigned32 a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  if (a > b)
+	    (*vS).w[i] = 0xffffffff;
+	  else
+	    (*vS).w[i] = 0;
+	}
+	if (RC)
+	  ALTIVEC_SET_CR6(vS, 1);
+	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+
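+# Note on the record (Rc) forms: ALTIVEC_SET_CR6 folds the per-element
+# results into CR field 6; per the AltiVec PEM, CR6 is 0b1000 when every
+# element compared true and 0b0010 when none did, e.g. vcmpequb. on two
+# identical vectors yields CR6 = 0b1000.
+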
+#
+# Vector Convert instructions, 6-65, 6-66.
+#
+
+0.4,6.VS,11.UIMM,16.VB,21.970:VX:av:vctsxs %VD, %VB, %UIMM:Vector Convert to Signed Fixed-Point Word Saturate
+	int i, sat, tempsat;
+	signed64 temp;
+	sim_fpu a, b, m;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_u32to (&m, 1 << UIMM, sim_fpu_round_default);	/* scale = 2^UIMM */
+	  sim_fpu_mul (&a, &b, &m);
+	  sim_fpu_to64i (&temp, &a, sim_fpu_round_default);
+	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.UIMM,16.VB,21.906:VX:av:vctuxs %VD, %VB, %UIMM:Vector Convert to Unsigned Fixed-Point Word Saturate
+	int i, sat, tempsat;
+	signed64 temp;
+	sim_fpu a, b, m;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_u32to (&m, 1 << UIMM, sim_fpu_round_default);	/* scale = 2^UIMM */
+	  sim_fpu_mul (&a, &b, &m);
+	  sim_fpu_to64u (&temp, &a, sim_fpu_round_default);
+	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+
+#
+# Vector Estimate instructions, 6-67 ... 6-70.
+#
+
+0.4,6.VS,11.0,16.VB,21.394:VX:av:vexptefp %VD, %VB:Vector 2 Raised to the Exponent Estimate Floating Point
+	int i;
+	unsigned32 f;
+	signed32 bi;
+	sim_fpu b, d;
+	for (i = 0; i < 4; i++) {
+	  /* HACK: crude integer approximation of the estimate */
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_to32i (&bi, &b, sim_fpu_round_default);
+	  bi = 1 << bi;	/* 2^bi */
+	  sim_fpu_32to (&d, bi);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.458:VX:av:vlogefp %VD, %VB:Vector Log2 Estimate Floating Point
+	int i;
+	unsigned32 c, u, f;
+	sim_fpu b, cfpu, d;
+	for (i = 0; i < 4; i++) {
+	  /* HACK: crude integer approximation of log2 */
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_to32u (&u, &b, sim_fpu_round_default);
+	  for (c = 0; (u /= 2) > 1; c++)
+	    ;
+	  sim_fpu_32to (&cfpu, c);
+	  sim_fpu_add (&d, &b, &cfpu);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+
+#
+# Vector Multiply Add instruction, 6-71
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.46:VAX:av:vmaddfp %VD, %VA, %VB, %VC:Vector Multiply Add Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu a, b, c, d, e;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_32to (&c, (*vC).w[i]);
+	  sim_fpu_mul (&e, &a, &c);
+	  sim_fpu_add (&d, &e, &b);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+
+#
+# Vector Maximum instructions, 6-72 ... 6-78.
+#
+
+0.4,6.VS,11.VA,16.VB,21.1034:VX:av:vmaxfp %VD, %VA, %VB:Vector Maximum Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu a, b, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_max (&d, &a, &b);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.258:VX:av:vmaxsb %VD, %VA, %VB:Vector Maximum Signed Byte
+	int i;
+	signed8 a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  if (a > b)
+	    (*vS).b[i] = a;
+	  else
+	    (*vS).b[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.322:VX:av:vmaxsh %VD, %VA, %VB:Vector Maximum Signed Half Word
+	int i;
+	signed16 a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  if (a > b)
+	    (*vS).h[i] = a;
+	  else
+	    (*vS).h[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.386:VX:av:vmaxsw %VD, %VA, %VB:Vector Maximum Signed Word
+	int i;
+	signed32 a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  if (a > b)
+	    (*vS).w[i] = a;
+	  else
+	    (*vS).w[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.2:VX:av:vmaxub %VD, %VA, %VB:Vector Maximum Unsigned Byte
+	int i;
+	unsigned8 a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  if (a > b)
+	    (*vS).b[i] = a;
+	  else
+	    (*vS).b[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.66:VX:av:vmaxuh %VD, %VA, %VB:Vector Maximum Unsigned Half Word
+	int i;
+	unsigned16 a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  if (a > b)
+	    (*vS).h[i] = a;
+	  else
+	    (*vS).h[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.130:VX:av:vmaxuw %VD, %VA, %VB:Vector Maximum Unsigned Word
+	int i;
+	unsigned32 a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  if (a > b)
+	    (*vS).w[i] = a;
+	  else
+	    (*vS).w[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Multiply High instructions, 6-79, 6-80.
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.32:VAX:av:vmhaddshs %VD, %VA, %VB, %VC:Vector Multiply High and Add Signed Half Word Saturate
+	int i, sat, tempsat;
+	signed16 a, b;
+	signed32 prod, temp, c;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  c = (signed32)(signed16)(*vC).h[i];
+	  prod = (signed32)a * (signed32)b;
+	  temp = (prod >> 15) + c;
+	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.33:VAX:av:vmhraddshs %VD, %VA, %VB, %VC:Vector Multiply High Round and Add Signed Half Word Saturate
+	int i, sat, tempsat;
+	signed16 a, b;
+	signed32 prod, temp, c;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  c = (signed32)(signed16)(*vC).h[i];
+	  prod = (signed32)a * (signed32)b;
+	  prod += 0x4000;
+	  temp = (prod >> 15) + c;
+	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
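+# Note: (prod >> 15) implements a Q15 fractional multiply, e.g.
+# 0x4000 * 0x4000 = 0x10000000, and 0x10000000 >> 15 = 0x2000
+# (0.5 * 0.5 = 0.25); vmhraddshs adds 0x4000 first so the result is
+# rounded rather than truncated.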
+
+#
+# Vector Minimum instructions, 6-81 ... 6-87
+#
+
+0.4,6.VS,11.VA,16.VB,21.1098:VX:av:vminfp %VD, %VA, %VB:Vector Minimum Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu a, b, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_min (&d, &a, &b);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.770:VX:av:vminsb %VD, %VA, %VB:Vector Minimum Signed Byte
+	int i;
+	signed8 a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  if (a < b)
+	    (*vS).b[i] = a;
+	  else
+	    (*vS).b[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.834:VX:av:vminsh %VD, %VA, %VB:Vector Minimum Signed Half Word
+	int i;
+	signed16 a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  if (a < b)
+	    (*vS).h[i] = a;
+	  else
+	    (*vS).h[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.898:VX:av:vminsw %VD, %VA, %VB:Vector Minimum Signed Word
+	int i;
+	signed32 a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  if (a < b)
+	    (*vS).w[i] = a;
+	  else
+	    (*vS).w[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.514:VX:av:vminub %VD, %VA, %VB:Vector Minimum Unsigned Byte
+	int i;
+	unsigned8 a, b;
+	for (i = 0; i < 16; i++) {
+	  a = (*vA).b[i];
+	  b = (*vB).b[i];
+	  if (a < b)
+	    (*vS).b[i] = a;
+	  else
+	    (*vS).b[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.578:VX:av:vminuh %VD, %VA, %VB:Vector Minimum Unsigned Half Word
+	int i;
+	unsigned16 a, b;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  if (a < b)
+	    (*vS).h[i] = a;
+	  else
+	    (*vS).h[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.642:VX:av:vminuw %VD, %VA, %VB:Vector Minimum Unsigned Word
+	int i;
+	unsigned32 a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).w[i];
+	  b = (*vB).w[i];
+	  if (a < b)
+	    (*vS).w[i] = a;
+	  else
+	    (*vS).w[i] = b;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Multiply Low instruction, 6-88
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.34:VAX:av:vmladduhm %VD, %VA, %VB, %VC:Vector Multiply Low and Add Unsigned Half Word Modulo
+	int i;
+	unsigned16 a, b, c;
+	unsigned32 prod;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).h[i];
+	  b = (*vB).h[i];
+	  c = (*vC).h[i];
+	  prod = (unsigned32)a * (unsigned32)b;
+	  (*vS).h[i] = (prod + c) & 0xffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+
+#
+# Vector Merge instructions, 6-89 ... 6-94
+#
+
+0.4,6.VS,11.VA,16.VB,21.12:VX:av:vmrghb %VD, %VA, %VB:Vector Merge High Byte
+	int i;
+	for (i = 0; i < 16; i += 2) {
+	  (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i/2)];
+	  (*vS).b[AV_BINDEX(i+1)] = (*vB).b[AV_BINDEX(i/2)]; 
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.76:VX:av:vmrghh %VD, %VA, %VB:Vector Merge High Half Word
+	int i;
+	for (i = 0; i < 8; i += 2) {
+	  (*vS).h[AV_HINDEX(i)] = (*vA).h[AV_HINDEX(i/2)];
+	  (*vS).h[AV_HINDEX(i+1)] = (*vB).h[AV_HINDEX(i/2)]; 
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.140:VX:av:vmrghw %VD, %VA, %VB:Vector Merge High Word
+	int i;
+	for (i = 0; i < 4; i += 2) {
+	  (*vS).w[i] = (*vA).w[i/2];
+	  (*vS).w[i+1] = (*vB).w[i/2]; 
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.268:VX:av:vmrglb %VD, %VA, %VB:Vector Merge Low Byte
+	int i;
+	for (i = 0; i < 16; i += 2) {
+	  (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX((i/2) + 8)];
+	  (*vS).b[AV_BINDEX(i+1)] = (*vB).b[AV_BINDEX((i/2) + 8)]; 
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.332:VX:av:vmrglh %VD, %VA, %VB:Vector Merge Low Half Word
+	int i;
+	for (i = 0; i < 8; i += 2) {
+	  (*vS).h[AV_HINDEX(i)] = (*vA).h[AV_HINDEX((i/2) + 4)];
+	  (*vS).h[AV_HINDEX(i+1)] = (*vB).h[AV_HINDEX((i/2) + 4)]; 
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.396:VX:av:vmrglw %VD, %VA, %VB:Vector Merge Low Word
+	int i;
+	for (i = 0; i < 4; i += 2) {
+	  (*vS).w[i] = (*vA).w[(i/2) + 2];
+	  (*vS).w[i+1] = (*vB).w[(i/2) + 2]; 
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
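+# Example: vmrghb interleaves the high halves of the sources, so the
+# result bytes are A0, B0, A1, B1, ... A7, B7; the "low" forms do the
+# same with elements 8-15 (bytes), 4-7 (half words) or 2-3 (words).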
+
+#
+# Vector Multiply Sum instructions, 6-95 ... 6-100
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.37:VAX:av:vmsummbm %VD, %VA, %VB, %VC:Vector Multiply Sum Mixed-Sign Byte Modulo
+	int i, j;
+	signed32 temp;
+	signed16 prod, a;
+	unsigned16 b;
+	for (i = 0; i < 4; i++) {
+	  temp = (*vC).w[i];
+	  for (j = 0; j < 4; j++) {
+	    a = (signed16)(signed8)(*vA).b[i*4+j]; 
+	    b = (*vB).b[i*4+j];
+	    prod = a * b;
+	    temp += (signed32)prod;
+	  }
+	  (*vS).w[i] = temp;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.40:VAX:av:vmsumshm %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Modulo
+	int i, j;
+	signed32 temp, prod, a, b;
+	for (i = 0; i < 4; i++) {
+	  temp = (*vC).w[i];
+	  for (j = 0; j < 2; j++) {
+	    a = (signed32)(signed16)(*vA).h[i*2+j]; 
+	    b = (signed32)(signed16)(*vB).h[i*2+j];
+	    prod = a * b;
+	    temp += prod;
+	  }
+	  (*vS).w[i] = temp;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.41:VAX:av:vmsumshs %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Saturate
+	int i, j, sat, tempsat;
+	signed64 temp;
+	signed32 prod, a, b;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  temp = (signed64)(signed32)(*vC).w[i];
+	  for (j = 0; j < 2; j++) {
+	    a = (signed32)(signed16)(*vA).h[i*2+j]; 
+	    b = (signed32)(signed16)(*vB).h[i*2+j];
+	    prod = a * b;
+	    temp += (signed64)prod;
+	  }
+	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.36:VAX:av:vmsumubm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Byte Modulo
+	int i, j;
+	unsigned32 temp;
+	unsigned16 prod, a, b;
+	for (i = 0; i < 4; i++) {
+	  temp = (*vC).w[i];
+	  for (j = 0; j < 4; j++) {
+	    a = (*vA).b[i*4+j]; 
+	    b = (*vB).b[i*4+j];
+	    prod = a * b;
+	    temp += prod;
+	  }
+	  (*vS).w[i] = temp;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.38:VAX:av:vmsumuhm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Modulo
+	int i, j;
+	unsigned32 temp, prod, a, b;
+	for (i = 0; i < 4; i++) {
+	  temp = (*vC).w[i];
+	  for (j = 0; j < 2; j++) {
+	    a = (*vA).h[i*2+j]; 
+	    b = (*vB).h[i*2+j];
+	    prod = a * b;
+	    temp += prod;
+	  }
+	  (*vS).w[i] = temp;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.39:VAX:av:vmsumuhs %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Saturate
+	int i, j, sat, tempsat;
+	unsigned32 temp, prod, a, b;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  temp = (*vC).w[i];
+	  for (j = 0; j < 2; j++) {
+	    a = (*vA).h[i*2+j]; 
+	    b = (*vB).h[i*2+j];
+	    prod = a * b;
+	    temp += prod;
+	  }
+	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+
+#
+# Vector Multiply Even/Odd instructions, 6-101 ... 6-108
+#
+
+0.4,6.VS,11.VA,16.VB,21.776:VX:av:vmulesb %VD, %VA, %VB:Vector Multiply Even Signed Byte
+	int i;
+	signed8 a, b;
+	signed16 prod;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).b[AV_BINDEX(i*2)]; 
+	  b = (*vB).b[AV_BINDEX(i*2)];
+	  prod = a * b;
+	  (*vS).h[AV_HINDEX(i)] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.840:VX:av:vmulesh %VD, %VA, %VB:Vector Multiply Even Signed Half Word
+	int i;
+	signed16 a, b;
+	signed32 prod;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).h[AV_HINDEX(i*2)]; 
+	  b = (*vB).h[AV_HINDEX(i*2)];
+	  prod = a * b;
+	  (*vS).w[i] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.520:VX:av:vmuleub %VD, %VA, %VB:Vector Multiply Even Unsigned Byte
+	int i;
+	unsigned8 a, b;
+	unsigned16 prod;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).b[AV_BINDEX(i*2)]; 
+	  b = (*vB).b[AV_BINDEX(i*2)];
+	  prod = a * b;
+	  (*vS).h[AV_HINDEX(i)] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.584:VX:av:vmuleuh %VD, %VA, %VB:Vector Multiply Even Unsigned Half Word
+	int i;
+	unsigned16 a, b;
+	unsigned32 prod;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).h[AV_HINDEX(i*2)]; 
+	  b = (*vB).h[AV_HINDEX(i*2)];
+	  prod = a * b;
+	  (*vS).w[i] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.264:VX:av:vmulosb %VD, %VA, %VB:Vector Multiply Odd Signed Byte
+	int i;
+	signed8 a, b;
+	signed16 prod;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).b[AV_BINDEX((i*2)+1)]; 
+	  b = (*vB).b[AV_BINDEX((i*2)+1)];
+	  prod = a * b;
+	  (*vS).h[AV_HINDEX(i)] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.328:VX:av:vmulosh %VD, %VA, %VB:Vector Multiply Odd Signed Half Word
+	int i;
+	signed16 a, b;
+	signed32 prod;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).h[AV_HINDEX((i*2)+1)]; 
+	  b = (*vB).h[AV_HINDEX((i*2)+1)];
+	  prod = a * b;
+	  (*vS).w[i] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.8:VX:av:vmuloub %VD, %VA, %VB:Vector Multiply Odd Unsigned Byte
+	int i;
+	unsigned8 a, b;
+	unsigned16 prod;
+	for (i = 0; i < 8; i++) {
+	  a = (*vA).b[AV_BINDEX((i*2)+1)]; 
+	  b = (*vB).b[AV_BINDEX((i*2)+1)];
+	  prod = a * b;
+	  (*vS).h[AV_HINDEX(i)] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.72:VX:av:vmulouh %VD, %VA, %VB:Vector Multiply Odd Unsigned Half Word
+	int i;
+	unsigned16 a, b;
+	unsigned32 prod;
+	for (i = 0; i < 4; i++) {
+	  a = (*vA).h[AV_HINDEX((i*2)+1)]; 
+	  b = (*vB).h[AV_HINDEX((i*2)+1)];
+	  prod = a * b;
+	  (*vS).w[i] = prod;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Negative Multiply-Subtract instruction, 6-109
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.47:VX:av:vnmsubfp %VD, %VA, %VB, %VC:Vector Negative Multiply-Subtract Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu a, b, c, d, i1, i2;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_32to (&c, (*vC).w[i]);
+	  sim_fpu_mul (&i1, &a, &c);
+	  sim_fpu_sub (&i2, &i1, &b);
+	  sim_fpu_neg (&d, &i2);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+
+#
+# Vector Logical OR instructions, 6-110, 6-111, 6-177
+#
+
+0.4,6.VS,11.VA,16.VB,21.1284:VX:av:vnor %VD, %VA, %VB:Vector Logical NOR
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = ~((*vA).w[i] | (*vB).w[i]);
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1156:VX:av:vor %VD, %VA, %VB:Vector Logical OR
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (*vA).w[i] | (*vB).w[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1220:VX:av:vxor %VD, %VA, %VB:Vector Logical XOR
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (*vA).w[i] ^ (*vB).w[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Permute instruction, 6-112
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.43:VX:av:vperm %VD, %VA, %VB, %VC:Vector Permute
+	int i, who;
+	for (i = 0; i < 16; i++) {
+	  who = (*vC).b[AV_BINDEX(i)] & 0x1f;
+	  if (who & 0x10)
+	    (*vS).b[AV_BINDEX(i)] = (*vB).b[AV_BINDEX(who & 0xf)];
+	  else
+	    (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(who & 0xf)];
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
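+# Example: each control byte in vC indexes the 32-byte concatenation of
+# vA and vB; bit 0x10 selects the source, so a control byte of 0x11
+# copies byte 1 of vB, while 0x01 copies byte 1 of vA.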
+
+#
+# Vector Pack instructions, 6-113 ... 6-121
+#
+
+0.4,6.VS,11.VA,16.VB,21.782:VX:av:vpkpx %VD, %VA, %VB:Vector Pack Pixel32
+	int i;
+	for (i = 0; i < 4; i++) {
+	  (*vS).h[AV_HINDEX(i+4)] = ((((*vB).w[i]) >> 9) & 0xfc00)
+	               | ((((*vB).w[i]) >> 6) & 0x03e0)
+	               | ((((*vB).w[i]) >> 3) & 0x001f);
+	  (*vS).h[AV_HINDEX(i)] = ((((*vA).w[i]) >> 9) & 0xfc00)
+	             | ((((*vA).w[i]) >> 6) & 0x03e0)
+	             | ((((*vA).w[i]) >> 3) & 0x001f);
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.398:VX:av:vpkshss %VD, %VA, %VB:Vector Pack Signed Half Word Signed Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  if (i < 8)
+	    temp = (*vA).h[AV_HINDEX(i)];
+	  else
+	    temp = (*vB).h[AV_HINDEX(i-8)];
+	  (*vS).b[AV_BINDEX(i)] = altivec_signed_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.270:VX:av:vpkshus %VD, %VA, %VB:Vector Pack Signed Half Word Unsigned Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  if (i < 8)
+	    temp = (*vA).h[AV_HINDEX(i)];
+	  else
+	    temp = (*vB).h[AV_HINDEX(i-8)];
+	  (*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.462:VX:av:vpkswss %VD, %VA, %VB:Vector Pack Signed Word Signed Saturate
+	int i, sat, tempsat;
+	signed32 temp;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  if (i < 4)
+	    temp = (*vA).w[i];
+	  else
+	    temp = (*vB).w[i-4];
+	  (*vS).h[AV_HINDEX(i)] = altivec_signed_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.334:VX:av:vpkswus %VD, %VA, %VB:Vector Pack Signed Word Unsigned Saturate
+	int i, sat, tempsat;
+	signed32 temp;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  if (i < 4)
+	    temp = (*vA).w[i];
+	  else
+	    temp = (*vB).w[i-4];
+	  (*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.14:VX:av:vpkuhum %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Modulo
+	int i;
+	for (i = 0; i < 16; i++)
+	  if (i < 8)
+	    (*vS).b[AV_BINDEX(i)] = (*vA).h[AV_HINDEX(i)];
+	  else
+	    (*vS).b[AV_BINDEX(i)] = (*vB).h[AV_HINDEX(i-8)];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.142:VX:av:vpkuhus %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  if (i < 8)
+	    temp = (*vA).h[AV_HINDEX(i)];
+	  else
+	    temp = (*vB).h[AV_HINDEX(i-8)];
+	  /* force positive in signed16, ok as we'll toss the bit away anyway */
+	  temp &= ~0x8000;
+	  (*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.78:VX:av:vpkuwum %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Modulo
+	int i;
+	for (i = 0; i < 8; i++)
+	  if (i < 4)
+	    (*vS).h[AV_HINDEX(i)] = (*vA).w[i];
+	  else
+	    (*vS).h[AV_HINDEX(i)] = (*vB).w[i-4];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.206:VX:av:vpkuwus %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Saturate
+	int i, sat, tempsat;
+	signed32 temp;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  if (i < 4)
+	    temp = (*vA).w[i];
+	  else
+	    temp = (*vB).w[i-4];
+	  /* force positive in signed32, ok as we'll toss the bit away anyway */
+	  temp &= ~0x80000000;
+	  (*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Reciprocal instructions, 6-122, 6-123, 6-131
+#
+
+0.4,6.VS,11.0,16.VB,21.266:VX:av:vrefp %VD, %VB:Vector Reciprocal Estimate Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu op, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&op, (*vB).w[i]);
+	  sim_fpu_div (&d, &sim_fpu_one, &op);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.330:VX:av:vrsqrtefp %VD, %VB:Vector Reciprocal Square Root Estimate Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu op, i1, one, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&op, (*vB).w[i]);
+	  sim_fpu_sqrt (&i1, &op);
+	  sim_fpu_div (&d, &sim_fpu_one, &i1);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+
+#
+# Vector Round instructions, 6-124 ... 6-127
+#
+
+0.4,6.VS,11.0,16.VB,21.714:VX:av:vrfim %VD, %VB:Vector Round to Floating-Point Integer towards Minus Infinity
+	int i;
+	unsigned32 f;
+	sim_fpu op;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&op, (*vB).w[i]);
+	  sim_fpu_round_32(&op, sim_fpu_round_down, sim_fpu_denorm_default);
+	  sim_fpu_to32 (&f, &op);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.522:VX:av:vrfin %VD, %VB:Vector Round to Floating-Point Integer Nearest
+	int i;
+	unsigned32 f;
+	sim_fpu op;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&op, (*vB).w[i]);
+	  sim_fpu_round_32(&op, sim_fpu_round_near, sim_fpu_denorm_default);
+	  sim_fpu_to32 (&f, &op);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.650:VX:av:vrfip %VD, %VB:Vector Round to Floating-Point Integer towards Plus Infinity
+	int i;
+	unsigned32 f;
+	sim_fpu op;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&op, (*vB).w[i]);
+	  sim_fpu_round_32(&op, sim_fpu_round_up, sim_fpu_denorm_default);
+	  sim_fpu_to32 (&f, &op);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.586:VX:av:vrfiz %VD, %VB:Vector Round to Floating-Point Integer towards Zero
+	int i;
+	unsigned32 f;
+	sim_fpu op;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&op, (*vB).w[i]);
+	  sim_fpu_round_32(&op, sim_fpu_round_zero, sim_fpu_denorm_default);
+	  sim_fpu_to32 (&f, &op);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+
+#
+# Vector Rotate Left instructions, 6-128 ... 6-130
+#
+
+0.4,6.VS,11.VA,16.VB,21.4:VX:av:vrlb %VD, %VA, %VB:Vector Rotate Left Integer Byte
+	int i;
+	unsigned16 temp;
+	for (i = 0; i < 16; i++) {
+	  temp = (unsigned16)(*vA).b[i] << (((*vB).b[i]) & 7);
+	  (*vS).b[i] = (temp & 0xff) | ((temp >> 8) & 0xff);
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
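+# The rotates here use a double-width temporary: widen the element,
+# shift left, then OR the two halves of the temporary back together.
+# E.g. for vrlb, rotating byte 0x81 left by 1 gives temp 0x0102, and
+# the two halves recombine to 0x03.
+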
+0.4,6.VS,11.VA,16.VB,21.68:VX:av:vrlh %VD, %VA, %VB:Vector Rotate Left Integer Half Word
+	int i;
+	unsigned32 temp;
+	for (i = 0; i < 8; i++) {
+	  temp = (unsigned32)(*vA).h[i] << (((*vB).h[i]) & 0xf);
+	  (*vS).h[i] = (temp & 0xffff) | ((temp >> 16) & 0xffff);
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.132:VX:av:vrlw %VD, %VA, %VB:Vector Rotate Left Integer Word
+	int i;
+	unsigned64 temp;
+	for (i = 0; i < 4; i++) {
+	  temp = (unsigned64)(*vA).w[i] << (((*vB).w[i]) & 0x1f);
+	  (*vS).w[i] = (temp & 0xffffffff) | ((temp >> 32) & 0xffffffff);
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Conditional Select instruction, 6-133
+#
+
+0.4,6.VS,11.VA,16.VB,21.VC,26.42:VAX:av:vsel %VD, %VA, %VB, %VC:Vector Conditional Select
+	int i;
+	unsigned32 c;
+	for (i = 0; i < 4; i++) {
+	  c = (*vC).w[i];
+	  (*vS).w[i] = ((*vB).w[i] & c) | ((*vA).w[i] & ~c);
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+
+#
+# Vector Shift Left instructions, 6-134 ... 6-139
+#
+
+0.4,6.VS,11.VA,16.VB,21.452:VX:av:vsl %VD, %VA, %VB:Vector Shift Left
+	int sh, i, j, carry, new_carry;
+	sh = (*vB).b[0] & 7;	/* don't bother checking everything */
+	carry = 0;
+	for (j = 3; j >= 0; j--) {
+	  if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	    i = j;
+	  else
+	    i = (j + 2) % 4;
+	  new_carry = sh ? (*vA).w[i] >> (32 - sh) : 0;	/* >> 32 is undefined in C */
+	  (*vS).w[i] = ((*vA).w[i] << sh) | carry;
+	  carry = new_carry;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.260:VX:av:vslb %VD, %VA, %VB:Vector Shift Left Integer Byte
+	int i, sh;
+	for (i = 0; i < 16; i++) {
+	  sh = ((*vB).b[i]) & 7;
+	  (*vS).b[i] = (*vA).b[i] << sh;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.0,22.SH,26.44:VX:av:vsldoi %VD, %VA, %VB, %SH:Vector Shift Left Double by Octet Immediate
+	int i, j;
+	for (j = 0, i = SH; i < 16; i++)
+	  (*vS).b[j++] = (*vA).b[i];
+	for (i = 0; i < SH; i++)
+	  (*vS).b[j++] = (*vB).b[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.324:VX:av:vslh %VD, %VA, %VB:Vector Shift Left Half Word
+	int i, sh;
+	for (i = 0; i < 8; i++) {
+	  sh = ((*vB).h[i]) & 0xf;
+	  (*vS).h[i] = (*vA).h[i] << sh;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1036:VX:av:vslo %VD, %VA, %VB:Vector Shift Left by Octet
+	int i, sh;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	  sh = ((*vB).b[AV_BINDEX(15)] >> 3) & 0xf;
+	else
+	  sh = ((*vB).b[AV_BINDEX(0)] >> 3) & 0xf;
+	for (i = 0; i < 16; i++) {
+	  if (15 - i >= sh)
+	    (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i + sh)];
+	  else
+	    (*vS).b[AV_BINDEX(i)] = 0;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.388:VX:av:vslw %VD, %VA, %VB:Vector Shift Left Integer Word
+	int i, sh;
+	for (i = 0; i < 4; i++) {
+	  sh = ((*vB).w[i]) & 0x1f;
+	  (*vS).w[i] = (*vA).w[i] << sh;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Splat instructions, 6-140 ... 6-145
+#
+
+0.4,6.VS,11.UIMM,16.VB,21.524:VX:av:vspltb %VD, %VB, %UIMM:Vector Splat Byte
+	int i;
+	unsigned8 b;
+	b = (*vB).b[AV_BINDEX(UIMM & 0xf)];
+	for (i = 0; i < 16; i++)
+	  (*vS).b[i] = b;
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.UIMM,16.VB,21.588:VX:av:vsplth %VD, %VB, %UIMM:Vector Splat Half Word
+	int i;
+	unsigned16 h;
+	h = (*vB).h[AV_HINDEX(UIMM & 0x7)];
+	for (i = 0; i < 8; i++)
+	  (*vS).h[i] = h;
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.SIMM,16.0,21.780:VX:av:vspltisb %VD, %SIMM:Vector Splat Immediate Signed Byte
+	int i;
+	signed8 b = SIMM;
+	/* manual 5-bit sign extension */
+	if (b & 0x10)
+	  b -= 0x20;
+	for (i = 0; i < 16; i++)
+	  (*vS).b[i] = b;
+	PPC_INSN_VR(VS_BITMASK, 0);
+
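+# In the splat-immediate forms above and below, SIMM arrives as a raw
+# 5-bit field, hence the manual sign extension; e.g. SIMM == 0x1f wraps
+# to -1 and splats 0xff, 0xffff or 0xffffffff across the destination.
+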
+0.4,6.VS,11.SIMM,16.0,21.844:VX:av:vspltish %VD, %SIMM:Vector Splat Immediate Signed Half Word
+	int i;
+	signed16 h = SIMM;
+	/* manual 5-bit sign extension */
+	if (h & 0x10)
+	  h -= 0x20;
+	for (i = 0; i < 8; i++)
+	  (*vS).h[i] = h;
+	PPC_INSN_VR(VS_BITMASK, 0);
+
+0.4,6.VS,11.SIMM,16.0,21.908:VX:av:vspltisw %VD, %SIMM:Vector Splat Immediate Signed Word
+	int i;
+	signed32 w = SIMM;
+	/* manual 5-bit sign extension */
+	if (w & 0x10)
+	  w -= 0x20;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = w;
+	PPC_INSN_VR(VS_BITMASK, 0);
+
+0.4,6.VS,11.UIMM,16.VB,21.652:VX:av:vspltw %VD, %VB, %UIMM:Vector Splat Word
+	int i;
+	unsigned32 w;
+	w = (*vB).w[UIMM & 0x3];
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = w;
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+
+#
+# Vector Shift Right instructions, 6-146 ... 6-154
+#
+
+0.4,6.VS,11.VA,16.VB,21.708:VX:av:vsr %VD, %VA, %VB:Vector Shift Right
+	int sh, i, j, carry, new_carry;
+	sh = (*vB).b[0] & 7;	/* don't bother checking everything */
+	carry = 0;
+	for (j = 0; j < 4; j++) {
+	  if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	    i = j;
+	  else
+	    i = (j + 2) % 4;
+	  new_carry = sh ? (*vA).w[i] << (32 - sh) : 0;	/* << 32 is undefined in C */
+	  (*vS).w[i] = ((*vA).w[i] >> sh) | carry;
+	  carry = new_carry;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.772:VX:av:vsrab %VD, %VA, %VB:Vector Shift Right Algebraic Byte
+	int i, sh;
+	signed16 a;
+	for (i = 0; i < 16; i++) {
+	  sh = ((*vB).b[i]) & 7;
+	  a = (signed16)(signed8)(*vA).b[i];
+	  (*vS).b[i] = (a >> sh) & 0xff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.836:VX:av:vsrah %VD, %VA, %VB:Vector Shift Right Algebraic Half Word
+	int i, sh;
+	signed32 a;
+	for (i = 0; i < 8; i++) {
+	  sh = ((*vB).h[i]) & 0xf;
+	  a = (signed32)(signed16)(*vA).h[i];
+	  (*vS).h[i] = (a >> sh) & 0xffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.900:VX:av:vsraw %VD, %VA, %VB:Vector Shift Right Algebraic Word
+	int i, sh;
+	signed64 a;
+	for (i = 0; i < 4; i++) {
+	  sh = ((*vB).w[i]) & 0x1f;
+	  a = (signed64)(signed32)(*vA).w[i];
+	  (*vS).w[i] = (a >> sh) & 0xffffffff;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.516:VX:av:vsrb %VD, %VA, %VB:Vector Shift Right Byte
+	int i, sh;
+	for (i = 0; i < 16; i++) {
+	  sh = ((*vB).b[i]) & 7;
+	  (*vS).b[i] = (*vA).b[i] >> sh;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.580:VX:av:vsrh %VD, %VA, %VB:Vector Shift Right Half Word
+	int i, sh;
+	for (i = 0; i < 8; i++) {
+	  sh = ((*vB).h[i]) & 0xf;
+	  (*vS).h[i] = (*vA).h[i] >> sh;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1100:VX:av:vsro %VD, %VA, %VB:Vector Shift Right Octet
+	int i, sh;
+	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+	  sh = ((*vB).b[AV_BINDEX(15)] >> 3) & 0xf;
+	else
+	  sh = ((*vB).b[AV_BINDEX(0)] >> 3) & 0xf;
+	for (i = 0; i < 16; i++) {
+	  if (i < sh)
+	    (*vS).b[AV_BINDEX(i)] = 0;
+	  else
+	    (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i - sh)];
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.644:VX:av:vsrw %VD, %VA, %VB:Vector Shift Right Word
+	int i, sh;
+	for (i = 0; i < 4; i++) {
+	  sh = ((*vB).w[i]) & 0x1f;
+	  (*vS).w[i] = (*vA).w[i] >> sh;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Subtract instructions, 6-155 ... 6-165
+#
+
+0.4,6.VS,11.VA,16.VB,21.1408:VX:av:vsubcuw %VD, %VA, %VB:Vector Subtract Carryout Unsigned Word
+	int i;
+	signed64 temp, a, b;
+	for (i = 0; i < 4; i++) {
+	  a = (signed64)(unsigned32)(*vA).w[i];
+	  b = (signed64)(unsigned32)(*vB).w[i];
+	  temp = a - b;
+	  (*vS).w[i] = ~(temp >> 32) & 1;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.74:VX:av:vsubfp %VD, %VA, %VB:Vector Subtract Floating Point
+	int i;
+	unsigned32 f;
+	sim_fpu a, b, d;
+	for (i = 0; i < 4; i++) {
+	  sim_fpu_32to (&a, (*vA).w[i]);
+	  sim_fpu_32to (&b, (*vB).w[i]);
+	  sim_fpu_sub (&d, &a, &b);
+	  sim_fpu_to32 (&f, &d);
+	  (*vS).w[i] = f;
+	}
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1792:VX:av:vsubsbs %VD, %VA, %VB:Vector Subtract Signed Byte Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  temp = (signed16)(signed8)(*vA).b[i] - (signed16)(signed8)(*vB).b[i];
+	  (*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1856:VX:av:vsubshs %VD, %VA, %VB:Vector Subtract Signed Half Word Saturate
+	int i, sat, tempsat;
+	signed32 temp;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  temp = (signed32)(signed16)(*vA).h[i] - (signed32)(signed16)(*vB).h[i];
+	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1920:VX:av:vsubsws %VD, %VA, %VB:Vector Subtract Signed Word Saturate
+	int i, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  temp = (signed64)(signed32)(*vA).w[i] - (signed64)(signed32)(*vB).w[i];
+	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1024:VX:av:vsububm %VD, %VA, %VB:Vector Subtract Unsigned Byte Modulo
+	int i;
+	for (i = 0; i < 16; i++)
+	  (*vS).b[i] = (*vA).b[i] - (*vB).b[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1536:VX:av:vsububs %VD, %VA, %VB:Vector Subtract Unsigned Byte Saturate
+	int i, sat, tempsat;
+	signed16 temp;
+	sat = 0;
+	for (i = 0; i < 16; i++) {
+	  temp = (signed16)(unsigned8)(*vA).b[i] - (signed16)(unsigned8)(*vB).b[i];
+	  (*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1088:VX:av:vsubuhm %VD, %VA, %VB:Vector Subtract Unsigned Half Word Modulo
+	int i;
+	for (i = 0; i < 8; i++)
+	  (*vS).h[i] = ((*vA).h[i] - (*vB).h[i]) & 0xffff;
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1600:VX:av:vsubuhs %VD, %VA, %VB:Vector Subtract Unsigned Half Word Saturate
+	int i, sat, tempsat;
+	signed32 temp;
+	sat = 0;
+	for (i = 0; i < 8; i++) {
+	  temp = (signed32)(unsigned16)(*vA).h[i] - (signed32)(unsigned16)(*vB).h[i];
+	  (*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1152:VX:av:vsubuwm %VD, %VA, %VB:Vector Subtract Unsigned Word Modulo
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (*vA).w[i] - (*vB).w[i];
+	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1664:VX:av:vsubuws %VD, %VA, %VB:Vector Subtract Unsigned Word Saturate
+	int i, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (i = 0; i < 4; i++) {
+	  temp = (signed64)(unsigned32)(*vA).w[i] - (signed64)(unsigned32)(*vB).w[i];
+	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Sum instructions, 6-166 ... 6-170
+#
+
+0.4,6.VS,11.VA,16.VB,21.1928:VX:av:vsumsws %VD, %VA, %VB:Vector Sum Across Signed Word Saturate
+	int i, sat;
+	signed64 temp;
+	temp = (signed64)(signed32)(*vB).w[3];
+	for (i = 0; i < 4; i++)
+	  temp += (signed64)(signed32)(*vA).w[i];
+	(*vS).w[3] = altivec_signed_saturate_32(temp, &sat);
+	(*vS).w[0] = (*vS).w[1] = (*vS).w[2] = 0;
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1672:VX:av:vsum2sws %VD, %VA, %VB:Vector Sum Across Partial (1/2) Signed Word Saturate
+	int j, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (j = 0; j < 4; j += 2) {
+	  temp = (signed64)(signed32)(*vB).w[j+1];
+	  temp += (signed64)(signed32)(*vA).w[j] + (signed64)(signed32)(*vA).w[j+1];
+	  (*vS).w[j+1] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	(*vS).w[0] = (*vS).w[2] = 0;
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1800:VX:av:vsum4sbs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Byte Saturate
+	int i, j, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (j = 0; j < 4; j++) {
+	  temp = (signed64)(signed32)(*vB).w[j];
+	  for (i = 0; i < 4; i++)
+	    temp += (signed64)(signed8)(*vA).b[i+(j*4)];
+	  (*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1608:VX:av:vsum4shs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Half Word Saturate
+	int i, j, sat, tempsat;
+	signed64 temp;
+	sat = 0;
+	for (j = 0; j < 4; j++) {
+	  temp = (signed64)(signed32)(*vB).w[j];
+	  for (i = 0; i < 2; i++)
+	    temp += (signed64)(signed16)(*vA).h[i+(j*2)];
+	  (*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+0.4,6.VS,11.VA,16.VB,21.1544:VX:av:vsum4ubs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Unsigned Byte Saturate
+	int i, j, sat, tempsat;
+	signed64 utemp;
+	signed64 temp;
+	sat = 0;
+	for (j = 0; j < 4; j++) {
+	  utemp = (signed64)(unsigned32)(*vB).w[j];
+	  for (i = 0; i < 4; i++)
+	    utemp += (signed64)(unsigned16)(*vA).b[i+(j*4)];
+	  temp = utemp;
+	  (*vS).w[j] = altivec_unsigned_saturate_32(temp, &tempsat);
+	  sat |= tempsat;
+	}
+	ALTIVEC_SET_SAT(sat);
+	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+
+
+#
+# Vector Unpack instructions, 6-171 ... 6-176
+#
+
+0.4,6.VS,11.0,16.VB,21.846:VX:av:vupkhpx %VD, %VB:Vector Unpack High Pixel16
+	int i;
+	unsigned16 h;
+	for (i = 0; i < 4; i++) {
+	  h = (*vB).h[AV_HINDEX(i)];
+	  (*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
+		     | ((h & 0x7c00) << 6)
+		     | ((h & 0x03e0) << 3)
+		     | ((h & 0x001f));
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
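+# The pixel forms (vupkhpx above, vupklpx below) expand each 1/5/5/5
+# a/r/g/b half word into four bytes: the sign bit widens to an 8-bit
+# alpha and each 5-bit colour field lands in the low bits of its own
+# byte.  E.g. h == 0x8000 unpacks to 0xff000000.
+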
+0.4,6.VS,11.0,16.VB,21.526:VX:av:vupkhsb %VD, %VB:Vector Unpack High Signed Byte
+	int i;
+	for (i = 0; i < 8; i++)
+	  (*vS).h[AV_HINDEX(i)] = (signed16)(signed8)(*vB).b[AV_BINDEX(i)];
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.590:VX:av:vupkhsh %VD, %VB:Vector Unpack High Signed Half Word
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (signed32)(signed16)(*vB).h[AV_HINDEX(i)];
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.974:VX:av:vupklpx %VD, %VB:Vector Unpack Low Pixel16
+	int i;
+	unsigned16 h;
+	for (i = 0; i < 4; i++) {
+	  h = (*vB).h[AV_HINDEX(i + 4)];
+	  (*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
+		     | ((h & 0x7c00) << 6)
+		     | ((h & 0x03e0) << 3)
+		     | ((h & 0x001f));
+	}
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.654:VX:av:vupklsb %VD, %VB:Vector Unpack Low Signed Byte
+	int i;
+	for (i = 0; i < 8; i++)
+	  (*vS).h[AV_HINDEX(i)] = (signed16)(signed8)(*vB).b[AV_BINDEX(i + 8)];
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+
+0.4,6.VS,11.0,16.VB,21.718:VX:av:vupklsh %VD, %VB:Vector Unpack Low Signed Half Word
+	int i;
+	for (i = 0; i < 4; i++)
+	  (*vS).w[i] = (signed32)(signed16)(*vB).h[AV_HINDEX(i + 4)];
+	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
Index: altivec_expression.h
===================================================================
RCS file: altivec_expression.h
diff -N altivec_expression.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ altivec_expression.h	22 Jun 2003 16:23:46 -0000
@@ -0,0 +1,50 @@
+/* Altivec expression macros, for PSIM, the PowerPC simulator.
+
+   Copyright 2003 Free Software Foundation, Inc.
+
+   Contributed by Red Hat Inc; developed under contract from Motorola.
+   Written by matthew green <mrg@redhat.com>.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* AltiVec macro helpers.  */
+
+#define ALTIVEC_SET_CR6(vS, checkone) \
+do { \
+  if (checkone && ((*vS).w[0] == 0xffffffff && \
+		   (*vS).w[1] == 0xffffffff && \
+		   (*vS).w[2] == 0xffffffff && \
+		   (*vS).w[3] == 0xffffffff)) \
+    CR_SET(6, 1 << 3); \
+  else if ((*vS).w[0] == 0 && \
+           (*vS).w[1] == 0 && \
+           (*vS).w[2] == 0 && \
+           (*vS).w[3] == 0) \
+    CR_SET(6, 1 << 1); \
+  else \
+    CR_SET(6, 0); \
+} while (0)
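+
+/* Example: after a record-form compare such as vcmpequw., with
+   checkone non-zero, an all-ones result (every element compared true)
+   sets CR field 6 to 0b1000, an all-zero result sets it to 0b0010,
+   and any mix clears the field.  */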
+
+#define	VSCR_SAT	0x00000001
+#define	VSCR_NJ		0x00010000
+
+#define ALTIVEC_SET_SAT(sat) \
+do { \
+  if (sat) \
+    VSCR |= VSCR_SAT; \
+} while (0)
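+
+/* Note that SAT is sticky: ALTIVEC_SET_SAT only ever sets the bit, so
+   it stays set in VSCR until software rewrites VSCR (mtvscr), matching
+   the architected behaviour.  */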
Index: altivec_registers.h
===================================================================
RCS file: altivec_registers.h
diff -N altivec_registers.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ altivec_registers.h	22 Jun 2003 16:23:46 -0000
@@ -0,0 +1,63 @@
+/* Altivec registers, for PSIM, the PowerPC simulator.
+
+   Copyright 2003 Free Software Foundation, Inc.
+
+   Contributed by Red Hat Inc; developed under contract from Motorola.
+   Written by matthew green <mrg@redhat.com>.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* Manage this as 4 32-bit entities, 8 16-bit entities or 16 8-bit
+   entities.  */
+typedef union
+{
+  unsigned8 b[16];
+  unsigned16 h[8];
+  unsigned32 w[4];
+} vreg;
+
+typedef unsigned32 vscreg;
+
+struct altivec_regs {
+  /* AltiVec Registers */
+  vreg vr[32];
+  vscreg vscr;
+};
+
+/* AltiVec registers */
+#define VR(N)		cpu_registers(processor)->altivec.vr[N]
+
+/* AltiVec vector status and control register */
+#define VSCR		cpu_registers(processor)->altivec.vscr
+
+/* AltiVec endian helpers.  When the host and target byte orders
+   differ, these map element indices so that the right bytes, half
+   words and words are addressed when the order matters.  Note that
+   many AltiVec instructions do not depend on byte order and simply
+   work on N independent lanes of data; the helpers are only for the
+   instructions that actually move data around.  */
+
+#if (WITH_HOST_BYTE_ORDER == BIG_ENDIAN)
+#define AV_BINDEX(x)	((x) & 15)
+#define AV_HINDEX(x)	((x) & 7)
+#else
+static char endian_b2l_bindex[16] = { 3, 2, 1, 0, 7, 6, 5, 4,
+				      11, 10, 9, 8, 15, 14, 13, 12 };
+static char endian_b2l_hindex[8] = { 1, 0, 3, 2, 5, 4, 7, 6 };
+#define AV_BINDEX(x)	endian_b2l_bindex[(x) & 15]
+#define AV_HINDEX(x)	endian_b2l_hindex[(x) & 7]
+#endif
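+
+/* For example, target byte 0 of a vector register sits at host byte 3
+   on a little-endian host, so AV_BINDEX (0) is 3 and AV_HINDEX (0) is
+   1; on big-endian hosts both macros are the identity mapping.  */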
Index: configure.in
===================================================================
RCS file: /cvs/src/src/sim/ppc/configure.in,v
retrieving revision 1.3
diff -u -r1.3 configure.in
--- configure.in	16 Dec 2001 21:00:08 -0000	1.3
+++ configure.in	22 Jun 2003 16:24:04 -0000
@@ -190,15 +190,23 @@
 
 
 AC_ARG_ENABLE(sim-float,
-[  --enable-sim-float			Specify whether to use host floating point or simulate.],
+[  --enable-sim-float			Specify whether the target has hard, soft, altivec or e500 floating point.],
 [case "${enableval}" in
   yes | hard)	sim_float="-DWITH_FLOATING_POINT=HARD_FLOATING_POINT";;
   no | soft)	sim_float="-DWITH_FLOATING_POINT=SOFT_FLOATING_POINT";;
+  altivec)      sim_float="-DWITH_ALTIVEC" ; sim_filter="${sim_filter},av" ;;
+  *spe*|*simd*) sim_float="-DWITH_E500" ; sim_filter="${sim_filter},e500" ;;
   *)		AC_MSG_ERROR("Unknown value $enableval passed to --enable-sim-float"); sim_float="";;
 esac
 if test x"$silent" != x"yes" && test x"$sim_float" != x""; then
   echo "Setting float flags = $sim_float" 6>&1
-fi],[sim_float=""])dnl
+fi],[
+case "${target}" in
+  *altivec*) sim_float="-DWITH_ALTIVEC" ; sim_filter="${sim_filter},av" ;;
+  *spe*|*simd*)	sim_float="-DWITH_E500" ; sim_filter="${sim_filter},e500" ;;
+  *) sim_float=""
+esac
+])dnl
 
 
 AC_ARG_ENABLE(sim-hardware,
Index: e500.igen
===================================================================
RCS file: e500.igen
diff -N e500.igen
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ e500.igen	22 Jun 2003 16:24:08 -0000
@@ -0,0 +1,3348 @@
+# e500 core instructions, for PSIM, the PowerPC simulator.
+
+# Copyright 2003  Free Software Foundation, Inc.
+
+# Contributed by Red Hat Inc; developed under contract from Motorola.
+# Written by matthew green <mrg@redhat.com>.
+
+# This file is part of GDB.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with This program; see the file COPYING.  If not, write to
+# the Free Software Foundation, 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+
+#
+# e500 Core Complex Instructions
+#
+
+:cache:e500::signed_word *:rAh:RA:(cpu_registers(processor)->e500.gprh + RA)
+:cache:e500::signed_word *:rSh:RS:(cpu_registers(processor)->e500.gprh + RS)
+:cache:e500::signed_word *:rBh:RB:(cpu_registers(processor)->e500.gprh + RB)
+
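+# SPE architecturally widens each GPR to 64 bits; the simulator keeps
+# the upper halves in a separate e500.gprh array, so the rAh/rSh/rBh
+# caches above name the high words while the ordinary rA/rS/rB name
+# the low words.
+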
+# Flags for model.h
+::model-macro:::
+	#define PPC_INSN_INT_SPR(OUT_MASK, IN_MASK, SPR) \
+		do { \
+		  if (CURRENT_MODEL_ISSUE > 0) \
+		    ppc_insn_int_spr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, SPR); \
+		} while (0)
+
+# Schedule an instruction that takes two integer input registers and produces a special purpose output register plus an integer output register
+void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned nSPR
+	const unsigned32 int_mask = out_mask | in_mask;
+	model_busy *busy_ptr;
+
+	while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
+	  if (WITH_TRACE && ppc_trace[trace_model])
+	    model_trace_busy_p(model_ptr, int_mask, 0, 0, nSPR);
+
+	  model_ptr->nr_stalls_data++;
+	  model_new_cycle(model_ptr);
+	}
+
+	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+	busy_ptr->int_busy |= out_mask;
+	model_ptr->int_busy |= out_mask;
+	busy_ptr->spr_busy = nSPR;
+	model_ptr->spr_busy[nSPR] = 1;
+	busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 3 : 2;
+	TRACE(trace_model,("Making register %s busy.\n", spr_name(nSPR)));
+
+#
+# SPE Modulo Fractional Multiplication handling support
+#
+:function:e500::unsigned64:ev_multiply16_smf:signed16 a, signed16 b, int *sat
+	signed32 a32 = a, b32 = b, rv32;
+	rv32 = a32 * b32;
+	*sat = (rv32 & (3<<30)) == (3<<30);
+	return (signed64)rv32 << 1;
+
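+# Worked example: in the 1.15 fractional format handled above, 0x4000
+# is 0.5; ev_multiply16_smf (0x4000, 0x4000, &sat) computes
+# rv32 = 0x10000000 and returns 0x20000000 (0.25 in 1.31 format) with
+# *sat clear, since bits 30 and 31 of rv32 are not both set.
+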
+:function:e500::unsigned64:ev_multiply32_smf:signed32 a, signed32 b, int *sat
+	signed64 rv64, a64 = a, b64 = b;
+	rv64 = a64 * b64;
+	*sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
+	/* Loses top sign bit.  */
+	return rv64 << 1;
+#
+# SPE Saturation handling support
+#
+:function:e500::signed32:ev_multiply16_ssf:signed16 a, signed16 b, int *sat
+	signed32 rv32;
+	if (a == 0xffff8000 && b == 0xffff8000)
+	  {
+	    rv32 = 0x7fffffffL;
+	    * sat = 1;
+	    return rv32;
+	  }
+	else
+	  {
+	    signed32 a32 = a, b32 = b;
+	    
+	    rv32 = a32 * b32;
+	    * sat = (rv32 & (3<<30)) == (3<<30);
+	    return (signed64)rv32 << 1;
+	  }
+
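+# Worked example: the most negative 1.15 value squared would be +1.0,
+# which 1.31 cannot represent, so ev_multiply16_ssf (-0x8000, -0x8000,
+# &sat) saturates to 0x7fffffff and sets *sat; that is the special
+# case tested above.
+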
+:function:e500::signed64:ev_multiply32_ssf:signed32 a, signed32 b, int *sat
+	signed64 rv64;
+	if (a == 0x80000000 && b == 0x80000000)
+	  {
+	    rv64 = 0x7fffffffffffffffLL;
+	    * sat = 1;
+	    return rv64;
+	  }
+	else
+	  {
+	    signed64 a64 = a, b64 = b;
+	    rv64 = a64 * b64;
+	    *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
+	    /* Loses top sign bit.  */
+	    return rv64 << 1;
+	  }
+
+#
+# SPE FP handling support
+#
+
+:function:e500::void:ev_check_guard:sim_fpu *a, int fg, int fx, cpu *processor
+	unsigned64 guard;
+	guard = sim_fpu_guard(a, 0);
+	if (guard & 1)
+	  EV_SET_SPEFSCR_BITS(fg);
+	if (guard & ~1)
+	  EV_SET_SPEFSCR_BITS(fx);
+
+:function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, unsigned32 packed
+	sim_fpu_32to (dst, packed);
+
+	/* Set normally unused fields to allow booke arithmetic.  */
+	if (dst->class == sim_fpu_class_infinity)
+	  {
+	    dst->normal_exp = 128;
+	    dst->fraction = ((unsigned64)1 << 60);
+	  }
+	else if (dst->class == sim_fpu_class_qnan
+		 || dst->class == sim_fpu_class_snan)
+	  {
+	    dst->normal_exp = 128;
+	    /* This is set, but without the implicit bit, so we have to or
+	       in the implicit bit.  */
+	    dst->fraction |= ((unsigned64)1 << 60);
+	  }
+
+:function:e500::int:booke_sim_fpu_add:sim_fpu *d, sim_fpu *a, sim_fpu *b, int inv, int over, int under, cpu *processor
+	int invalid_operand, overflow_result, underflow_result;
+	int dest_exp;
+
+	invalid_operand = 0;
+	overflow_result = 0;
+	underflow_result = 0;
+
+	/* Treat NaN, Inf, and denorm like normal numbers, and signal invalid
+	   operand if it hasn't already been done.  */
+	if (EV_IS_INFDENORMNAN (a))
+	  {
+	    a->class = sim_fpu_class_number;
+
+	    EV_SET_SPEFSCR_BITS (inv);
+	    invalid_operand = 1;
+	  }
+	if (EV_IS_INFDENORMNAN (b))
+	  {
+	    b->class = sim_fpu_class_number;
+
+	    if (! invalid_operand)
+	      {
+		EV_SET_SPEFSCR_BITS (inv);
+		invalid_operand = 1;
+	      }
+	  }
+
+	sim_fpu_add (d, a, b);
+
+	dest_exp = booke_sim_fpu_exp (d);
+	/* If this is a denorm, force to zero, and signal underflow if
+	   we haven't already indicated invalid operand.  */
+	if (dest_exp <= -127)
+	  {
+	    int sign = d->sign;
+
+	    *d = sim_fpu_zero;
+	    d->sign = sign;
+	    if (! invalid_operand)
+	      {
+		EV_SET_SPEFSCR_BITS (under);
+		underflow_result = 1;
+	      }
+	  }
+	/* If this is Inf/NaN, force to pmax/nmax, and signal overflow if
+	   we haven't already indicated invalid operand.  */
+	else if (dest_exp >= 127)
+	  {
+	    int sign = d->sign;
+
+	    *d = sim_fpu_max32;
+	    d->sign = sign;
+	    if (! invalid_operand)
+	      {
+		EV_SET_SPEFSCR_BITS (over);
+		overflow_result = 1;
+	      }
+	  }
+	/* Destination sign is sign of operand with larger magnitude, or
+	   the sign of the first operand if operands have the same
+	   magnitude.  Thus if the result is zero, we force it to have
+	   the sign of the first operand.  */
+	else if (d->fraction == 0)
+	  d->sign = a->sign;
+
+	return invalid_operand || overflow_result || underflow_result;
+
+:function:e500::unsigned32:ev_fs_add:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
+	sim_fpu a, b, d;
+	unsigned32 w;
+	int exception;
+
+	booke_sim_fpu_32to (&a, aa);
+	booke_sim_fpu_32to (&b, bb);
+
+	exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
+				       processor);
+
+	sim_fpu_to32 (&w, &d);
+	if (! exception)
+	  ev_check_guard(&d, fg, fx, processor);
+	return w;
+
+:function:e500::unsigned32:ev_fs_sub:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
+	sim_fpu a, b, d;
+	unsigned32 w;
+	int exception;
+
+	booke_sim_fpu_32to (&a, aa);
+	booke_sim_fpu_32to (&b, bb);
+
+	/* Invert sign of second operand, and add.  */
+	b.sign = ! b.sign;
+	exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
+				       processor);
+
+	sim_fpu_to32 (&w, &d);
+	if (! exception)
+	  ev_check_guard(&d, fg, fx, processor);
+	return w;
+
+# sim_fpu_exp leaves the normal_exp field undefined for Inf and NaN.
+# The booke algorithms require exp values, so we fake them here.
+# fixme: It also apparently does the same for zero, but should not.
+:function:e500::unsigned32:booke_sim_fpu_exp:sim_fpu *x
+	int y = sim_fpu_is (x);
+	if (y == SIM_FPU_IS_PZERO || y == SIM_FPU_IS_NZERO)
+	  return 0;
+	else if (y == SIM_FPU_IS_SNAN || y == SIM_FPU_IS_QNAN
+		 || y == SIM_FPU_IS_NINF || y == SIM_FPU_IS_PINF)
+	  return 128;
+	else
+	  return sim_fpu_exp (x);
+
+:function:e500::unsigned32:ev_fs_mul:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
+	sim_fpu a, b, d;
+	unsigned32 w;
+	int sa, sb, ea, eb, ei;
+	sim_fpu_32to (&a, aa);
+	sim_fpu_32to (&b, bb);
+	sa = sim_fpu_sign(&a);
+	sb = sim_fpu_sign(&b);
+	ea = booke_sim_fpu_exp(&a);
+	eb = booke_sim_fpu_exp(&b);
+	ei = ea + eb + 127;
+	if (sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
+	  w = 0;
+	else if (sa == sb) {
+	  if (ei >= 254) {
+	    w = EV_PMAX;
+	    EV_SET_SPEFSCR_BITS(over);
+	  } else if (ei < 1) {
+	    d = sim_fpu_zero;
+	    sim_fpu_to32 (&w, &d);
+	    w &= 0x7fffffff;	/* Clear sign bit.  */
+	  } else {
+	    goto normal_mul;
+	  }
+	} else {
+	  if (ei >= 254) {
+	    w = EV_NMAX;
+	    EV_SET_SPEFSCR_BITS(over);
+	  } else if (ei < 1) {
+	    d = sim_fpu_zero;
+	    sim_fpu_to32 (&w, &d);
+	    w |= 0x80000000;	/* Set sign bit.  */
+	  } else {
+	normal_mul:
+	    if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
+	      EV_SET_SPEFSCR_BITS(inv);
+	    sim_fpu_mul (&d, &a, &b);
+	    sim_fpu_to32 (&w, &d);
+	  }
+	}
+	return w;
+
+:function:e500::unsigned32:ev_fs_div:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
+	sim_fpu a, b, d;
+	unsigned32 w;
+	int sa, sb, ea, eb, ei;
+	
+	sim_fpu_32to (&a, aa);
+	sim_fpu_32to (&b, bb);
+	sa = sim_fpu_sign(&a);
+	sb = sim_fpu_sign(&b);
+	ea = booke_sim_fpu_exp(&a);
+	eb = booke_sim_fpu_exp(&b);
+	ei = ea - eb + 127;
+
+	/* Special cases to handle behaviour of e500 hardware.
+	   cf case 107543.  */
+	if (sim_fpu_is_nan (&a) || sim_fpu_is_nan (&b)
+	  || sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
+	{
+	  if (sim_fpu_is_snan (&a) || sim_fpu_is_snan (&b))
+	    {
+	      if (bb == 0x3f800000)
+	        w = EV_PMAX;
+	      else if (aa == 0x7fc00001)
+	        w = 0x3fbffffe;
+	      else
+	        goto normal_div;
+	    }
+	  else
+	    goto normal_div;
+	}
+	else if (sim_fpu_is_infinity (&a) && sim_fpu_is_infinity (&b))
+	{
+	  if (sa == sb)
+	    sim_fpu_32to (&d, 0x3f800000);
+	  else
+	    sim_fpu_32to (&d, 0xbf800000);
+	  sim_fpu_to32 (&w, &d);
+	}
+	else if (sa == sb) {
+	  if (ei > 254) {
+	    w = EV_PMAX;
+	    EV_SET_SPEFSCR_BITS(over);
+	  } else if (ei <= 1) {
+	    d = sim_fpu_zero;
+	    sim_fpu_to32 (&w, &d);
+	    w &= 0x7fffffff;	/* Clear sign bit.  */
+	  } else {
+	    goto normal_div;
+	  }
+	} else {
+	  if (ei > 254) {
+	    w = EV_NMAX;
+	    EV_SET_SPEFSCR_BITS(over);
+	  } else if (ei <= 1) {
+	    d = sim_fpu_zero;
+	    sim_fpu_to32 (&w, &d);
+	    w |= 0x80000000;	/* Set sign bit.  */
+	  } else {
+	normal_div:
+	    if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
+	      EV_SET_SPEFSCR_BITS(inv);
+	    if (sim_fpu_is_zero (&b))
+	      {
+	        if (sim_fpu_is_zero (&a))
+	          EV_SET_SPEFSCR_BITS(dbz);
+	        else 
+	          EV_SET_SPEFSCR_BITS(inv);
+	        w = sa ? EV_NMAX : EV_PMAX;
+	      }
+	    else
+	      {
+	        sim_fpu_div (&d, &a, &b);
+	        sim_fpu_to32 (&w, &d);
+	        ev_check_guard(&d, fg, fx, processor);
+	      }
+	  }
+	}
+	return w;
+	
+
+#
+# A.2.7 Integer SPE Simple Instructions
+#
+
+0.4,6.RS,11.RA,16.RB,21.512:X:e500:evaddw %RS,%RA,%RB:Vector Add Word
+	unsigned32 w1, w2;
+	w1 = *rBh + *rAh;
+	w2 = *rB + *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evaddw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.IMM,16.RB,21.514:X:e500:evaddiw %RS,%RB,%IMM:Vector Add Immediate Word
+	unsigned32 w1, w2;
+	w1 = *rBh + IMM;
+	w2 = *rB + IMM;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evaddiw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.516:X:e500:evsubfw %RS,%RA,%RB:Vector Subtract from Word
+	unsigned32 w1, w2;
+	w1 = *rBh - *rAh;
+	w2 = *rB - *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evsubfw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.IMM,16.RB,21.518:X:e500:evsubifw %RS,%RB,%IMM:Vector Subtract Immediate from Word
+	unsigned32 w1, w2;
+	w1 = *rBh - IMM;
+	w2 = *rB - IMM;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evsubifw: *rSh = %08x; *rS = %08x; IMM = %d\n", *rSh, *rS, IMM);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.520:X:e500:evabs %RS,%RA:Vector Absolute Value
+	signed32 w1, w2;
+	w1 = *rAh;
+	if (w1 < 0 && w1 != 0x80000000)
+	  w1 = -w1;
+	w2 = *rA;
+	if (w2 < 0 && w2 != 0x80000000)
+	  w2 = -w2;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.521:X:e500:evneg %RS,%RA:Vector Negate
+	signed32 w1, w2;
+	w1 = *rAh;
+	/* Negating the most negative number leaves it unchanged.  */
+	if (w1 != 0x80000000)
+	  w1 = -w1;
+	w2 = *rA;
+	if (w2 != 0x80000000)
+	  w2 = -w2;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.522:X:e500:evextsb %RS,%RA:Vector Extend Signed Byte
+	unsigned64 w1, w2;
+	w1 = *rAh & 0xff;
+	if (w1 & 0x80)
+	  w1 |= 0xffffff00;
+	w2 = *rA & 0xff;
+	if (w2 & 0x80)
+	  w2 |= 0xffffff00;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK , 0);
+
+0.4,6.RS,11.RA,16.0,21.523:X:e500:evextsh %RS,%RA:Vector Extend Signed Half Word
+	unsigned64 w1, w2;
+	w1 = *rAh & 0xffff;
+	if (w1 & 0x8000)
+	  w1 |= 0xffff0000;
+	w2 = *rA & 0xffff;
+	if (w2 & 0x8000)
+	  w2 |= 0xffff0000;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.529:X:e500:evand %RS,%RA,%RB:Vector AND
+	unsigned32 w1, w2;
+	w1 = *rBh & *rAh;
+	w2 = *rB & *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.535:X:e500:evor %RS,%RA,%RB:Vector OR
+	unsigned32 w1, w2;
+	w1 = *rBh | *rAh;
+	w2 = *rB | *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.534:X:e500:evxor %RS,%RA,%RB:Vector XOR
+	unsigned32 w1, w2;
+	w1 = *rBh ^ *rAh;
+	w2 = *rB ^ *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.542:X:e500:evnand %RS,%RA,%RB:Vector NAND
+	unsigned32 w1, w2;
+	w1 = ~(*rBh & *rAh);
+	w2 = ~(*rB & *rA);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.536:X:e500:evnor %RS,%RA,%RB:Vector NOR
+	unsigned32 w1, w2;
+	w1 = ~(*rBh | *rAh);
+	w2 = ~(*rB | *rA);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.537:X:e500:eveqv %RS,%RA,%RB:Vector Equivalent
+	unsigned32 w1, w2;
+	w1 = (~*rBh) ^ *rAh;
+	w2 = (~*rB) ^ *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.530:X:e500:evandc %RS,%RA,%RB:Vector AND with Complement
+	unsigned32 w1, w2;
+	w1 = (~*rBh) & *rAh;
+	w2 = (~*rB) & *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evandc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.539:X:e500:evorc %RS,%RA,%RB:Vector OR with Complement
+	unsigned32 w1, w2;
+	w1 = (~*rBh) | *rAh;
+	w2 = (~*rB) | *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evorc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.552:X:e500:evrlw %RS,%RA,%RB:Vector Rotate Left Word
+	unsigned32 nh, nl, w1, w2;
+	nh = *rBh & 0x1f;
+	nl = *rB & 0x1f;
+	/* Guard against a zero rotate count: >> 32 is undefined in C.  */
+	w1 = nh ? ((unsigned32)*rAh) << nh | ((unsigned32)*rAh) >> (32 - nh) : *rAh;
+	w2 = nl ? ((unsigned32)*rA) << nl | ((unsigned32)*rA) >> (32 - nl) : *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evrlw: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.554:X:e500:evrlwi %RS,%RA,%UIMM:Vector Rotate Left Word Immediate
+	unsigned32 w1, w2, imm;
+	imm = (unsigned32)UIMM;
+	/* Guard against a zero rotate count: >> 32 is undefined in C.  */
+	w1 = imm ? ((unsigned32)*rAh) << imm | ((unsigned32)*rAh) >> (32 - imm) : *rAh;
+	w2 = imm ? ((unsigned32)*rA) << imm | ((unsigned32)*rA) >> (32 - imm) : *rA;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.548:X:e500:evslw %RS,%RA,%RB:Vector Shift Left Word
+	unsigned32 nh, nl, w1, w2;
+	nh = *rBh & 0x1f;
+	nl = *rB & 0x1f;
+	w1 = ((unsigned32)*rAh) << nh;
+	w2 = ((unsigned32)*rA) << nl;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.550:X:e500:evslwi %RS,%RA,%UIMM:Vector Shift Left Word Immediate
+	unsigned32 w1, w2, imm = UIMM;
+	w1 = ((unsigned32)*rAh) << imm;
+	w2 = ((unsigned32)*rA) << imm;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.545:X:e500:evsrws %RS,%RA,%RB:Vector Shift Right Word Signed
+	signed32 w1, w2;
+	unsigned32 nh, nl;
+	nh = *rBh & 0x1f;
+	nl = *rB & 0x1f;
+	w1 = ((signed32)*rAh) >> nh;
+	w2 = ((signed32)*rA) >> nl;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evsrws: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.544:X:e500:evsrwu %RS,%RA,%RB:Vector Shift Right Word Unsigned
+	unsigned32 w1, w2, nh, nl;
+	nh = *rBh & 0x1f;
+	nl = *rB & 0x1f;
+	w1 = ((unsigned32)*rAh) >> nh;
+	w2 = ((unsigned32)*rA) >> nl;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.547:X:e500:evsrwis %RS,%RA,%UIMM:Vector Shift Right Word Immediate Signed
+	signed32 w1, w2;
+	unsigned32 imm = UIMM;
+	w1 = ((signed32)*rAh) >> imm;
+	w2 = ((signed32)*rA) >> imm;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.546:X:e500:evsrwiu %RS,%RA,%UIMM:Vector Shift Right Word Immediate Unsigned
+	unsigned32 w1, w2, imm = UIMM;
+	w1 = ((unsigned32)*rAh) >> imm;
+	w2 = ((unsigned32)*rA) >> imm;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.525:X:e500:evcntlzw %RS,%RA:Vector Count Leading Zeros Word
+	unsigned32 w1, w2, mask, c1, c2;
+	for (c1 = 0, mask = 0x80000000, w1 = *rAh;
+	      !(w1 & mask) && mask != 0; mask >>= 1)
+	  c1++;
+	for (c2 = 0, mask = 0x80000000, w2 = *rA;
+	      !(w2 & mask) && mask != 0; mask >>= 1)
+	  c2++;
+	EV_SET_REG2(*rSh, *rS, c1, c2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.526:X:e500:evcntlsw %RS,%RA:Vector Count Leading Sign Bits Word
+	unsigned32 w1, w2, mask, sign_bit, c1, c2;
+	for (c1 = 0, mask = 0x80000000, w1 = *rAh, sign_bit = w1 & mask;
+	     ((w1 & mask) == sign_bit) && mask != 0;
+	     mask >>= 1, sign_bit >>= 1)
+	  c1++;
+	for (c2 = 0, mask = 0x80000000, w2 = *rA, sign_bit = w2 & mask;
+	     ((w2 & mask) == sign_bit) && mask != 0;
+	     mask >>= 1, sign_bit >>= 1)
+	  c2++;
+	EV_SET_REG2(*rSh, *rS, c1, c2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.524:X:e500:evrndw %RS,%RA:Vector Round Word
+	unsigned32 w1, w2;
+	w1 = ((unsigned32)*rAh + 0x8000) & 0xffff0000;
+	w2 = ((unsigned32)*rA + 0x8000) & 0xffff0000;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+		//printf("evrndw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.556:X:e500:evmergehi %RS,%RA,%RB:Vector Merge Hi
+	unsigned32 w1, w2;
+	w1 = *rAh;
+	w2 = *rBh;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.557:X:e500:evmergelo %RS,%RA,%RB:Vector Merge Low
+	unsigned32 w1, w2;
+	w1 = *rA;
+	w2 = *rB;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.559:X:e500:evmergelohi %RS,%RA,%RB:Vector Merge Low Hi
+	unsigned32 w1, w2;
+	w1 = *rA;
+	w2 = *rBh;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.558:X:e500:evmergehilo %RS,%RA,%RB:Vector Merge Hi Low
+	unsigned32 w1, w2;
+	w1 = *rAh;
+	w2 = *rB;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.SIMM,16.0,21.553:X:e500:evsplati %RS,%SIMM:Vector Splat Immediate
+	unsigned32 w;
+	w = SIMM & 0x1f;
+	if (w & 0x10)
+	  w |= 0xffffffe0;
+	EV_SET_REG2(*rSh, *rS, w, w);
+	PPC_INSN_INT(RS_BITMASK, 0, 0);
+
+0.4,6.RS,11.SIMM,16.0,21.555:X:e500:evsplatfi %RS,%SIMM:Vector Splat Fractional Immediate
+	unsigned32 w;
+	w = SIMM << 27;
+	EV_SET_REG2(*rSh, *rS, w, w);
+	PPC_INSN_INT(RS_BITMASK, 0, 0);
+
+0.4,6.BF,9.0,11.RA,16.RB,21.561:X:e500:evcmpgts %BF,%RA,%RB:Vector Compare Greater Than Signed
+	signed32 ah, al, bh, bl;
+	int w, ch, cl;
+	ah = *rAh;
+	al = *rA;
+	bh = *rBh;
+	bl = *rB;
+	if (ah > bh)
+	  ch = 1;
+	else
+	  ch = 0;
+	if (al > bl)
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
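+# The vector compares here write a 4-bit CR field packed as
+# ch : cl : (ch|cl) : (ch&cl); e.g. a hit in only the high word gives
+# ch = 1, cl = 0, so the field is 0b1010.
+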
+0.4,6.BF,9.0,11.RA,16.RB,21.560:X:e500:evcmpgtu %BF,%RA,%RB:Vector Compare Greater Than Unsigned
+	unsigned32 ah, al, bh, bl;
+	int w, ch, cl;
+	ah = *rAh;
+	al = *rA;
+	bh = *rBh;
+	bl = *rB;
+	if (ah > bh)
+	  ch = 1;
+	else
+	  ch = 0;
+	if (al > bl)
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9.0,11.RA,16.RB,21.563:X:e500:evcmplts %BF,%RA,%RB:Vector Compare Less Than Signed
+	signed32 ah, al, bh, bl;
+	int w, ch, cl;
+	ah = *rAh;
+	al = *rA;
+	bh = *rBh;
+	bl = *rB;
+	if (ah < bh)
+	  ch = 1;
+	else
+	  ch = 0;
+	if (al < bl)
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9.0,11.RA,16.RB,21.562:X:e500:evcmpltu %BF,%RA,%RB:Vector Compare Less Than Unsigned
+	unsigned32 ah, al, bh, bl;
+	int w, ch, cl;
+	ah = *rAh;
+	al = *rA;
+	bh = *rBh;
+	bl = *rB;
+	if (ah < bh)
+	  ch = 1;
+	else
+	  ch = 0;
+	if (al < bl)
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9.0,11.RA,16.RB,21.564:X:e500:evcmpeq %BF,%RA,%RB:Vector Compare Equal
+	unsigned32 ah, al, bh, bl;
+	int w, ch, cl;
+	ah = *rAh;
+	al = *rA;
+	bh = *rBh;
+	bl = *rB;
+	if (ah == bh)
+	  ch = 1;
+	else
+	  ch = 0;
+	if (al == bl)
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+		//printf("evcmpeq: ch %d cl %d BF %d, CR is now %08x\n", ch, cl, BF, CR);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.RS,11.RA,16.RB,21.79,29.CRFS:X:e500:evsel %RS,%RA,%RB,%CRFS:Vector Select
+	unsigned32 w1, w2;
+	int cr;
+	cr = CR_FIELD(CRFS);
+	if (cr & 8)
+	  w1 = *rAh;
+	else
+	  w1 = *rBh;
+	if (cr & 4)
+	  w2 = *rA;
+	else
+	  w2 = *rB;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.527:X:e500:brinc %RS,%RA,%RB:Bit Reversed Increment
+	unsigned32 a, d, mask;
+	mask = (*rB) & 0xffff;
+	a = (*rA) & 0xffff;
+	d = EV_BITREVERSE16(1 + EV_BITREVERSE16(a | ~mask));
+	*rS = ((*rA) & 0xffff0000) | (d & 0xffff);
+		//printf("brinc: *rS = %08x\n", *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
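+# brinc implements one step of a bit-reversed counter, as used for
+# FFT-style addressing: the masked low bits of rA are bit-reversed,
+# incremented, and reversed back.  With mask 0x7, repeated application
+# starting at 0 walks 0, 4, 2, 6, 1, 5, 3, 7.
+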
+#
+# A.2.8 Integer SPE Complex Instructions
+#
+
+0.4,6.RS,11.RA,16.RB,21.1031:EVX:e500:evmhossf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	int movl, movh;
+	
+	al = (signed16) EV_LOHALF (*rA);
+	ah = (signed16) EV_LOHALF (*rAh);
+	bl = (signed16) EV_LOHALF (*rB);
+	bh = (signed16) EV_LOHALF (*rBh);
+	tl = ev_multiply16_ssf (al, bl, &movl);
+	th = ev_multiply16_ssf (ah, bh, &movh);
+	EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
+			        EV_SATURATE (movl, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl, movh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1063:EVX:e500:evmhossfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional Accumulate
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	int movl, movh;
+	
+	al = (signed16) EV_LOHALF (*rA);
+	ah = (signed16) EV_LOHALF (*rAh);
+	bl = (signed16) EV_LOHALF (*rB);
+	bh = (signed16) EV_LOHALF (*rBh);
+	tl = ev_multiply16_ssf (al, bl, &movl);
+	th = ev_multiply16_ssf (ah, bh, &movh);
+	EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
+			        EV_SATURATE (movl, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl, movh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1039:EVX:e500:evmhosmf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	int dummy;
+	
+	al = (signed16) EV_LOHALF (*rA);
+	ah = (signed16) EV_LOHALF (*rAh);
+	bl = (signed16) EV_LOHALF (*rB);
+	bh = (signed16) EV_LOHALF (*rBh);
+	tl = ev_multiply16_smf (al, bl, & dummy);
+	th = ev_multiply16_smf (ah, bh, & dummy);
+	EV_SET_REG2 (*rSh, *rS, th, tl);
+	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1071:EVX:e500:evmhosmfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional Accumulate
+	signed32 al, ah, bl, bh;
+	signed32 tl, th;
+	int dummy;
+	
+	al = (signed16) EV_LOHALF (*rA);
+	ah = (signed16) EV_LOHALF (*rAh);
+	bl = (signed16) EV_LOHALF (*rB);
+	bh = (signed16) EV_LOHALF (*rBh);
+	tl = ev_multiply16_smf (al, bl, & dummy);
+	th = ev_multiply16_smf (ah, bh, & dummy);
+	EV_SET_REG2_ACC (*rSh, *rS, th, tl);
+	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1037:EVX:e500:evmhosmi %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer
+	signed32 al, ah, bl, bh, tl, th;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2(*rSh, *rS, th, tl);
+		//printf("evmhosmi: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1069:EVX:e500:evmhosmia %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer Accumulate
+	signed32 al, ah, bl, bh, tl, th;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
+		//printf("evmhosmia: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1036:EVX:e500:evmhoumi %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer
+	unsigned32 al, ah, bl, bh, tl, th;
+	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2(*rSh, *rS, th, tl);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1068:EVX:e500:evmhoumia %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer Accumulate
+	unsigned32 al, ah, bl, bh, tl, th;
+	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1027:EVX:e500:evmhessf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	int movl, movh;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	tl = ev_multiply16_ssf (al, bl, &movl);
+	th = ev_multiply16_ssf (ah, bh, &movh);
+	EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
+			       EV_SATURATE (movl, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl, movh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1059:EVX:e500:evmhessfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional Accumulate
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	int movl, movh;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	tl = ev_multiply16_ssf (al, bl, &movl);
+	th = ev_multiply16_ssf (ah, bh, &movh);
+	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
+				    EV_SATURATE (movl, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl, movh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1035:EVX:e500:evmhesmf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional
+	signed16 al, ah, bl, bh;
+	signed64 tl, th;
+	int movl, movh;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	tl = ev_multiply16_smf (al, bl, &movl);
+	th = ev_multiply16_smf (ah, bh, &movh);
+	EV_SET_REG2 (*rSh, *rS, th, tl);
+	EV_SET_SPEFSCR_OV (movl, movh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1067:EVX:e500:evmhesmfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional Accumulate
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	int dummy;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	tl = ev_multiply16_smf (al, bl, & dummy);
+	th = ev_multiply16_smf (ah, bh, & dummy);
+	EV_SET_REG2_ACC (*rSh, *rS, th, tl);
+	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1033:EVX:e500:evmhesmi %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer
+	signed16 al, ah, bl, bh;
+	signed32 tl, th;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2 (*rSh, *rS, th, tl);
+	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1065:EVX:e500:evmhesmia %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer Accumulate
+	signed32 al, ah, bl, bh, tl, th;
+	al = (signed32)(signed16)EV_HIHALF(*rA);
+	ah = (signed32)(signed16)EV_HIHALF(*rAh);
+	bl = (signed32)(signed16)EV_HIHALF(*rB);
+	bh = (signed32)(signed16)EV_HIHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1032:EVX:e500:evmheumi %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer
+	unsigned32 al, ah, bl, bh, tl, th;
+	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2(*rSh, *rS, th, tl);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1064:EVX:e500:evmheumia %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer Accumulate
+	unsigned32 al, ah, bl, bh, tl, th;
+	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+	tl = al * bl;
+	th = ah * bh;
+	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1287:EVX:e500:evmhossfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate into Words
+	signed16 al, ah, bl, bh;
+	signed32 t1, t2;
+	signed64 tl, th;
+	int movl, movh, ovl, ovh;
+	
+	al = (signed16) EV_LOHALF (*rA);
+	ah = (signed16) EV_LOHALF (*rAh);
+	bl = (signed16) EV_LOHALF (*rB);
+	bh = (signed16) EV_LOHALF (*rBh);
+	t1 = ev_multiply16_ssf (ah, bh, &movh);
+	t2 = ev_multiply16_ssf (al, bl, &movl);
+	th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
+	tl = EV_ACCLOW  + EV_SATURATE (movl, 0x7fffffff, t2);
+	ovh = EV_SAT_P_S32 (th);
+	ovl = EV_SAT_P_S32 (tl);
+	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
+			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1285:EVX:e500:evmhossiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1295:EVX:e500:evmhosmfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	t1 = ((signed64)ah * bh) << 1;
+	t2 = ((signed64)al * bl) << 1;
+	th = EV_ACCHIGH + (t1 & 0xffffffff);
+	tl = EV_ACCLOW + (t2 & 0xffffffff);
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1293:EVX:e500:evmhosmiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1284:EVX:e500:evmhousiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	signed64 tl, th;
+	int ovl, ovh;
+	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = (signed64)EV_ACCHIGH + (signed64)t1;
+	tl = (signed64)EV_ACCLOW + (signed64)t2;
+	ovh = EV_SAT_P_U32(th);
+	ovl = EV_SAT_P_U32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1292:EVX:e500:evmhoumiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned32 t1, t2;
+	signed64 tl, th;
+	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1283:EVX:e500:evmhessfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate into Words
+	signed16 al, ah, bl, bh;
+	signed32 t1, t2;
+	signed64 tl, th;
+	int movl, movh, ovl, ovh;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	t1 = ev_multiply16_ssf (ah, bh, &movh);
+	t2 = ev_multiply16_ssf (al, bl, &movl);
+	th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
+	tl = EV_ACCLOW  + EV_SATURATE (movl, 0x7fffffff, t2);
+	ovh = EV_SAT_P_S32 (th);
+	ovl = EV_SAT_P_S32 (tl);
+	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
+			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1281:EVX:e500:evmhessiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = (signed32)(signed16)EV_HIHALF(*rA);
+	ah = (signed32)(signed16)EV_HIHALF(*rAh);
+	bl = (signed32)(signed16)EV_HIHALF(*rB);
+	bh = (signed32)(signed16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1291:EVX:e500:evmhesmfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate into Words
+	signed16 al, ah, bl, bh;
+	signed32 t1, t2, th, tl;
+	int dummy;
+
+	al = (signed16)EV_HIHALF(*rA);
+	ah = (signed16)EV_HIHALF(*rAh);
+	bl = (signed16)EV_HIHALF(*rB);
+	bh = (signed16)EV_HIHALF(*rBh);
+	t1 = ev_multiply16_smf (ah, bh, &dummy);
+	t2 = ev_multiply16_smf (al, bl, &dummy);
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1289:EVX:e500:evmhesmiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_HIHALF(*rA);
+	ah = (signed32)(signed16)EV_HIHALF(*rAh);
+	bl = (signed32)(signed16)EV_HIHALF(*rB);
+	bh = (signed32)(signed16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1280:EVX:e500:evmheusiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	signed64 tl, th;
+	int ovl, ovh;
+	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = (signed64)EV_ACCHIGH + (signed64)t1;
+	tl = (signed64)EV_ACCLOW + (signed64)t2;
+	ovh = EV_SAT_P_U32(th);
+	ovl = EV_SAT_P_U32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1288:EVX:e500:evmheumiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned32 t1, t2;
+	unsigned64 tl, th;
+	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH + t1;
+	tl = EV_ACCLOW + t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
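+# The *anw (accumulate negative into words) forms are identical to the
+# *aaw forms above except that each product is subtracted from, rather
+# than added to, its accumulator word before saturation.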
+0.4,6.RS,11.RA,16.RB,21.1415:EVX:e500:evmhossfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate Negative into Words
+	signed16 al, ah, bl, bh;
+	signed32 t1, t2;
+	signed64 tl, th;
+	int movl, movh, ovl, ovh;
+	
+	al = (signed16) EV_LOHALF (*rA);
+	ah = (signed16) EV_LOHALF (*rAh);
+	bl = (signed16) EV_LOHALF (*rB);
+	bh = (signed16) EV_LOHALF (*rBh);
+	t1 = ev_multiply16_ssf (ah, bh, &movh);
+	t2 = ev_multiply16_ssf (al, bl, &movl);
+	th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
+	tl = EV_ACCLOW  - EV_SATURATE (movl, 0x7fffffff, t2);
+	ovh = EV_SAT_P_S32 (th);
+	ovl = EV_SAT_P_S32 (tl);
+	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
+			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1413:EVX:e500:evmhossianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate Negative into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH - t1;
+	tl = EV_ACCLOW - t2;
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1423:EVX:e500:evmhosmfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate Negative into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	t1 = ((signed64)ah * bh) << 1;
+	t2 = ((signed64)al * bl) << 1;
+	th = EV_ACCHIGH - (t1 & 0xffffffff);
+	tl = EV_ACCLOW - (t2 & 0xffffffff);
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1421:EVX:e500:evmhosmianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate Negative into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_LOHALF(*rA);
+	ah = (signed32)(signed16)EV_LOHALF(*rAh);
+	bl = (signed32)(signed16)EV_LOHALF(*rB);
+	bh = (signed32)(signed16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH - t1;
+	tl = EV_ACCLOW - t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1412:EVX:e500:evmhousianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate Negative into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	signed64 tl, th;
+	int ovl, ovh;
+	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = (signed64)EV_ACCHIGH - (signed64)t1;
+	tl = (signed64)EV_ACCLOW - (signed64)t2;
+	ovl = EV_SAT_P_U32(tl);
+	ovh = EV_SAT_P_U32(th);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1420:EVX:e500:evmhoumianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate Negative into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned32 t1, t2;
+	unsigned64 tl, th;
+	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH - t1;
+	tl = EV_ACCLOW - t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1411:EVX:e500:evmhessfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate Negative into Words
+	signed16 al, ah, bl, bh;
+	signed32 t1, t2;
+	signed64 tl, th;
+	int movl, movh, ovl, ovh;
+	
+	al = (signed16) EV_HIHALF (*rA);
+	ah = (signed16) EV_HIHALF (*rAh);
+	bl = (signed16) EV_HIHALF (*rB);
+	bh = (signed16) EV_HIHALF (*rBh);
+	t1 = ev_multiply16_ssf (ah, bh, &movh);
+	t2 = ev_multiply16_ssf (al, bl, &movl);
+	th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
+	tl = EV_ACCLOW  - EV_SATURATE (movl, 0x7fffffff, t2);
+	ovh = EV_SAT_P_S32 (th);
+	ovl = EV_SAT_P_S32 (tl);
+	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
+			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
+	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1409:EVX:e500:evmhessianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate Negative into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = (signed32)(signed16)EV_HIHALF(*rA);
+	ah = (signed32)(signed16)EV_HIHALF(*rAh);
+	bl = (signed32)(signed16)EV_HIHALF(*rB);
+	bh = (signed32)(signed16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH - t1;
+	tl = EV_ACCLOW - t2;
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1419:EVX:e500:evmhesmfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate Negative into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_HIHALF(*rA);
+	ah = (signed32)(signed16)EV_HIHALF(*rAh);
+	bl = (signed32)(signed16)EV_HIHALF(*rB);
+	bh = (signed32)(signed16)EV_HIHALF(*rBh);
+	t1 = ((signed64)ah * bh) << 1;
+	t2 = ((signed64)al * bl) << 1;
+	th = EV_ACCHIGH - (t1 & 0xffffffff);
+	tl = EV_ACCLOW - (t2 & 0xffffffff);
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1417:EVX:e500:evmhesmianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate Negative into Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	al = (signed32)(signed16)EV_HIHALF(*rA);
+	ah = (signed32)(signed16)EV_HIHALF(*rAh);
+	bl = (signed32)(signed16)EV_HIHALF(*rB);
+	bh = (signed32)(signed16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH - t1;
+	tl = EV_ACCLOW - t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1408:EVX:e500:evmheusianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate Negative into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	signed64 tl, th;
+	int ovl, ovh;
+	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = (signed64)EV_ACCHIGH - (signed64)t1;
+	tl = (signed64)EV_ACCLOW - (signed64)t2;
+	ovl = EV_SAT_P_U32(tl);
+	ovh = EV_SAT_P_U32(th);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1416:EVX:e500:evmheumianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate Negative into Words
+	unsigned32 al, ah, bl, bh;
+	unsigned32 t1, t2;
+	unsigned64 tl, th;
+	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
+	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
+	t1 = ah * bh;
+	t2 = al * bl;
+	th = EV_ACCHIGH - t1;
+	tl = EV_ACCLOW - t2;
+	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
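+# The guarded (evmheg*/evmhog*) forms multiply a single pair of half
+# words and accumulate the product into the full 64-bit accumulator.
+# For the fractional variants the EV_MUL16_SSF product is sign-extended
+# from bit 32 before the 64-bit add or subtract.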
+0.4,6.RS,11.RA,16.RB,21.1327:EVX:e500:evmhogsmfaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_LOHALF(*rA);
+	b = (signed32)(signed16)EV_LOHALF(*rB);
+	t1 = EV_MUL16_SSF(a, b);
+	if (t1 & ((unsigned64)1 << 32))
+	  t1 |= 0xfffffffe00000000;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1325:EVX:e500:evmhogsmiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_LOHALF(*rA);
+	b = (signed32)(signed16)EV_LOHALF(*rB);
+	t1 = (signed64)a * (signed64)b;
+	t2 = (signed64)ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1324:EVX:e500:evmhogumiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate
+	unsigned32 a, b;
+	unsigned64 t1, t2;
+	a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	t1 = a * b;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1323:EVX:e500:evmhegsmfaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_HIHALF(*rA);
+	b = (signed32)(signed16)EV_HIHALF(*rB);
+	t1 = EV_MUL16_SSF(a, b);
+	if (t1 & ((unsigned64)1 << 32))
+	  t1 |= 0xfffffffe00000000;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1321:EVX:e500:evmhegsmiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_HIHALF(*rA);
+	b = (signed32)(signed16)EV_HIHALF(*rB);
+	t1 = (signed64)a * (signed64)b;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1320:EVX:e500:evmhegumiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate
+	unsigned32 a, b;
+	unsigned64 t1, t2;
+	a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	t1 = a * b;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1455:EVX:e500:evmhogsmfan %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate Negative
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_LOHALF(*rA);
+	b = (signed32)(signed16)EV_LOHALF(*rB);
+	t1 = EV_MUL16_SSF(a, b);
+	if (t1 & ((unsigned64)1 << 32))
+	  t1 |= 0xfffffffe00000000;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1453:EVX:e500:evmhogsmian %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate Negative
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_LOHALF(*rA);
+	b = (signed32)(signed16)EV_LOHALF(*rB);
+	t1 = (signed64)a * (signed64)b;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1452:EVX:e500:evmhogumian %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate Negative
+	unsigned32 a, b;
+	unsigned64 t1, t2;
+	a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
+	b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
+	t1 = (unsigned64)a * (unsigned64)b;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1451:EVX:e500:evmhegsmfan %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate Negative
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_HIHALF(*rA);
+	b = (signed32)(signed16)EV_HIHALF(*rB);
+	t1 = EV_MUL16_SSF(a, b);
+	if (t1 & ((unsigned64)1 << 32))
+	  t1 |= 0xfffffffe00000000;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1449:EVX:e500:evmhegsmian %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate Negative
+	signed32 a, b;
+	signed64 t1, t2;
+	a = (signed32)(signed16)EV_HIHALF(*rA);
+	b = (signed32)(signed16)EV_HIHALF(*rB);
+	t1 = (signed64)a * (signed64)b;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1448:EVX:e500:evmhegumian %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate Negative
+	unsigned32 a, b;
+	unsigned64 t1, t2;
+	a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
+	b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
+	t1 = (unsigned64)a * (unsigned64)b;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
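+# The word forms come in "high" (evmwh*) and "low" (evmwl*) flavours:
+# both compute 64-bit products of the 32-bit words, then keep either
+# the upper 32 bits (t >> 32) or the lower 32 bits of each product.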
+0.4,6.RS,11.RA,16.RB,21.1095:EVX:e500:evmwhssf %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	int movl, movh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_ssf(al, bl, &movl);
+	t2 = ev_multiply32_ssf(ah, bh, &movh);
+	EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
+			       EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
+	EV_SET_SPEFSCR_OV(movl, movh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1127:EVX:e500:evmwhssfa %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional and Accumulate
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	int movl, movh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_ssf(al, bl, &movl);
+	t2 = ev_multiply32_ssf(ah, bh, &movh);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
+			           EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
+	EV_SET_SPEFSCR_OV(movl, movh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1103:EVX:e500:evmwhsmf %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = EV_MUL32_SSF(al, bl);
+	t2 = EV_MUL32_SSF(ah, bh);
+	EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+	
+0.4,6.RS,11.RA,16.RB,21.1135:EVX:e500:evmwhsmfa %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional and Accumulate
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = EV_MUL32_SSF(al, bl);
+	t2 = EV_MUL32_SSF(ah, bh);
+	EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1101:EVX:e500:evmwhsmi %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (signed64)al * (signed64)bl;
+	t2 = (signed64)ah * (signed64)bh;
+	EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1133:EVX:e500:evmwhsmia %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer and Accumulate
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (signed64)al * (signed64)bl;
+	t2 = (signed64)ah * (signed64)bh;
+	EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1100:EVX:e500:evmwhumi %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)al * (unsigned64)bl;
+	t2 = (unsigned64)ah * (unsigned64)bh;
+	EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1132:EVX:e500:evmwhumia %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer and Accumulate
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)al * (unsigned64)bl;
+	t2 = (unsigned64)ah * (unsigned64)bh;
+	EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1091:EVX:e500:evmwlssf %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	int movl, movh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_ssf(al, bl, &movl);
+	t2 = ev_multiply32_ssf(ah, bh, &movh);
+	EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
+			       EV_SATURATE(movl, 0xffffffff, t1));
+	EV_SET_SPEFSCR_OV(movl, movh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1123:EVX:e500:evmwlssfa %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	int movl, movh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_ssf(al, bl, &movl);
+	t2 = ev_multiply32_ssf(ah, bh, &movh);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
+			           EV_SATURATE(movl, 0xffffffff, t1));
+	EV_SET_SPEFSCR_OV(movl, movh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1099:EVX:e500:evmwlsmf %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = EV_MUL32_SSF(al, bl);
+	t2 = EV_MUL32_SSF(ah, bh);
+	EV_SET_REG2(*rSh, *rS, t2, t1);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1131:EVX:e500:evmwlsmfa %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = EV_MUL32_SSF(al, bl);
+	t2 = EV_MUL32_SSF(ah, bh);
+	EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1096:EVX:e500:evmwlumi %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)al * (unsigned64)bl;
+	t2 = (unsigned64)ah * (unsigned64)bh;
+	EV_SET_REG2(*rSh, *rS, t2, t1);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1128:EVX:e500:evmwlumia %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)al * (unsigned64)bl;
+	t2 = (unsigned64)ah * (unsigned64)bh;
+	EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1347:EVX:e500:evmwlssfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int movl, movh, ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_ssf(ah, bh, &movh);
+	t2 = ev_multiply32_ssf(al, bl, &movl);
+	th = EV_ACCHIGH + EV_SATURATE(movh, 0xffffffff, t1);
+	tl = EV_ACCLOW + EV_SATURATE(movl, 0xffffffff, t2);
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1345:EVX:e500:evmwlssiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (signed64)ah * (signed64)bh;
+	t2 = (signed64)al * (signed64)bl;
+	th = EV_ACCHIGH + (t1 & 0xffffffff);
+	tl = EV_ACCLOW + (t2 & 0xffffffff);
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1355:EVX:e500:evmwlsmfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	int mov;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_smf(ah, bh, &mov);
+	t2 = ev_multiply32_smf(al, bl, &mov);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
+				 EV_ACCLOW + (t2 & 0xffffffff));
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1353:EVX:e500:evmwlsmiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (signed64)ah * (signed64)bh;
+	t2 = (signed64)al * (signed64)bl;
+	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
+				 EV_ACCLOW + (t2 & 0xffffffff));
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1344:EVX:e500:evmwlusiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate in Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)ah * (unsigned64)bh;
+	t2 = (unsigned64)al * (unsigned64)bl;
+	th = EV_ACCHIGH + (t1 & 0xffffffff);
+	tl = EV_ACCLOW + (t2 & 0xffffffff);
+	ovh = (th >> 32);
+	ovl = (tl >> 32);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
+			           EV_SATURATE(ovl, 0xffffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1352:EVX:e500:evmwlumiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate in Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)ah * (unsigned64)bh;
+	t2 = (unsigned64)al * (unsigned64)bl;
+	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
+				 EV_ACCLOW + (t2 & 0xffffffff));
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1475:EVX:e500:evmwlssfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate Negative in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int movl, movh, ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_ssf(ah, bh, &movh);
+	t2 = ev_multiply32_ssf(al, bl, &movl);
+	th = EV_ACCHIGH - EV_SATURATE(movh, 0xffffffff, t1);
+	tl = EV_ACCLOW - EV_SATURATE(movl, 0xffffffff, t2);
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1473:EVX:e500:evmwlssianw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate Negative in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (signed64)ah * (signed64)bh;
+	t2 = (signed64)al * (signed64)bl;
+	th = EV_ACCHIGH - (t1 & 0xffffffff);
+	tl = EV_ACCLOW - (t2 & 0xffffffff);
+	ovh = EV_SAT_P_S32(th);
+	ovl = EV_SAT_P_S32(tl);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
+			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1483:EVX:e500:evmwlsmfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate Negative in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	int mov;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = ev_multiply32_smf(ah, bh, &mov);
+	t2 = ev_multiply32_smf(al, bl, &mov);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
+				 EV_ACCLOW - (t2 & 0xffffffff));
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1481:EVX:e500:evmwlsmianw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate Negative in Words
+	signed32 al, ah, bl, bh;
+	signed64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (signed64)ah * (signed64)bh;
+	t2 = (signed64)al * (signed64)bl;
+	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
+				 EV_ACCLOW - (t2 & 0xffffffff));
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1472:EVX:e500:evmwlusianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate Negative in Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2, tl, th;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)ah * (unsigned64)bh;
+	t2 = (unsigned64)al * (unsigned64)bl;
+	th = EV_ACCHIGH - (t1 & 0xffffffff);
+	tl = EV_ACCLOW - (t2 & 0xffffffff);
+	ovh = (th >> 32);
+	ovl = (tl >> 32);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
+			           EV_SATURATE(ovl, 0xffffffff, tl));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1480:EVX:e500:evmwlumianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate Negative in Words
+	unsigned32 al, ah, bl, bh;
+	unsigned64 t1, t2;
+	al = *rA;
+	ah = *rAh;
+	bl = *rB;
+	bh = *rBh;
+	t1 = (unsigned64)ah * (unsigned64)bh;
+	t2 = (unsigned64)al * (unsigned64)bl;
+	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
+				   EV_ACCLOW - (t2 & 0xffffffff));
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1107:EVX:e500:evmwssf %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional
+	signed32 a, b;
+	signed64 t;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t = ev_multiply32_ssf(a, b, &movl);
+	EV_SET_REG1(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
+	EV_SET_SPEFSCR_OV(movl, 0);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1139:EVX:e500:evmwssfa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate
+	signed32 a, b;
+	signed64 t;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t = ev_multiply32_ssf(a, b, &movl);
+	EV_SET_REG1_ACC(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
+	EV_SET_SPEFSCR_OV(movl, 0);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1115:EVX:e500:evmwsmf %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional
+	signed32 a, b;
+	signed64 t;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t = ev_multiply32_smf(a, b, &movl);
+	EV_SET_REG1(*rSh, *rS, t);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1147:EVX:e500:evmwsmfa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate
+	signed32 a, b;
+	signed64 t;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t = ev_multiply32_smf(a, b, &movl);
+	EV_SET_REG1_ACC(*rSh, *rS, t);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1113:EVX:e500:evmwsmi %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer
+	signed32 a, b;
+	signed64 t;
+	a = *rA;
+	b = *rB;
+	t = (signed64)a * (signed64)b;
+	EV_SET_REG1(*rSh, *rS, t);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1145:EVX:e500:evmwsmia %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate
+	signed32 a, b;
+	signed64 t;
+	a = *rA;
+	b = *rB;
+	t = (signed64)a * (signed64)b;
+	EV_SET_REG1_ACC(*rSh, *rS, t);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1112:EVX:e500:evmwumi %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer
+	unsigned32 a, b;
+	unsigned64 t;
+	a = *rA;
+	b = *rB;
+	t = (unsigned64)a * (unsigned64)b;
+	EV_SET_REG1(*rSh, *rS, t);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1144:EVX:e500:evmwumia %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate
+	unsigned32 a, b;
+	unsigned64 t;
+	a = *rA;
+	b = *rB;
+	t = (unsigned64)a * (unsigned64)b;
+	EV_SET_REG1_ACC(*rSh, *rS, t);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1363:EVX:e500:evmwssfaa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional Add and Accumulate
+	signed64 t1, t2;
+	signed32 a, b;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t1 = ev_multiply32_ssf(a, b, &movl);
+	t2 = ACC + EV_SATURATE(movl, 0x7fffffffffffffff, t1);
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	EV_SET_SPEFSCR_OV(movl, 0);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1371:EVX:e500:evmwsmfaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional Add and Accumulate
+	signed64 t1, t2;
+	signed32 a, b;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t1 = ev_multiply32_smf(a, b, &movl);
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1369:EVX:e500:evmwsmiaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer Add and Accumulate
+	signed64 t1, t2;
+	signed32 a, b;
+	a = *rA;
+	b = *rB;
+	t1 = (signed64)a * (signed64)b;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1368:EVX:e500:evmwumiaa %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer Add and Accumulate
+	unsigned64 t1, t2;
+	unsigned32 a, b;
+	a = *rA;
+	b = *rB;
+	t1 = (unsigned64)a * (unsigned64)b;
+	t2 = ACC + t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.RB,21.1491:EVX:e500:evmwssfan %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate Negative
+	signed64 t1, t2;
+	signed32 a, b;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t1 = ev_multiply32_ssf(a, b, &movl);
+	t2 = ACC - EV_SATURATE(movl, 0x7fffffffffffffff, t1);
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	EV_SET_SPEFSCR_OV(movl, 0);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.1499:EVX:e500:evmwsmfan %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate Negative
+	signed64 t1, t2;
+	signed32 a, b;
+	int movl;
+	a = *rA;
+	b = *rB;
+	t1 = ev_multiply32_smf(a, b, &movl);
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1497:EVX:e500:evmwsmian %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate Negative
+	signed64 t1, t2;
+	signed32 a, b;
+	a = *rA;
+	b = *rB;
+	t1 = (signed64)a * (signed64)b;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.1496:EVX:e500:evmwumian %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate Negative
+	unsigned64 t1, t2;
+	unsigned32 a, b;
+	a = *rA;
+	b = *rB;
+	t1 = (unsigned64)a * (unsigned64)b;
+	t2 = ACC - t1;
+	EV_SET_REG1_ACC(*rSh, *rS, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.0,21.1217:EVX:e500:evaddssiaaw %RS,%RA:Vector Add Signed Saturate Integer to Accumulator Word
+	signed64 t1, t2;
+	signed32 al, ah;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	t1 = (signed64)EV_ACCHIGH + (signed64)ah;
+	t2 = (signed64)EV_ACCLOW + (signed64)al;
+	ovh = EV_SAT_P_S32(t1);
+	ovl = EV_SAT_P_S32(t2);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t1),
+			           EV_SATURATE_ACC(ovl, t2 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t2));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.0,21.1225:EVX:e500:evaddsmiaaw %RS,%RA:Vector Add Signed Modulo Integer to Accumulator Word
+	signed64 t1, t2;
+	signed32 al, ah;
+	al = *rA;
+	ah = *rAh;
+	t1 = (signed64)EV_ACCHIGH + (signed64)ah;
+	t2 = (signed64)EV_ACCLOW + (signed64)al;
+	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.1216:EVX:e500:evaddusiaaw %RS,%RA:Vector Add Unsigned Saturate Integer to Accumulator Word
+	signed64 t1, t2;
+	unsigned32 al, ah;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	t1 = (signed64)EV_ACCHIGH + (signed64)ah;
+	t2 = (signed64)EV_ACCLOW + (signed64)al;
+	ovh = EV_SAT_P_U32(t1);
+	ovl = EV_SAT_P_U32(t2);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, t1),
+				   EV_SATURATE(ovl, 0xffffffff, t2));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.0,21.1224:EVX:e500:evaddumiaaw %RS,%RA:Vector Add Unsigned Modulo Integer to Accumulator Word
+	unsigned64 t1, t2;
+	unsigned32 al, ah;
+	al = *rA;
+	ah = *rAh;
+	t1 = (unsigned64)EV_ACCHIGH + (unsigned64)ah;
+	t2 = EV_ACCLOW + al;
+	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.0,21.1219:EVX:e500:evsubfssiaaw %RS,%RA:Vector Subtract Signed Saturate Integer to Accumulator Word
+	signed64 t1, t2;
+	signed32 al, ah;
+	int ovl, ovh;
+	al = *rA;
+	ah = *rAh;
+	t1 = (signed64)EV_ACCHIGH - (signed64)ah;
+	t2 = (signed64)EV_ACCLOW - (signed64)al;
+	ovh = EV_SAT_P_S32(t1);
+	ovl = EV_SAT_P_S32(t2);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1, 0x80000000, 0x7fffffff, t1),
+			           EV_SATURATE_ACC(ovl, t2, 0x80000000, 0x7fffffff, t2));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.0,21.1227:EVX:e500:evsubfsmiaaw %RS,%RA:Vector Subtract Signed Modulo Integer to Accumulator Word
+	signed64 t1, t2;
+	signed32 al, ah;
+	al = *rA;
+	ah = *rAh;
+	t1 = (signed64)EV_ACCHIGH - (signed64)ah;
+	t2 = (signed64)EV_ACCLOW - (signed64)al;
+	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.1218:EVX:e500:evsubfusiaaw %RS,%RA:Vector Subtract Unsigned Saturate Integer to Accumulator Word
+	signed64 t1, t2;
+	unsigned32 al, ah;
+	int ovl, ovh;
+	
+	al = *rA;
+	ah = *rAh;
+	t1 = (signed64)EV_ACCHIGH - (signed64)ah;
+	t2 = (signed64)EV_ACCLOW - (signed64)al;
+	ovh = EV_SAT_P_U32(t1);
+	ovl = EV_SAT_P_U32(t2);
+	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0, t1),
+			           EV_SATURATE(ovl, 0, t2));
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.0,21.1226:EVX:e500:evsubfumiaaw %RS,%RA:Vector Subtract Unsigned Modulo Integer to Accumulator Word
+	unsigned64 t1, t2;
+	unsigned32 al, ah;
+	al = *rA;
+	ah = *rAh;
+	t1 = (unsigned64)EV_ACCHIGH - (unsigned64)ah;
+	t2 = (unsigned64)EV_ACCLOW - (unsigned64)al;
+	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.0,21.1220:EVX:e500:evmra %RS,%RA:Initialize Accumulator
+	EV_SET_REG2_ACC(*rSh, *rS, *rAh, *rA);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
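+# Division by zero does not trap: evdivws saturates the affected word
+# to 0x7fffffff for a non-negative dividend (including 0/0) or
+# 0x80000000 for a negative one, evdivwu saturates to 0xffffffff, and
+# both record the overflow in SPEFSCR.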
+0.4,6.RS,11.RA,16.RB,21.1222:EVX:e500:evdivws %RS,%RA,%RB:Vector Divide Word Signed
+	signed32 dividendh, dividendl, divisorh, divisorl;
+	signed32 w1, w2;
+	int ovh, ovl;
+	dividendh = *rAh;
+	dividendl = *rA;
+	divisorh = *rBh;
+	divisorl = *rB;
+	if (dividendh < 0 && divisorh == 0) {
+	  w1 = 0x80000000;
+	  ovh = 1;
+	} else if (dividendh >= 0 && divisorh == 0) {
+	  w1 = 0x7fffffff;
+	  ovh = 1;
+	} else if (dividendh == 0x80000000 && divisorh == -1) {
+	  w1 = 0x7fffffff;
+	  ovh = 1;
+	} else {
+	  w1 = dividendh / divisorh;
+	  ovh = 0;
+	}
+	if (dividendl < 0 && divisorl == 0) {
+	  w2 = 0x80000000;
+	  ovl = 1;
+	} else if (dividendl >= 0 && divisorl == 0) {
+	  w2 = 0x7fffffff;
+	  ovl = 1;
+	} else if (dividendl == 0x80000000 && divisorl == -1) {
+	  w2 = 0x7fffffff;
+	  ovl = 1;
+	} else {
+	  w2 = dividendl / divisorl;
+	  ovl = 0;
+	}
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+	
+
+0.4,6.RS,11.RA,16.RB,21.1223:EVX:e500:evdivwu %RS,%RA,%RB:Vector Divide Word Unsigned
+	unsigned32 dividendh, dividendl, divisorh, divisorl;
+	unsigned32 w1, w2;
+	int ovh, ovl;
+	dividendh = *rAh;
+	dividendl = *rA;
+	divisorh = *rBh;
+	divisorl = *rB;
+	if (divisorh == 0) {
+	  w1 = 0xffffffff;
+	  ovh = 1;
+	} else {
+	  w1 = dividendh / divisorh;
+	  ovh = 0;
+	}
+	if (divisorl == 0) {
+	  w2 = 0xffffffff;
+	  ovl = 1;
+	} else {
+	  w2 = dividendl / divisorl;
+	  ovl = 0;
+	}
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	EV_SET_SPEFSCR_OV(ovl, ovh);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+
+#
+# A.2.9 Floating Point SPE Instructions
+#
+
+0.4,6.RS,11.RA,16.0,21.644:EVX:e500:evfsabs %RS,%RA:Vector Floating-Point Absolute Value
+	unsigned32 w1, w2;
+	w1 = *rAh & 0x7fffffff;
+	w2 = *rA & 0x7fffffff;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.645:EVX:e500:evfsnabs %RS,%RA:Vector Floating-Point Negative Absolute Value
+	unsigned32 w1, w2;
+	w1 = *rAh | 0x80000000;
+	w2 = *rA | 0x80000000;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.646:EVX:e500:evfsneg %RS,%RA:Vector Floating-Point Negate
+	unsigned32 w1, w2;
+	w1 = *rAh;
+	w2 = *rA;
+	w1 = (w1 & 0x7fffffff) | ((~w1) & 0x80000000);
+	w2 = (w2 & 0x7fffffff) | ((~w2) & 0x80000000);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.640:EVX:e500:evfsadd %RS,%RA,%RB:Vector Floating-Point Add
+	unsigned32 w1, w2;
+	w1 = ev_fs_add (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
+	w2 = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.641:EVX:e500:evfssub %RS,%RA,%RB:Vector Floating-Point Subtract
+	unsigned32 w1, w2;
+	w1 = ev_fs_sub (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
+	w2 = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.648:EVX:e500:evfsmul %RS,%RA,%RB:Vector Floating-Point Multiply
+	unsigned32 w1, w2;
+	w1 = ev_fs_mul (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
+	w2 = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.649:EVX:e500:evfsdiv %RS,%RA,%RB:Vector Floating-Point Divide
+	signed32 w1, w2;
+	w1 = ev_fs_div (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fdbzh, spefscr_fgh, spefscr_fxh, processor);
+	w2 = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
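+# The compare and test instructions pack their two per-element results
+# into the CR field BF: bit 3 is the high-element result, bit 2 the
+# low-element result, bit 1 their OR and bit 0 their AND.  The evfscmp*
+# forms also raise the SPEFSCR invalid flags for infinities, denormals
+# and NaNs; the evfstst* forms compare the raw values silently.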
+0.4,6.BF,9./,11.RA,16.RB,21.652:EVX:e500:evfscmpgt %BF,%RA,%RB:Vector Floating-Point Compare Greater Than
+	sim_fpu al, ah, bl, bh;
+	int w, ch, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&ah, *rAh);
+	sim_fpu_32to (&bl, *rB);
+	sim_fpu_32to (&bh, *rBh);
+	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
+	  EV_SET_SPEFSCR_BITS(spefscr_finv);
+	if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
+	  EV_SET_SPEFSCR_BITS(spefscr_finvh);
+	if (sim_fpu_is_gt(&ah, &bh))
+	  ch = 1;
+	else
+	  ch = 0;
+	if (sim_fpu_is_gt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.653:EVX:e500:evfscmplt %BF,%RA,%RB:Vector Floating-Point Compare Less Than
+	sim_fpu al, ah, bl, bh;
+	int w, ch, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&ah, *rAh);
+	sim_fpu_32to (&bl, *rB);
+	sim_fpu_32to (&bh, *rBh);
+	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
+	  EV_SET_SPEFSCR_BITS(spefscr_finv);
+	if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
+	  EV_SET_SPEFSCR_BITS(spefscr_finvh);
+	if (sim_fpu_is_lt(&ah, &bh))
+	  ch = 1;
+	else
+	  ch = 0;
+	if (sim_fpu_is_lt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.654:EVX:e500:evfscmpeq %BF,%RA,%RB:Vector Floating-Point Compare Equal
+	sim_fpu al, ah, bl, bh;
+	int w, ch, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&ah, *rAh);
+	sim_fpu_32to (&bl, *rB);
+	sim_fpu_32to (&bh, *rBh);
+	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
+	  EV_SET_SPEFSCR_BITS(spefscr_finv);
+	if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
+	  EV_SET_SPEFSCR_BITS(spefscr_finvh);
+	if (sim_fpu_is_eq(&ah, &bh))
+	  ch = 1;
+	else
+	  ch = 0;
+	if (sim_fpu_is_eq(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.668:EVX:e500:evfststgt %BF,%RA,%RB:Vector Floating-Point Test Greater Than
+	sim_fpu al, ah, bl, bh;
+	int w, ch, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&ah, *rAh);
+	sim_fpu_32to (&bl, *rB);
+	sim_fpu_32to (&bh, *rBh);
+	if (sim_fpu_is_gt(&ah, &bh))
+	  ch = 1;
+	else
+	  ch = 0;
+	if (sim_fpu_is_gt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9./,11.RA,16.RB,21.669:EVX:e500:evfststlt %BF,%RA,%RB:Vector Floating-Point Test Less Than
+	sim_fpu al, ah, bl, bh;
+	int w, ch, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&ah, *rAh);
+	sim_fpu_32to (&bl, *rB);
+	sim_fpu_32to (&bh, *rBh);
+	if (sim_fpu_is_lt(&ah, &bh))
+	  ch = 1;
+	else
+	  ch = 0;
+	if (sim_fpu_is_lt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9./,11.RA,16.RB,21.670:EVX:e500:evfststeq %BF,%RA,%RB:Vector Floating-Point Test Equal
+	sim_fpu al, ah, bl, bh;
+	int w, ch, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&ah, *rAh);
+	sim_fpu_32to (&bl, *rB);
+	sim_fpu_32to (&bh, *rBh);
+	if (sim_fpu_is_eq(&ah, &bh))
+	  ch = 1;
+	else
+	  ch = 0;
+	if (sim_fpu_is_eq(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.RS,11.0,16.RB,21.656:EVX:e500:evfscfui %RS,%RB:Vector Convert Floating-Point from Unsigned Integer
+	unsigned32 w1, w2;
+	sim_fpu b;
+	
+	sim_fpu_u32to (&b, *rBh, sim_fpu_round_default);
+	sim_fpu_to32 (&w1, &b);
+	sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
+	sim_fpu_to32 (&w2, &b);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.664:EVX:e500:evfsctuiz %RS,%RB:Vector Convert Floating-Point to Unsigned Integer with Round toward Zero
+	unsigned32 w1, w2;
+	sim_fpu b;
+	
+	sim_fpu_32to (&b, *rBh);
+	sim_fpu_to32u (&w1, &b, sim_fpu_round_zero);
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
+
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.657:EVX:e500:evfscfsi %RS,%RB:Vector Convert Floating-Point from Signed Integer
+	signed32 w1, w2;
+	sim_fpu b;
+	
+	sim_fpu_i32to (&b, *rBh, sim_fpu_round_default);
+	sim_fpu_to32 (&w1, &b);
+	sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
+	sim_fpu_to32 (&w2, &b);
+
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
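+# The fractional conversions treat a 32-bit integer as a fixed-point
+# fraction: unsigned fractions are scaled by 2^32 (0x100000000) and
+# signed fractions by 2^31 (0x80000000), dividing when converting from
+# a fraction and multiplying when converting to one.  The all-ones
+# unsigned input is special-cased to exactly 1.0.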
+0.4,6.RS,11.0,16.RB,21.658:EVX:e500:evfscfuf %RS,%RB:Vector Convert Floating-Point from Unsigned Fraction
+	unsigned32 w1, w2, bh, bl;
+	sim_fpu b, x, y;
+	bh = *rBh;
+	if (bh == 0xffffffff)
+	  sim_fpu_to32 (&w1, &sim_fpu_one);
+	else {
+	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
+	  sim_fpu_u32to (&y, bh, sim_fpu_round_default);
+	  sim_fpu_div (&b, &y, &x);
+	  sim_fpu_to32 (&w1, &b);
+	}
+	bl = *rB;
+	if (bl == 0xffffffff)
+	  sim_fpu_to32 (&w2, &sim_fpu_one);
+	else {
+	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
+	  sim_fpu_u32to (&y, bl, sim_fpu_round_default);
+	  sim_fpu_div (&b, &y, &x);
+	  sim_fpu_to32 (&w2, &b);
+	}
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.659:EVX:e500:evfscfsf %RS,%RB:Vector Convert Floating-Point from Signed Fraction
+	unsigned32 w1, w2;
+	sim_fpu b, x, y;
+	
+	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
+	sim_fpu_i32to (&y, *rBh, sim_fpu_round_default);
+	sim_fpu_div (&b, &y, &x);
+	sim_fpu_to32 (&w1, &b);
+	
+	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
+	sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
+	sim_fpu_div (&b, &y, &x);
+	sim_fpu_to32 (&w2, &b);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.660:EVX:e500:evfsctui %RS,%RB:Vector Convert Floating-Point to Unsigned Integer
+	unsigned32 w1, w2;
+	sim_fpu b;
+	
+	sim_fpu_32to (&b, *rBh);
+	sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.661:EVX:e500:evfsctsi %RS,%RB:Vector Convert Floating-Point to Signed Integer
+	signed32 w1, w2;
+	sim_fpu b;
+	
+	sim_fpu_32to (&b, *rBh);
+	sim_fpu_to32i (&w1, &b, sim_fpu_round_default);
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.666:EVX:e500:evfsctsiz %RS,%RB:Vector Convert Floating-Point to Signed Integer with Round toward Zero
+	signed32 w1, w2;
+	sim_fpu b;
+	
+	sim_fpu_32to (&b, *rBh);
+	sim_fpu_to32i (&w1, &b, sim_fpu_round_zero);
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.662:EVX:e500:evfsctuf %RS,%RB:Vector Convert Floating-Point to Unsigned Fraction
+	unsigned32 w1, w2;
+	sim_fpu b, x, y;
+	
+	sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
+	sim_fpu_32to (&y, *rBh);
+	sim_fpu_mul (&b, &y, &x);
+	sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
+	
+	sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
+	sim_fpu_32to (&y, *rB);
+	sim_fpu_mul (&b, &y, &x);
+	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.663:EVX:e500:evfsctsf %RS,%RB:Vector Convert Floating-Point to Signed Fraction
+	signed32 w1, w2;
+	sim_fpu b, x, y;
+	
+	sim_fpu_32to (&y, *rBh);
+	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
+	sim_fpu_mul (&b, &y, &x);
+	sim_fpu_to32i (&w1, &b, sim_fpu_round_near);
+	
+	sim_fpu_32to (&y, *rB);
+	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
+	sim_fpu_mul (&b, &y, &x);
+	sim_fpu_to32i (&w2, &b, sim_fpu_round_near);
+	
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.0,21.708:EVX:e500:efsabs %RS,%RA:Floating-Point Absolute Value
+	unsigned32 w1, w2;
+	w1 = *rSh;
+	w2 = *rA & 0x7fffffff;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.709:EVX:e500:efsnabs %RS,%RA:Floating-Point Negative Absolute Value
+	unsigned32 w1, w2;
+	w1 = *rSh;
+	w2 = *rA | 0x80000000;
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.0,21.710:EVX:e500:efsneg %RS,%RA:Floating-Point Negate
+	unsigned32 w1, w2;
+	w1 = *rSh;
+	w2 = (*rA & 0x7fffffff) | ((~*rA) & 0x80000000);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.RB,21.704:EVX:e500:efsadd %RS,%RA,%RB:Floating-Point Add
+	unsigned32 w;
+	w = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
+	EV_SET_REG(*rS, w);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.705:EVX:e500:efssub %RS,%RA,%RB:Floating-Point Subtract
+	unsigned32 w;
+	w = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
+	EV_SET_REG(*rS, w);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.712:EVX:e500:efsmul %RS,%RA,%RB:Floating-Point Multiply
+	unsigned32 w;
+	w = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
+	EV_SET_REG(*rS, w);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.RS,11.RA,16.RB,21.713:EVX:e500:efsdiv %RS,%RA,%RB:Floating-Point Divide
+	unsigned32 w;
+	w = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
+	EV_SET_REG(*rS, w);
+	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.716:EVX:e500:efscmpgt %BF,%RA,%RB:Floating-Point Compare Greater Than
+	sim_fpu a, b;
+	int w, cl;
+	sim_fpu_32to (&a, *rA);
+	sim_fpu_32to (&b, *rB);
+	if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
+	  EV_SET_SPEFSCR_BITS(spefscr_finv);
+	if (sim_fpu_is_gt(&a, &b))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = cl << 2 | cl << 1;
+	CR_SET(BF, w);
+	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.717:EVX:e500:efscmplt %BF,%RA,%RB:Floating-Point Compare Less Than
+	sim_fpu al, bl;
+	int w, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&bl, *rB);
+	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
+	  EV_SET_SPEFSCR_BITS(spefscr_finv);
+	if (sim_fpu_is_lt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = cl << 2 | cl << 1;
+	CR_SET(BF, w);
+	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.718:EVX:e500:efscmpeq %BF,%RA,%RB:Floating-Point Compare Equal
+	sim_fpu al, bl;
+	int w, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&bl, *rB);
+	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
+	  EV_SET_SPEFSCR_BITS(spefscr_finv);
+	if (sim_fpu_is_eq(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = cl << 2 | cl << 1;
+	CR_SET(BF, w);
+	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
+
+0.4,6.BF,9./,11.RA,16.RB,21.732:EVX:e500:efststgt %BF,%RA,%RB:Floating-Point Test Greater Than
+	sim_fpu al, bl;
+	int w, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&bl, *rB);
+	if (sim_fpu_is_gt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = cl << 2 | cl << 1;
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9./,11.RA,16.RB,21.733:EVX:e500:efststlt %BF,%RA,%RB:Floating-Point Test Less Than
+	sim_fpu al, bl;
+	int w, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&bl, *rB);
+	if (sim_fpu_is_lt(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = cl << 2 | cl << 1;
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.BF,9./,11.RA,16.RB,21.734:EVX:e500:efststeq %BF,%RA,%RB:Floating-Point Test Equal
+	sim_fpu al, bl;
+	int w, cl;
+	sim_fpu_32to (&al, *rA);
+	sim_fpu_32to (&bl, *rB);
+	if (sim_fpu_is_eq(&al, &bl))
+	  cl = 1;
+	else
+	  cl = 0;
+	w = cl << 2 | cl << 1;
+	CR_SET(BF, w);
+	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
+
+0.4,6.RS,11.0,16.RB,21.721:EVX:e500:efscfsi %RS,%RB:Convert Floating-Point from Signed Integer
+	unsigned32 w1, w2;
+	sim_fpu b;
+	w1 = *rSh;
+	sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
+	sim_fpu_to32 (&w2, &b);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.720:EVX:e500:efscfui %RS,%RB:Convert Floating-Point from Unsigned Integer
+	unsigned32 w1, w2;
+	sim_fpu b;
+	w1 = *rSh;
+	sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
+	sim_fpu_to32 (&w2, &b);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.723:EVX:e500:efscfsf %RS,%RB:Convert Floating-Point from Signed Fraction
+	unsigned32 w1, w2;
+	sim_fpu b, x, y;
+	w1 = *rSh;
+	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
+	sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
+	sim_fpu_div (&b, &y, &x);
+	sim_fpu_to32 (&w2, &b);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.722:EVX:e500:efscfuf %RS,%RB:Convert Floating-Point from Unsigned Fraction
+	unsigned32 w1, w2, bl;
+	sim_fpu b, x, y;
+	w1 = *rSh;
+	bl = *rB;
+	if (bl == 0xffffffff)
+	  sim_fpu_to32 (&w2, &sim_fpu_one);
+	else {
+	  sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
+	  sim_fpu_u32to (&y, bl, sim_fpu_round_default);
+	  sim_fpu_div (&b, &y, &x);
+	  sim_fpu_to32 (&w2, &b);
+	}
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.725:EVX:e500:efsctsi %RS,%RB:Convert Floating-Point to Signed Integer
+	signed32 w1, w2;
+	sim_fpu b;
+	w1 = *rSh;
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.730:EVX:e500:efsctsiz %RS,%RB:Convert Floating-Point to Signed Integer with Round toward Zero
+	signed32 w1, w2;
+	sim_fpu b;
+	w1 = *rSh;
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.724:EVX:e500:efsctui %RS,%RB:Convert Floating-Point to Unsigned Integer
+	unsigned32 w1, w2;
+	sim_fpu b;
+	w1 = *rSh;
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.728:EVX:e500:efsctuiz %RS,%RB:Convert Floating-Point to Unsigned Integer with Round toward Zero
+	unsigned32 w1, w2;
+	sim_fpu b;
+	w1 = *rSh;
+	sim_fpu_32to (&b, *rB);
+	sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.727:EVX:e500:efsctsf %RS,%RB:Convert Floating-Point to Signed Fraction
+	signed32 w1, w2;
+	sim_fpu b, x, y;
+	w1 = *rSh;
+	sim_fpu_32to (&y, *rB);
+	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
+	sim_fpu_mul (&b, &y, &x);
+	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+0.4,6.RS,11.0,16.RB,21.726:EVX:e500:efsctuf %RS,%RB:Convert Floating-Point to Unsigned Fraction
+	unsigned32 w1, w2;
+	sim_fpu b, x, y;
+	w1 = *rSh;
+	sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
+	sim_fpu_32to (&y, *rB);
+	sim_fpu_mul (&b, &y, &x);
+	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
+
+
+#
+# A.2.10 Vector Load/Store Instructions
+#
+
+0.4,6.RS,11.RA,16.UIMM,21.769:EVX:e500:evldd %RS,%RA,%UIMM:Vector Load Double Word into Double Word
+	unsigned64 m;
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 3);
+	m = MEM(unsigned, EA, 8);
+	EV_SET_REG1(*rSh, *rS, m);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.768:EVX:e500:evlddx %RS,%RA,%RB:Vector Load Double Word into Double Word Indexed
+	unsigned64 m;
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	m = MEM(unsigned, EA, 8);
+	EV_SET_REG1(*rSh, *rS, m);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.771:EVX:e500:evldw %RS,%RA,%UIMM:Vector Load Double into Two Words
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w1, w2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 3);
+	w1 = MEM(unsigned, EA, 4);
+	w2 = MEM(unsigned, EA + 4, 4);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.770:EVX:e500:evldwx %RS,%RA,%RB:Vector Load Double into Two Words Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w1, w2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	w1 = MEM(unsigned, EA, 4);
+	w2 = MEM(unsigned, EA + 4, 4);
+	EV_SET_REG2(*rSh, *rS, w1, w2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.773:EVX:e500:evldh %RS,%RA,%UIMM:Vector Load Double into 4 Half Words
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 3);
+	h1 = MEM(unsigned, EA, 2);
+	h2 = MEM(unsigned, EA + 2, 2);
+	h3 = MEM(unsigned, EA + 4, 2);
+	h4 = MEM(unsigned, EA + 6, 2);
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.772:EVX:e500:evldhx %RS,%RA,%RB:Vector Load Double into 4 Half Words Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = MEM(unsigned, EA, 2);
+	h2 = MEM(unsigned, EA + 2, 2);
+	h3 = MEM(unsigned, EA + 4, 2);
+	h4 = MEM(unsigned, EA + 6, 2);
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.785:EVX:e500:evlwhe %RS,%RA,%UIMM:Vector Load Word into Two Half Words Even
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	h1 = MEM(unsigned, EA, 2);
+	h2 = 0;
+	h3 = MEM(unsigned, EA + 2, 2);
+	h4 = 0;
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.784:EVX:e500:evlwhex %RS,%RA,%RB:Vector Load Word into Two Half Words Even Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = MEM(unsigned, EA, 2);
+	h2 = 0;
+	h3 = MEM(unsigned, EA + 2, 2);
+	h4 = 0;
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.789:EVX:e500:evlwhou %RS,%RA,%UIMM:Vector Load Word into Two Half Words Odd Unsigned zero-extended
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	h1 = 0;
+	h2 = MEM(unsigned, EA, 2);
+	h3 = 0;
+	h4 = MEM(unsigned, EA + 2, 2);
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.788:EVX:e500:evlwhoux %RS,%RA,%RB:Vector Load Word into Two Half Words Odd Unsigned Indexed zero-extended
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = 0;
+	h2 = MEM(unsigned, EA, 2);
+	h3 = 0;
+	h4 = MEM(unsigned, EA + 2, 2);
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.791:EVX:e500:evlwhos %RS,%RA,%UIMM:Vector Load Word into Half Words Odd Signed with sign extension
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	h2 = MEM(unsigned, EA, 2);
+	if (h2 & 0x8000)
+	  h1 = 0xffff;
+	else
+	  h1 = 0;
+	h4 = MEM(unsigned, EA + 2, 2);
+	if (h4 & 0x8000)
+	  h3 = 0xffff;
+	else
+	  h3 = 0;
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.790:EVX:e500:evlwhosx %RS,%RA,%RB:Vector Load Word into Half Words Odd Signed Indexed with sign extension
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h2 = MEM(unsigned, EA, 2);
+	if (h2 & 0x8000)
+	  h1 = 0xffff;
+	else
+	  h1 = 0;
+	h4 = MEM(unsigned, EA + 2, 2);
+	if (h4 & 0x8000)
+	  h3 = 0xffff;
+	else
+	  h3 = 0;
+	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.793:EVX:e500:evlwwsplat %RS,%RA,%UIMM:Vector Load Word into Word and Splat
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w1;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	w1 = MEM(unsigned, EA, 4);
+	EV_SET_REG2(*rSh, *rS, w1, w1);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.792:EVX:e500:evlwwsplatx %RS,%RA,%RB:Vector Load Word into Word and Splat Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w1;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	w1 = MEM(unsigned, EA, 4);
+	EV_SET_REG2(*rSh, *rS, w1, w1);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.797:EVX:e500:evlwhsplat %RS,%RA,%UIMM:Vector Load Word into 2 Half Words and Splat
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	h1 = MEM(unsigned, EA, 2);
+	h2 = MEM(unsigned, EA + 2, 2);
+	EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.796:EVX:e500:evlwhsplatx %RS,%RA,%RB:Vector Load Word into 2 Half Words and Splat Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = MEM(unsigned, EA, 2);
+	h2 = MEM(unsigned, EA + 2, 2);
+	EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.777:EVX:e500:evlhhesplat %RS,%RA,%UIMM:Vector Load Half Word into Half Words Even and Splat
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 1);
+	h = MEM(unsigned, EA, 2);
+	EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.776:EVX:e500:evlhhesplatx %RS,%RA,%RB:Vector Load Half Word into Half Words Even and Splat Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h = MEM(unsigned, EA, 2);
+	EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.781:EVX:e500:evlhhousplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Unsigned and Splat
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 1);
+	h = MEM(unsigned, EA, 2);
+	EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.780:EVX:e500:evlhhousplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h = MEM(unsigned, EA, 2);
+	EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.783:EVX:e500:evlhhossplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Signed and Splat
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 1);
+	h2 = MEM(unsigned, EA, 2);
+	if (h2 & 0x8000)
+	  h1 = 0xffff;
+	else
+	  h1 = 0;
+	EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.782:EVX:e500:evlhhossplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Signed and Splat Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h2 = MEM(unsigned, EA, 2);
+	if (h2 & 0x8000)
+	  h1 = 0xffff;
+	else
+	  h1 = 0;
+	EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+
+0.4,6.RS,11.RA,16.UIMM,21.801:EVX:e500:evstdd %RS,%RA,%UIMM:Vector Store Double of Double
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 3);
+	STORE(EA, 4, (*rSh));
+	STORE(EA + 4, 4, (*rS));
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.800:EVX:e500:evstddx %RS,%RA,%RB:Vector Store Double of Double Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	STORE(EA, 4, (*rSh));
+	STORE(EA + 4, 4, (*rS));
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.803:EVX:e500:evstdw %RS,%RA,%UIMM:Vector Store Double of Two Words
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w1, w2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 3);
+	w1 = *rSh;
+	w2 = *rS;
+	STORE(EA + 0, 4, w1);
+	STORE(EA + 4, 4, w2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.802:EVX:e500:evstdwx %RS,%RA,%RB:Vector Store Double of Two Words Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w1, w2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	w1 = *rSh;
+	w2 = *rS;
+	STORE(EA + 0, 4, w1);
+	STORE(EA + 4, 4, w2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.805:EVX:e500:evstdh %RS,%RA,%UIMM:Vector Store Double of Four Half Words
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 3);
+	h1 = EV_HIHALF(*rSh);
+	h2 = EV_LOHALF(*rSh);
+	h3 = EV_HIHALF(*rS);
+	h4 = EV_LOHALF(*rS);
+	STORE(EA + 0, 2, h1);
+	STORE(EA + 2, 2, h2);
+	STORE(EA + 4, 2, h3);
+	STORE(EA + 6, 2, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.804:EVX:e500:evstdhx %RS,%RA,%RB:Vector Store Double of Four Half Words Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2, h3, h4;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = EV_HIHALF(*rSh);
+	h2 = EV_LOHALF(*rSh);
+	h3 = EV_HIHALF(*rS);
+	h4 = EV_LOHALF(*rS);
+	STORE(EA + 0, 2, h1);
+	STORE(EA + 2, 2, h2);
+	STORE(EA + 4, 2, h3);
+	STORE(EA + 6, 2, h4);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.825:EVX:e500:evstwwe %RS,%RA,%UIMM:Vector Store Word of Word from Even
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	w = *rSh;
+	STORE(EA, 4, w);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.824:EVX:e500:evstwwex %RS,%RA,%RB:Vector Store Word of Word from Even Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	w = *rSh;
+	STORE(EA, 4, w);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.829:EVX:e500:evstwwo %RS,%RA,%UIMM:Vector Store Word of Word from Odd
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	w = *rS;
+	STORE(EA, 4, w);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.828:EVX:e500:evstwwox %RS,%RA,%RB:Vector Store Word of Word from Odd Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned32 w;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	w = *rS;
+	STORE(EA, 4, w);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.817:EVX:e500:evstwhe %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Even
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	h1 = EV_HIHALF(*rSh);
+	h2 = EV_HIHALF(*rS);
+	STORE(EA + 0, 2, h1);
+	STORE(EA + 2, 2, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.816:EVX:e500:evstwhex %RS,%RA,%RB:Vector Store Word of Two Half Words from Even Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = EV_HIHALF(*rSh);
+	h2 = EV_HIHALF(*rS);
+	STORE(EA + 0, 2, h1);
+	STORE(EA + 2, 2, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+0.4,6.RS,11.RA,16.UIMM,21.821:EVX:e500:evstwho %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Odd
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + (UIMM << 2);
+	h1 = EV_LOHALF(*rSh);
+	h2 = EV_LOHALF(*rS);
+	STORE(EA + 0, 2, h1);
+	STORE(EA + 2, 2, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
+
+0.4,6.RS,11.RA,16.RB,21.820:EVX:e500:evstwhox %RS,%RA,%RB:Vector Store Word of Two Half Words from Odd Indexed
+	unsigned_word b;
+	unsigned_word EA;
+	unsigned16 h1, h2;
+	if (RA_is_0) b = 0;
+	else         b = *rA;
+	EA = b + *rB;
+	h1 = EV_LOHALF(*rSh);
+	h2 = EV_LOHALF(*rS);
+	STORE(EA + 0, 2, h1);
+	STORE(EA + 2, 2, h2);
+	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
+
+
+#
+# 4.5.1 Integer Select Instruction
+#
+
+0.31,6.RS,11.RA,16.RB,21.CRB,26.30:X:e500:isel %RS,%RA,%RB,%CRB:Integer Select
+	if (CR & (1 << (31 - (unsigned)CRB))) {
+	  if (RA_is_0)
+	    EV_SET_REG1(*rSh, *rS, 0);
+	  else
+	    EV_SET_REG2(*rSh, *rS, *rAh, *rA);
+	} else
+	  EV_SET_REG2(*rSh, *rS, *rBh, *rB);
+	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
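
A note on the record format, for readers new to igen: each entry above is
<fields>:<form>:<isa>:<mnemonic>:<description>, where the fields are
bit-position.name pairs in PowerPC bit numbering (bit 0 is the most
significant bit of the 32-bit word), and the indented C body becomes the
generated semantic routine.  A rough standalone sketch of the field layout
the isel record describes (the FIELD macro and the sample encoding are my
own illustration, not igen's actual output):

#include <stdio.h>
#include <stdint.h>

/* Extract instruction bits first..last, numbered from the MSB, as in
   the 0.31,6.RS,11.RA,16.RB,21.CRB,26.30 record above.  */
#define FIELD(insn, first, last) \
  (((insn) >> (31 - (last))) & ((1u << ((last) - (first) + 1)) - 1))

int main(void)
{
  uint32_t insn = (31u << 26)   /* primary opcode 31, bits 0..5 */
                | (9u << 21)    /* RS = 9,  bits 6..10  */
                | (3u << 16)    /* RA = 3,  bits 11..15 */
                | (4u << 11)    /* RB = 4,  bits 16..20 */
                | (2u << 6)     /* CRB = 2, bits 21..25 */
                | (30u << 1);   /* extended opcode 30, bits 26..30 */
  printf ("opcd=%u RS=%u RA=%u RB=%u CRB=%u xo=%u\n",
          FIELD (insn, 0, 5), FIELD (insn, 6, 10), FIELD (insn, 11, 15),
          FIELD (insn, 16, 20), FIELD (insn, 21, 25), FIELD (insn, 26, 30));
  return 0;
}

Compiling and running this prints opcd=31 RS=9 RA=3 RB=4 CRB=2 xo=30 for
the encoding built in main.
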
Index: e500_expression.h
===================================================================
RCS file: e500_expression.h
diff -N e500_expression.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ e500_expression.h	22 Jun 2003 16:24:08 -0000
@@ -0,0 +1,173 @@
+/* e500 expression macros, for PSIM, the PowerPC simulator.
+
+   Copyright 2003 Free Software Foundation, Inc.
+
+   Contributed by Red Hat Inc; developed under contract from Motorola.
+   Written by matthew green <mrg@redhat.com>.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* e500 register dance */
+#define EV_SET_REG4(sh, sl, h0, h1, h2, h3) do { \
+	(sh) = (((h0) & 0xffff) << 16) | ((h1) & 0xffff); \
+	(sl) = (((h2) & 0xffff) << 16) | ((h3) & 0xffff); \
+} while (0)
+#define EV_SET_REG4_ACC(sh, sl, h0, h1, h2, h3) do { \
+	(sh) = (((h0) & 0xffff) << 16) | ((h1) & 0xffff); \
+	(sl) = (((h2) & 0xffff) << 16) | ((h3) & 0xffff); \
+	ACC = ((unsigned64)(sh) << 32) | ((sl) & 0xffffffff); \
+} while (0)
+
+#define EV_SET_REG2(sh, sl, dh, dl) do { \
+	(sh) = (dh) & 0xffffffff; \
+	(sl) = (dl) & 0xffffffff; \
+} while (0)
+#define EV_SET_REG2_ACC(sh, sl, dh, dl) do { \
+	(sh) = (dh) & 0xffffffff; \
+	(sl) = (dl) & 0xffffffff; \
+	ACC = ((unsigned64)(sh) << 32) | ((sl) & 0xffffffff); \
+} while (0)
+
+#define EV_SET_REG1(sh, sl, d) do { \
+	(sh) = ((unsigned64)(d) >> 32) & 0xffffffff; \
+	(sl) = (d) & 0xffffffff; \
+} while (0)
+#define EV_SET_REG1_ACC(sh, sl, d) do { \
+	(sh) = ((unsigned64)(d) >> 32) & 0xffffffff; \
+	(sl) = (d) & 0xffffffff; \
+	ACC = (d); \
+} while (0)
+
+#define EV_SET_REG(s, d) do { \
+	(s) = (d) & 0xffffffff; \
+} while (0)
+
+/* get the low or high half word of a word */
+#define EV_LOHALF(x)	((unsigned32)(x) & 0xffff)
+#define EV_HIHALF(x)	(((unsigned32)(x) >> 16) & 0xffff)
+
+/* partially visible accumulator accessors */
+#define EV_SET_ACC(rh, rl) \
+	ACC = ((unsigned64)(rh) << 32) | ((rl) & 0xffffffff)
+
+#define EV_ACCLOW	(ACC & 0xffffffff)
+#define EV_ACCHIGH	((ACC >> 32) & 0xffffffff)
+
+/* bit manipulation macros needed for e500 SPE */
+#define EV_BITREVERSE16(x) \
+		  (((x) & 0x0001) << 15) \
+		| (((x) & 0x0002) << 13) \
+		| (((x) & 0x0004) << 11) \
+		| (((x) & 0x0008) << 9) \
+		| (((x) & 0x0010) << 7) \
+		| (((x) & 0x0020) << 5) \
+		| (((x) & 0x0040) << 3) \
+		| (((x) & 0x0080) << 1) \
+		| (((x) & 0x0100) >> 1) \
+		| (((x) & 0x0200) >> 3) \
+		| (((x) & 0x0400) >> 5) \
+		| (((x) & 0x0800) >> 7) \
+		| (((x) & 0x1000) >> 9) \
+		| (((x) & 0x2000) >> 11) \
+		| (((x) & 0x4000) >> 13) \
+		| (((x) & 0x8000) >> 15)
+
+/* saturation helpers */
+#define EV_MUL16_SSF(a,b)	((signed64)((signed32)(signed16)(a) * (signed32)(signed16)(b)) << 1)
+/* this one loses the top sign bit; be careful */
+#define EV_MUL32_SSF(a,b)	(((signed64)(signed32)(a) * (signed64)(signed32)(b)) << 1)
+#define EV_SAT_P_S32(x)		((((signed64)(x)) < -0x80000000LL) || (((signed64)(x)) > 0x7fffffffLL))
+#define EV_SAT_P_U32(x)		((((signed64)(x)) < 0LL) || (((signed64)(x)) > 0xffffffffLL))
+
+#define EV_SATURATE(flag, sat_val, val) \
+	((flag) ? (sat_val) : (val))
+
+#define EV_SATURATE_ACC(flag, sign, negative_sat_val, positive_sat_val, val) \
+	((flag) ? ((((sign) >> 63) & 1) ? (negative_sat_val) : (positive_sat_val)) : (val))
+
+/* SPEFSCR handling.  */
+
+/* These bits must be clear.  */
+#define EV_SPEFSCR_MASK (BIT(40) | BIT(41) | spefscr_mode | BIT(56))
+
+/* The Inexact and Divide by zero sticky bits are based on others.  */
+#define EV_SET_SPEFSCR(bits) do { \
+  int finxs = (bits) & (spefscr_fgh|spefscr_fxh|spefscr_fg|spefscr_fx); \
+  int fdbzs = (bits) & (spefscr_fdbzh|spefscr_fdbz); \
+  SPREG(spr_spefscr) = ((bits) & ~EV_SPEFSCR_MASK) | \
+		       (finxs ? spefscr_finxs : 0) | \
+		       (fdbzs ? spefscr_fdbzs : 0); \
+} while (0)
+
+#define EV_SET_SPEFSCR_BITS(s) \
+  EV_SET_SPEFSCR(SPREG(spr_spefscr) | (s))
+
+#define EV_SET_SPEFSCR_OV(l,h) do { \
+	unsigned32 _sPefScR = SPREG(spr_spefscr); \
+	if (l) \
+	  _sPefScR |= spefscr_ov | spefscr_sov; \
+	else \
+	  _sPefScR &= ~spefscr_ov; \
+	if (h) \
+	  _sPefScR |= spefscr_ovh | spefscr_sovh; \
+	else \
+	  _sPefScR &= ~spefscr_ovh; \
+	EV_SET_SPEFSCR(_sPefScR); \
+} while (0)
+
+/* SPE floating point helpers.  */
+
+#define EV_PMAX	0x7f7fffff
+#define EV_NMAX	0xff7fffff
+#define EV_PMIN	0x00800001
+#define EV_NMIN	0x80800001
+
+#define	EV_IS_INFDENORMNAN(x) \
+	(sim_fpu_is_infinity(x) || sim_fpu_is_denorm(x) || sim_fpu_is_nan(x))
+
+/* These aren't used yet; for now, the SPU is always enabled.
+   It would be nice if igen generated them for e500.  */
+#define SPU_BEGIN \
+{ \
+  if (MSR & msr_e500_spu_enable) { \
+
+#define SPU_END \
+  } else { \
+    /* FIXME: raise SPU unavailable.  */ \
+  } \
+}
+
+/* These are also not yet used.  */
+#define SPU_FP_BEGIN \
+{
+
+#define SPU_FP_END \
+  { \
+     unsigned s = SPEFSCR; \
+     /* Check SPEFSCR; raise exceptions if any required.  */ \
+     if (((s & (spefscr_finxe|spefscr_finve)) \
+	 && (s & (spefscr_finvh|spefscr_finv))) \
+      || ((s & (spefscr_finxe|spefscr_fdbze)) \
+	 && (s & (spefscr_fdbzh|spefscr_fdbz))) \
+      || ((s & (spefscr_finxe|spefscr_funfe)) \
+	 && (s & (spefscr_funfh|spefscr_funf))) \
+      || ((s & (spefscr_finxe|spefscr_fovfe)) \
+	 && (s & (spefscr_fovfh|spefscr_fovf)))) \
+       /* FIXME: raise exceptions.  */; \
+   } \
+}
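
To see the register dance in isolation, here is a minimal host-side sketch
(the stdint typedefs stand in for psim's unsigned32/unsigned64; the macros
are copied from the header above, minus the ACC variants):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t unsigned32;
typedef uint64_t unsigned64;

/* As in e500_expression.h: split a 64-bit value across the high/low
   register pair, and pick half words back out of a word.  */
#define EV_SET_REG1(sh, sl, d) do { \
        (sh) = ((unsigned64)(d) >> 32) & 0xffffffff; \
        (sl) = (d) & 0xffffffff; \
} while (0)
#define EV_LOHALF(x)    ((unsigned32)(x) & 0xffff)
#define EV_HIHALF(x)    (((unsigned32)(x) >> 16) & 0xffff)

int main(void)
{
  unsigned32 gprh, gpr;   /* the two halves of one unified register */
  EV_SET_REG1 (gprh, gpr, 0x1122334455667788ULL);
  printf ("gprh=%08x gpr=%08x hi=%04x lo=%04x\n",
          gprh, gpr, EV_HIHALF (gpr), EV_LOHALF (gpr));
  /* prints: gprh=11223344 gpr=55667788 hi=5566 lo=7788 */
  return 0;
}
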
Index: e500_registers.h
===================================================================
RCS file: e500_registers.h
diff -N e500_registers.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ e500_registers.h	22 Jun 2003 16:24:08 -0000
@@ -0,0 +1,83 @@
+/* e500 registers, for PSIM, the PowerPC simulator.
+
+   Copyright 2003 Free Software Foundation, Inc.
+
+   Contributed by Red Hat Inc; developed under contract from Motorola.
+   Written by matthew green <mrg@redhat.com>.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* e500 accumulator.  */
+
+typedef unsigned64 accreg;
+
+enum {
+  msr_e500_spu_enable = BIT(38)
+};
+
+/* e500 registers.  */
+
+enum
+  {
+  spefscr_sovh = BIT(32),	/* summary integer overflow (high) */
+  spefscr_ovh = BIT(33),	/* int overflow (high) */
+  spefscr_fgh = BIT(34),	/* FP guard (high) */
+  spefscr_fxh = BIT(35),	/* FP sticky (high) */
+  spefscr_finvh = BIT(36),	/* FP invalid operand (high) */
+  spefscr_fdbzh = BIT(37),	/* FP divide by zero (high) */
+  spefscr_funfh = BIT(38),	/* FP underflow (high) */
+  spefscr_fovfh = BIT(39),	/* FP overflow (high) */
+  spefscr_finxs = BIT(42),	/* FP inexact sticky */
+  spefscr_finvs = BIT(43),	/* FP invalid operand sticky */
+  spefscr_fdbzs = BIT(44),	/* FP divide by zero sticky */
+  spefscr_funfs = BIT(45),	/* FP underflow sticky */
+  spefscr_fovfs = BIT(46),	/* FP overflow sticky */
+  spefscr_mode = BIT(47),	/* SPU MODE (read only) */
+  spefscr_sov = BIT(48),	/* summary integer overflow (low) */
+  spefscr_ov = BIT(49),		/* int overflow (low) */
+  spefscr_fg = BIT(50),		/* FP guard (low) */
+  spefscr_fx = BIT(51),		/* FP sticky (low) */
+  spefscr_finv = BIT(52),	/* FP invalid operand (low) */
+  spefscr_fdbz = BIT(53),	/* FP divide by zero (low) */
+  spefscr_funf = BIT(54),	/* FP underflow (low) */
+  spefscr_fovf = BIT(55),	/* FP overflow (low) */
+  spefscr_finxe = BIT(57),	/* FP inexact enable */
+  spefscr_finve = BIT(58),	/* FP invalid operand enable */
+  spefscr_fdbze = BIT(59),	/* FP divide by zero enable */
+  spefscr_funfe = BIT(60),	/* FP underflow enable */
+  spefscr_fovfe = BIT(61),	/* FP overflow enable */
+  spefscr_frmc0 = BIT(62),	/* FP round mode control */
+  spefscr_frmc1 = BIT(63),
+  spefscr_frmc = (spefscr_frmc0 | spefscr_frmc1)
+};
+
+struct e500_regs {
+  /* e500 high bits.  */
+  signed_word gprh[32];
+  /* Accumulator */
+  accreg acc;
+};
+
+/* SPE partially visible accumulator */
+#define ACC		cpu_registers(processor)->e500.acc
+
+/* e500 register high bits */
+#define GPRH(N)		cpu_registers(processor)->e500.gprh[N]
+
+/* e500 unified vector register */
+#define EVR(N)		((((unsigned64)GPRH(N)) << 32) | GPR(N))
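
The EVR() splice in one line: the unified 64-bit SPE register is just the
32 gprh bits sitting above the 32 GPR bits.  A sketch with plain stdint
types standing in for psim's (names here are mine):

#include <stdio.h>
#include <stdint.h>

/* Mirror of EVR(N) above: high half from the e500 gprh array, low
   half from the ordinary GPR.  */
static uint64_t evr (uint32_t gprh, uint32_t gpr)
{
  return ((uint64_t) gprh << 32) | gpr;
}

int main(void)
{
  printf ("%016llx\n", (unsigned long long) evr (0xdeadbeef, 0x01234567));
  /* prints: deadbeef01234567 */
  return 0;
}
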
Index: idecode_expression.h
===================================================================
RCS file: /cvs/src/src/sim/ppc/idecode_expression.h,v
retrieving revision 1.1.1.1
diff -u -r1.1.1.1 idecode_expression.h
--- idecode_expression.h	16 Apr 1999 01:35:10 -0000	1.1.1.1
+++ idecode_expression.h	22 Jun 2003 16:24:09 -0000
@@ -1,6 +1,6 @@
 /*  This file is part of the program psim.
 
-    Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>
+    Copyright 1994, 1995, 1996, 1997, 2003 Andrew Cagney
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -18,6 +18,13 @@
  
     */
 
+/* Additional, and optional expressions.  */
+#ifdef WITH_ALTIVEC
+#include "altivec_expression.h"
+#endif
+#ifdef WITH_E500
+#include "e500_expression.h"
+#endif
 
 /* 32bit target expressions:
 
Index: ppc-instructions
===================================================================
RCS file: /cvs/src/src/sim/ppc/ppc-instructions,v
retrieving revision 1.6
diff -u -r1.6 ppc-instructions
--- ppc-instructions	22 Jun 2003 01:52:34 -0000	1.6
+++ ppc-instructions	22 Jun 2003 16:24:16 -0000
@@ -198,6 +198,8 @@
 	  unsigned32 fp_busy;				/* floating point registers that are busy */
 	  unsigned32 cr_fpscr_busy;			/* CR/FPSCR registers that are busy */
 	  signed16 spr_busy;				/* SPR register that is busy or PPC_NO_SPR */
+	  unsigned32 vr_busy;				/* AltiVec registers that are busy */
+	  signed16 vscr_busy;				/* AltiVec status register busy */
 	  signed16 issue;				/* # of cycles until unit can accept another insn */
 	  signed16 done;				/* # of cycles until insn is done */
 	  signed16 nr_writebacks;			/* # of registers this unit writes back */
@@ -228,6 +230,8 @@
 	  unsigned32 fp_busy;				/* floating point registers that are busy */
 	  unsigned32 cr_fpscr_busy;			/* CR/FPSCR registers that are busy */
 	  unsigned8 spr_busy[nr_of_sprs];		/* SPR registers that are busy */
+	  unsigned32 vr_busy;				/* AltiVec registers that are busy */
+	  unsigned8 vscr_busy;		/* AltiVec status register busy */
 	  unsigned8 busy[nr_ppc_function_units];	/* whether a function is busy or not */
 	};
 
@@ -320,6 +324,15 @@
 	}
 	if (busy->spr_busy != PPC_NO_SPR)
 	  TRACE(trace_model, ("Register %s is now available.\n", spr_name(busy->spr_busy)));
+	if (busy->vr_busy) {
+	  for (i = 0; i < 32; i++) {
+	    if (((1 << i) & busy->vr_busy) != 0) {
+	      TRACE(trace_model, ("Register v%d is now available.\n", i));
+	    }
+	  }
+	}
+	if (busy->vscr_busy)
+	  TRACE(trace_model, ("VSCR register is now available.\n"));
 
 # Trace making registers busy
 void::model-static::model_trace_make_busy:model_data *model_ptr, unsigned32 int_mask, unsigned32 fp_mask, unsigned32 cr_mask
@@ -398,6 +411,8 @@
 	      model_ptr->cr_fpscr_busy &= ~cur_busy->cr_fpscr_busy;
 	      if (cur_busy->spr_busy != PPC_NO_SPR)
 		model_ptr->spr_busy[cur_busy->spr_busy] = 0;
+	      model_ptr->vr_busy &= ~cur_busy->vr_busy;
+	      model_ptr->vscr_busy &= ~cur_busy->vscr_busy;
 
 	      if (WITH_TRACE && ppc_trace[trace_model])
 		model_trace_release(model_ptr, cur_busy);
@@ -454,6 +469,8 @@
 	  busy->fp_busy = 0;
 	  busy->cr_fpscr_busy = 0;
 	  busy->nr_writebacks = 0;
+	  busy->vr_busy = 0;
+	  busy->vscr_busy = 0;
 	}
 
 	busy->unit = unit;
@@ -4973,3 +4990,6 @@
 0.31,6.RT,11.RA,16.RB,21.310,31./:X:earwax::External Control In Word Indexed
 
 0.31,6.RS,11.RA,16.RB,21.438,31./:X:earwax::External Control Out Word Indexed
+
+:include:::altivec.igen
+:include:::e500.igen
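
On the model changes above: vr_busy follows the existing int_busy/fp_busy
convention, a 32-bit mask with one bit per AltiVec register, set when an
issuing instruction claims its output registers and cleared as function
units drain.  A toy illustration of that bookkeeping, outside the
simulator:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t vr_busy = 0;
  int i;

  vr_busy |= (1u << 3) | (1u << 17);  /* an insn makes v3, v17 busy */
  vr_busy &= ~(1u << 3);              /* a unit retires, freeing v3 */

  for (i = 0; i < 32; i++)
    if (vr_busy & (1u << i))
      printf ("v%d still busy\n", i); /* prints: v17 still busy */
  return 0;
}
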
Index: ppc-spr-table
===================================================================
RCS file: /cvs/src/src/sim/ppc/ppc-spr-table,v
retrieving revision 1.2
diff -u -r1.2 ppc-spr-table
--- ppc-spr-table	1 Dec 2001 18:56:36 -0000	1.2
+++ ppc-spr-table	22 Jun 2003 16:24:17 -0000
@@ -31,6 +31,7 @@
 SDR1:25:0:0
 SRR0:26:0:0
 SRR1:27:0:0
+VRSAVE:256:0:0
 SPRG0:272:0:0
 SPRG1:273:0:0
 SPRG2:274:0:0
@@ -39,6 +40,7 @@
 TBL:284:0:0
 TBU:285:0:0
 PVR:287:0:0
+SPEFSCR:512:0:0
 IBAT0U:528:0:0
 IBAT0L:529:0:0
 IBAT1U:530:0:0
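
The two new rows use the table's NAME:number:0:0 shape with the
architected SPR numbers (VRSAVE is SPR 256, SPEFSCR is SPR 512).  As a
reminder of how those numbers travel in instructions, mfspr/mtspr carry
the 10-bit SPR number with its two 5-bit halves swapped; a quick sanity
check:

#include <stdio.h>

/* Undo the half-swap in the mfspr/mtspr spr field.  */
static unsigned spr_decode (unsigned field)
{
  return ((field & 0x1f) << 5) | ((field >> 5) & 0x1f);
}

int main(void)
{
  printf ("field 8  -> SPR %u\n", spr_decode (8));   /* VRSAVE, 256  */
  printf ("field 16 -> SPR %u\n", spr_decode (16));  /* SPEFSCR, 512 */
  return 0;
}
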
Index: psim.c
===================================================================
RCS file: /cvs/src/src/sim/ppc/psim.c,v
retrieving revision 1.3
diff -u -r1.3 psim.c
--- psim.c	20 Jun 2003 13:32:34 -0000	1.3
+++ psim.c	22 Jun 2003 16:24:20 -0000
@@ -848,6 +848,30 @@
     *(unsigned_word*)cooked_buf = model_get_number_of_cycles(cpu_model(processor));
     break;
 
+#ifdef WITH_ALTIVEC
+  case reg_vr:
+    *(vreg*)cooked_buf = cpu_registers(processor)->altivec.vr[description.index];
+    break;
+
+  case reg_vscr:
+    *(vscreg*)cooked_buf = cpu_registers(processor)->altivec.vscr;
+    break;
+#endif
+
+#ifdef WITH_E500
+  case reg_gprh:
+    *(gpreg*)cooked_buf = cpu_registers(processor)->e500.gprh[description.index];
+    break;
+
+  case reg_evr:
+    *(unsigned64*)cooked_buf = EVR(description.index);
+    break;
+
+  case reg_acc:
+    *(accreg*)cooked_buf = cpu_registers(processor)->e500.acc;
+    break;
+#endif
+
   default:
     printf_filtered("psim_read_register(processor=0x%lx,buf=0x%lx,reg=%s) %s\n",
 		    (unsigned long)processor, (unsigned long)buf, reg,
@@ -873,6 +897,21 @@
     case 8:
       *(unsigned_8*)buf = H2T_8(*(unsigned_8*)cooked_buf);
       break;
+#ifdef WITH_ALTIVEC
+    case 16:
+      if (CURRENT_HOST_BYTE_ORDER != CURRENT_TARGET_BYTE_ORDER)
+        {
+	  union { vreg v; unsigned_8 d[2]; } h, t;
+          memcpy(&h.v/*dest*/, cooked_buf/*src*/, description.size);
+	  { _SWAP_8(t.d[0] =, h.d[1]); }
+	  { _SWAP_8(t.d[1] =, h.d[0]); }
+          memcpy(buf/*dest*/, &t/*src*/, description.size);
+          break;
+        }
+      else
+        memcpy(buf/*dest*/, cooked_buf/*src*/, description.size);
+      break;
+#endif
     }
   }
   else {
@@ -937,6 +976,21 @@
     case 8:
       *(unsigned_8*)cooked_buf = T2H_8(*(unsigned_8*)buf);
       break;
+#ifdef WITH_ALTIVEC
+    case 16:
+      if (CURRENT_HOST_BYTE_ORDER != CURRENT_TARGET_BYTE_ORDER)
+        {
+	  union { vreg v; unsigned_8 d[2]; } h, t;
+          memcpy(&t.v/*dest*/, buf/*src*/, description.size);
+	  { _SWAP_8(h.d[0] =, t.d[1]); }
+	  { _SWAP_8(h.d[1] =, t.d[0]); }
+          memcpy(cooked_buf/*dest*/, &h/*src*/, description.size);
+          break;
+        }
+      else
+        memcpy(cooked_buf/*dest*/, buf/*src*/, description.size);
+      break;
+#endif
     }
   }
   else {
@@ -977,6 +1030,35 @@
   case reg_fpscr:
     cpu_registers(processor)->fpscr = *(fpscreg*)cooked_buf;
     break;
+
+#ifdef WITH_E500
+  case reg_gprh:
+    cpu_registers(processor)->e500.gprh[description.index] = *(gpreg*)cooked_buf;
+    break;
+
+  case reg_evr:
+    {
+      unsigned64 v;
+      v = *(unsigned64*)cooked_buf;
+      cpu_registers(processor)->e500.gprh[description.index] = v >> 32;
+      cpu_registers(processor)->gpr[description.index] = v;
+      break;
+    }
+
+  case reg_acc:
+    cpu_registers(processor)->e500.acc = *(accreg*)cooked_buf;
+    break;
+#endif
+
+#ifdef WITH_ALTIVEC
+  case reg_vr:
+    cpu_registers(processor)->altivec.vr[description.index] = *(vreg*)cooked_buf;
+    break;
+
+  case reg_vscr:
+    cpu_registers(processor)->altivec.vscr = *(vscreg*)cooked_buf;
+    break;
+#endif
 
   default:
     printf_filtered("psim_write_register(processor=0x%lx,cooked_buf=0x%lx,reg=%s) %s\n",
Index: registers.c
===================================================================
RCS file: /cvs/src/src/sim/ppc/registers.c,v
retrieving revision 1.1.1.1
diff -u -r1.1.1.1 registers.c
--- registers.c	16 Apr 1999 01:35:11 -0000	1.1.1.1
+++ registers.c	22 Jun 2003 16:24:20 -0000
@@ -150,6 +150,35 @@
     description.index = spr_ctr;
     description.size = sizeof(unsigned_word);
   }
+#ifdef WITH_ALTIVEC
+  else if (reg[0] == 'v' && reg[1] == 'r' && are_digits(reg + 2)) {
+    description.type = reg_vr;
+    description.index = atoi(reg+2);
+    description.size = sizeof(vreg);
+  }
+  else if (!strcmp(reg, "vscr")) {
+    description.type = reg_vscr;
+    description.index = 0;
+    description.size = sizeof(vscreg);
+  }
+#endif
+#ifdef WITH_E500
+  else if (reg[0] == 'e' && reg[1] == 'v' && are_digits(reg + 2)) {
+    description.type = reg_evr;
+    description.index = atoi(reg+2);
+    description.size = sizeof(unsigned64);
+  }
+  else if (reg[0] == 'r' && reg[1] == 'h' && are_digits(reg + 2)) {
+    description.type = reg_gprh;
+    description.index = atoi(reg+2);
+    description.size = sizeof(gpreg);
+  }
+  else if (!strcmp(reg, "acc")) {
+    description.type = reg_acc;
+    description.index = 0;
+    description.size = sizeof(unsigned64);
+  }
+#endif
   else {
     sprs spr = find_spr(reg);
     if (spr != nr_of_sprs) {
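
For the curious, the new spellings accepted by the register lookup above
are "vrN" and "vscr" for AltiVec, and "evN", "rhN" and "acc" for e500.  A
trivial demonstration of the same parse, with a stand-in are_digits()
(everything here is illustrative, not psim code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

static int are_digits (const char *s)
{
  if (*s == '\0')
    return 0;
  for (; *s != '\0'; s++)
    if (!isdigit ((unsigned char) *s))
      return 0;
  return 1;
}

int main(void)
{
  const char *names[] = { "vr12", "vscr", "ev31", "rh7", "acc", "r3" };
  size_t i;
  for (i = 0; i < sizeof names / sizeof names[0]; i++) {
    const char *reg = names[i];
    if (reg[0] == 'v' && reg[1] == 'r' && are_digits (reg + 2))
      printf ("%-4s -> reg_vr, index %d\n", reg, atoi (reg + 2));
    else if (strcmp (reg, "vscr") == 0)
      printf ("%-4s -> reg_vscr\n", reg);
    else if (reg[0] == 'e' && reg[1] == 'v' && are_digits (reg + 2))
      printf ("%-4s -> reg_evr, index %d\n", reg, atoi (reg + 2));
    else if (reg[0] == 'r' && reg[1] == 'h' && are_digits (reg + 2))
      printf ("%-4s -> reg_gprh, index %d\n", reg, atoi (reg + 2));
    else if (strcmp (reg, "acc") == 0)
      printf ("%-4s -> reg_acc\n", reg);
    else
      printf ("%-4s -> falls through to GPR/SPR lookup\n", reg);
  }
  return 0;
}
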
Index: registers.h
===================================================================
RCS file: /cvs/src/src/sim/ppc/registers.h,v
retrieving revision 1.1.1.1
diff -u -r1.1.1.1 registers.h
--- registers.h	16 Apr 1999 01:35:11 -0000	1.1.1.1
+++ registers.h	22 Jun 2003 16:24:21 -0000
@@ -1,6 +1,6 @@
 /*  This file is part of the program psim.
 
-    Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>
+    Copyright 1994, 1997, 2003 Andrew Cagney
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -28,6 +28,19 @@
  *
  */
 
+/* FIXME:
+
+   For the moment, use macros to determine whether the e500 or
+   Altivec registers should be included.  IGEN should instead provide
+   a :register: field to facilitate the specification and generation
+   of per-ISA registers.  */
+
+#ifdef WITH_E500
+#include "e500_registers.h"
+#endif
+#ifdef WITH_ALTIVEC
+#include "altivec_registers.h"
+#endif
 
 /**
  ** General Purpose Registers
@@ -228,7 +241,6 @@
   srr1_subsequent_instruction = BIT(47)
 };
 
-
 /**
  ** storage interrupt registers
  **/
@@ -264,8 +276,14 @@
   /* Segment Registers */
   sreg sr[nr_of_srs];
 
-} registers;
+#ifdef WITH_ALTIVEC
+  struct altivec_regs altivec;
+#endif
+#ifdef WITH_E500
+  struct e500_regs e500;
+#endif
 
+} registers;
 
 /* dump out all the registers */
 
@@ -281,6 +299,12 @@
   reg_gpr, reg_fpr, reg_spr, reg_msr,
   reg_cr, reg_fpscr, reg_pc, reg_sr,
   reg_insns, reg_stalls, reg_cycles,
+#ifdef WITH_ALTIVEC
+  reg_vr, reg_vscr,
+#endif
+#ifdef WITH_E500
+  reg_acc, reg_gprh, reg_evr,
+#endif
   nr_register_types
 } register_types;
 
