This is the mail archive of the gdb-patches@sources.redhat.com mailing list for the GDB project.



[RFA]: Fix partial memory transfers


I submit the enclosed patch to target.c for approval.

My recent commit moving the dcache on top of the target xfer_memory
vector introduced a bug: target_xfer_memory_partial() would always
perform a complete transfer, which defeated the entire purpose of
that function.

The enclosed patch fixes do_xfer_memory(), the low-level function that
calls the xfer_memory vector function(s), so that it performs only a
single transfer per call.  The dcache already contains the logic to
call do_xfer_memory again when it returns a partial transfer.
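
To make the intended division of labor concrete, here is a minimal,
self-contained sketch (not GDB code; xfer_one, xfer_all, CHUNK, and the
fake backing store are made-up names purely for illustration).  The
low-level routine moves at most one chunk per call and returns the
number of bytes moved or -1, while the higher layer loops until the
whole region is covered and converts the result back to the
0-or-errno style:

#include <stdio.h>
#include <string.h>

/* Largest number of bytes the (hypothetical) backend will move in one
   call; stands in for a target boundary or cache line.  */
#define CHUNK 4

/* A fake target address space for the example.  */
static char fake_target_memory[64] = "The quick brown fox jumps over the lazy dog";

/* Low-level transfer, analogous in spirit to do_xfer_memory after this
   patch: move at most CHUNK bytes and return the number of bytes
   actually transferred, or -1 on error.  */
static int
xfer_one (unsigned long memaddr, char *myaddr, int len, int write)
{
  if (len <= 0 || memaddr >= sizeof fake_target_memory)
    return -1;
  if (len > CHUNK)
    len = CHUNK;
  if (memaddr + len > sizeof fake_target_memory)
    len = sizeof fake_target_memory - memaddr;
  if (write)
    memcpy (fake_target_memory + memaddr, myaddr, len);
  else
    memcpy (myaddr, fake_target_memory + memaddr, len);
  return len;
}

/* Higher layer, analogous in spirit to what the dcache (or
   target_xfer_memory) is expected to do: keep calling xfer_one until
   the whole region is done.  Returns 0 on success, nonzero on error.  */
static int
xfer_all (unsigned long memaddr, char *myaddr, int len, int write)
{
  while (len > 0)
    {
      int res = xfer_one (memaddr, myaddr, len, write);
      if (res <= 0)
        return 1;              /* Give up; a real caller would report errno.  */
      memaddr += res;
      myaddr += res;
      len -= res;
    }
  return 0;
}

int
main (void)
{
  char buf[16] = { 0 };

  if (xfer_all (4, buf, 15, 0) == 0)
    printf ("read: %s\n", buf);   /* Prints "read: quick brown fox".  */
  return 0;
}

The loop in xfer_all above is the same shape as the one that stays in
target_xfer_memory; the bug was that do_xfer_memory duplicated it one
level down, so callers never saw a partial result.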

This patch also fixes up comments for the *xfer_memory* functions.
Some have been wrong/misleading for a very long time.

This patch also includes a bug fix from Nick, which is what led me to
look at this code and find the bug described above.  I'm folding it
into this patch because he retracted his fix when I mentioned that
this patch was coming.

        --jtc

2000-11-08  J.T. Conklin  <jtc@redback.com>

	* target.c (do_xfer_memory): Only perform a single memory transfer
	instead of iterating to transfer the entire region.  Higher layers
	are expected to call this function multiple times for partial
	transfers.
	(target_xfer_memory_partial): Remove unused local variables.

2000-11-07  Nick Duffek  <nsd@redhat.com>

	* target.c (target_xfer_memory_partial): Return bytes transferred
	instead of 0.

Index: target.c
===================================================================
RCS file: /cvs/src/src/gdb/target.c,v
retrieving revision 1.14
diff -c -r1.14 target.c
*** target.c	2000/11/03 22:00:56	1.14
--- target.c	2000/11/08 20:53:15
***************
*** 837,851 ****
    return target_xfer_memory (memaddr, myaddr, len, 1);
  }
  
! /* Move memory to or from the targets.  Iterate until all of it has
!    been moved, if necessary.  The top target gets priority; anything
!    it doesn't want, is offered to the next one down, etc.  Note the
!    business with curlen:  if an early target says "no, but I have a
!    boundary overlapping this xfer" then we shorten what we offer to
!    the subsequent targets so the early guy will get a chance at the
!    tail before the subsequent ones do. 
  
!    Result is 0 or errno value.  */
  
  int
  do_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write)
--- 837,846 ----
    return target_xfer_memory (memaddr, myaddr, len, 1);
  }
  
! /* Move memory to or from the targets.  The top target gets priority;
!    if it cannot handle it, it is offered to the next one down, etc.
  
!    Result is -1 on error, or the number of bytes transferred.  */
  
  int
  do_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write)
***************
*** 863,880 ****
       0.  */
    errno = 0;
  
!   /* The quick case is that the top target does it all.  */
    res = current_target.to_xfer_memory
      (memaddr, myaddr, len, write, &current_target);
-   if (res == len)
-     return len;
- 
-   if (res > 0)
-     goto bump;
-   /* If res <= 0 then we call it again in the loop.  Ah well.  */
  
!   while (len > 0)
      {
        for (item = target_stack; item; item = item->next)
  	{
  	  t = item->target_ops;
--- 858,871 ----
       0.  */
    errno = 0;
  
!   /* The quick case is that the top target can handle the transfer.  */
    res = current_target.to_xfer_memory
      (memaddr, myaddr, len, write, &current_target);
  
!   if (res <= 0)
      {
+       /* If res <= 0 then we call it again in the loop.  Ah well. */
+   
        for (item = target_stack; item; item = item->next)
  	{
  	  t = item->target_ops;
***************
*** 889,907 ****
  	}
  
        if (res <= 0)
! 	{
! 	    return -1;
! 	}
!     bump:
!       done    += res;
!       memaddr += res;
!       myaddr  += res;
!       len     -= res;
      }
!   
!   return done;
  }
  
  static int
  target_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write)
  {
--- 880,897 ----
  	}
  
        if (res <= 0)
! 	return -1;
      }
! 
!   return res;
  }
  
+ 
+ /* Perform a memory transfer.  Iterate until the entire region has
+    been transferred.
+ 
+    Result is 0 or errno value.  */
+ 
  static int
  target_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write)
  {
***************
*** 936,953 ****
    return 0;			/* We managed to cover it all somehow. */
  }
  
  
! /* Perform a partial memory transfer.  */
  
  static int
  target_xfer_memory_partial (CORE_ADDR memaddr, char *myaddr, int len,
  			    int write_p, int *err)
  {
    int res;
-   int err_res;
-   int len_res;
-   struct target_ops *t;
-   struct target_stack_item *item;
  
    /* Zero length requests are ok and require no work.  */
    if (len == 0)
--- 926,941 ----
    return 0;			/* We managed to cover it all somehow. */
  }
  
+ 
+ /* Perform a partial memory transfer.
  
!    Result is -1 on error, or the number of bytes transferred.  */
  
  static int
  target_xfer_memory_partial (CORE_ADDR memaddr, char *myaddr, int len,
  			    int write_p, int *err)
  {
    int res;
  
    /* Zero length requests are ok and require no work.  */
    if (len == 0)
***************
*** 968,974 ****
      }
  
    *err = 0;
!   return 0;
  }
  
  int
--- 956,962 ----
      }
  
    *err = 0;
!   return res;
  }
  
  int


-- 
J.T. Conklin
RedBack Networks
