Commit aad0e180 authored by monty@donna.mysql.com's avatar monty@donna.mysql.com
Browse files

Merge work:/home/bk/mysql into donna.mysql.com:/home/my/bk/mysql

parents b119ae92 adeed916
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -139,3 +139,6 @@ INSTALL-BINARY: mysql.info $(GT)

../MIRRORS:		manual.texi $(srcdir)/Support/generate-mirror-listing.pl
	perl -w $(srcdir)/Support/generate-mirror-listing.pl manual.texi > $@

# Don't update the files from bitkeeper
%::SCCS/s.%
+30 −5
Original line number Diff line number Diff line
@@ -813,6 +813,7 @@ MySQL change history
Changes in release 3.23.x  (Recommended; beta)
* News-3.23.28::                
* News-3.23.27::                Changes in release 3.23.27
* News-3.23.26::                Changes in release 3.23.26
* News-3.23.25::                Changes in release 3.23.25
@@ -19862,6 +19863,13 @@ The buffer that is allocated to cache index and rows for @code{BDB} tables.
If you don't use @code{BDB} tables, you should set this to 0 or
start @code{mysqld} with @code{--skip-bdb} to not waste memory for this cache.
@item @code{bdb_lock_max}
The maximum number of locks (1000 by default) you can have active on a BDB
table. You should increase this if you get errors of type
@code{bdb: Lock table is out of available locks} when you do long
transactions or when mysqld has to examine a lot of rows to calculate
the query.
@item @code{concurrent_inserts}
If @code{ON} (the default), @strong{MySQL} will allow you to use @code{INSERT}
on @code{MyISAM} tables at the same time as you run @code{SELECT} queries
@@ -24674,7 +24682,7 @@ limits. Here are some examples:
@multitable @columnfractions .5 .5
@item @strong{Operating System} @tab @strong{File Size Limit}
@item Linux-Intel @tab 2G (or 4G with reiserfs)
@item Linux-Intel 32 bit@tab 2G, 4G or bigger depending on Linux version
@item Linux-Alpha @tab 8T (?)
@item Solaris 2.5.1 @tab 2G (possibly 4G with patch)
@item Solaris 2.6 @tab 4G
@@ -24682,6 +24690,10 @@ limits. Here are some examples:
@item Solaris 2.7 ULTRA-SPARC @tab 8T (?)
@end multitable
On Linux 2.2 you can get bigger tables than 2G by using the LFS patch for
the ext2 file system.  On Linux 2.4 there also exist patches for ReiserFS
to get support for big files.
This means that the table size for @strong{MySQL} is normally limited by
the operating system.
@@ -24695,14 +24707,15 @@ this), you should set the @code{AVG_ROW_LENGTH} and @code{MAX_ROWS}
parameter when you create your table.  @xref{CREATE TABLE}.  You can
also set these later with @code{ALTER TABLE}. @xref{ALTER TABLE}.
If you need to have bigger tables than 2G / 4G
If your big table is going to be read-only, you could use
@code{myisampack} to merge and compress many tables to one.
@code{myisampack} usually compresses a table by at least 50%, so you can
have, in effect, much bigger tables.  @xref{myisampack, ,
@code{myisampack}}.
You can go around the operating system file limit for @code{MyISAM} data
files by using the @code{RAID} option. @xref{CREATE TABLE}.
Another solution can be the included MERGE library, which allows you to
handle a collection of identical tables as one. @xref{MERGE, MERGE
tables}.
@@ -25435,7 +25448,9 @@ multiple CPU machines one should use Solaris (because the threads works
really nice) or Linux (because the 2.2 kernel has really good SMP
support). Also on 32bit machines Linux has a 2G file size limit by
default. Hopefully this will be fixed soon when new filesystems are
released (XFS/Reiserfs).
released (XFS/Reiserfs).  If you have a desperate need for files bigger
than 2G on Linux-intel 32 bit, you should get the LFS patch for the ext2
file system.
Because we have not run @strong{MySQL} in production on that many platforms we
advise you to test your intended platform before choosing it, if possible.
@@ -38141,6 +38156,7 @@ version. The replication and BerkeleyDB code is still under development,
though, so 3.23 is not released as a stable version yet.
@menu
* News-3.23.28::                Changes in release 3.23.28
* News-3.23.27::                Changes in release 3.23.27
* News-3.23.26::                Changes in release 3.23.26
* News-3.23.25::                Changes in release 3.23.25
@@ -38171,7 +38187,16 @@ though, so 3.23 is not released as a stable version yet.
* News-3.23.0::                 Changes in release 3.23.0
@end menu
@node News-3.23.27, News-3.23.26, News-3.23.x, News-3.23.x
@node News-3.23.28, News-3.23.27, News-3.23.x, News-3.23.x
@appendixsubsec Changes in release 3.23.28
@itemize @bullet
@item
Fixed bug in a BDB key compare function when comparing part keys.
@item
Added variable @code{bdb_lock_max} to @code{mysqld}.
@end itemize
@node News-3.23.27, News-3.23.26, News-3.23.28, News-3.23.x
@appendixsubsec Changes in release 3.23.27
@itemize @bullet
@item
+0 −1
Original line number Diff line number Diff line
@@ -1378,7 +1378,6 @@ void mi_check_print_info(MI_CHECK *param __attribute__((unused)),
  VOID(vfprintf(stdout, fmt, args));
  VOID(fputc('\n',stdout));
  va_end(args);
  return;
}

/* VARARGS */
+8 KiB (172 KiB)

File changed.

No diff preview for this file type.

+65 −0
Original line number Diff line number Diff line
@@ -3439,6 +3439,23 @@ int Field_string::pack_cmp(const char *a, const char *b, uint length)
}


int Field_string::pack_cmp(const char *b, uint length)
{
  uint b_length= (uint) (uchar) *b++;
  char *end= ptr + field_length;
  while (end > ptr && end[-1] == ' ')
    end--;
  uint a_length = (uint) (end - ptr);

  if (binary_flag)
  {
    int cmp= memcmp(ptr,b,min(a_length,b_length));
    return cmp ? cmp : (int) (a_length - b_length);
  }
  return my_sortncmp(ptr,a_length, b, b_length);
}


uint Field_string::packed_col_length(const char *ptr)
{
  if (field_length > 255)
@@ -3637,6 +3654,27 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length)
  return my_sortncmp(a,a_length, b,b_length);
}

int Field_varstring::pack_cmp(const char *b, uint key_length)
{
  char *a=ptr+2;
  uint a_length=uint2korr(ptr);
  uint b_length;
  if (key_length > 255)
  {
    b_length=uint2korr(b); b+=2;
  }
  else
  {
    b_length= (uint) (uchar) *b++;
  }
  if (binary_flag)
  {
    int cmp= memcmp(a,b,min(a_length,b_length));
    return cmp ? cmp : (int) (a_length - b_length);
  }
  return my_sortncmp(a,a_length, b,b_length);
}

uint Field_varstring::packed_col_length(const char *ptr)
{
  if (field_length > 255)
@@ -4019,6 +4057,33 @@ int Field_blob::pack_cmp(const char *a, const char *b, uint key_length)
  return my_sortncmp(a,a_length, b,b_length);
}


int Field_blob::pack_cmp(const char *b, uint key_length)
{
  char *a;
  memcpy_fixed(&a,ptr+packlength,sizeof(char*));
  if (!a)
    return key_length > 0 ? -1 : 0;
  uint a_length=get_length(ptr);
  uint b_length;

  if (key_length > 255)
  {
    b_length=uint2korr(b); b+=2;
  }
  else
  {
    b_length= (uint) (uchar) *b++;
  }
  if (binary_flag)
  {
    int cmp= memcmp(a,b,min(a_length,b_length));
    return cmp ? cmp : (int) (a_length - b_length);
  }
  return my_sortncmp(a,a_length, b,b_length);
}


char *Field_blob::pack_key(char *to, const char *from, uint max_length)
{
  uint length=uint2korr(to);
Loading