roxen.lists.roxen.general

Subject Author Date
Re: Tiny fix included (Re: [PATCH 14/17] New module: gzip-on-the-fly) Stephen R. van den Berg <srb[at]cuci[dot]nl> 30-01-2009
Martin Jonsson wrote:
>Stephen R. van den Berg wrote:
>>The implementation looks rather good.  Excellent work.
>>Maybe one minor fixup:

>Thanks! I applied your patch yesterday, even though it seems Pike 
>doesn't really care about the 'b' mode flag. It shouldn't hurt to be 

Not even under Windows?  (It should, though I have never tested it.)

>Are you satisfied with the available configuration settings?

Excellent.  Everything I need is there.

Two things:
a. I would suggest changing the defaults to the ones present in the patch
   below.  The rationale is that for typical small setups the presented
   defaults have the largest relative benefit and the smallest impact on
   CPU or memory.  If the setup is CPU bound, the administrator is likely
   to be more experienced and will be perfectly capable of tuning the
   parameters to satisfy his/her needs.  If the setup wants a few extra
   percent in compression at the expense of more than ten times the
   CPU time, the compression level can be turned up by that same
   knowledgeable webadmin.
b. When the data is not actually shrinking after compression, do not
   use the "compressed" blob.

commit 9cb159e1aaffff707386e9fbfe5705be4ccea58e
Author: Stephen R. van den Berg <<srb[at]cuci.nl>>
Date:   Fri Jan 30 09:30:18 2009 +0100

    Tune HTTP compression

diff --git a/server/base_server/configuration.pike
b/server/base_server/configuration.pike
index 4cdf3a8..8eb663b 100644
--- a/server/base_server/configuration.pike
+++ b/server/base_server/configuration.pike
@@ -4943,7 +4943,7 @@ low."))->add_changed_callback(lambda(object v)
 			   { http_compr_minlen = v->query(); });
   http_compr_minlen = query("http_compression_min_size");
 
-  defvar("http_compression_max_size", 1048576,
+  defvar("http_compression_max_size", 0,
 	 DLOCALE(0, "Compression: Maximum content size"),
 	 TYPE_INT,
 	 DLOCALE(0, "The maximum file size for which to enable compression. "
@@ -4957,7 +4957,7 @@ low."))->add_changed_callback(lambda(object v)
   http_compr_maxlen = query("http_compression_max_size");
 
   Variable.Int comp_level = 
-    Variable.Int(5, 0, DLOCALE(0, "Compression: Compression level"),
+    Variable.Int(1, 0, DLOCALE(0, "Compression: Compression level"),
 		 DLOCALE(0, "The compression level to use (integer between 1 "
 			 "and 9). Higher number means more compression at the"
 			 " cost of processing power and vice versa. You may "
@@ -4966,7 +4966,7 @@ low."))->add_changed_callback(lambda(object v)
   comp_level->set_range(1, 9);
   defvar("http_compression_level", comp_level);
 		 
-  defvar("http_compression_dynamic_reqs", 0,
+  defvar("http_compression_dynamic_reqs", 1,
 	 DLOCALE(0, "Compression: Compress dynamic requests"),
 	 TYPE_FLAG,
 	 DLOCALE(0, "If enabled, even requests that aren't cacheable in the "
diff --git a/server/protocols/http.pike b/server/protocols/http.pike
index 94f00f2..21b0aea 100644
--- a/server/protocols/http.pike
+++ b/server/protocols/http.pike
@@ -2051,7 +2051,9 @@ private string try_gzip_data(string data, string mimetype)
       compress_main_mimetypes[main_type])) &&
      len >= min_data_length && 
      (!max_data_length || len <= max_data_length)) {
-    return gzip_data(data);
+    data = gzip_data(data);
+    if(len>sizeof(data))
+      return data;
   }
   return 0;
 }
-- 
Sincerely,
           Stephen R. van den Berg.
"Papers in string theory are published at a rate above the speed of light.
 This is no problem since no information is being transmitted." -- H. Kleinert