[PATCH] printk: drivers/char/ftape/compressor/zftape-compress.c
1 /*
2  *      Copyright (C) 1994-1997 Claus-Justus Heine
3
4  This program is free software; you can redistribute it and/or
5  modify it under the terms of the GNU General Public License as
6  published by the Free Software Foundation; either version 2, or (at
7  your option) any later version.
8  
9  This program is distributed in the hope that it will be useful, but
10  WITHOUT ANY WARRANTY; without even the implied warranty of
11  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  General Public License for more details.
13  
14  You should have received a copy of the GNU General Public License
15  along with this program; see the file COPYING.  If not, write to
16  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17  USA.
18  
19  *
20  *     This file implements a "generic" interface between the
21  *     zftape driver and a compression algorithm. The compression
22  *     algorithm currently used is an LZ77; I use the lzrw3
23  *     implementation by Ross N. Williams (Renaissance Software).
24  *     The compression code itself is in the files lzrw3.c and
25  *     lzrw3.h.  To adopt another compression algorithm, the
26  *     functions zft_compress() and zft_uncompress() must be
27  *     changed appropriately. See below.
28  */
29
30 #include <linux/errno.h>
31 #include <linux/mm.h>
32 #include <linux/module.h>
33
34 #include <linux/zftape.h>
35
36 #include <asm/uaccess.h>
37
38 #include "../zftape/zftape-init.h"
39 #include "../zftape/zftape-eof.h"
40 #include "../zftape/zftape-ctl.h"
41 #include "../zftape/zftape-write.h"
42 #include "../zftape/zftape-read.h"
43 #include "../zftape/zftape-rw.h"
44 #include "../compressor/zftape-compress.h"
45 #include "../zftape/zftape-vtbl.h"
46 #include "../compressor/lzrw3.h"
47
48 /*
49  *   global variables
50  */
51
52 /* I handle the allocation of this buffer as a special case, because
53  * its size varies depending on the tape length inserted.
54  */
55
56 /* local variables 
57  */
58 static void *zftc_wrk_mem = NULL;
59 static __u8 *zftc_buf     = NULL;
60 static void *zftc_scratch_buf  = NULL;
61
62 /* compression statistics 
63  */
64 static unsigned int zftc_wr_uncompressed = 0;
65 static unsigned int zftc_wr_compressed   = 0;
66 static unsigned int zftc_rd_uncompressed = 0;
67 static unsigned int zftc_rd_compressed   = 0;
68
69 /* forward */
70 static int  zftc_write(int *write_cnt,
71                        __u8 *dst_buf, const int seg_sz,
72                        const __u8 __user *src_buf, const int req_len,
73                        const zft_position *pos, const zft_volinfo *volume);
74 static int  zftc_read(int *read_cnt,
75                       __u8  __user *dst_buf, const int to_do,
76                       const __u8 *src_buf, const int seg_sz,
77                       const zft_position *pos, const zft_volinfo *volume);
78 static int  zftc_seek(unsigned int new_block_pos, 
79                       zft_position *pos, const zft_volinfo *volume,
80                       __u8 *buffer);
81 static void zftc_lock   (void);
82 static void zftc_reset  (void);
83 static void zftc_cleanup(void);
84 static void zftc_stats      (void);
85
86 /* compressed segment. This conforms to QIC-80-MC, Revision K.
87  * 
88  * Rev. K applies to tapes with `fixed length format' which is
89  * indicated by format code 2,3 and 5. See below for format code 4 and 6
90  *
91  * 2 bytes: offset of compression segment structure
92  *          29k > offset >= 29k-18: data from previous segment ends in this
93  *                                  segment and no compressed block starts
94  *                                  in this segment
95  *                     offset == 0: data from previous segment occupies entire
96  *                                  segment and continues in next segment
97  * n bytes: remainder from previous segment
98  * 
99  * Rev. K:  
100  * 4 bytes: file set byte offset
101  * Post Rev. K and QIC-3010/3020:
102  * 8 bytes: file set byte offset
103  * 2 bytes: byte count N (amount of data following)
104  *          bit 15 is set if the data is uncompressed, bit 15 is
105  *          clear if the data is compressed
106  * N bytes: data (as much as specified in the byte count)
107  * 2 bytes: byte count N_1 of next cluster
108  * N_1 bytes: data of next cluster
109  * 2 bytes: byte count N_2 of next cluster
110  * N_2 bytes: ...  
111  *
112  * Note that the `N' byte count accounts only for the bytes that are in
113  * the current segment if the cluster spans into the next segment.
114  */
115
116 typedef struct
117 {
118         int cmpr_pos;             /* current position in compression buffer */
119         int cmpr_sz;              /* what is left in the compression buffer
120                                    * when copying the compressed data to the
121                                    * deblock buffer
122                                    */
123         unsigned int first_block; /* location of header information in
124                                    * this segment
125                                    */
126         unsigned int count;       /* amount of data of current block
127                                    * contained in current segment 
128                                    */
129         unsigned int offset;      /* offset in current segment */
130         unsigned int spans:1;     /* might continue in next segment */
131         unsigned int uncmpr;      /* 0x8000 if this block contains
132                                    * uncompressed data 
133                                    */
134         __s64 foffs;              /* file set byte offset, same as in 
135                                    * compression map segment
136                                    */
137 } cmpr_info;
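
/* Illustrative sketch, not part of the original driver: walking the cluster
 * chain of one segment as laid out in the format comment above. GET2() and
 * the 18-byte threshold come from this file; walk_clusters_example() and its
 * parameters are hypothetical names. Spanning clusters and error handling
 * are ignored here; see get_cseg()/get_next_cluster() for the real logic.
 */
#if 0
static void walk_clusters_example(const __u8 *seg_buf, int seg_sz, int qic113)
{
        int off = GET2(seg_buf, 0);      /* offset of first new cluster header */

        if (off == 0 || off > seg_sz)    /* previous data fills segment, or corrupt */
                return;
        if (seg_sz - off > 18)           /* file set byte offset field present */
                off += qic113 ? 8 : 4;   /* 8 bytes post Rev. K, 4 bytes Rev. K */
        while (seg_sz - off > 18) {      /* room for another cluster header */
                int count = GET2(seg_buf, off);
                int uncompressed = (count & 0x8000) != 0;  /* bit 15: stored raw */

                count &= 0x7fff;
                off += 2 + count;        /* skip the byte count and its data */
                (void)uncompressed;
        }
}
#endif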
138
139 static cmpr_info cseg; /* static data. Must be kept up to date and shared by
140                         * read, write and seek functions
141                         */
142
143 #define DUMP_CMPR_INFO(level, msg, info)                                \
144         TRACE(level, msg "\n"                                           \
145               KERN_INFO "cmpr_pos   : %d\n"                             \
146               KERN_INFO "cmpr_sz    : %d\n"                             \
147               KERN_INFO "first_block: %d\n"                             \
148               KERN_INFO "count      : %d\n"                             \
149               KERN_INFO "offset     : %d\n"                             \
150               KERN_INFO "spans      : %d\n"                             \
151               KERN_INFO "uncmpr     : 0x%04x\n"                         \
152               KERN_INFO "foffs      : " LL_X,                           \
153               (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block,   \
154               (info)->count, (info)->offset, (info)->spans == 1,        \
155               (info)->uncmpr, LL((info)->foffs))
156
157 /*   extract the compression segment info, return an error code
158  *  
159  *   afterwards, cseg->offset points to start of data of the NEXT
160  *   compressed block, and cseg->count contains the amount of data
161  *   left in the current compressed block. cseg->spans is set to 1 if
162  *   the block is continued in the following segment. Otherwise it is
163  *   set to 0. 
164  */
165 static int get_cseg (cmpr_info *cinfo, const __u8 *buff, 
166                      const unsigned int seg_sz,
167                      const zft_volinfo *volume)
168 {
169         TRACE_FUN(ft_t_flow);
170
171         cinfo->first_block = GET2(buff, 0);
172         if (cinfo->first_block == 0) { /* data spans to next segment */
173                 cinfo->count  = seg_sz - sizeof(__u16);
174                 cinfo->offset = seg_sz;
175                 cinfo->spans = 1;
176         } else { /* cluster definitely ends in this segment */
177                 if (cinfo->first_block > seg_sz) {
178                         /* data corrupted */
179                         TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
180                                     KERN_INFO "segment size: %d\n"
181                                     KERN_INFO "first block : %d",
182                                     seg_sz, cinfo->first_block);
183                 }
184                 cinfo->count  = cinfo->first_block - sizeof(__u16);
185                 cinfo->offset = cinfo->first_block;
186                 cinfo->spans = 0;
187         }
188         /* now get the offset the first block should have in the
189          * uncompressed data stream.
190          *
191          * For this magic `18' refer to CRF-3 standard or QIC-80MC,
192          * Rev. K.  
193          */
194         if ((seg_sz - cinfo->offset) > 18) {
195                 if (volume->qic113) { /* > revision K */
196                         TRACE(ft_t_data_flow, "New QIC-113 compliance");
197                         cinfo->foffs = GET8(buff, cinfo->offset);
198                         cinfo->offset += sizeof(__s64); 
199                 } else {
200                         TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
201                         cinfo->foffs   = (__s64)GET4(buff, cinfo->offset);
202                         cinfo->offset += sizeof(__u32); 
203                 }
204         }
205         if (cinfo->foffs > volume->size) {
206                 TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
207                             KERN_INFO "offset in current volume: %d\n"
208                             KERN_INFO "size of current volume  : %d",
209                             (int)(cinfo->foffs>>10), (int)(volume->size>>10));
210         }
211         if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
212                 TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
213                             KERN_INFO "block size : %d\n"
214                             KERN_INFO "data record: %d",
215                             volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
216         }
217         DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
218         TRACE_EXIT 0;
219 }
220
221 /*  This one is called when a new cluster starts in the same segment.
222  *  
223  *  Note: if this is the first cluster in the current segment, we must
224  *  not check whether there are more than 18 bytes available because
225  *  this has already been done in get_cseg() and there may be less
226  *  than 18 bytes available due to header information.
227  * 
228  */
229 static void get_next_cluster(cmpr_info *cluster, const __u8 *buff, 
230                              const int seg_sz, const int finish)
231 {
232         TRACE_FUN(ft_t_flow);
233
234         if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
235                 cluster->count   = GET2(buff, cluster->offset);
236                 cluster->uncmpr  = cluster->count & 0x8000;
237                 cluster->count  -= cluster->uncmpr;
238                 cluster->offset += sizeof(__u16);
239                 cluster->foffs   = 0;
240                 if ((cluster->offset + cluster->count) < seg_sz) {
241                         cluster->spans = 0;
242                 } else if (cluster->offset + cluster->count == seg_sz) {
243                         cluster->spans = !finish;
244                 } else {
245                         /* either an error or a volume written by an 
246                          * old version. If this is a data error, then we'll
247                          * catch it later.
248                          */
249                         TRACE(ft_t_data_flow, "Either error or old volume");
250                         cluster->spans = 1;
251                         cluster->count = seg_sz - cluster->offset;
252                 }
253         } else {
254                 cluster->count = 0;
255                 cluster->spans = 0;
256                 cluster->foffs = 0;
257         }
258         DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
259         TRACE_EXIT;
260 }
261
262 static void zftc_lock(void)
263 {
264 }
265
266 /*  this function is needed for zftape_reset_position in zftape-io.c 
267  */
268 static void zftc_reset(void)
269 {
270         TRACE_FUN(ft_t_flow);
271
272         memset((void *)&cseg, '\0', sizeof(cseg));
273         zftc_stats();
274         TRACE_EXIT;
275 }
276
277 static int cmpr_mem_initialized = 0;
278 static unsigned int alloc_blksz = 0;
279
280 static int zft_allocate_cmpr_mem(unsigned int blksz)
281 {
282         TRACE_FUN(ft_t_flow);
283
284         if (cmpr_mem_initialized && blksz == alloc_blksz) {
285                 TRACE_EXIT 0;
286         }
287         TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
288                     zftc_cleanup());
289         TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
290                     zftc_cleanup());
291         alloc_blksz = blksz;
292         TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
293                     zftc_cleanup());
294         cmpr_mem_initialized = 1;
295         TRACE_EXIT 0;
296 }
297
298 static void zftc_cleanup(void)
299 {
300         TRACE_FUN(ft_t_flow);
301
302         zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
303         zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
304         zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
305         cmpr_mem_initialized = alloc_blksz = 0;
306         TRACE_EXIT;
307 }
308
309 /*****************************************************************************
310  *                                                                           *
311  *  The following two functions "ftape_compress()" and                       *
312  *  "ftape_uncompress()" are the interface to the actual compression         *
313  *  algorithm (i.e. they are calling the "compress()" function from          *
314  *  the lzrw3 package for now). These routines could quite easily be         *
315  *  changed to adopt another compression algorithm instead of lzrw3,         *
316  *  which currently is used.                                                 *
317  *                                                                           *
318  *****************************************************************************/
319
320 /* called by zft_compress_write() to perform the compression. Must
321  * return the size of the compressed data.
322  *
323  * NOTE: The size of the compressed data should not exceed the size of
324  *       the uncompressed data. Most compression algorithms have means
325  *       to store data unchanged if the "compressed" data amount would
326  *       exceed the original one. Mostly this is done by storing some
327  *       flag-bytes in front of the compressed data to indicate if it
328  *       is compressed or not. Thus the worst compression result
329  *       length is the original length plus those flag-bytes.
330  *
331  *       We don't want that, as the QIC-80 standard provides a means
332  *       of marking uncompressed blocks by simply setting bit 15 of
333  *       the compressed block's length. Thus a compressed block can
334  *       have at most a length of 2^15-1 bytes. The QIC-80 standard
335  *       restricts the block-length even further, allowing only 29k -
336  *       6 bytes.
337  *
338  *       Currently, the maximum blocksize used by zftape is 28k.
339  *
340  *       In short: don't exceed the length of the input-package, set
341  *       bit 15 of the compressed size to 1 if you have copied data
342  *       instead of compressing it.
343  */
344 static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
345 {
346         __s32 compressed_sz;
347         TRACE_FUN(ft_t_flow);
348         
349
350         lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
351                        in_buffer, in_sz, out_buffer, &compressed_sz);
352         if (TRACE_LEVEL >= ft_t_info) {
353                 /*  the compiler will optimize this away when
354                  *  compiled with NO_TRACE_AT_ALL option
355                  */
356                 TRACE(ft_t_data_flow, "\n"
357                       KERN_INFO "before compression: %d bytes\n"
358       KERN_INFO "after compression : %d bytes", 
359                       in_sz, 
360                       (int)(compressed_sz < 0 
361                       ? -compressed_sz : compressed_sz));
362                 /*  for statistical purposes
363                  */
364                 zftc_wr_compressed   += (compressed_sz < 0 
365                                            ? -compressed_sz : compressed_sz);
366                 zftc_wr_uncompressed += in_sz;
367         }
368         TRACE_EXIT (int)compressed_sz;
369 }
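
/* Illustrative sketch, not part of the original driver: how the sign
 * convention of zft_compress() becomes the on-tape length word. A negative
 * return value means lzrw3 only copied the data, and zftc_write() then sets
 * bit 15 of the 16-bit byte count (cseg.uncmpr = 0x8000), as described
 * above. make_length_word_example() is a hypothetical name.
 */
#if 0
static __u16 make_length_word_example(int compressed_sz)
{
        if (compressed_sz < 0)                     /* stored uncompressed */
                return 0x8000 | (__u16)-compressed_sz;
        return (__u16)compressed_sz;               /* plain compressed count */
}
#endif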
370
371 /* called by zft_compress_read() to decompress the data. Must
372  * return the size of the decompressed data for sanity checks
373  * (compared with zft_blk_sz)
374  *
375  * NOTE: Read the note for zft_compress() above!  If bit 15 of the
376  *       parameter in_sz is set, then the data in in_buffer isn't
377  *       compressed, which must be handled by the un-compression
378  *       algorithm. (I changed lzrw3 to handle this.)
379  *
380  *  The parameter max_out_sz is needed to prevent buffer overruns when 
381  *  uncompressing corrupt data.
382  */
383 static unsigned int zft_uncompress(__u8 *in_buffer, 
384                                    int in_sz, 
385                                    __u8 *out_buffer,
386                                    unsigned int max_out_sz)
387 {
388         TRACE_FUN(ft_t_flow);
389         
390         lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
391                        in_buffer, (__s32)in_sz,
392                        out_buffer, (__u32 *)&max_out_sz);
393         
394         if (TRACE_LEVEL >= ft_t_info) {
395                 TRACE(ft_t_data_flow, "\n"
396                       KERN_INFO "before decompression: %d bytes\n"
397                       KERN_INFO "after decompression : %d bytes", 
398                       in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
399                 /*  for statistical purposes
400                  */
401                 zftc_rd_compressed   += in_sz < 0 ? -in_sz : in_sz;
402                 zftc_rd_uncompressed += max_out_sz;
403         }
404         TRACE_EXIT (unsigned int)max_out_sz;
405 }
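
/* Illustrative sketch, not part of the original driver: the caller's side of
 * the "negative in_sz means uncompressed" convention handled above. This
 * mirrors what zftc_read() does with cseg.uncmpr and cseg.cmpr_pos further
 * below; the function and parameter names here are hypothetical.
 */
#if 0
static unsigned int uncompress_block_example(__u8 *in, int stored_sz,
                                             int stored_raw, __u8 *out,
                                             unsigned int blk_sz)
{
        /* pass a negative size so the (modified) lzrw3 code just copies */
        return zft_uncompress(in, stored_raw ? -stored_sz : stored_sz,
                              out, blk_sz);
}
#endif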
406
407 /* print some statistics about the efficiency of the compression to
408  * the kernel log 
409  */
410 static void zftc_stats(void)
411 {
412         TRACE_FUN(ft_t_flow);
413
414         if (TRACE_LEVEL < ft_t_info) {
415                 TRACE_EXIT;
416         }
417         if (zftc_wr_uncompressed != 0) {
418                 if (zftc_wr_compressed > (1<<14)) {
419                         TRACE(ft_t_info, "compression statistics (writing):\n"
420                               KERN_INFO " compr./uncmpr.   : %3d %%",
421                               (((zftc_wr_compressed>>10) * 100)
422                                / (zftc_wr_uncompressed>>10)));
423                 } else {
424                         TRACE(ft_t_info, "compression statistics (writing):\n"
425                               KERN_INFO " compr./uncmpr.   : %3d %%",
426                               ((zftc_wr_compressed * 100)
427                                / zftc_wr_uncompressed));
428                 }
429         }
430         if (zftc_rd_uncompressed != 0) {
431                 if (zftc_rd_compressed > (1<<14)) {
432                         TRACE(ft_t_info, "compression statistics (reading):\n"
433                               KERN_INFO " compr./uncmpr.   : %3d %%",
434                               (((zftc_rd_compressed>>10) * 100)
435                                / (zftc_rd_uncompressed>>10)));
436                 } else {
437                         TRACE(ft_t_info, "compression statistics (reading):\n"
438                               KERN_INFO " compr./uncmpr.   : %3d %%",
439                               ((zftc_rd_compressed * 100)
440                                / zftc_rd_uncompressed));
441                 }
442         }
443         /* only print it once: */
444         zftc_wr_uncompressed = 
445                 zftc_wr_compressed  =
446                 zftc_rd_uncompressed =
447                 zftc_rd_compressed   = 0;
448         TRACE_EXIT;
449 }
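
/* Illustrative sketch, not part of the original driver: why the >>10 variant
 * is used above once the byte counters grow large. "compressed * 100" is
 * done in 32-bit arithmetic and could overflow for large counters, so both
 * operands are scaled down to kilobytes first; the ratio is unchanged. The
 * numbers below are hypothetical.
 */
#if 0
static unsigned int compression_percent_example(void)
{
        unsigned int compressed   = 3U << 20;    /* 3 MiB after compression  */
        unsigned int uncompressed = 4U << 20;    /* 4 MiB before compression */

        return ((compressed >> 10) * 100) / (uncompressed >> 10);   /* 75 % */
}
#endif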
450
451 /* start new compressed block 
452  */
453 static int start_new_cseg(cmpr_info *cluster, 
454                           char *dst_buf, 
455                           const zft_position *pos,
456                           const unsigned int blk_sz,
457                           const char *src_buf,
458                           const int this_segs_sz,
459                           const int qic113)
460 {
461         int size_left;
462         int cp_cnt;
463         int buf_pos;
464         TRACE_FUN(ft_t_flow);
465
466         size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
467         TRACE(ft_t_data_flow,"\n" 
468               KERN_INFO "segment size   : %d\n"
469               KERN_INFO "compressed_sz: %d\n"
470               KERN_INFO "size_left      : %d",
471               this_segs_sz, cluster->cmpr_sz, size_left);
472         if (size_left > 18) { /* start a new cluster */
473                 cp_cnt = cluster->cmpr_sz;
474                 cluster->cmpr_sz = 0;
475                 buf_pos = cp_cnt + sizeof(__u16);
476                 PUT2(dst_buf, 0, buf_pos);
477
478                 if (qic113) {
479                         __s64 foffs = pos->volume_pos;
480                         if (cp_cnt) foffs += (__s64)blk_sz;
481
482                         TRACE(ft_t_data_flow, "new style QIC-113 header");
483                         PUT8(dst_buf, buf_pos, foffs);
484                         buf_pos += sizeof(__s64);
485                 } else {
486                         __u32 foffs = (__u32)pos->volume_pos;
487                         if (cp_cnt) foffs += (__u32)blk_sz;
488                         
489                         TRACE(ft_t_data_flow, "old style QIC-80MC header");
490                         PUT4(dst_buf, buf_pos, foffs);
491                         buf_pos += sizeof(__u32);
492                 }
493         } else if (size_left >= 0) {
494                 cp_cnt = cluster->cmpr_sz;
495                 cluster->cmpr_sz = 0;
496                 buf_pos = cp_cnt + sizeof(__u16);
497                 PUT2(dst_buf, 0, buf_pos);  
498                 /* zero unused part of segment. */
499                 memset(dst_buf + buf_pos, '\0', size_left);
500                 buf_pos = this_segs_sz;
501         } else { /* need entire segment and more space */
502                 PUT2(dst_buf, 0, 0); 
503                 cp_cnt = this_segs_sz - sizeof(__u16);
504                 cluster->cmpr_sz  -= cp_cnt;
505                 buf_pos = this_segs_sz;
506         }
507         memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
508         cluster->cmpr_pos += cp_cnt;
509         TRACE_EXIT buf_pos;
510 }
511
512 /* return-value: the number of bytes removed from the user-buffer
513  *               `src_buf' or error code
514  *
515  *  int *write_cnt           : how much actually has been moved to the
516  *                             dst_buf. Need not be initialized when
517  *                             function returns with an error code
518  *                             (negative return value) 
519  *  __u8 *dst_buf            : kernel space buffer where the data has to
520  *                             be copied to. The contents of this buffer
521  *                             go to a specific segment.
522  *  const int seg_sz         : the size of the segment dst_buf will be
523  *                             copied to.
524  *  const zft_position *pos  : struct containing the coordinates in
525  *                             the current volume (byte position,
526  *                             segment id of current segment etc)
527  *  const zft_volinfo *volume: information about the current volume,
528  *                             size etc.
529  *  const __u8 *src_buf      : user space buffer that contains the
530  *                             data the user wants to be written to
531  *                             tape.
532  *  const int req_len        : the amount of data the user wants to be
533  *                             written to tape.
534  */
535 static int zftc_write(int *write_cnt,
536                       __u8 *dst_buf, const int seg_sz,
537                       const __u8 __user *src_buf, const int req_len,
538                       const zft_position *pos, const zft_volinfo *volume)
539 {
540         int req_len_left = req_len;
541         int result;
542         int len_left;
543         int buf_pos_write = pos->seg_byte_pos;
544         TRACE_FUN(ft_t_flow);
545         
546         /* Note: we do not unlock the module because
547          * there are some values cached in that `cseg' variable.  We
548          * don't want to lose this information when the module is
549          * unloaded by kerneld, even when the tape is full or when we
550          * cannot allocate enough memory.
551          */
552         if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
553                 TRACE_EXIT -ENOSPC;
554         }    
555         if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
556                 /* should we unlock the module? But it shouldn't 
557                  * be locked anyway ...
558                  */
559                 TRACE_EXIT -ENOMEM;
560         }
561         if (buf_pos_write == 0) { /* fill a new segment */
562                 *write_cnt = buf_pos_write = start_new_cseg(&cseg,
563                                                             dst_buf,
564                                                             pos,
565                                                             volume->blk_sz,
566                                                             zftc_buf, 
567                                                             seg_sz,
568                                                             volume->qic113);
569                 if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
570                         req_len_left -= result = volume->blk_sz;
571                         cseg.cmpr_pos  = 0;
572                 } else {
573                         result = 0;
574                 }
575         } else {
576                 *write_cnt = result = 0;
577         }
578         
579         len_left = seg_sz - buf_pos_write;
580         while ((req_len_left > 0) && (len_left > 18)) {
581                 /* now we have some size left for a new compressed
582                  * block.  We know, that the compression buffer is
583                  * empty (else there wouldn't be any space left).  
584                  */
585                 if (copy_from_user(zftc_scratch_buf, src_buf + result, 
586                                    volume->blk_sz) != 0) {
587                         TRACE_EXIT -EFAULT;
588                 }
589                 req_len_left -= volume->blk_sz;
590                 cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz, 
591                                             zftc_buf);
592                 if (cseg.cmpr_sz < 0) {
593                         cseg.uncmpr = 0x8000;
594                         cseg.cmpr_sz = -cseg.cmpr_sz;
595                 } else {
596                         cseg.uncmpr = 0;
597                 }
598                 /* increment "result" iff we copied the entire
599                  * compressed block to the zft_deblock_buf 
600                  */
601                 len_left -= sizeof(__u16);
602                 if (len_left >= cseg.cmpr_sz) {
603                         len_left -= cseg.count = cseg.cmpr_sz;
604                         cseg.cmpr_pos = cseg.cmpr_sz = 0;
605                         result += volume->blk_sz;
606                 } else {
607                         cseg.cmpr_sz       -= 
608                                 cseg.cmpr_pos =
609                                 cseg.count    = len_left;
610                         len_left = 0;
611                 }
612                 PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
613                 buf_pos_write += sizeof(__u16);
614                 memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
615                 buf_pos_write += cseg.count;
616                 *write_cnt    += cseg.count + sizeof(__u16);
617                 FT_SIGNAL_EXIT(_DONT_BLOCK);
618         }
619         /* erase the remainder of the segment if less than 18 bytes
620          * left (18 bytes is due to the QIC-80 standard) 
621          */
622         if (len_left <= 18) {
623                 memset(dst_buf + buf_pos_write, '\0', len_left);
624                 (*write_cnt) += len_left;
625         }
626         TRACE(ft_t_data_flow, "returning %d", result);
627         TRACE_EXIT result;
628 }   
629
630 /* out:
631  *
632  * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
633  *                (result)
634  * int *to_do   : the remaining size of the read-request.
635  *
636  * in:
637  *
638  * char *buff          : buff is the address of the upper part of the user
639  *                       buffer, that hasn't been filled with data yet.
640 *
641 * int buf_pos_read    : copy of buf_pos_rd from _ftape_read()
642  * int buf_len_read    : copy of buf_len_rd from _ftape_read()
643  * char *zft_deblock_buf: zft_deblock_buf
644  * unsigned short blk_sz: the block size valid for this volume, may differ
645  *                            from zft_blk_sz.
646  * int finish: if != 0 means that this is the last segment belonging
647  *  to this volume
648  * returns the amount of data actually copied to the user-buffer
649  *
650  * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
651  * be set to 0 
652  */
653 static int zftc_read (int *read_cnt, 
654                       __u8  __user *dst_buf, const int to_do, 
655                       const __u8 *src_buf, const int seg_sz, 
656                       const zft_position *pos, const zft_volinfo *volume)
657 {          
658         int uncompressed_sz;         
659         int result = 0;
660         int remaining = to_do;
661         TRACE_FUN(ft_t_flow);
662
663         TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
664         if (pos->seg_byte_pos == 0) {
665                 /* new segment just read
666                  */
667                 TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
668                             *read_cnt = 0);
669                 memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16), 
670                        cseg.count);
671                 cseg.cmpr_pos += cseg.count;
672                 *read_cnt      = cseg.offset;
673                 DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
674         } else {
675                 *read_cnt = 0;
676         }
677         /* loop and uncompress until user buffer full or
678          * deblock-buffer empty 
679          */
680         TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
681               cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
682         while ((cseg.spans == 0) && (remaining > 0)) {
683                 if (cseg.cmpr_pos  != 0) { /* cmpr buf is not empty */
684                         uncompressed_sz = 
685                                 zft_uncompress(zftc_buf,
686                                                cseg.uncmpr == 0x8000 ?
687                                                -cseg.cmpr_pos : cseg.cmpr_pos,
688                                                zftc_scratch_buf,
689                                                volume->blk_sz);
690                         if (uncompressed_sz != volume->blk_sz) {
691                                 *read_cnt = 0;
692                                 TRACE_ABORT(-EIO, ft_t_warn,
693                                       "Uncompressed blk (%d) != blk size (%d)",
694                                       uncompressed_sz, volume->blk_sz);
695                         }       
696                         if (copy_to_user(dst_buf + result, 
697                                          zftc_scratch_buf, 
698                                          uncompressed_sz) != 0 ) {
699                                 TRACE_EXIT -EFAULT;
700                         }
701                         remaining      -= uncompressed_sz;
702                         result     += uncompressed_sz;
703                         cseg.cmpr_pos  = 0;
704                 }                                              
705                 if (remaining > 0) {
706                         get_next_cluster(&cseg, src_buf, seg_sz, 
707                                          volume->end_seg == pos->seg_pos);
708                         if (cseg.count != 0) {
709                                 memcpy(zftc_buf, src_buf + cseg.offset,
710                                        cseg.count);
711                                 cseg.cmpr_pos = cseg.count;
712                                 cseg.offset  += cseg.count;
713                                 *read_cnt += cseg.count + sizeof(__u16);
714                         } else {
715                                 remaining = 0;
716                         }
717                 }
718                 TRACE(ft_t_data_flow, "\n" 
719                       KERN_INFO "compressed_sz: %d\n"
720                       KERN_INFO "compos       : %d\n"
721                       KERN_INFO "*read_cnt    : %d",
722                       cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
723         }
724         if (seg_sz - cseg.offset <= 18) {
725                 *read_cnt += seg_sz - cseg.offset;
726                 TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
727         }
728         TRACE(ft_t_data_flow, "\n"
729               KERN_INFO "segment size   : %d\n"
730               KERN_INFO "read count     : %d\n"
731               KERN_INFO "buf_pos_read   : %d\n"
732               KERN_INFO "remaining      : %d",
733                 seg_sz, *read_cnt, pos->seg_byte_pos, 
734                 seg_sz - *read_cnt - pos->seg_byte_pos);
735         TRACE(ft_t_data_flow, "returning: %d", result);
736         TRACE_EXIT result;
737 }                
738
739 /* Seeks to the new data position; sometimes a segment has to be read.
740  *  
741  * start_seg and end_seg give the boundaries of the current volume
742  * blk_sz is the blk_sz of the current volume as stored in the
743  * volume label
744  *
745  * We don't allow blocksizes less than 1024 bytes, therefore we don't need
746  * a 64 bit argument for new_block_pos.
747  */
748
749 static int seek_in_segment(const unsigned int to_do, cmpr_info  *c_info,
750                            const char *src_buf, const int seg_sz, 
751                            const int seg_pos, const zft_volinfo *volume);
752 static int slow_seek_forward_until_error(const unsigned int distance,
753                                          cmpr_info *c_info, zft_position *pos, 
754                                          const zft_volinfo *volume, __u8 *buf);
755 static int search_valid_segment(unsigned int segment,
756                                 const unsigned int end_seg,
757                                 const unsigned int max_foffs,
758                                 zft_position *pos, cmpr_info *c_info,
759                                 const zft_volinfo *volume, __u8 *buf);
760 static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
761                              zft_position *pos, const zft_volinfo *volume,
762                              __u8 *buf);
763 static int compute_seg_pos(unsigned int dest, zft_position *pos,
764                            const zft_volinfo *volume);
765
766 #define ZFT_SLOW_SEEK_THRESHOLD  10 /* segments */
767 #define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
768 #define ZFT_FAST_SEEK_BACKUP     10 /* segments */
769
770 static int zftc_seek(unsigned int new_block_pos,
771                      zft_position *pos, const zft_volinfo *volume, __u8 *buf)
772 {
773         unsigned int dest;
774         int limit;
775         int distance;
776         int result = 0;
777         int seg_dist;
778         int new_seg;
779         int old_seg = 0;
780         int fast_seek_trials = 0;
781         TRACE_FUN(ft_t_flow);
782
783         if (new_block_pos == 0) {
784                 pos->seg_pos      = volume->start_seg;
785                 pos->seg_byte_pos = 0;
786                 pos->volume_pos   = 0;
787                 zftc_reset();
788                 TRACE_EXIT 0;
789         }
790         dest = new_block_pos * (volume->blk_sz >> 10);
791         distance = dest - (pos->volume_pos >> 10);
792         while (distance != 0) {
793                 seg_dist = compute_seg_pos(dest, pos, volume);
794                 TRACE(ft_t_noise, "\n"
795                       KERN_INFO "seg_dist: %d\n"
796                       KERN_INFO "distance: %d\n"
797                       KERN_INFO "dest    : %d\n"
798                       KERN_INFO "vpos    : %d\n"
799                       KERN_INFO "seg_pos : %d\n"
800                       KERN_INFO "trials  : %d",
801                       seg_dist, distance, dest,
802                       (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
803                       fast_seek_trials);
804                 if (distance > 0) {
805                         if (seg_dist < 0) {
806                                 TRACE(ft_t_bug, "BUG: distance %d > 0, "
807                                       "segment difference %d < 0",
808                                       distance, seg_dist);
809                                 result = -EIO;
810                                 break;
811                         }
812                         new_seg = pos->seg_pos + seg_dist;
813                         if (new_seg > volume->end_seg) {
814                                 new_seg = volume->end_seg;
815                         }
816                         if (old_seg == new_seg || /* loop */
817                             seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
818                             fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
819                                 TRACE(ft_t_noise, "starting slow seek:\n"
820                                    KERN_INFO "fast seek failed too often: %s\n"
821                                    KERN_INFO "near target position      : %s\n"
822                                    KERN_INFO "looping between two segs  : %s",
823                                       (fast_seek_trials >= 
824                                        ZFT_FAST_SEEK_MAX_TRIALS)
825                                       ? "yes" : "no",
826                                       (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD) 
827                                       ? "yes" : "no",
828                                       (old_seg == new_seg)
829                                       ? "yes" : "no");
830                                 result = slow_seek_forward(dest, &cseg, 
831                                                            pos, volume, buf);
832                                 break;
833                         }
834                         old_seg = new_seg;
835                         limit = volume->end_seg;
836                         fast_seek_trials ++;
837                         for (;;) {
838                                 result = search_valid_segment(new_seg, limit,
839                                                               volume->size,
840                                                               pos, &cseg,
841                                                               volume, buf);
842                                 if (result == 0 || result == -EINTR) {
843                                         break;
844                                 }
845                                 if (new_seg == volume->start_seg) {
846                                         result = -EIO; /* set error 
847                                                         * condition
848                                                         */
849                                         break;
850                                 }
851                                 limit    = new_seg;
852                                 new_seg -= ZFT_FAST_SEEK_BACKUP;
853                                 if (new_seg < volume->start_seg) {
854                                         new_seg = volume->start_seg;
855                                 }
856                         }
857                         if (result < 0) {
858                                 TRACE(ft_t_warn,
859                                       "Couldn't find a readable segment");
860                                 break;
861                         }
862                 } else /* if (distance < 0) */ {
863                         if (seg_dist > 0) {
864                                 TRACE(ft_t_bug, "BUG: distance %d < 0, "
865                                       "segment difference %d > 0",
866                                       distance, seg_dist);
867                                 result = -EIO;
868                                 break;
869                         }
870                         new_seg = pos->seg_pos + seg_dist;
871                         if (fast_seek_trials > 0 && seg_dist == 0) {
872                                 /* this avoids sticking to the same
873                                  * segment all the time. On the other hand:
874                                  * if we got here for the first time, and the
875                                  * deblock_buffer still contains a valid
876                                  * segment, then there is no need to skip to 
877                                  * the previous segment if the desired position
878                                  * is inside this segment.
879                                  */
880                                 new_seg --;
881                         }
882                         if (new_seg < volume->start_seg) {
883                                 new_seg = volume->start_seg;
884                         }
885                         limit   = pos->seg_pos;
886                         fast_seek_trials ++;
887                         for (;;) {
888                                 result = search_valid_segment(new_seg, limit,
889                                                               pos->volume_pos,
890                                                               pos, &cseg,
891                                                               volume, buf);
892                                 if (result == 0 || result == -EINTR) {
893                                         break;
894                                 }
895                                 if (new_seg == volume->start_seg) {
896                                         result = -EIO; /* set error 
897                                                         * condition
898                                                         */
899                                         break;
900                                 }
901                                 limit    = new_seg;
902                                 new_seg -= ZFT_FAST_SEEK_BACKUP;
903                                 if (new_seg < volume->start_seg) {
904                                         new_seg = volume->start_seg;
905                                 }
906                         }
907                         if (result < 0) {
908                                 TRACE(ft_t_warn,
909                                       "Couldn't find a readable segment");
910                                 break;
911                         }
912                 }
913                 distance = dest - (pos->volume_pos >> 10);
914         }
915         TRACE_EXIT result;
916 }
917
918
919 /*  advance inside the given segment by at most to_do kilobytes;
920  *  returns the number of kilobytes moved
921  */
922
923 static int seek_in_segment(const unsigned int to_do,
924                            cmpr_info  *c_info,
925                            const char *src_buf, 
926                            const int seg_sz, 
927                            const int seg_pos,
928                            const zft_volinfo *volume)
929 {
930         int result = 0;
931         int blk_sz = volume->blk_sz >> 10;
932         int remaining = to_do;
933         TRACE_FUN(ft_t_flow);
934
935         if (c_info->offset == 0) {
936                 /* new segment just read
937                  */
938                 TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
939                 c_info->cmpr_pos += c_info->count;
940                 DUMP_CMPR_INFO(ft_t_noise, "", c_info);
941         }
942         /* loop over the clusters until the seek distance is covered
943          * or the deblock-buffer is exhausted
944          */
945         TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
946               c_info->cmpr_sz, c_info->cmpr_pos);
947         while (c_info->spans == 0 && remaining > 0) {
948                 if (c_info->cmpr_pos  != 0) { /* cmpr buf is not empty */
949                         result       += blk_sz;
950                         remaining    -= blk_sz;
951                         c_info->cmpr_pos = 0;
952                 }
953                 if (remaining > 0) {
954                         get_next_cluster(c_info, src_buf, seg_sz, 
955                                          volume->end_seg == seg_pos);
956                         if (c_info->count != 0) {
957                                 c_info->cmpr_pos = c_info->count;
958                                 c_info->offset  += c_info->count;
959                         } else {
960                                 break;
961                         }
962                 }
963                 /*  Allow escape from this loop on signal!
964                  */
965                 FT_SIGNAL_EXIT(_DONT_BLOCK);
966                 DUMP_CMPR_INFO(ft_t_noise, "", c_info);
967                 TRACE(ft_t_noise, "to_do: %d", remaining);
968         }
969         if (seg_sz - c_info->offset <= 18) {
970                 c_info->offset = seg_sz;
971         }
972         TRACE(ft_t_noise, "\n"
973               KERN_INFO "segment size   : %d\n"
974               KERN_INFO "buf_pos_read   : %d\n"
975               KERN_INFO "remaining      : %d",
976               seg_sz, c_info->offset,
977               seg_sz - c_info->offset);
978         TRACE_EXIT result;
979 }                
980
981 static int slow_seek_forward_until_error(const unsigned int distance,
982                                          cmpr_info *c_info,
983                                          zft_position *pos, 
984                                          const zft_volinfo *volume,
985                                          __u8 *buf)
986 {
987         unsigned int remaining = distance;
988         int seg_sz;
989         int seg_pos;
990         int result;
991         TRACE_FUN(ft_t_flow);
992         
993         seg_pos = pos->seg_pos;
994         do {
995                 TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf, 
996                                                        FT_RD_AHEAD),);
997                 /* now we have the contents of the current segment in
998                  * the deblock buffer
999                  */
1000                 TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
1001                                                      seg_sz, seg_pos,volume),);
1002                 remaining        -= result;
1003                 pos->volume_pos  += result<<10;
1004                 pos->seg_pos      = seg_pos;
1005                 pos->seg_byte_pos = c_info->offset;
1006                 seg_pos ++;
1007                 if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
1008                         pos->seg_pos ++;
1009                         pos->seg_byte_pos = 0;
1010                         c_info->offset = 0;
1011                 }
1012                 /*  Allow escape from this loop on signal!
1013                  */
1014                 FT_SIGNAL_EXIT(_DONT_BLOCK);
1015                 TRACE(ft_t_noise, "\n"
1016                       KERN_INFO "remaining:  %d\n"
1017                       KERN_INFO "seg_pos:    %d\n"
1018                       KERN_INFO "end_seg:    %d\n"
1019                       KERN_INFO "result:     %d",
1020                       remaining, seg_pos, volume->end_seg, result);  
1021         } while (remaining > 0 && seg_pos <= volume->end_seg);
1022         TRACE_EXIT 0;
1023 }
1024
1025 /* find the next segment containing valid data and update *pos and
1026  * *c_info accordingly; return 0 on success, -EIO otherwise */
1027 static int search_valid_segment(unsigned int segment,
1028                                 const unsigned int end_seg,
1029                                 const unsigned int max_foffs,
1030                                 zft_position *pos,
1031                                 cmpr_info *c_info,
1032                                 const zft_volinfo *volume,
1033                                 __u8 *buf)
1034 {
1035         cmpr_info tmp_info;
1036         int seg_sz;
1037         TRACE_FUN(ft_t_flow);
1038         
1039         memset(&tmp_info, 0, sizeof(cmpr_info));
1040         while (segment <= end_seg) {
1041                 FT_SIGNAL_EXIT(_DONT_BLOCK);
1042                 TRACE(ft_t_noise,
1043                       "Searching readable segment between %d and %d",
1044                       segment, end_seg);
1045                 seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
1046                 if ((seg_sz > 0) &&
1047                     (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
1048                     (tmp_info.foffs != 0 || segment == volume->start_seg)) {
1049                         if ((tmp_info.foffs>>10) > max_foffs) {
1050                                 TRACE_ABORT(-EIO, ft_t_noise, "\n"
1051                                             KERN_INFO "cseg.foff: %d\n"
1052                                             KERN_INFO "dest     : %d",
1053                                             (int)(tmp_info.foffs >> 10),
1054                                             max_foffs);
1055                         }
1056                         DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
1057                         *c_info           = tmp_info;
1058                         pos->seg_pos      = segment;
1059                         pos->volume_pos   = c_info->foffs;
1060                         pos->seg_byte_pos = c_info->offset;
1061                         TRACE(ft_t_noise, "found segment at %d", segment);
1062                         TRACE_EXIT 0;
1063                 }
1064                 segment++;
1065         }
1066         TRACE_EXIT -EIO;
1067 }
1068
1069 static int slow_seek_forward(unsigned int dest,
1070                              cmpr_info *c_info,
1071                              zft_position *pos,
1072                              const zft_volinfo *volume,
1073                              __u8 *buf)
1074 {
1075         unsigned int distance;
1076         int result = 0;
1077         TRACE_FUN(ft_t_flow);
1078                 
1079         distance = dest - (pos->volume_pos >> 10);
1080         while ((distance > 0) &&
1081                (result = slow_seek_forward_until_error(distance,
1082                                                        c_info,
1083                                                        pos,
1084                                                        volume,
1085                                                        buf)) < 0) {
1086                 if (result == -EINTR) {
1087                         break;
1088                 }
1089                 TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
1090                 /* the failing segment is either pos->seg_pos or
1091                  * pos->seg_pos + 1. There is no need to further try
1092                  * that segment, because ftape_read_segment() already
1093                  * has tried very hard to read it. So we start with the
1094                  * following segment, which is pos->seg_pos + 1
1095                  */
1096                 if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
1097                                         pos, c_info,
1098                                         volume, buf) < 0) {
1099                         TRACE(ft_t_noise, "search_valid_segment() failed");
1100                         result = -EIO;
1101                         break;
1102                 }
1103                 distance = dest - (pos->volume_pos >> 10);
1104                 result = 0;
1105                 TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
1106                 /* found valid segment, retry the seek */
1107         }
1108         TRACE_EXIT result;
1109 }
1110
1111 static int compute_seg_pos(const unsigned int dest,
1112                            zft_position *pos,
1113                            const zft_volinfo *volume)
1114 {
1115         int segment;
1116         int distance = dest - (pos->volume_pos >> 10);
1117         unsigned int raw_size;
1118         unsigned int virt_size;
1119         unsigned int factor;
1120         TRACE_FUN(ft_t_flow);
1121
1122         if (distance >= 0) {
1123                 raw_size  = volume->end_seg - pos->seg_pos + 1;
1124                 virt_size = ((unsigned int)(volume->size>>10) 
1125                              - (unsigned int)(pos->volume_pos>>10)
1126                              + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1127                 virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1128                 if (virt_size == 0 || raw_size == 0) {
1129                         TRACE_EXIT 0;
1130                 }
1131                 if (raw_size >= (1<<25)) {
1132                         factor = raw_size/(virt_size>>7);
1133                 } else {
1134                         factor = (raw_size<<7)/virt_size;
1135                 }
1136                 segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1137                 segment = (segment * factor)>>7;
1138         } else {
1139                 raw_size  = pos->seg_pos - volume->start_seg + 1;
1140                 virt_size = ((unsigned int)(pos->volume_pos>>10)
1141                              + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1142                 virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1143                 if (virt_size == 0 || raw_size == 0) {
1144                         TRACE_EXIT 0;
1145                 }
1146                 if (raw_size >= (1<<25)) {
1147                         factor = raw_size/(virt_size>>7);
1148                 } else {
1149                         factor = (raw_size<<7)/virt_size;
1150                 }
1151                 segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1152         }
1153         TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
1154         TRACE_EXIT segment;
1155 }
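
/* Illustrative sketch, not part of the original driver: the 7-bit fixed
 * point ratio used by compute_seg_pos() above, shown with hypothetical
 * numbers. The factor is the raw/virtual segment ratio scaled by 128.
 */
#if 0
static int fixed_point_ratio_example(void)
{
        unsigned int raw_size  = 1200;                    /* raw segments     */
        unsigned int virt_size = 1500;                    /* virtual segments */
        unsigned int factor    = (raw_size << 7) / virt_size;  /* = 102 (~0.8 * 128) */
        int segment            = 100;                     /* naive estimate   */

        return (segment * factor) >> 7;                   /* scaled back: 79  */
}
#endif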
1156
1157 static struct zft_cmpr_ops cmpr_ops = {
1158         zftc_write,
1159         zftc_read,
1160         zftc_seek,
1161         zftc_lock,
1162         zftc_reset,
1163         zftc_cleanup
1164 };
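
/* The same table with C99 designated initializers, for readability only.
 * The field names are assumed from the positional order used above and from
 * the zftc_* functions; this is an illustrative sketch, not a change to the
 * driver.
 */
#if 0
static struct zft_cmpr_ops cmpr_ops_example = {
        .write   = zftc_write,
        .read    = zftc_read,
        .seek    = zftc_seek,
        .lock    = zftc_lock,
        .reset   = zftc_reset,
        .cleanup = zftc_cleanup,
};
#endif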
1165
1166 int zft_compressor_init(void)
1167 {
1168         TRACE_FUN(ft_t_flow);
1169         
1170 #ifdef MODULE
1171         printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
1172         if (TRACE_LEVEL >= ft_t_info) {
1173                 printk(
1174 KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
1175 KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n");
1176         }
1177 #else /* !MODULE */
1178         /* print a short no-nonsense boot message */
1179         printk(KERN_INFO "zftape compressor v1.00a 970514\n");
1180         printk(KERN_INFO "For use with " FTAPE_VERSION "\n");
1181 #endif /* MODULE */
1182         TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
1183         TRACE(ft_t_info, "installing compressor for zftape ...");
1184         TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
1185         TRACE_EXIT 0;
1186 }
1187
1188 #ifdef MODULE
1189
1190 MODULE_AUTHOR(
1191         "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de");
1192 MODULE_DESCRIPTION(
1193 "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
1194 MODULE_LICENSE("GPL");
1195
1196 /* Called by modules package when installing the driver
1197  */
1198 int init_module(void)
1199 {
1200         return zft_compressor_init();
1201 }
1202
1203 #endif /* MODULE */