001 /*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements. See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership. The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License. You may obtain a copy of the License at
009 *
010 * http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing,
013 * software distributed under the License is distributed on an
014 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
015 * KIND, either express or implied. See the License for the
016 * specific language governing permissions and limitations
017 * under the License.
018 */
019 package org.apache.commons.compress.archivers.zip;
020
021 import java.util.zip.ZipException;
022
023 /**
024 * Holds size and other extended information for entries that use Zip64
025 * features.
026 *
 * <p>From <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">PKWARE's APPNOTE.TXT</a>
028 * <pre>
029 * Zip64 Extended Information Extra Field (0x0001):
030 *
031 * The following is the layout of the zip64 extended
032 * information "extra" block. If one of the size or
033 * offset fields in the Local or Central directory
034 * record is too small to hold the required data,
035 * a Zip64 extended information record is created.
036 * The order of the fields in the zip64 extended
037 * information record is fixed, but the fields will
038 * only appear if the corresponding Local or Central
039 * directory record field is set to 0xFFFF or 0xFFFFFFFF.
040 *
041 * Note: all fields stored in Intel low-byte/high-byte order.
042 *
043 * Value Size Description
044 * ----- ---- -----------
045 * (ZIP64) 0x0001 2 bytes Tag for this "extra" block type
046 * Size 2 bytes Size of this "extra" block
047 * Original
048 * Size 8 bytes Original uncompressed file size
049 * Compressed
050 * Size 8 bytes Size of compressed data
051 * Relative Header
052 * Offset 8 bytes Offset of local header record
053 * Disk Start
054 * Number 4 bytes Number of the disk on which
055 * this file starts
056 *
057 * This entry in the Local header must include BOTH original
058 * and compressed file size fields. If encrypting the
059 * central directory and bit 13 of the general purpose bit
060 * flag is set indicating masking, the value stored in the
061 * Local Header for the original file size will be zero.
062 * </pre></p>
063 *
 * <p>Currently Commons Compress doesn't support encrypting the
 * central directory so the note about masking doesn't apply.</p>
066 *
067 * <p>The implementation relies on data being read from the local file
068 * header and assumes that both size values are always present.</p>
069 *
070 * @since Apache Commons Compress 1.2
071 * @NotThreadSafe
072 */
073 public class Zip64ExtendedInformationExtraField implements ZipExtraField {
074 // TODO: the LFH should probably not contain relativeHeaderOffset
075 // and diskStart but then ZipArchivePOutputStream won't write it to
076 // the CD either - need to test interop with other implementations
077 // to see whether they do have a problem with the extraneous
078 // information inside the LFH
079
080 private static final ZipShort HEADER_ID = new ZipShort(0x0001);
081
082 private static final int WORD = 4, DWORD = 8;
083
084 private ZipEightByteInteger size, compressedSize, relativeHeaderOffset;
085 private ZipLong diskStart;
086
087 /**
088 * This constructor should only be used by the code that reads
089 * archives inside of Commons Compress.
090 */
091 public Zip64ExtendedInformationExtraField() { }
092
093 /**
094 * Creates an extra field based on the original and compressed size.
095 *
096 * @param size the entry's original size
097 * @param compressedSize the entry's compressed size
098 *
099 * @throws IllegalArgumentException if size or compressedSize is null
100 */
101 public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
102 ZipEightByteInteger compressedSize) {
103 this(size, compressedSize, null, null);
104 }
105
106 /**
107 * Creates an extra field based on all four possible values.
108 *
109 * @param size the entry's original size
110 * @param compressedSize the entry's compressed size
111 *
112 * @throws IllegalArgumentException if size or compressedSize is null
113 */
114 public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
115 ZipEightByteInteger compressedSize,
116 ZipEightByteInteger relativeHeaderOffset,
117 ZipLong diskStart) {
118 if (size == null) {
119 throw new IllegalArgumentException("size must not be null");
120 }
121 if (compressedSize == null) {
122 throw new IllegalArgumentException("compressedSize must not be null");
123 }
124 this.size = size;
125 this.compressedSize = compressedSize;
126 this.relativeHeaderOffset = relativeHeaderOffset;
127 this.diskStart = diskStart;
128 }
129
130 /** {@inheritDoc} */
131 public ZipShort getHeaderId() {
132 return HEADER_ID;
133 }
134
135 /** {@inheritDoc} */
136 public ZipShort getLocalFileDataLength() {
137 return getCentralDirectoryLength();
138 }
139
140 /** {@inheritDoc} */
141 public ZipShort getCentralDirectoryLength() {
142 return new ZipShort(2 * DWORD // both size fields
143 + (relativeHeaderOffset != null ? DWORD : 0)
144 + (diskStart != null ? WORD : 0));
145 }
146
147 /** {@inheritDoc} */
148 public byte[] getLocalFileDataData() {
149 return getCentralDirectoryData();
150 }
151
152 /** {@inheritDoc} */
153 public byte[] getCentralDirectoryData() {
154 byte[] data = new byte[getCentralDirectoryLength().getValue()];
155 addSizes(data);
156 int off = 2 * DWORD;
157 if (relativeHeaderOffset != null) {
158 System.arraycopy(relativeHeaderOffset.getBytes(), 0, data, off, DWORD);
159 off += DWORD;
160 }
161 if (diskStart != null) {
162 System.arraycopy(diskStart.getBytes(), 0, data, off, WORD);
163 off += WORD;
164 }
165 return data;
166 }
167
168 /** {@inheritDoc} */
169 public void parseFromLocalFileData(byte[] buffer, int offset, int length)
170 throws ZipException {
171 if (length < 2 * DWORD) {
172 throw new ZipException("Zip64 extended information must contain"
173 + " both size values in the local file"
174 + " header.");
175 }
176 size = new ZipEightByteInteger(buffer, offset);
177 offset += DWORD;
178 compressedSize = new ZipEightByteInteger(buffer, offset);
179 offset += DWORD;
180 int remaining = length - 2 * DWORD;
181 if (remaining >= DWORD) {
182 relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
183 offset += DWORD;
184 remaining -= DWORD;
185 }
186 if (remaining >= WORD) {
187 diskStart = new ZipLong(buffer, offset);
188 offset += WORD;
189 remaining -= WORD;
190 }
191 }
192
193 /** {@inheritDoc} */
194 public void parseFromCentralDirectoryData(byte[] buffer, int offset,
195 int length)
196 throws ZipException {
197 // if there is no size information in here, we are screwed and
198 // can only hope things will get resolved by LFH data later
199 // But there are some cases that can be detected
200 // * all data is there
201 // * length % 8 == 4 -> at least we can identify the diskStart field
202 if (length >= 3 * DWORD + WORD) {
203 parseFromLocalFileData(buffer, offset, length);
204 } else if (length % DWORD == WORD) {
205 diskStart = new ZipLong(buffer, offset + length - WORD);
206 }
207 }
208
209 /**
210 * The uncompressed size stored in this extra field.
211 */
212 public ZipEightByteInteger getSize() {
213 return size;
214 }
215
216 /**
217 * The compressed size stored in this extra field.
218 */
219 public ZipEightByteInteger getCompressedSize() {
220 return compressedSize;
221 }
222
223 /**
224 * The relative header offset stored in this extra field.
225 */
226 public ZipEightByteInteger getRelativeHeaderOffset() {
227 return relativeHeaderOffset;
228 }
229
230 /**
231 * The disk start number stored in this extra field.
232 */
233 public ZipLong getDiskStartNumber() {
234 return diskStart;
235 }
236
237 private void addSizes(byte[] data) {
238 System.arraycopy(size.getBytes(), 0, data, 0, DWORD);
239 System.arraycopy(compressedSize.getBytes(), 0, data, DWORD, DWORD);
240 }
241 }