001 /*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements. See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership. The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License. You may obtain a copy of the License at
009 *
010 * http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing,
013 * software distributed under the License is distributed on an
014 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
015 * KIND, either express or implied. See the License for the
016 * specific language governing permissions and limitations
017 * under the License.
018 */
019 package org.apache.commons.compress.archivers.zip;
020
import java.util.Arrays;
import java.util.zip.ZipException;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
025
026 /**
027 * Holds size and other extended information for entries that use Zip64
028 * features.
029 *
 * <p>From <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">PKWARE's APPNOTE.TXT</a>
031 * <pre>
032 * Zip64 Extended Information Extra Field (0x0001):
033 *
034 * The following is the layout of the zip64 extended
035 * information "extra" block. If one of the size or
036 * offset fields in the Local or Central directory
037 * record is too small to hold the required data,
038 * a Zip64 extended information record is created.
039 * The order of the fields in the zip64 extended
040 * information record is fixed, but the fields will
041 * only appear if the corresponding Local or Central
042 * directory record field is set to 0xFFFF or 0xFFFFFFFF.
043 *
044 * Note: all fields stored in Intel low-byte/high-byte order.
045 *
046 * Value Size Description
047 * ----- ---- -----------
048 * (ZIP64) 0x0001 2 bytes Tag for this "extra" block type
049 * Size 2 bytes Size of this "extra" block
050 * Original
051 * Size 8 bytes Original uncompressed file size
052 * Compressed
053 * Size 8 bytes Size of compressed data
054 * Relative Header
055 * Offset 8 bytes Offset of local header record
056 * Disk Start
057 * Number 4 bytes Number of the disk on which
058 * this file starts
059 *
060 * This entry in the Local header must include BOTH original
061 * and compressed file size fields. If encrypting the
062 * central directory and bit 13 of the general purpose bit
063 * flag is set indicating masking, the value stored in the
064 * Local Header for the original file size will be zero.
065 * </pre></p>
066 *
 * <p>Currently Commons Compress doesn't support encrypting the
 * central directory so the note about masking doesn't apply.</p>
069 *
070 * <p>The implementation relies on data being read from the local file
071 * header and assumes that both size values are always present.</p>
072 *
073 * @since Apache Commons Compress 1.2
074 * @NotThreadSafe
075 */
076 public class Zip64ExtendedInformationExtraField implements ZipExtraField {
077
078 static final ZipShort HEADER_ID = new ZipShort(0x0001);
079
080 private static final String LFH_MUST_HAVE_BOTH_SIZES_MSG =
081 "Zip64 extended information must contain"
082 + " both size values in the local file header.";
083
084 private ZipEightByteInteger size, compressedSize, relativeHeaderOffset;
085 private ZipLong diskStart;
086
087 /**
088 * Stored in {@link #parseFromCentralDirectoryData
089 * parseFromCentralDirectoryData} so it can be reused when ZipFile
090 * calls {@link #reparseCentralDirectoryData
091 * reparseCentralDirectoryData}.
092 *
093 * <p>Not used for anything else</p>
094 *
095 * @since Apache Commons Compress 1.3
096 */
097 private byte[] rawCentralDirectoryData;
098
099 /**
100 * This constructor should only be used by the code that reads
101 * archives inside of Commons Compress.
102 */
103 public Zip64ExtendedInformationExtraField() { }
104
105 /**
106 * Creates an extra field based on the original and compressed size.
107 *
108 * @param size the entry's original size
109 * @param compressedSize the entry's compressed size
110 *
111 * @throws IllegalArgumentException if size or compressedSize is null
112 */
113 public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
114 ZipEightByteInteger compressedSize) {
115 this(size, compressedSize, null, null);
116 }
117
118 /**
119 * Creates an extra field based on all four possible values.
120 *
121 * @param size the entry's original size
122 * @param compressedSize the entry's compressed size
123 *
124 * @throws IllegalArgumentException if size or compressedSize is null
125 */
126 public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
127 ZipEightByteInteger compressedSize,
128 ZipEightByteInteger relativeHeaderOffset,
129 ZipLong diskStart) {
130 this.size = size;
131 this.compressedSize = compressedSize;
132 this.relativeHeaderOffset = relativeHeaderOffset;
133 this.diskStart = diskStart;
134 }
135
136 /** {@inheritDoc} */
137 public ZipShort getHeaderId() {
138 return HEADER_ID;
139 }
140
141 /** {@inheritDoc} */
142 public ZipShort getLocalFileDataLength() {
143 return new ZipShort(size != null ? 2 * DWORD : 0);
144 }
145
146 /** {@inheritDoc} */
147 public ZipShort getCentralDirectoryLength() {
148 return new ZipShort((size != null ? DWORD : 0)
149 + (compressedSize != null ? DWORD : 0)
150 + (relativeHeaderOffset != null ? DWORD : 0)
151 + (diskStart != null ? WORD : 0));
152 }
153
154 /** {@inheritDoc} */
155 public byte[] getLocalFileDataData() {
156 if (size != null || compressedSize != null) {
157 if (size == null || compressedSize == null) {
158 throw new IllegalArgumentException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
159 }
160 byte[] data = new byte[2 * DWORD];
161 addSizes(data);
162 return data;
163 }
164 return new byte[0];
165 }
166
167 /** {@inheritDoc} */
168 public byte[] getCentralDirectoryData() {
169 byte[] data = new byte[getCentralDirectoryLength().getValue()];
170 int off = addSizes(data);
171 if (relativeHeaderOffset != null) {
172 System.arraycopy(relativeHeaderOffset.getBytes(), 0, data, off, DWORD);
173 off += DWORD;
174 }
175 if (diskStart != null) {
176 System.arraycopy(diskStart.getBytes(), 0, data, off, WORD);
177 off += WORD;
178 }
179 return data;
180 }
181
182 /** {@inheritDoc} */
183 public void parseFromLocalFileData(byte[] buffer, int offset, int length)
184 throws ZipException {
185 if (length == 0) {
186 // no local file data at all, may happen if an archive
187 // only holds a ZIP64 extended information extra field
188 // inside the central directory but not inside the local
189 // file header
190 return;
191 }
192 if (length < 2 * DWORD) {
193 throw new ZipException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
194 }
195 size = new ZipEightByteInteger(buffer, offset);
196 offset += DWORD;
197 compressedSize = new ZipEightByteInteger(buffer, offset);
198 offset += DWORD;
199 int remaining = length - 2 * DWORD;
200 if (remaining >= DWORD) {
201 relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
202 offset += DWORD;
203 remaining -= DWORD;
204 }
205 if (remaining >= WORD) {
206 diskStart = new ZipLong(buffer, offset);
207 offset += WORD;
208 remaining -= WORD;
209 }
210 }
211
212 /** {@inheritDoc} */
213 public void parseFromCentralDirectoryData(byte[] buffer, int offset,
214 int length)
215 throws ZipException {
216 // store for processing in reparseCentralDirectoryData
217 rawCentralDirectoryData = new byte[length];
218 System.arraycopy(buffer, offset, rawCentralDirectoryData, 0, length);
219
220 // if there is no size information in here, we are screwed and
221 // can only hope things will get resolved by LFH data later
222 // But there are some cases that can be detected
223 // * all data is there
224 // * length == 24 -> both sizes and offset
225 // * length % 8 == 4 -> at least we can identify the diskStart field
226 if (length >= 3 * DWORD + WORD) {
227 parseFromLocalFileData(buffer, offset, length);
228 } else if (length == 3 * DWORD) {
229 size = new ZipEightByteInteger(buffer, offset);
230 offset += DWORD;
231 compressedSize = new ZipEightByteInteger(buffer, offset);
232 offset += DWORD;
233 relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
234 } else if (length % DWORD == WORD) {
235 diskStart = new ZipLong(buffer, offset + length - WORD);
236 }
237 }
238
239 /**
240 * Parses the raw bytes read from the central directory extra
241 * field with knowledge which fields are expected to be there.
242 *
243 * <p>All four fields inside the zip64 extended information extra
244 * field are optional and only present if their corresponding
245 * entry inside the central directory contains the correct magic
246 * value.</p>
247 */
248 public void reparseCentralDirectoryData(boolean hasUncompressedSize,
249 boolean hasCompressedSize,
250 boolean hasRelativeHeaderOffset,
251 boolean hasDiskStart)
252 throws ZipException {
253 if (rawCentralDirectoryData != null) {
254 int expectedLength = (hasUncompressedSize ? DWORD : 0)
255 + (hasCompressedSize ? DWORD : 0)
256 + (hasRelativeHeaderOffset ? DWORD : 0)
257 + (hasDiskStart ? WORD : 0);
258 if (rawCentralDirectoryData.length != expectedLength) {
259 throw new ZipException("central directory zip64 extended"
260 + " information extra field's length"
261 + " doesn't match central directory"
262 + " data. Expected length "
263 + expectedLength + " but is "
264 + rawCentralDirectoryData.length);
265 }
266 int offset = 0;
267 if (hasUncompressedSize) {
268 size = new ZipEightByteInteger(rawCentralDirectoryData, offset);
269 offset += DWORD;
270 }
271 if (hasCompressedSize) {
272 compressedSize = new ZipEightByteInteger(rawCentralDirectoryData,
273 offset);
274 offset += DWORD;
275 }
276 if (hasRelativeHeaderOffset) {
277 relativeHeaderOffset =
278 new ZipEightByteInteger(rawCentralDirectoryData, offset);
279 offset += DWORD;
280 }
281 if (hasDiskStart) {
282 diskStart = new ZipLong(rawCentralDirectoryData, offset);
283 offset += WORD;
284 }
285 }
286 }
287
288 /**
289 * The uncompressed size stored in this extra field.
290 */
291 public ZipEightByteInteger getSize() {
292 return size;
293 }
294
295 /**
296 * The uncompressed size stored in this extra field.
297 */
298 public void setSize(ZipEightByteInteger size) {
299 this.size = size;
300 }
301
302 /**
303 * The compressed size stored in this extra field.
304 */
305 public ZipEightByteInteger getCompressedSize() {
306 return compressedSize;
307 }
308
309 /**
310 * The uncompressed size stored in this extra field.
311 */
312 public void setCompressedSize(ZipEightByteInteger compressedSize) {
313 this.compressedSize = compressedSize;
314 }
315
316 /**
317 * The relative header offset stored in this extra field.
318 */
319 public ZipEightByteInteger getRelativeHeaderOffset() {
320 return relativeHeaderOffset;
321 }
322
323 /**
324 * The relative header offset stored in this extra field.
325 */
326 public void setRelativeHeaderOffset(ZipEightByteInteger rho) {
327 relativeHeaderOffset = rho;
328 }
329
330 /**
331 * The disk start number stored in this extra field.
332 */
333 public ZipLong getDiskStartNumber() {
334 return diskStart;
335 }
336
337 /**
338 * The disk start number stored in this extra field.
339 */
340 public void setDiskStartNumber(ZipLong ds) {
341 diskStart = ds;
342 }
343
344 private int addSizes(byte[] data) {
345 int off = 0;
346 if (size != null) {
347 System.arraycopy(size.getBytes(), 0, data, 0, DWORD);
348 off += DWORD;
349 }
350 if (compressedSize != null) {
351 System.arraycopy(compressedSize.getBytes(), 0, data, off, DWORD);
352 off += DWORD;
353 }
354 return off;
355 }
356 }