1 | /* |
2 | |
3 | Derby - Class org.apache.derby.client.net.Request |
4 | |
5 | Copyright (c) 2001, 2005 The Apache Software Foundation or its licensors, where applicable. |
6 | |
7 | Licensed under the Apache License, Version 2.0 (the "License"); |
8 | you may not use this file except in compliance with the License. |
9 | You may obtain a copy of the License at |
10 | |
11 | http://www.apache.org/licenses/LICENSE-2.0 |
12 | |
13 | Unless required by applicable law or agreed to in writing, software |
14 | distributed under the License is distributed on an "AS IS" BASIS, |
15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
16 | See the License for the specific language governing permissions and |
17 | limitations under the License. |
18 | |
19 | */ |
20 | package org.apache.derby.client.net; |
21 | |
22 | import org.apache.derby.client.am.DisconnectException; |
23 | import org.apache.derby.client.am.EncryptionManager; |
24 | import org.apache.derby.client.am.ClientMessageId; |
25 | import org.apache.derby.client.am.SqlException; |
26 | import org.apache.derby.client.am.Utils; |
27 | import org.apache.derby.shared.common.reference.SQLState; |
28 | |
29 | import java.io.UnsupportedEncodingException; |
30 | |
31 | public class Request { |
32 | |
    // byte array buffer used for constructing requests.
    // currently requests are built starting at the beginning of the buffer.
    protected byte[] bytes_;

    // keeps track of the next position to place a byte in the buffer.
    // so the last valid byte in the message is at bytes_[offset - 1]
    protected int offset_;

    // a stack is used to keep track of offsets into the buffer where 2 byte
    // ddm length values are located. these length bytes will be automatically updated
    // by this object when construction of a particular object has completed
    // (see markLengthBytes / updateLengthBytes).
    // right now the max size of the stack is 10. this is an arbitrary number which
    // should be sufficiently large enough to handle all situations.
    private final static int MAX_MARKS_NESTING = 10;
    private int[] markStack_ = new int[MAX_MARKS_NESTING];
    private int top_ = 0;  // index of the next free slot in markStack_

    // the ccsid manager for the connection is stored in this object. it will
    // be used when constructing character ddm data. it will NOT be used for
    // building any FDOCA data.
    protected CcsidManager ccsidManager_;

    // This tracks the location of the current
    // Dss header length bytes. This is done so
    // the length bytes can be automatically
    // updated as information is added to this stream.
    private int dssLengthLocation_ = 0;

    // tracks the request correlation ID to use for commands and command objects.
    // this is automatically updated as commands are built and sent to the server.
    private int correlationID_ = 0;

    // when true, the next buildDss call finalizes only the previous DSS's
    // length bytes (no chaining-bit fixups); set while streaming lob data.
    private boolean simpleDssFinalize = false;

    // Used to mask out password when trace is on.
    protected boolean passwordIncluded_ = false;
    protected int passwordStart_ = 0;
    protected int passwordLength_ = 0;

    // the agent that owns this request; supplies the output stream,
    // connection state, and exception accumulation facilities.
    protected NetAgent netAgent_;
73 | |
74 | |
    // construct a request object specifying the minimum buffer size
    // to be used to buffer up the built requests. also specify the ccsid manager
    // instance to be used when building ddm character data.
    Request(NetAgent netAgent, int minSize, CcsidManager ccsidManager) {
        netAgent_ = netAgent;
        bytes_ = new byte[minSize]; // grows on demand via ensureLength()
        ccsidManager_ = ccsidManager;
        clearBuffer(); // start in a known empty state
    }
84 | |
    // construct a request object specifying the ccsid manager instance
    // to be used when building ddm character data, and an explicit buffer
    // size. Delegates to the (netAgent, minSize, ccsidManager) constructor
    // with the caller-supplied bufferSize.
    Request(NetAgent netAgent, CcsidManager ccsidManager, int bufferSize) {
        //this (netAgent, Request.DEFAULT_BUFFER_SIZE, ccsidManager);
        this(netAgent, bufferSize, ccsidManager);
    }
92 | |
93 | protected final void clearBuffer() { |
94 | offset_ = 0; |
95 | top_ = 0; |
96 | for (int i = 0; i < markStack_.length; i++) { |
97 | if (markStack_[i] != 0) { |
98 | markStack_[i] = 0; |
99 | } else { |
100 | break; |
101 | } |
102 | } |
103 | dssLengthLocation_ = 0; |
104 | } |
105 | |
    // Reset this request object for reuse: clear all buffer state and
    // restart request correlation ids from zero.
    final void initialize() {
        clearBuffer();
        correlationID_ = 0;
    }
110 | |
    // set the ccsid manager value. this method allows the ccsid manager to be
    // changed so a request object can be reused by different connections with
    // different ccsid managers.
    final void setCcsidMgr(CcsidManager ccsidManager) {
        ccsidManager_ = ccsidManager;
    }
117 | |
118 | // ensure length at the end of the buffer for a certain amount of data. |
119 | // if the buffer does not contain sufficient room for the data, the buffer |
120 | // will be expanded by the larger of (2 * current size) or (current size + length). |
121 | // the data from the previous buffer is copied into the larger buffer. |
122 | protected final void ensureLength(int length) { |
123 | if (length > bytes_.length) { |
124 | byte newBytes[] = new byte[Math.max(bytes_.length << 1, length)]; |
125 | System.arraycopy(bytes_, 0, newBytes, 0, offset_); |
126 | bytes_ = newBytes; |
127 | } |
128 | } |
129 | |
    // creates a request dss in the buffer to contain a ddm command
    // object. calling this method means any previous dss objects in
    // the buffer are complete and their length and chaining bytes can
    // be updated appropriately. a new (incremented) request correlation
    // id is assigned to the command.
    protected final void createCommand() {
        buildDss(false, false, false, DssConstants.GDSFMT_RQSDSS, ++correlationID_, false);
    }
137 | |
    // creates a request dss in the buffer to contain a ddm command
    // object for which no reply is expected (used for XA flows).
    // calling this method means any previous dss objects in
    // the buffer are complete and their length and chaining bytes can
    // be updated appropriately. a new (incremented) request correlation
    // id is assigned to the command.
    protected void createXACommand() {
        buildDss(false, false, false, DssConstants.GDSFMT_RQSDSS_NOREPLY, ++correlationID_, false);
    }
145 | |
146 | // creates an object dss in the buffer to contain a ddm command |
147 | // data object. calling this method means any previous dss objects in |
148 | // the buffer are complete and their length and chaining bytes can |
149 | // be updated appropriately. |
150 | final void createCommandData() { |
151 | buildDss(true, |
152 | false, |
153 | false, |
154 | DssConstants.GDSFMT_OBJDSS, |
155 | correlationID_, |
156 | false); |
157 | } |
158 | |
159 | final void createEncryptedCommandData() { |
160 | if (netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRIDDTA || |
161 | netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRPWDDTA) { |
162 | buildDss(true, false, false, DssConstants.GDSFMT_ENCOBJDSS, correlationID_, false); |
163 | } else { |
164 | buildDss(true, |
165 | false, |
166 | false, |
167 | DssConstants.GDSFMT_OBJDSS, |
168 | correlationID_, |
169 | false); |
170 | } |
171 | } |
172 | |
173 | |
174 | // experimental lob section |
175 | |
    // Begins a new DSS at the current offset by writing its 6-byte header
    // (2 length bytes, 0xD0 magic, format byte, 2 correlation id bytes).
    // Any DSS already in the buffer is finalized first: only its length
    // bytes when a "simple" finalize was previously requested, otherwise
    // its length plus chaining bits.
    private final void buildDss(boolean dssHasSameCorrelator,
                                boolean chainedToNextStructure,
                                boolean nextHasSameCorrelator,
                                int dssType,
                                int corrId,
                                boolean simpleFinalizeBuildingNextDss) {
        if (doesRequestContainData()) {
            if (simpleDssFinalize) {
                finalizeDssLength();
            } else {
                finalizePreviousChainedDss(dssHasSameCorrelator);
            }
        }

        ensureLength(offset_ + 6);

        // save the length position and skip
        // note: the length position is saved so it can be updated
        // with a different value later.
        dssLengthLocation_ = offset_;
        // always turn on chaining flags... this is helpful for lobs...
        // these bytes will get reset if dss lengths are finalized.
        bytes_[offset_++] = (byte) 0xFF;
        bytes_[offset_++] = (byte) 0xFF;

        // insert the mandatory 0xD0 and the dssType
        bytes_[offset_++] = (byte) 0xD0;
        if (chainedToNextStructure) {
            dssType |= DssConstants.GDSCHAIN;
            if (nextHasSameCorrelator) {
                dssType |= DssConstants.GDSCHAIN_SAME_ID;
            }
        }
        bytes_[offset_++] = (byte) (dssType & 0xff);

        // write the request correlation id
        // use method that writes a short
        bytes_[offset_++] = (byte) ((corrId >>> 8) & 0xff);
        bytes_[offset_++] = (byte) (corrId & 0xff);

        // remember how the NEXT buildDss call should finalize this DSS.
        simpleDssFinalize = simpleFinalizeBuildingNextDss;
    }
218 | |
    // Writes a lob/stream parameter as a scalar stream: builds (or chains)
    // an object DSS, writes the DDM length/code-point header, then copies
    // 'length' bytes of the InputStream into the send buffer, flushing 32K
    // DSS segments as they fill.
    //
    // Error handling reuses the agent's sql exception accumulation
    // mechanism for write exceptions: if the stream is shorter than
    // 'length' the remainder is padded (or, for encrypted streams, the
    // connection is broken, since part of the request has already been
    // sent); if it is longer, a truncation warning is accumulated.
    final void writeScalarStream(boolean chained,
                                 boolean chainedWithSameCorrelator,
                                 int codePoint,
                                 int length,
                                 java.io.InputStream in,
                                 boolean writeNullByte,
                                 int parameterIndex) throws DisconnectException, SqlException {
        int leftToRead = length;
        int extendedLengthByteCount = prepScalarStream(chained,
                chainedWithSameCorrelator,
                writeNullByte,
                leftToRead);
        int bytesToRead;

        // max bytes in the first DSS segment: 32K minus the 6-byte DSS
        // header, 4-byte LLCP, optional null indicator and extended length.
        if (writeNullByte) {
            bytesToRead = Utils.min(leftToRead, DssConstants.MAX_DSS_LEN - 6 - 4 - 1 - extendedLengthByteCount);
        } else {
            bytesToRead = Utils.min(leftToRead, DssConstants.MAX_DSS_LEN - 6 - 4 - extendedLengthByteCount);
        }

        if (netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRIDDTA ||
                netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRPWDDTA) {

            byte[] lengthAndCodepoint;
            lengthAndCodepoint = buildLengthAndCodePointForEncryptedLob(codePoint,
                    leftToRead,
                    writeNullByte,
                    extendedLengthByteCount);

            // Encrypted path: the whole stream must be materialized before
            // encryption, so it is read into clearedBytes first.
            // TODO(review, from original author): we need to stream the
            // input, rather than fully materialize it.

            byte[] clearedBytes = new byte[leftToRead];
            int bytesRead = 0;
            int totalBytesRead = 0;
            int pos = 0;
            do {
                try {
                    bytesRead = in.read(clearedBytes, pos, leftToRead);
                    totalBytesRead += bytesRead;
                } catch (java.io.IOException e) {
                    padScalarStreamForError(leftToRead, bytesToRead);
                    // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                    netAgent_.accumulateReadException(new SqlException(netAgent_.logWriter_,
                            new ClientMessageId(SQLState.NET_IOEXCEPTION_ON_READ),
                            new Integer(parameterIndex), e.getMessage(), e));
                    return;
                }
                if (bytesRead == -1) {
                    //padScalarStreamForError(leftToRead, bytesToRead);
                    // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                    /*throw new SqlException(netAgent_.logWriter_,
                        "End of Stream prematurely reached while reading InputStream, parameter #" +
                        parameterIndex +
                        ". Remaining data has been padded with 0x0.");*/
                    // A chain-breaking exception is raised here on purpose:
                    // padding with 0 and encrypting takes too long, and a plain
                    // SQLException would leave the server hanging because parts
                    // of the request (PRPSQLSTT etc.) have already been sent and
                    // the server is still waiting for EXTDTA.
                    netAgent_.accumulateChainBreakingReadExceptionAndThrow(
                            new DisconnectException(netAgent_,
                                    new ClientMessageId(SQLState.NET_PREMATURE_EOS_DISCONNECT),
                                    new Integer(parameterIndex)));
                    return;

                    /*netAgent_.accumulateReadException(
                        new SqlException(netAgent_.logWriter_,
                            "End of Stream prematurely reached while reading InputStream, parameter #" +
                            parameterIndex +
                            ". Remaining data has been padded with 0x0."));
                    return;*/
                } else {
                    pos += bytesRead;
                    //offset_ += bytesRead; //comment this out for data stream encryption.
                    leftToRead -= bytesRead;
                }

            } while (leftToRead > 0);

            // check to make sure that the specified length wasn't too small
            try {
                if (in.read() != -1) {
                    // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                    netAgent_.accumulateReadException(new SqlException(
                            netAgent_.logWriter_,
                            new ClientMessageId(SQLState.NET_INPUTSTREAM_LENGTH_TOO_SMALL),
                            new Integer(parameterIndex)));
                }
            } catch (java.io.IOException e) {
                netAgent_.accumulateReadException(new SqlException(
                        netAgent_.logWriter_,
                        new ClientMessageId(
                            SQLState.NET_IOEXCEPTION_ON_STREAMLEN_VERIFICATION),
                        new Integer(parameterIndex),
                        e.getMessage(),
                        e));
            }

            // prepend the DDM length/code-point header to the cleartext
            // before encrypting the whole thing.
            byte[] newClearedBytes = new byte[clearedBytes.length +
                    lengthAndCodepoint.length];
            System.arraycopy(lengthAndCodepoint, 0, newClearedBytes, 0,
                    lengthAndCodepoint.length);
            System.arraycopy(clearedBytes, 0, newClearedBytes, lengthAndCodepoint.length, clearedBytes.length);
            // NOTE(review, from original author): "it's wrong here, need to add
            // in the real length after the codepoint 146c" -- unresolved.
            byte[] encryptedBytes;
            encryptedBytes = netAgent_.netConnection_.getEncryptionManager().
                    encryptData(newClearedBytes,
                            NetConfiguration.SECMEC_EUSRIDPWD,
                            netAgent_.netConnection_.getTargetPublicKey(),
                            netAgent_.netConnection_.getTargetPublicKey());

            // send the encrypted bytes, splitting into 32K DSS segments
            // (32765 data bytes + 2 continuation header bytes per segment).
            int encryptedBytesLength = encryptedBytes.length;
            int sendingLength = bytes_.length - offset_;
            if (encryptedBytesLength > (bytes_.length - offset_)) {

                System.arraycopy(encryptedBytes, 0, bytes_, offset_, (bytes_.length - offset_));
                offset_ = 32767;
                try {
                    sendBytes(netAgent_.getOutputStream());
                } catch (java.io.IOException ioe) {
                    netAgent_.throwCommunicationsFailure(ioe);
                }
            } else {
                System.arraycopy(encryptedBytes, 0, bytes_, offset_, encryptedBytesLength);
                offset_ = offset_ + encryptedBytes.length;
            }

            encryptedBytesLength = encryptedBytesLength - sendingLength;
            while (encryptedBytesLength > 0) {
                //dssLengthLocation_ = offset_;
                offset_ = 0;

                if ((encryptedBytesLength - 32765) > 0) {
                    // full continuation segment: 0xFFFF header means another
                    // continuation follows.
                    bytes_[offset_++] = (byte) (0xff);
                    bytes_[offset_++] = (byte) (0xff);
                    System.arraycopy(encryptedBytes, sendingLength, bytes_, offset_, 32765);
                    encryptedBytesLength -= 32765;
                    sendingLength += 32765;
                    offset_ = 32767;
                    try {
                        sendBytes(netAgent_.getOutputStream());
                    } catch (java.io.IOException ioe) {
                        netAgent_.throwCommunicationsFailure(ioe);
                    }
                } else {
                    // final segment: header carries the real remaining length
                    // (including its own 2 bytes).
                    int leftlength = encryptedBytesLength + 2;
                    bytes_[offset_++] = (byte) ((leftlength >>> 8) & 0xff);
                    bytes_[offset_++] = (byte) (leftlength & 0xff);

                    System.arraycopy(encryptedBytes, sendingLength, bytes_, offset_, encryptedBytesLength);

                    offset_ += encryptedBytesLength;
                    dssLengthLocation_ = offset_;
                    encryptedBytesLength = 0;
                }

            }
        } else //if not data stream encryption
        {
            buildLengthAndCodePointForLob(codePoint,
                    leftToRead,
                    writeNullByte,
                    extendedLengthByteCount);

            int bytesRead = 0;
            int totalBytesRead = 0;
            do {
                do {
                    try {
                        bytesRead = in.read(bytes_, offset_, bytesToRead);
                        totalBytesRead += bytesRead;
                    } catch (java.io.IOException e) {
                        padScalarStreamForError(leftToRead, bytesToRead);
                        // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                        netAgent_.accumulateReadException(new SqlException(
                                netAgent_.logWriter_,
                                new ClientMessageId(SQLState.NET_IOEXCEPTION_ON_READ),
                                new Integer(parameterIndex),
                                e.getMessage(),
                                e));

                        return;
                    }
                    if (bytesRead == -1) {
                        padScalarStreamForError(leftToRead, bytesToRead);
                        // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                        netAgent_.accumulateReadException(new SqlException(netAgent_.logWriter_,
                                new ClientMessageId(SQLState.NET_PREMATURE_EOS),
                                new Integer(parameterIndex)));
                        return;
                    } else {
                        bytesToRead -= bytesRead;
                        offset_ += bytesRead;
                        leftToRead -= bytesRead;
                    }
                } while (bytesToRead > 0);

                // segment filled; emit a continuation header if data remains.
                bytesToRead = flushScalarStreamSegment(leftToRead, bytesToRead);
            } while (leftToRead > 0);

            // check to make sure that the specified length wasn't too small
            try {
                if (in.read() != -1) {
                    // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                    netAgent_.accumulateReadException(new SqlException(netAgent_.logWriter_,
                            new ClientMessageId(SQLState.NET_INPUTSTREAM_LENGTH_TOO_SMALL),
                            new Integer(parameterIndex)));
                }
            } catch (java.io.IOException e) {
                netAgent_.accumulateReadException(new SqlException(
                        netAgent_.logWriter_,
                        new ClientMessageId(
                            SQLState.NET_IOEXCEPTION_ON_STREAMLEN_VERIFICATION),
                        new Integer(parameterIndex),
                        e.getMessage(),
                        e));
            }

        }


    }
445 | |
    // Throws DataTruncation, instead of closing the connection, if the input
    // size mismatches. An implication of this is that we need to extend the
    // chaining model for writes to accommodate chained write exceptions.
    //
    // Streams a character Reader by wrapping it in a UTF32BEEncodedInputStream
    // and delegating to the InputStream variant above.
    // NOTE(review): the byte length is computed as length * 2 -- confirm this
    // matches UTF32BEEncodedInputStream's bytes-per-character contract.
    final void writeScalarStream(boolean chained,
                                 boolean chainedWithSameCorrelator,
                                 int codePoint,
                                 int length,
                                 java.io.Reader r,
                                 boolean writeNullByte,
                                 int parameterIndex) throws DisconnectException,
            SqlException{

        writeScalarStream(chained,
                chainedWithSameCorrelator,
                codePoint,
                length * 2,
                new UTF32BEEncodedInputStream( r ),
                writeNullByte,
                parameterIndex);
    }
466 | |
467 | |
    // prepScalarStream does the following prep for writing stream data:
    // 1. Flushes an existing DSS segment, if necessary
    // 2. Determines if extended length bytes are needed
    // 3. Creates a new DSS/DDM header and a null byte indicator, if applicable
    //
    // Returns the number of extended length bytes the DDM header will need
    // (0 when the ordinary two byte length field suffices).
    protected final int prepScalarStream(boolean chained,
                                         boolean chainedWithSameCorrelator,
                                         boolean writeNullByte,
                                         int leftToRead) throws DisconnectException {
        int extendedLengthByteCount;

        int nullIndicatorSize = 0;
        if (writeNullByte) {
            // leftToRead is cast to (long) on the off chance that +4+1 pushes it outside the range of int
            extendedLengthByteCount = calculateExtendedLengthByteCount((long) leftToRead + 4 + 1);
            nullIndicatorSize = 1;
        } else {
            extendedLengthByteCount = calculateExtendedLengthByteCount(leftToRead + 4);
        }

        // flush the existing DSS segment if this stream will not fit in the send buffer
        // leftToRead is cast to (long) on the off chance that +4+1 pushes it outside the range of int
        if (10 + extendedLengthByteCount + nullIndicatorSize + (long) leftToRead + offset_ > DssConstants.MAX_DSS_LEN) {
            try {
                if (simpleDssFinalize) {
                    finalizeDssLength();
                } else {
                    finalizePreviousChainedDss(true);
                }
                sendBytes(netAgent_.getOutputStream());
            } catch (java.io.IOException e) {
                netAgent_.throwCommunicationsFailure(e);
            }
        }

        // use the encrypted object DSS format when the security mechanism
        // encrypts user data; a "simple" finalize is requested (last arg true)
        // because the stream data that follows needs no chaining fixups.
        if (netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRIDDTA ||
                netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRPWDDTA) {
            buildDss(true,
                    chained,
                    chainedWithSameCorrelator,
                    DssConstants.GDSFMT_ENCOBJDSS,
                    correlationID_,
                    true);
        } else
        // buildDss should not call ensure length.
        {
            buildDss(true,
                    chained,
                    chainedWithSameCorrelator,
                    DssConstants.GDSFMT_OBJDSS,
                    correlationID_,
                    true);
        }

        return extendedLengthByteCount;
    }
523 | |
524 | |
525 | // Writes out a scalar stream DSS segment, along with DSS continuation headers, |
526 | // if necessary. |
527 | protected final int flushScalarStreamSegment(int leftToRead, |
528 | int bytesToRead) throws DisconnectException { |
529 | int newBytesToRead = bytesToRead; |
530 | |
531 | // either at end of data, end of dss segment, or both. |
532 | if (leftToRead != 0) { |
533 | // 32k segment filled and not at end of data. |
534 | if ((Utils.min(2 + leftToRead, 32767)) > (bytes_.length - offset_)) { |
535 | try { |
536 | sendBytes(netAgent_.getOutputStream()); |
537 | } catch (java.io.IOException ioe) { |
538 | netAgent_.throwCommunicationsFailure(ioe); |
539 | } |
540 | } |
541 | dssLengthLocation_ = offset_; |
542 | bytes_[offset_++] = (byte) (0xff); |
543 | bytes_[offset_++] = (byte) (0xff); |
544 | newBytesToRead = Utils.min(leftToRead, 32765); |
545 | } |
546 | |
547 | return newBytesToRead; |
548 | } |
549 | |
550 | // the offset_ must not be updated when an error is encountered |
551 | // note valid data may be overwritten |
552 | protected final void padScalarStreamForError(int leftToRead, int bytesToRead) throws DisconnectException { |
553 | do { |
554 | do { |
555 | bytes_[offset_++] = (byte) (0x0); // use 0x0 as the padding byte |
556 | bytesToRead--; |
557 | leftToRead--; |
558 | } while (bytesToRead > 0); |
559 | |
560 | bytesToRead = flushScalarStreamSegment(leftToRead, bytesToRead); |
561 | } while (leftToRead > 0); |
562 | } |
563 | |
564 | private final void writeExtendedLengthBytes(int extendedLengthByteCount, long length) { |
565 | int shiftSize = (extendedLengthByteCount - 1) * 8; |
566 | for (int i = 0; i < extendedLengthByteCount; i++) { |
567 | bytes_[offset_++] = (byte) ((length >>> shiftSize) & 0xff); |
568 | shiftSize -= 8; |
569 | } |
570 | } |
571 | |
572 | private final byte[] writeExtendedLengthBytesForEncryption(int extendedLengthByteCount, long length) { |
573 | int shiftSize = (extendedLengthByteCount - 1) * 8; |
574 | byte[] extendedLengthBytes = new byte[extendedLengthByteCount]; |
575 | for (int i = 0; i < extendedLengthByteCount; i++) { |
576 | extendedLengthBytes[i] = (byte) ((length >>> shiftSize) & 0xff); |
577 | shiftSize -= 8; |
578 | } |
579 | return extendedLengthBytes; |
580 | } |
581 | |
582 | // experimental lob section - end |
583 | |
584 | // used to finialize a dss which is already in the buffer |
585 | // before another dss is built. this includes updating length |
586 | // bytes and chaining bits. |
587 | protected final void finalizePreviousChainedDss(boolean dssHasSameCorrelator) { |
588 | finalizeDssLength(); |
589 | bytes_[dssLengthLocation_ + 3] |= 0x40; |
590 | if (dssHasSameCorrelator) // for blobs |
591 | { |
592 | bytes_[dssLengthLocation_ + 3] |= 0x10; |
593 | } |
594 | } |
595 | |
    // method to determine if any data is in the request.
    // this indicates there is a dss object already in the buffer
    // (offset_ is rewound to 0 whenever the buffer is cleared or sent).
    protected final boolean doesRequestContainData() {
        return offset_ != 0;
    }
601 | |
    // signal the completion of a Dss Layer A object. The length of the
    // dss object will be calculated based on the difference between the
    // start of the dss, saved on the beginDss call, and the current
    // offset into the buffer which marks the end of the data. In the event
    // the length requires the use of continuation Dss headers, one for each 32k
    // chunk of data, the data will be shifted and the continuation headers
    // will be inserted with the correct values as needed.
    // Note: In the future, we may try to optimize this approach
    // in an attempt to avoid these shifts.
    protected final void finalizeDssLength() {
        // calculate the total size of the dss and the number of bytes which would
        // require continuation dss headers. The total length already includes the
        // the 6 byte dss header located at the beginning of the dss. It does not
        // include the length of any continuation headers.
        int totalSize = offset_ - dssLengthLocation_;
        int bytesRequiringContDssHeader = totalSize - 32767;

        // determine if continuation headers are needed
        if (bytesRequiringContDssHeader > 0) {

            // the continuation headers are needed, so calculate how many.
            // after the first 32767 worth of data, a continuation header is
            // needed for every 32765 bytes (32765 bytes of data + 2 bytes of
            // continuation header = 32767 Dss Max Size).
            int contDssHeaderCount = bytesRequiringContDssHeader / 32765;
            if (bytesRequiringContDssHeader % 32765 != 0) {
                contDssHeaderCount++;
            }

            // right now the code will shift to the right. In the future we may want
            // to try something fancier to help reduce the copying (maybe keep
            // space in the beginning of the buffer??).
            // the offset points to the next available offset in the buffer to place
            // a piece of data, so the last dataByte is at offset -1.
            // various bytes will need to be shifted by different amounts
            // depending on how many dss headers to insert so the amount to shift
            // will be calculated and adjusted as needed. ensure there is enough room
            // for all the continuation headers and adjust the offset to point to the
            // new end of the data.
            int dataByte = offset_ - 1;
            int shiftOffset = contDssHeaderCount * 2;
            ensureLength(offset_ + shiftOffset);
            offset_ += shiftOffset;

            // mark passOne to help with calculating the length of the final (first or
            // rightmost) continuation header.
            boolean passOne = true;
            do {
                // calculate chunk of data to shift
                int dataToShift = bytesRequiringContDssHeader % 32765;
                if (dataToShift == 0) {
                    dataToShift = 32765;
                }

                // perform the shift
                dataByte -= dataToShift;
                System.arraycopy(bytes_, dataByte + 1,bytes_, dataByte + shiftOffset + 1, dataToShift);

                // calculate the value of the 2 byte continuation dss header which
                // includes the length of itself. On the first pass, if the length is 32767
                // we do not want to set the continuation dss header flag.
                int twoByteContDssHeader = dataToShift + 2;
                if (passOne) {
                    passOne = false;
                } else {
                    if (twoByteContDssHeader == 32767) {
                        twoByteContDssHeader = 0xFFFF;
                    }
                }

                // insert the header's length bytes
                bytes_[dataByte + shiftOffset - 1] = (byte) ((twoByteContDssHeader >>> 8) & 0xff);
                bytes_[dataByte + shiftOffset] = (byte) (twoByteContDssHeader & 0xff);

                // adjust the bytesRequiringContDssHeader and the amount to shift for
                // data in upstream headers.
                bytesRequiringContDssHeader -= dataToShift;
                shiftOffset -= 2;

                // shift and insert another header for more data.
            } while (bytesRequiringContDssHeader > 0);

            // set the continuation dss header flag on for the first header
            totalSize = 0xFFFF;

        }

        // insert the length bytes in the 6 byte dss header.
        bytes_[dssLengthLocation_] = (byte) ((totalSize >>> 8) & 0xff);
        bytes_[dssLengthLocation_ + 1] = (byte) (totalSize & 0xff);
    }
693 | |
694 | // mark the location of a two byte ddm length field in the buffer, |
695 | // skip the length bytes for later update, and insert a ddm codepoint |
696 | // into the buffer. The value of the codepoint is not checked. |
697 | // this length will be automatically updated when construction of |
698 | // the ddm object is complete (see updateLengthBytes method). |
699 | // Note: this mechanism handles extended length ddms. |
700 | protected final void markLengthBytes(int codePoint) { |
701 | ensureLength(offset_ + 4); |
702 | |
703 | // save the location of length bytes in the mark stack. |
704 | mark(); |
705 | |
706 | // skip the length bytes and insert the codepoint |
707 | offset_ += 2; |
708 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
709 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
710 | } |
711 | |
    // mark an offset into the buffer by placing the current offset value on
    // a stack. paired with popMark(); stack depth is bounded by
    // MAX_MARKS_NESTING (no overflow check is performed here).
    private final void mark() {
        markStack_[top_++] = offset_;
    }
717 | |
    // remove and return the most recently marked offset from the mark stack.
    // callers must ensure a matching mark() was made (no underflow check).
    private final int popMark() {
        return markStack_[--top_];
    }
722 | |
    // mark the current buffer offset so the PKGNAMCSN position can later be
    // retrieved (via popMarkForCachingPKGNAMCSN) for caching purposes.
    protected final void markForCachingPKGNAMCSN() {
        mark();
    }
726 | |
    // return the buffer offset previously saved by markForCachingPKGNAMCSN.
    protected final int popMarkForCachingPKGNAMCSN() {
        return popMark();
    }
730 | |
    // Called to update the last ddm length bytes marked (lengths are updated
    // in the reverse order that they are marked). It is up to the caller
    // to make sure length bytes were marked before calling this method.
    // If the length requires ddm extended length bytes, the data will be
    // shifted as needed and the extended length bytes will be automatically
    // inserted.
    protected final void updateLengthBytes() throws SqlException {
        // remove the top length location offset from the mark stack and
        // calculate the length based on the marked location and end of data.
        int lengthLocation = popMark();
        int length = offset_ - lengthLocation;

        // determine if any extended length bytes are needed. the value returned
        // from calculateExtendedLengthByteCount is the number of extended length
        // bytes required. 0 indicates no extended length.
        int extendedLengthByteCount = calculateExtendedLengthByteCount(length);
        if (extendedLengthByteCount != 0) {

            // ensure there is enough room in the buffer for the extended length bytes.
            ensureLength(offset_ + extendedLengthByteCount);

            // calculate the length to be placed in the extended length bytes.
            // this length does not include the 4 byte llcp.
            int extendedLength = length - 4;

            // shift the data to the right by the number of extended length bytes needed.
            int extendedLengthLocation = lengthLocation + 4;
            System.arraycopy(bytes_,
                    extendedLengthLocation,
                    bytes_,
                    extendedLengthLocation + extendedLengthByteCount,
                    extendedLength);

            // write the extended length, big-endian
            int shiftSize = (extendedLengthByteCount - 1) * 8;
            for (int i = 0; i < extendedLengthByteCount; i++) {
                bytes_[extendedLengthLocation++] = (byte) ((extendedLength >>> shiftSize) & 0xff);
                shiftSize -= 8;
            }
            // adjust the offset to account for the shift and insert
            offset_ += extendedLengthByteCount;

            // the two byte length field before the codepoint contains the length
            // of itself, the length of the codepoint, and the number of bytes used
            // to hold the extended length. the 2 byte length field also has the first
            // bit on to indicate extended length bytes were used.
            length = extendedLengthByteCount + 4;
            length |= 0x8000;
        }

        // write the 2 byte length field (2 bytes before codepoint).
        bytes_[lengthLocation] = (byte) ((length >>> 8) & 0xff);
        bytes_[lengthLocation + 1] = (byte) (length & 0xff);
    }
785 | |
786 | // helper method to calculate the minimum number of extended length bytes needed |
787 | // for a ddm. a return value of 0 indicates no extended length needed. |
788 | private final int calculateExtendedLengthByteCount(long ddmSize) //throws SqlException |
789 | { |
790 | // according to Jim and some tests perfomred on Lob data, |
791 | // the extended length bytes are signed. Assume that |
792 | // if this is the case for Lobs, it is the case for |
793 | // all extended length scenarios. |
794 | if (ddmSize <= 0x7FFF) { |
795 | return 0; |
796 | } else if (ddmSize <= 0x7FFFFFFFL) { |
797 | return 4; |
798 | } else if (ddmSize <= 0x7FFFFFFFFFFFL) { |
799 | return 6; |
800 | } else { |
801 | return 8; |
802 | } |
803 | } |
804 | |
805 | // insert the padByte into the buffer by length number of times. |
806 | final void padBytes(byte padByte, int length) { |
807 | ensureLength(offset_ + length); |
808 | for (int i = 0; i < length; i++) { |
809 | bytes_[offset_++] = padByte; |
810 | } |
811 | } |
812 | |
813 | // insert an unsigned single byte value into the buffer. |
814 | final void write1Byte(int value) { |
815 | ensureLength(offset_ + 1); |
816 | bytes_[offset_++] = (byte) (value & 0xff); |
817 | } |
818 | |
819 | // insert 3 unsigned bytes into the buffer. this was |
820 | // moved up from NetStatementRequest for performance |
821 | final void buildTripletHeader(int tripletLength, |
822 | int tripletType, |
823 | int tripletId) { |
824 | ensureLength(offset_ + 3); |
825 | bytes_[offset_++] = (byte) (tripletLength & 0xff); |
826 | bytes_[offset_++] = (byte) (tripletType & 0xff); |
827 | bytes_[offset_++] = (byte) (tripletId & 0xff); |
828 | } |
829 | |
830 | final void writeLidAndLengths(int[][] lidAndLengthOverrides, int count, int offset) { |
831 | ensureLength(offset_ + (count * 3)); |
832 | for (int i = 0; i < count; i++, offset++) { |
833 | bytes_[offset_++] = (byte) (lidAndLengthOverrides[offset][0] & 0xff); |
834 | bytes_[offset_++] = (byte) ((lidAndLengthOverrides[offset][1] >>> 8) & 0xff); |
835 | bytes_[offset_++] = (byte) (lidAndLengthOverrides[offset][1] & 0xff); |
836 | } |
837 | } |
838 | |
839 | // if mdd overrides are not required, lids and lengths are copied straight into the |
840 | // buffer. |
841 | // otherwise, lookup the protocolType in the map. if an entry exists, substitute the |
842 | // protocolType with the corresponding override lid. |
843 | final void writeLidAndLengths(int[][] lidAndLengthOverrides, |
844 | int count, |
845 | int offset, |
846 | boolean mddRequired, |
847 | java.util.Hashtable map) { |
848 | if (!mddRequired) { |
849 | writeLidAndLengths(lidAndLengthOverrides, count, offset); |
850 | } |
851 | // if mdd overrides are required, lookup the protocolType in the map, and substitute |
852 | // the protocolType with the override lid. |
853 | else { |
854 | ensureLength(offset_ + (count * 3)); |
855 | int protocolType, overrideLid; |
856 | Object entry; |
857 | for (int i = 0; i < count; i++, offset++) { |
858 | protocolType = lidAndLengthOverrides[offset][0]; |
859 | // lookup the protocolType in the protocolType->overrideLid map |
860 | // if an entry exists, replace the protocolType with the overrideLid |
861 | entry = map.get(new Integer(protocolType)); |
862 | overrideLid = (entry == null) ? protocolType : ((Integer) entry).intValue(); |
863 | bytes_[offset_++] = (byte) (overrideLid & 0xff); |
864 | bytes_[offset_++] = (byte) ((lidAndLengthOverrides[offset][1] >>> 8) & 0xff); |
865 | bytes_[offset_++] = (byte) (lidAndLengthOverrides[offset][1] & 0xff); |
866 | } |
867 | } |
868 | } |
869 | |
870 | // perf end |
871 | |
872 | // insert a big endian unsigned 2 byte value into the buffer. |
873 | final void write2Bytes(int value) { |
874 | ensureLength(offset_ + 2); |
875 | bytes_[offset_++] = (byte) ((value >>> 8) & 0xff); |
876 | bytes_[offset_++] = (byte) (value & 0xff); |
877 | } |
878 | |
879 | // insert a big endian unsigned 4 byte value into the buffer. |
880 | final void write4Bytes(long value) { |
881 | ensureLength(offset_ + 4); |
882 | bytes_[offset_++] = (byte) ((value >>> 24) & 0xff); |
883 | bytes_[offset_++] = (byte) ((value >>> 16) & 0xff); |
884 | bytes_[offset_++] = (byte) ((value >>> 8) & 0xff); |
885 | bytes_[offset_++] = (byte) (value & 0xff); |
886 | } |
887 | |
888 | // copy length number of bytes starting at offset 0 of the byte array, buf, |
889 | // into the buffer. it is up to the caller to make sure buf has at least length |
890 | // number of elements. no checking will be done by this method. |
891 | final void writeBytes(byte[] buf, int length) { |
892 | ensureLength(offset_ + length); |
893 | System.arraycopy(buf, 0, bytes_, offset_, length); |
894 | offset_ += length; |
895 | } |
896 | |
897 | final void writeBytes(byte[] buf) { |
898 | ensureLength(offset_ + buf.length); |
899 | System.arraycopy(buf, 0, bytes_, offset_, buf.length); |
900 | offset_ += buf.length; |
901 | } |
902 | |
903 | // insert a pair of unsigned 2 byte values into the buffer. |
904 | final void writeCodePoint4Bytes(int codePoint, int value) { // should this be writeCodePoint2Bytes |
905 | ensureLength(offset_ + 4); |
906 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
907 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
908 | bytes_[offset_++] = (byte) ((value >>> 8) & 0xff); |
909 | bytes_[offset_++] = (byte) (value & 0xff); |
910 | } |
911 | |
912 | // insert a 4 byte length/codepoint pair and a 1 byte unsigned value into the buffer. |
913 | // total of 5 bytes inserted in buffer. |
914 | protected final void writeScalar1Byte(int codePoint, int value) { |
915 | ensureLength(offset_ + 5); |
916 | bytes_[offset_++] = 0x00; |
917 | bytes_[offset_++] = 0x05; |
918 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
919 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
920 | bytes_[offset_++] = (byte) (value & 0xff); |
921 | } |
922 | |
923 | // insert a 4 byte length/codepoint pair and a 2 byte unsigned value into the buffer. |
924 | // total of 6 bytes inserted in buffer. |
925 | final void writeScalar2Bytes(int codePoint, int value) { |
926 | ensureLength(offset_ + 6); |
927 | bytes_[offset_++] = 0x00; |
928 | bytes_[offset_++] = 0x06; |
929 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
930 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
931 | bytes_[offset_++] = (byte) ((value >>> 8) & 0xff); |
932 | bytes_[offset_++] = (byte) (value & 0xff); |
933 | } |
934 | |
935 | // insert a 4 byte length/codepoint pair and a 4 byte unsigned value into the |
936 | // buffer. total of 8 bytes inserted in the buffer. |
937 | protected final void writeScalar4Bytes(int codePoint, long value) { |
938 | ensureLength(offset_ + 8); |
939 | bytes_[offset_++] = 0x00; |
940 | bytes_[offset_++] = 0x08; |
941 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
942 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
943 | bytes_[offset_++] = (byte) ((value >>> 24) & 0xff); |
944 | bytes_[offset_++] = (byte) ((value >>> 16) & 0xff); |
945 | bytes_[offset_++] = (byte) ((value >>> 8) & 0xff); |
946 | bytes_[offset_++] = (byte) (value & 0xff); |
947 | } |
948 | |
949 | // insert a 4 byte length/codepoint pair and a 8 byte unsigned value into the |
950 | // buffer. total of 12 bytes inserted in the buffer. |
951 | final void writeScalar8Bytes(int codePoint, long value) { |
952 | ensureLength(offset_ + 12); |
953 | bytes_[offset_++] = 0x00; |
954 | bytes_[offset_++] = 0x0C; |
955 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
956 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
957 | bytes_[offset_++] = (byte) ((value >>> 56) & 0xff); |
958 | bytes_[offset_++] = (byte) ((value >>> 48) & 0xff); |
959 | bytes_[offset_++] = (byte) ((value >>> 40) & 0xff); |
960 | bytes_[offset_++] = (byte) ((value >>> 32) & 0xff); |
961 | bytes_[offset_++] = (byte) ((value >>> 24) & 0xff); |
962 | bytes_[offset_++] = (byte) ((value >>> 16) & 0xff); |
963 | bytes_[offset_++] = (byte) ((value >>> 8) & 0xff); |
964 | bytes_[offset_++] = (byte) (value & 0xff); |
965 | } |
966 | |
967 | // insert a 4 byte length/codepoint pair into the buffer. |
968 | // total of 4 bytes inserted in buffer. |
969 | // Note: the length value inserted in the buffer is the same as the value |
970 | // passed in as an argument (this value is NOT incremented by 4 before being |
971 | // inserted). |
972 | final void writeLengthCodePoint(int length, int codePoint) { |
973 | ensureLength(offset_ + 4); |
974 | bytes_[offset_++] = (byte) ((length >>> 8) & 0xff); |
975 | bytes_[offset_++] = (byte) (length & 0xff); |
976 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
977 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
978 | } |
979 | |
980 | final byte[] writeEXTDTALengthCodePointForEncryption(int length, int codePoint) { |
981 | //how to encure length and offset later? |
982 | byte[] clearedBytes = new byte[4]; |
983 | clearedBytes[0] = (byte) ((length >>> 8) & 0xff); |
984 | clearedBytes[1] = (byte) (length & 0xff); |
985 | clearedBytes[2] = (byte) ((codePoint >>> 8) & 0xff); |
986 | clearedBytes[3] = (byte) (codePoint & 0xff); |
987 | return clearedBytes; |
988 | } |
989 | |
990 | // insert a 4 byte length/codepoint pair into the buffer followed |
991 | // by length number of bytes copied from array buf starting at offset 0. |
992 | // the length of this scalar must not exceed the max for the two byte length |
993 | // field. This method does not support extended length. The length |
994 | // value inserted in the buffer includes the number of bytes to copy plus |
995 | // the size of the llcp (or length + 4). It is up to the caller to make sure |
996 | // the array, buf, contains at least length number of bytes. |
997 | final void writeScalarBytes(int codePoint, byte[] buf, int length) { |
998 | ensureLength(offset_ + length + 4); |
999 | bytes_[offset_++] = (byte) (((length + 4) >>> 8) & 0xff); |
1000 | bytes_[offset_++] = (byte) ((length + 4) & 0xff); |
1001 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1002 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1003 | for (int i = 0; i < length; i++) { |
1004 | bytes_[offset_++] = buf[i]; |
1005 | } |
1006 | } |
1007 | |
1008 | // insert a 4 byte length/codepoint pair into the buffer. |
1009 | // total of 4 bytes inserted in buffer. |
1010 | // Note: datalength will be incremented by the size of the llcp, 4, |
1011 | // before being inserted. |
1012 | final void writeScalarHeader(int codePoint, int dataLength) { |
1013 | ensureLength(offset_ + dataLength + 4); |
1014 | bytes_[offset_++] = (byte) (((dataLength + 4) >>> 8) & 0xff); |
1015 | bytes_[offset_++] = (byte) ((dataLength + 4) & 0xff); |
1016 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1017 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1018 | } |
1019 | |
1020 | // insert a 4 byte length/codepoint pair plus ddm character data into |
1021 | // the buffer. This method assumes that the String argument can be |
1022 | // converted by the ccsid manager. This should be fine because usually |
1023 | // there are restrictions on the characters which can be used for ddm |
1024 | // character data. This method also assumes that the string.length() will |
1025 | // be the number of bytes following the conversion. |
1026 | // The two byte length field will contain the length of the character data |
1027 | // and the length of the 4 byte llcp. This method does not handle |
1028 | // scenarios which require extended length bytes. |
1029 | final void writeScalarString(int codePoint, String string) throws SqlException { |
1030 | int stringLength = string.length(); |
1031 | ensureLength(offset_ + stringLength + 4); |
1032 | bytes_[offset_++] = (byte) (((stringLength + 4) >>> 8) & 0xff); |
1033 | bytes_[offset_++] = (byte) ((stringLength + 4) & 0xff); |
1034 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1035 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1036 | offset_ = ccsidManager_.convertFromUCS2(string, bytes_, offset_, netAgent_); |
1037 | } |
1038 | |
1039 | // insert a 4 byte length/codepoint pair plus ddm character data into the |
1040 | // buffer. The ddm character data is padded if needed with the ccsid manager's |
1041 | // space character if the length of the character data is less than paddedLength. |
1042 | // Note: this method is not to be used for String truncation and the string length |
1043 | // must be <= paddedLength. |
1044 | // This method assumes that the String argument can be |
1045 | // converted by the ccsid manager. This should be fine because usually |
1046 | // there are restrictions on the characters which can be used for ddm |
1047 | // character data. This method also assumes that the string.length() will |
1048 | // be the number of bytes following the conversion. The two byte length field |
1049 | // of the llcp will contain the length of the character data including the pad |
1050 | // and the length of the llcp or 4. This method will not handle extended length |
1051 | // scenarios. |
1052 | final void writeScalarPaddedString(int codePoint, String string, int paddedLength) throws SqlException { |
1053 | int stringLength = string.length(); |
1054 | ensureLength(offset_ + paddedLength + 4); |
1055 | bytes_[offset_++] = (byte) (((paddedLength + 4) >>> 8) & 0xff); |
1056 | bytes_[offset_++] = (byte) ((paddedLength + 4) & 0xff); |
1057 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1058 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1059 | offset_ = ccsidManager_.convertFromUCS2(string, bytes_, offset_, netAgent_); |
1060 | for (int i = 0; i < paddedLength - stringLength; i++) { |
1061 | bytes_[offset_++] = ccsidManager_.space_; |
1062 | } |
1063 | } |
1064 | |
1065 | // this method inserts ddm character data into the buffer and pad's the |
1066 | // data with the ccsid manager's space character if the character data length |
1067 | // is less than paddedLength. |
    // Note: this method is not to be used for String truncation and the string length
1069 | // must be <= paddedLength. |
1070 | // This method assumes that the String argument can be |
1071 | // converted by the ccsid manager. This should be fine because usually |
1072 | // there are restrictions on the characters which can be used for ddm |
1073 | // character data. This method also assumes that the string.length() will |
1074 | // be the number of bytes following the conversion. |
1075 | final void writeScalarPaddedString(String string, int paddedLength) throws SqlException { |
1076 | int stringLength = string.length(); |
1077 | ensureLength(offset_ + paddedLength); |
1078 | offset_ = ccsidManager_.convertFromUCS2(string, bytes_, offset_, netAgent_); |
1079 | for (int i = 0; i < paddedLength - stringLength; i++) { |
1080 | bytes_[offset_++] = ccsidManager_.space_; |
1081 | } |
1082 | } |
1083 | |
1084 | // this method writes a 4 byte length/codepoint pair plus the bytes contained |
1085 | // in array buff to the buffer. |
1086 | // the 2 length bytes in the llcp will contain the length of the data plus |
1087 | // the length of the llcp. This method does not handle scenarios which |
1088 | // require extended length bytes. |
1089 | final void writeScalarBytes(int codePoint, byte[] buff) { |
1090 | int buffLength = buff.length; |
1091 | ensureLength(offset_ + buffLength + 4); |
1092 | bytes_[offset_++] = (byte) (((buffLength + 4) >>> 8) & 0xff); |
1093 | bytes_[offset_++] = (byte) ((buffLength + 4) & 0xff); |
1094 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1095 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1096 | System.arraycopy(buff, 0, bytes_, offset_, buffLength); |
1097 | offset_ += buffLength; |
1098 | } |
1099 | |
1100 | // this method inserts a 4 byte length/codepoint pair plus length number of bytes |
1101 | // from array buff starting at offset start. |
1102 | // Note: no checking will be done on the values of start and length with respect |
1103 | // the actual length of the byte array. The caller must provide the correct |
1104 | // values so an array index out of bounds exception does not occur. |
1105 | // the length will contain the length of the data plus the length of the llcp. |
1106 | // This method does not handle scenarios which require extended length bytes. |
1107 | final void writeScalarBytes(int codePoint, byte[] buff, int start, int length) { |
1108 | ensureLength(offset_ + length + 4); |
1109 | bytes_[offset_++] = (byte) (((length + 4) >>> 8) & 0xff); |
1110 | bytes_[offset_++] = (byte) ((length + 4) & 0xff); |
1111 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1112 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1113 | System.arraycopy(buff, start, bytes_, offset_, length); |
1114 | offset_ += length; |
1115 | } |
1116 | |
1117 | // insert a 4 byte length/codepoint pair plus ddm binary data into the |
1118 | // buffer. The binary data is padded if needed with the padByte |
1119 | // if the data is less than paddedLength. |
1120 | // Note: this method is not to be used for truncation and buff.length |
1121 | // must be <= paddedLength. |
1122 | // The llcp length bytes will contain the length of the data plus |
1123 | // the length of the llcp or 4. |
1124 | // This method does not handle scenarios which require extended length bytes. |
1125 | final void writeScalarPaddedBytes(int codePoint, byte[] buff, int paddedLength, byte padByte) { |
1126 | int buffLength = buff.length; |
1127 | ensureLength(offset_ + paddedLength + 4); |
1128 | bytes_[offset_++] = (byte) (((paddedLength + 4) >>> 8) & 0xff); |
1129 | bytes_[offset_++] = (byte) ((paddedLength + 4) & 0xff); |
1130 | bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff); |
1131 | bytes_[offset_++] = (byte) (codePoint & 0xff); |
1132 | System.arraycopy(buff, 0, bytes_, offset_, buffLength); |
1133 | offset_ += buffLength; |
1134 | |
1135 | for (int i = 0; i < paddedLength - buffLength; i++) { |
1136 | bytes_[offset_++] = padByte; |
1137 | } |
1138 | } |
1139 | |
1140 | // this method inserts binary data into the buffer and pads the |
1141 | // data with the padByte if the data length is less than the paddedLength. |
    // Note: this method is not to be used for truncation and buff.length
1143 | // must be <= paddedLength. |
1144 | final void writeScalarPaddedBytes(byte[] buff, int paddedLength, byte padByte) { |
1145 | int buffLength = buff.length; |
1146 | ensureLength(offset_ + paddedLength); |
1147 | System.arraycopy(buff, 0, bytes_, offset_, buffLength); |
1148 | offset_ += buffLength; |
1149 | |
1150 | for (int i = 0; i < paddedLength - buffLength; i++) { |
1151 | bytes_[offset_++] = padByte; |
1152 | } |
1153 | } |
1154 | |
1155 | // write the request to the OutputStream and flush the OutputStream. |
1156 | // trace the send if PROTOCOL trace is on. |
    protected void flush(java.io.OutputStream socketOutputStream) throws java.io.IOException {
        // nothing is sent for an empty request.
        if (doesRequestContainData()) {
            // complete the length bytes of the final dss before sending.
            finalizeDssLength();
            sendBytes(socketOutputStream);
        }
    }
1163 | |
    // write the buffered request bytes to the stream and flush it.
    // password masking, protocol tracing, and buffer cleanup run in the
    // finally block so they happen even when the write fails.
    protected void sendBytes(java.io.OutputStream socketOutputStream) throws java.io.IOException {
        try {
            socketOutputStream.write(bytes_, 0, offset_);
            socketOutputStream.flush();
        } finally {
            if (netAgent_.logWriter_ != null && passwordIncluded_) {
                // if password is in the buffer, need to mask it out
                // before the buffer can be traced.
                maskOutPassword();
                passwordIncluded_ = false;
            }
            if (netAgent_.loggingEnabled()) {
                ((NetLogWriter) netAgent_.logWriter_).traceProtocolFlow(bytes_,
                        0,
                        offset_,
                        NetLogWriter.TYPE_TRACE_SEND,
                        "Request",
                        "flush",
                        1); // tracepoint
            }
            clearBuffer();
        }
    }
1186 | |
1187 | final void maskOutPassword() { |
1188 | try { |
1189 | String maskChar = "*"; |
1190 | // construct a mask using the maskChar. |
1191 | StringBuffer mask = new StringBuffer(); |
1192 | for (int i = 0; i < passwordLength_; i++) { |
1193 | mask.append(maskChar); |
1194 | } |
1195 | // try to write mask over password. |
1196 | ccsidManager_.convertFromUCS2(mask.toString(), bytes_, passwordStart_, netAgent_); |
1197 | } catch (SqlException sqle) { |
1198 | // failed to convert mask, |
1199 | // them simply replace with 0xFF. |
1200 | for (int i = 0; i < passwordLength_; i++) { |
1201 | bytes_[passwordStart_ + i] = (byte) 0xFF; |
1202 | } |
1203 | } |
1204 | } |
1205 | |
1206 | // insert a java byte into the buffer. |
1207 | final void writeByte(byte v) { |
1208 | ensureLength(offset_ + 1); |
1209 | bytes_[offset_++] = v; |
1210 | } |
1211 | |
1212 | // insert a java short into the buffer. |
    final void writeShort(short v) {
        ensureLength(offset_ + 2);
        // big endian signed 2 byte encoding.
        org.apache.derby.client.am.SignedBinary.shortToBigEndianBytes(bytes_, offset_, v);
        offset_ += 2;
    }
1218 | |
1219 | // insert a java int into the buffer. |
    void writeInt(int v) {
        ensureLength(offset_ + 4);
        // big endian signed 4 byte encoding.
        org.apache.derby.client.am.SignedBinary.intToBigEndianBytes(bytes_, offset_, v);
        offset_ += 4;
    }
1225 | |
1226 | // insert a java long into the buffer. |
    final void writeLong(long v) {
        ensureLength(offset_ + 8);
        // big endian signed 8 byte encoding.
        org.apache.derby.client.am.SignedBinary.longToBigEndianBytes(bytes_, offset_, v);
        offset_ += 8;
    }
1232 | |
1233 | //-- The following are the write short/int/long in bigEndian byte ordering -- |
1234 | |
1235 | // when writing Fdoca data. |
    protected void writeShortFdocaData(short v) {
        ensureLength(offset_ + 2);
        // same big endian encoding as writeShort; kept as a separate entry
        // point for FDOCA data writes.
        org.apache.derby.client.am.SignedBinary.shortToBigEndianBytes(bytes_, offset_, v);
        offset_ += 2;
    }
1241 | |
1242 | // when writing Fdoca data. |
    protected void writeIntFdocaData(int v) {
        ensureLength(offset_ + 4);
        // same big endian encoding as writeInt; kept as a separate entry
        // point for FDOCA data writes.
        org.apache.derby.client.am.SignedBinary.intToBigEndianBytes(bytes_, offset_, v);
        offset_ += 4;
    }
1248 | |
1249 | // when writing Fdoca data. |
    protected void writeLongFdocaData(long v) {
        ensureLength(offset_ + 8);
        // same big endian encoding as writeLong; kept as a separate entry
        // point for FDOCA data writes.
        org.apache.derby.client.am.SignedBinary.longToBigEndianBytes(bytes_, offset_, v);
        offset_ += 8;
    }
1255 | |
1256 | // insert a java float into the buffer. |
    protected void writeFloat(float v) {
        ensureLength(offset_ + 4);
        // IEEE 754 4 byte encoding.
        org.apache.derby.client.am.FloatingPoint.floatToIeee754Bytes(bytes_, offset_, v);
        offset_ += 4;
    }
1262 | |
1263 | // insert a java double into the buffer. |
    protected void writeDouble(double v) {
        ensureLength(offset_ + 8);
        // IEEE 754 8 byte encoding.
        org.apache.derby.client.am.FloatingPoint.doubleToIeee754Bytes(bytes_, offset_, v);
        offset_ += 8;
    }
1269 | |
1270 | // insert a java.math.BigDecimal into the buffer. |
    final void writeBigDecimal(java.math.BigDecimal v,
                               int declaredPrecision,
                               int declaredScale) throws SqlException {
        // 16 bytes is reserved up front -- presumably the maximum packed
        // decimal size (TODO confirm against Decimal converter); the
        // converter returns the number of bytes actually written.
        ensureLength(offset_ + 16);
        int length = org.apache.derby.client.am.Decimal.bigDecimalToPackedDecimalBytes(bytes_, offset_, v, declaredPrecision, declaredScale);
        offset_ += length;
    }
1278 | |
1279 | final void writeDate(java.sql.Date date) throws SqlException { |
1280 | try |
1281 | { |
1282 | ensureLength(offset_ + 10); |
1283 | org.apache.derby.client.am.DateTime.dateToDateBytes(bytes_, offset_, date); |
1284 | offset_ += 10; |
1285 | } catch (java.io.UnsupportedEncodingException e) { |
1286 | throw new SqlException(netAgent_.logWriter_, |
1287 | new ClientMessageId(SQLState.UNSUPPORTED_ENCODING), |
1288 | "java.sql.Date", "DATE", e); |
1289 | } |
1290 | } |
1291 | |
1292 | final void writeTime(java.sql.Time time) throws SqlException { |
1293 | try{ |
1294 | ensureLength(offset_ + 8); |
1295 | org.apache.derby.client.am.DateTime.timeToTimeBytes(bytes_, offset_, time); |
1296 | offset_ += 8; |
1297 | } catch(UnsupportedEncodingException e) { |
1298 | throw new SqlException(netAgent_.logWriter_, |
1299 | new ClientMessageId(SQLState.UNSUPPORTED_ENCODING), |
1300 | "java.sql.Time", "TIME", e); |
1301 | } |
1302 | } |
1303 | |
1304 | final void writeTimestamp(java.sql.Timestamp timestamp) throws SqlException { |
1305 | try{ |
1306 | ensureLength(offset_ + 26); |
1307 | org.apache.derby.client.am.DateTime.timestampToTimestampBytes(bytes_, offset_, timestamp); |
1308 | offset_ += 26; |
1309 | }catch(UnsupportedEncodingException e) { |
1310 | throw new SqlException(netAgent_.logWriter_, |
1311 | new ClientMessageId(SQLState.UNSUPPORTED_ENCODING), |
1312 | "java.sql.Timestamp", "TIMESTAMP", e); |
1313 | } |
1314 | } |
1315 | |
1316 | // insert a java boolean into the buffer. the boolean is written |
1317 | // as a signed byte having the value 0 or 1. |
1318 | final void writeBoolean(boolean v) { |
1319 | ensureLength(offset_ + 1); |
1320 | bytes_[offset_++] = (byte) ((v ? 1 : 0) & 0xff); |
1321 | } |
1322 | |
1323 | // follows the TYPDEF rules (note: don't think ddm char data is ever length |
1324 | // delimited) |
1325 | // should this throw SqlException |
1326 | // Will write a varchar mixed or single |
1327 | // this was writeLDString |
    final void writeSingleorMixedCcsidLDString(String s, String encoding) throws SqlException {
        byte[] b;
        try {
            // convert the string using the supplied encoding.
            b = s.getBytes(encoding);
        } catch (UnsupportedEncodingException e) {
            throw new SqlException(netAgent_.logWriter_,
                new ClientMessageId(SQLState.UNSUPPORTED_ENCODING),
                "String", "byte", e);
        }
        // the 2 byte length prefix limits the converted data to 32767 bytes.
        if (b.length > 0x7FFF) {
            throw new SqlException(netAgent_.logWriter_,
                new ClientMessageId(SQLState.LANG_STRING_TOO_LONG),
                "32767");
        }
        ensureLength(offset_ + b.length + 2);
        writeLDBytesX(b.length, b);
    }
1345 | |
1346 | |
1347 | final void writeLDBytes(byte[] bytes) { |
1348 | ensureLength(offset_ + bytes.length + 2); |
1349 | writeLDBytesX(bytes.length, bytes); |
1350 | } |
1351 | |
1352 | // private helper method which should only be called by a Request method. |
1353 | // must call ensureLength before calling this method. |
1354 | // added for code reuse and helps perf by reducing ensureLength calls. |
1355 | // ldSize and bytes.length may not be the same. this is true |
1356 | // when writing graphic ld strings. |
1357 | private final void writeLDBytesX(int ldSize, byte[] bytes) { |
1358 | bytes_[offset_++] = (byte) ((ldSize >>> 8) & 0xff); |
1359 | bytes_[offset_++] = (byte) (ldSize & 0xff); |
1360 | System.arraycopy(bytes, 0, bytes_, offset_, bytes.length); |
1361 | offset_ += bytes.length; |
1362 | } |
1363 | |
    // Does this follow ccsid manager or typdef rules?  Should this method
    // write ddm character data or fdoca data?  Right now it is coded for
    // ddm character data only.
    final void writeDDMString(String s) throws SqlException {
        // assumes the converted byte count equals s.length() -- TODO confirm
        // this holds for the ccsid manager's conversion.
        ensureLength(offset_ + s.length());
        offset_ = ccsidManager_.convertFromUCS2(s, bytes_, offset_, netAgent_);
    }
1371 | |
1372 | |
    private byte[] buildLengthAndCodePointForEncryptedLob(int codePoint,
                                                          int leftToRead,
                                                          boolean writeNullByte,
                                                          int extendedLengthByteCount) throws DisconnectException {
        // Builds, without touching the send buffer, the llcp plus any
        // extended length bytes and optional null indicator for an EXTDTA
        // object that will be encrypted; the assembled header bytes are
        // returned so the caller can handle them together with the data.
        byte[] lengthAndCodepoint = new byte[4];
        byte[] extendedLengthBytes = new byte[extendedLengthByteCount];

        if (extendedLengthByteCount > 0) {
            // method should never ensure length.
            // extended length: the 2 byte length field holds only the llcp
            // size plus the count of extended length bytes; 0x8004 also sets
            // the high bit that flags extended length usage.
            lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(0x8004 + extendedLengthByteCount, codePoint);

            if (writeNullByte) {
                // account for the extra null indicator byte in the length.
                extendedLengthBytes = writeExtendedLengthBytesForEncryption(extendedLengthByteCount, leftToRead + 1);
            } else {
                extendedLengthBytes = writeExtendedLengthBytesForEncryption(extendedLengthByteCount, leftToRead);
            }
        } else {
            // no extended length: the 2 byte length field holds the data
            // length plus the 4 byte llcp (plus 1 if a null byte is written).
            if (writeNullByte) {
                lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(leftToRead + 4 + 1, codePoint);
            } else {
                lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(leftToRead + 4, codePoint);
            }
        }

        if (extendedLengthByteCount > 0) {
            // append the extended length bytes after the 4 byte llcp.
            byte[] newLengthAndCodepoint = new byte[4 + extendedLengthBytes.length];
            System.arraycopy(lengthAndCodepoint, 0, newLengthAndCodepoint, 0, lengthAndCodepoint.length);
            System.arraycopy(extendedLengthBytes, 0, newLengthAndCodepoint, lengthAndCodepoint.length, extendedLengthBytes.length);
            lengthAndCodepoint = newLengthAndCodepoint;
        }

        if (writeNullByte) {
            // append a zero null indicator byte after the header.
            byte[] nullByte = new byte[1 + lengthAndCodepoint.length];
            System.arraycopy(lengthAndCodepoint, 0, nullByte, 0, lengthAndCodepoint.length);
            nullByte[lengthAndCodepoint.length] = 0;
            lengthAndCodepoint = nullByte;
        }
        return lengthAndCodepoint;
    }
1413 | |
1414 | |
    private void buildLengthAndCodePointForLob(int codePoint,
                                               int leftToRead,
                                               boolean writeNullByte,
                                               int extendedLengthByteCount) throws DisconnectException {
        // Writes the llcp, any extended length bytes, and the optional null
        // indicator byte for a LOB EXTDTA object directly into the buffer.
        if (extendedLengthByteCount > 0) {
            // method should never ensure length.
            // extended length: the 2 byte length field holds only the llcp
            // size plus the count of extended length bytes; 0x8004 also sets
            // the high bit that flags extended length usage.
            writeLengthCodePoint(0x8004 + extendedLengthByteCount, codePoint);

            if (writeNullByte) {
                // account for the extra null indicator byte in the length.
                writeExtendedLengthBytes(extendedLengthByteCount, leftToRead + 1);
            } else {
                writeExtendedLengthBytes(extendedLengthByteCount, leftToRead);
            }
        } else {
            // no extended length: the 2 byte length field holds the data
            // length plus the 4 byte llcp (plus 1 if a null byte is written).
            if (writeNullByte) {
                writeLengthCodePoint(leftToRead + 4 + 1, codePoint);
            } else {
                writeLengthCodePoint(leftToRead + 4, codePoint);
            }
        }

        // write the null byte, if necessary.
        if (writeNullByte) {
            write1Byte(0x0);
        }

    }
1442 | |
    // record the buffer offset at which the current dss length bytes are
    // located so they can be updated later.
    public void setDssLengthLocation(int location) {
        dssLengthLocation_ = location;
    }
1446 | |
    // set the correlation id to use for the request.
    public void setCorrelationID(int id) {
        correlationID_ = id;
    }
1450 | } |