HDF5CFArray.cc
1// This file is part of the hdf5_handler that implements the CF option.
2// Copyright (c) 2011-2016 The HDF Group, Inc. and OPeNDAP, Inc.
3//
4// This is free software; you can redistribute it and/or modify it under the
5// terms of the GNU Lesser General Public License as published by the Free
6// Software Foundation; either version 2.1 of the License, or (at your
7// option) any later version.
8//
9// This software is distributed in the hope that it will be useful, but
10// WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
12// License for more details.
13//
14// You should have received a copy of the GNU Lesser General Public
15// License along with this library; if not, write to the Free Software
16// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17//
18// You can contact OPeNDAP, Inc. at PO Box 112, Saunderstown, RI. 02874-0112.
19// You can contact The HDF Group, Inc. at 1800 South Oak Street,
20// Suite 203, Champaign, IL 61820
21
30
31#include "config_hdf5.h"
32#include <iostream>
33#include <sstream>
34#include <cassert>
35#include <BESDebug.h>
36#include <sys/stat.h>
37#include <libdap/InternalErr.h>
38
39#include <libdap/Str.h>
40#include "HDF5RequestHandler.h"
41#include "HDF5CFArray.h"
42#include "h5cfdaputil.h"
43#include "ObjMemCache.h"
44
45using namespace std;
46using namespace libdap;
47
48
49BaseType *HDF5CFArray::ptr_duplicate()
50{
51 return new HDF5CFArray(*this);
52}
53
54// Read in an HDF5 Array
55bool HDF5CFArray::read()
56{
57
58 BESDEBUG("h5","Coming to HDF5CFArray read "<<endl);
59 if(length() == 0)
60 return true;
61
62 if((nullptr == HDF5RequestHandler::get_lrdata_mem_cache()) &&
63 nullptr == HDF5RequestHandler::get_srdata_mem_cache()){
64 read_data_NOT_from_mem_cache(false,nullptr);
65 return true;
66 }
67
68 // Flag to check if using large raw data cache or small raw data cache.
69 short use_cache_flag = 0;
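    // Values assigned below: 0 -- no memory cache is used for this variable;
    // 1 -- the small raw-data memory cache; 2 -- the large raw-data memory cache
    // for a general lat/lon variable; 3 -- the large raw-data memory cache for a
    // lat/lon variable under a configured common cache directory; 4 -- the large
    // raw-data memory cache for a variable listed in the variable cache file list.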
70
 71 // The small data cache is checked first to avoid the overhead of operating the large data cache.
72 if(HDF5RequestHandler::get_srdata_mem_cache() != nullptr) {
73 if(((cvtype == CV_EXIST) && (islatlon != true)) || (cvtype == CV_NONLATLON_MISS)
74 || (cvtype == CV_FILLINDEX) ||(cvtype == CV_MODIFY) ||(cvtype == CV_SPECIAL)){
75
76 if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
77 use_cache_flag = 1;
78 }
79 }
80
 81 // If this variable doesn't fit the small data cache, check whether it fits the large data cache.
82 if(use_cache_flag !=1) {
83
84 if(HDF5RequestHandler::get_lrdata_mem_cache() != nullptr) {
85
 86 // This is the trivial case.
 87 // If no large-data-cache information is provided in the configuration file,
 88 // just cache the lat/lon variable per file.
89 if(HDF5RequestHandler::get_common_cache_dirs() == false) {
90 if(cvtype == CV_LAT_MISS || cvtype == CV_LON_MISS
91 || (cvtype == CV_EXIST && islatlon == true)) {
92#if 0
93//cerr<<"coming to use_cache_flag =2 "<<endl;
94#endif
 95 // Only data with numeric datatypes that DAP2 and CF support are cached.
96 if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
97 use_cache_flag = 2;
98 }
99 }
100 else {// Have large data cache configuration info.
101
 102 // Check whether some CVs should not be cached; currently
 103 // this only applies to lat/lon CVs.
104 if(cvtype == CV_LAT_MISS || cvtype == CV_LON_MISS
105 || (cvtype == CV_EXIST && islatlon == true)) {
106
107 vector<string> cur_lrd_non_cache_dir_list;
108 HDF5RequestHandler::get_lrd_non_cache_dir_list(cur_lrd_non_cache_dir_list);
109
110 // Check if this file is included in the non-cache directory
111 if( (cur_lrd_non_cache_dir_list.empty()) ||
112 ("" == check_str_sect_in_list(cur_lrd_non_cache_dir_list,filename,'/'))) {
113
 114 // Only data with numeric datatypes that DAP2 and CF support are cached.
115 if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
116 use_cache_flag = 3;
117 }
118 }
 119 // Here we allow any variable name to be cached.
 120 // The file path that contains the variables can also be included.
121 vector<string> cur_lrd_var_cache_file_list;
122 HDF5RequestHandler::get_lrd_var_cache_file_list(cur_lrd_var_cache_file_list);
123 if(cur_lrd_var_cache_file_list.empty() == false){
124#if 0
126//cerr<<"lrd var cache is "<<cur_lrd_var_cache_file_list[i]<<endl;
127#endif
128 if(true == check_var_cache_files(cur_lrd_var_cache_file_list,filename,varname)){
129#if 0
130//cerr<<"varname is "<<varname <<endl;
131//cerr<<"have var cached "<<endl;
132#endif
133
 134 // Only data with numeric datatypes that DAP2 and CF support are cached.
135 if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
136 use_cache_flag = 4;
137 }
138 }
139 }
140 }
141 }
142
143 if(0 == use_cache_flag)
144 read_data_NOT_from_mem_cache(false,nullptr);
145 else {// memory cache cases
146
147 string cache_key;
148
 149 // We may have common lat/lon cache directories, so check here.
150 if( 3 == use_cache_flag){
151 vector<string> cur_cache_dlist;
152 HDF5RequestHandler::get_lrd_cache_dir_list(cur_cache_dlist);
153 string cache_dir = check_str_sect_in_list(cur_cache_dlist,filename,'/');
154 if(cache_dir != "")
155 cache_key = cache_dir + varname;
156 else {
157 cache_key = filename + varname;
158 // If this lat/lon is not in the common dir. list, it is still cached as a general lat/lon.
159 // Change the flag to 2.
160 use_cache_flag = 2;
161 }
162
163 }
164 else
165 cache_key = filename + varname;
166
167 handle_data_with_mem_cache(dtype,total_elems,use_cache_flag,cache_key,is_dap4);
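        // handle_data_with_mem_cache() is presumably implemented in the shared HDF5
        // array base class; it is expected to look up cache_key in the selected
        // memory cache and, on a miss, fall back to read_data_NOT_from_mem_cache(true,buf)
        // to read the values and populate the cache.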
168
169 }
170
171 return true;
172}
173
 174 // Read data not from the memory cache: the data can be read either from the disk cache or via the HDF5 APIs.
175void HDF5CFArray::read_data_NOT_from_mem_cache(bool add_mem_cache,void*buf) {
176
177 vector<int>offset;
178 vector<int>count;
179 vector<int>step;
180 vector<hsize_t> hoffset;
181 vector<hsize_t>hcount;
182 vector<hsize_t>hstep;
183 int nelms = 1;
184
185 if (rank <= 0)
186 throw InternalErr (__FILE__, __LINE__,
187 "The number of dimension of the variable is <=0 for an array.");
188 else {
189
190 offset.resize(rank);
191 count.resize(rank);
192 step.resize(rank);
193 hoffset.resize(rank);
194 hcount.resize(rank);
195 hstep.resize(rank);
196 nelms = format_constraint (offset.data(), step.data(), count.data());
197 for (int i = 0; i <rank; i++) {
198 hoffset[i] = (hsize_t) offset[i];
199 hcount[i] = (hsize_t) count[i];
200 hstep[i] = (hsize_t) step[i];
201 }
202 }
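    // Illustrative example: for a one-dimensional constraint such as var[1:2:8],
    // format_constraint() yields offset = 1, step = 2 and count = ((8-1)/2)+1 = 4,
    // i.e. the four elements at indices 1, 3, 5 and 7 (see the commented-out
    // format_constraint() at the end of this file for the formula).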
203
204 hid_t dsetid = -1;
205 hid_t dspace = -1;
206 hid_t mspace = -1;
207 hid_t dtypeid = -1;
208 hid_t memtype = -1;
209
210 bool data_from_disk_cache = false;
211 bool data_to_disk_cache = false;
212
213 // Check if the disk cache can be applied.
214 bool use_disk_cache = valid_disk_cache();
215
216 string cache_fpath;
217
218 if(true == use_disk_cache) {
219
220 BESDEBUG("h5","Coming to use disk cache "<<endl);
221
222 unsigned long long disk_cache_size = HDF5RequestHandler::get_disk_cache_size();
223 string diskcache_dir = HDF5RequestHandler::get_disk_cache_dir();
224 string diskcache_prefix = HDF5RequestHandler::get_disk_cachefile_prefix();
225
226 string cache_fname=HDF5CFUtil::obtain_cache_fname(diskcache_prefix,filename,varname);
227 cache_fpath = diskcache_dir + "/"+ cache_fname;
228
229 int temp_total_elems = 1;
230 for (unsigned int i = 0; i <dimsizes.size();i++)
231 temp_total_elems = temp_total_elems*dimsizes[i];
232 short dtype_size = HDF5CFUtil::H5_numeric_atomic_type_size(dtype);
233 // CHECK: I think when signed 8-bit needs to be converted to int16, dtype_size should also change.
234 if(is_dap4 == false && dtype==H5CHAR)
235 dtype_size = 2;
236
237 int expected_file_size = dtype_size *temp_total_elems;
238 int fd = 0;
239 HDF5DiskCache *disk_cache = HDF5DiskCache::get_instance(disk_cache_size,diskcache_dir,diskcache_prefix);
240 if( true == disk_cache->get_data_from_cache(cache_fpath, expected_file_size,fd)) {
241
242 vector<size_t> offset_size_t;
243 offset_size_t.resize(rank);
244 for(int i = 0; i <rank;i++)
245 offset_size_t[i] = (size_t)offset[i];
246 size_t offset_1st = INDEX_nD_TO_1D(dimsizes,offset_size_t);
247 vector<size_t>end;
248 end.resize(rank);
249 for (int i = 0; i < rank; i++)
250 end[i] = offset[i] +(count[i]-1)*step[i];
251 size_t offset_last = INDEX_nD_TO_1D(dimsizes,end);
252#if 0
253//cerr<<"offset_1d is "<<offset_1st <<endl;
254//cerr<<"offset_last is "<<offset_last <<endl;
255#endif
256 size_t total_read = dtype_size*(offset_last-offset_1st+1);
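    // Illustrative example, assuming INDEX_nD_TO_1D() flattens in row-major order:
    // for dimsizes = {4,6}, offset = {1,2}, step = {1,1} and count = {2,3},
    // offset_1st = 1*6+2 = 8 and end = {2,4}, so offset_last = 2*6+4 = 16 and
    // total_read covers the (16-8+1) = 9 contiguous elements that enclose the
    // requested hyperslab; any unwanted elements are skipped in obtain_cached_data().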
257
258 off_t fpos = lseek(fd,dtype_size*offset_1st,SEEK_SET);
259 if (-1 == fpos) {
260 disk_cache->unlock_and_close(cache_fpath);
261 disk_cache->purge_file(cache_fpath);
262 }
263
265 else
266 data_from_disk_cache = obtain_cached_data(disk_cache,cache_fpath,fd, step,count,total_read,dtype_size);
267
268 }
269
270 if(true == data_from_disk_cache)
271 return;
272 else
273 data_to_disk_cache = true;
274
275 }
276
277// END CACHE
278
279 bool pass_fileid = HDF5RequestHandler::get_pass_fileid();
280 if(false == pass_fileid) {
281 if ((fileid = H5Fopen(filename.c_str(),H5F_ACC_RDONLY,H5P_DEFAULT))<0) {
282 ostringstream eherr;
283 eherr << "HDF5 File " << filename
284 << " cannot be opened. "<<endl;
285 throw InternalErr (__FILE__, __LINE__, eherr.str ());
286 }
287 }
288
289 if ((dsetid = H5Dopen(fileid,varname.c_str(),H5P_DEFAULT))<0) {
290 HDF5CFUtil::close_fileid(fileid,pass_fileid);
291 ostringstream eherr;
292 eherr << "HDF5 dataset " << varname
293 << " cannot be opened. "<<endl;
294 throw InternalErr (__FILE__, __LINE__, eherr.str ());
295 }
296
297 if ((dspace = H5Dget_space(dsetid))<0) {
298
299 H5Dclose(dsetid);
300 HDF5CFUtil::close_fileid(fileid,pass_fileid);
301 ostringstream eherr;
302 eherr << "Space id of the HDF5 dataset " << varname
303 << " cannot be obtained. "<<endl;
304 throw InternalErr (__FILE__, __LINE__, eherr.str ());
305 }
306
307 if (H5Sselect_hyperslab(dspace, H5S_SELECT_SET,
308 hoffset.data(), hstep.data(),
309 hcount.data(), nullptr) < 0) {
310
311 H5Sclose(dspace);
312 H5Dclose(dsetid);
313 HDF5CFUtil::close_fileid(fileid,pass_fileid);
314 ostringstream eherr;
315 eherr << "The selection of hyperslab of the HDF5 dataset " << varname
316 << " fails. "<<endl;
317 throw InternalErr (__FILE__, __LINE__, eherr.str ());
318 }
319
320 mspace = H5Screate_simple(rank, hcount.data(),nullptr);
321 if (mspace < 0) {
322 H5Sclose(dspace);
323 H5Dclose(dsetid);
324 HDF5CFUtil::close_fileid(fileid,pass_fileid);
325 ostringstream eherr;
326 eherr << "The creation of the memory space of the HDF5 dataset " << varname
327 << " fails. "<<endl;
328 throw InternalErr (__FILE__, __LINE__, eherr.str ());
329 }
330
331
332 if ((dtypeid = H5Dget_type(dsetid)) < 0) {
333
334 H5Sclose(mspace);
335 H5Sclose(dspace);
336 H5Dclose(dsetid);
337 HDF5CFUtil::close_fileid(fileid,pass_fileid);
338 ostringstream eherr;
339 eherr << "Obtaining the datatype of the HDF5 dataset " << varname
340 << " fails. "<<endl;
341 throw InternalErr (__FILE__, __LINE__, eherr.str ());
342
343 }
344
345 if ((memtype = H5Tget_native_type(dtypeid, H5T_DIR_ASCEND))<0) {
346
347 H5Sclose(mspace);
348 H5Tclose(dtypeid);
349 H5Sclose(dspace);
350 H5Dclose(dsetid);
351 HDF5CFUtil::close_fileid(fileid,pass_fileid);
352 ostringstream eherr;
353 eherr << "Obtaining the memory type of the HDF5 dataset " << varname
354 << " fails. "<<endl;
355 throw InternalErr (__FILE__, __LINE__, eherr.str ());
356
357 }
358
359 hid_t read_ret = -1;
360
 361 // Before reading the data, check whether the memory cache is turned on.
 362 // add_mem_cache is true only when the data memory cache keys are on and in use.
363 if(true == add_mem_cache) {
364 if(buf== nullptr) {
365 H5Sclose(mspace);
366 H5Tclose(dtypeid);
367 H5Sclose(dspace);
368 H5Dclose(dsetid);
369 HDF5CFUtil::close_fileid(fileid,pass_fileid);
370 throw InternalErr(__FILE__,__LINE__,"The memory data cache buffer needs to be set");
371 }
372 read_ret= H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,buf);
373 if(read_ret <0){
374 H5Sclose(mspace);
375 H5Tclose(dtypeid);
376 H5Sclose(dspace);
377 H5Dclose(dsetid);
378 HDF5CFUtil::close_fileid(fileid,pass_fileid);
379 throw InternalErr(__FILE__,__LINE__,"Cannot read the data to the buffer.");
380 }
381 }
382
383
 384 // Now read the data. Note that dtype is not dtypeid;
 385 // dtype is an enum defined by the handler.
386
387 switch (dtype) {
388
389 case H5CHAR:
390 {
391
392 vector<char> val;
393 val.resize(nelms);
394
395 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
396 if (read_ret < 0) {
397
398 H5Sclose(mspace);
399 H5Tclose(memtype);
400 H5Tclose(dtypeid);
401 H5Sclose(dspace);
402 H5Dclose(dsetid);
403 HDF5CFUtil::close_fileid(fileid,pass_fileid);
404 ostringstream eherr;
405 eherr << "Cannot read the HDF5 dataset " << varname
406 << " with the type of H5T_NATIVE_CHAR "<<endl;
407 throw InternalErr (__FILE__, __LINE__, eherr.str ());
408
409 }
410
411 if(is_dap4 == true)
412 set_value((dods_int8 *)val.data(),nelms);
413 else {
414
415 vector<short>newval;
416 newval.resize(nelms);
417
418 for (int counter = 0; counter < nelms; counter++)
419 newval[counter] = (short) (val[counter]);
420
421 set_value ((dods_int16 *) newval.data(), nelms);
422 }
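            // The conversion above is needed because DAP2 has no signed 8-bit integer
            // type, so H5CHAR (signed char) values are promoted to 16-bit integers;
            // DAP4 (the branch above) can store them directly as Int8.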
423
424 if(true == data_to_disk_cache) {
425 try {
426 BESDEBUG("h5","writing data to disk cache "<<endl);
427 write_data_to_cache(dsetid,dspace,mspace,memtype,cache_fpath,2,val,nelms);
428 }
429 catch(...) {
430 H5Sclose(mspace);
431 H5Tclose(memtype);
432 H5Tclose(dtypeid);
433 H5Sclose(dspace);
434 H5Dclose(dsetid);
435 HDF5CFUtil::close_fileid(fileid,pass_fileid);
436 ostringstream eherr;
437 eherr << "write data to cache failed.";
438 throw InternalErr (__FILE__, __LINE__, eherr.str ());
439
440 }
441 }
442
443 } // case H5CHAR
444 break;
445
 446 // Note: for DAP2, H5INT64 and H5UINT64 are ignored.
447 case H5UCHAR:
448 case H5UINT16:
449 case H5INT16:
450 case H5INT32:
451 case H5UINT32:
452 case H5INT64:
453 case H5UINT64:
454 case H5FLOAT32:
455 case H5FLOAT64:
456
457
458 {
459 size_t dtype_size = HDF5CFUtil::H5_numeric_atomic_type_size(dtype);
460 vector<char> val;
461 val.resize(nelms*dtype_size);
462
463 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
464 if (read_ret < 0) {
465 H5Sclose(mspace);
466 H5Tclose(memtype);
467 H5Tclose(dtypeid);
468 H5Sclose(dspace);
469 H5Dclose(dsetid);
470 HDF5CFUtil::close_fileid(fileid,pass_fileid);
471 ostringstream eherr;
472 eherr << "Cannot read the HDF5 dataset " << varname
473 << " with the type of H5T_NATIVE_UCHAR "<<endl;
474 throw InternalErr (__FILE__, __LINE__, eherr.str ());
475
476 }
477 // Not sure if "set_value ((dods_byte *) val.data(), nelms);" works.
478 val2buf(val.data());
479 set_read_p(true);
480
481 if(true == data_to_disk_cache) {
482 BESDEBUG("h5","writing data to disk cache "<<endl);
483 try {
484 write_data_to_cache(dsetid,dspace,mspace,memtype,cache_fpath,dtype_size,val,nelms);
485 }
486 catch(...) {
487 H5Sclose(mspace);
488 H5Tclose(memtype);
489 H5Tclose(dtypeid);
490 H5Sclose(dspace);
491 H5Dclose(dsetid);
492 HDF5CFUtil::close_fileid(fileid,pass_fileid);
493 ostringstream eherr;
494 eherr << "Write data to cache failed."
495 << "It is very possible the error is caused by the server failure"
496 << " such as filled disk partition at the server rather than Hyrax. Please contact "
497 << " the corresponding data center first. If the issue is not due to "
498 << " the server,";
499 throw InternalErr (__FILE__, __LINE__, eherr.str ());
500
501 }
502
503 }
504 } // case H5UCHAR...
505 break;
506
507
508
509#if 0
510 case H5INT16:
511 {
512 vector<short>val;
513 val.resize(nelms);
514
515 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
516 if (read_ret < 0) {
517
518 H5Sclose(mspace);
519 H5Tclose(memtype);
520 H5Tclose(dtypeid);
521 H5Sclose(dspace);
522 H5Dclose(dsetid);
523 HDF5CFUtil::close_fileid(fileid,pass_fileid);
524 //H5Fclose(fileid);
525 ostringstream eherr;
526 eherr << "Cannot read the HDF5 dataset " << varname
527 << " with the type of H5T_NATIVE_SHORT "<<endl;
528 throw InternalErr (__FILE__, __LINE__, eherr.str ());
529
530 }
531 set_value ((dods_int16 *) val.data(), nelms);
532 }// H5INT16
533 break;
534
535
536 case H5UINT16:
537 {
538 vector<unsigned short> val;
539 val.resize(nelms);
540 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
541 if (read_ret < 0) {
542
543 H5Sclose(mspace);
544 H5Tclose(memtype);
545 H5Tclose(dtypeid);
546 H5Sclose(dspace);
547 H5Dclose(dsetid);
548 HDF5CFUtil::close_fileid(fileid,pass_fileid);
549 ostringstream eherr;
550 eherr << "Cannot read the HDF5 dataset " << varname
551 << " with the type of H5T_NATIVE_USHORT "<<endl;
552 throw InternalErr (__FILE__, __LINE__, eherr.str ());
553
554 }
555 set_value ((dods_uint16 *) val.data(), nelms);
556 } // H5UINT16
557 break;
558
559
560 case H5INT32:
561 {
562 vector<int>val;
563 val.resize(nelms);
564 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
565 if (read_ret < 0) {
566 H5Sclose(mspace);
567 H5Tclose(memtype);
568 H5Tclose(dtypeid);
569 H5Sclose(dspace);
570 H5Dclose(dsetid);
571 HDF5CFUtil::close_fileid(fileid,pass_fileid);
572 ostringstream eherr;
573 eherr << "Cannot read the HDF5 dataset " << varname
574 << " with the type of H5T_NATIVE_INT "<<endl;
575 throw InternalErr (__FILE__, __LINE__, eherr.str ());
576
577 }
578 set_value ((dods_int32 *) val.data(), nelms);
579 } // case H5INT32
580 break;
581
582 case H5UINT32:
583 {
584 vector<unsigned int>val;
585 val.resize(nelms);
586 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
587 if (read_ret < 0) {
588 H5Sclose(mspace);
589 H5Tclose(memtype);
590 H5Tclose(dtypeid);
591 H5Sclose(dspace);
592 H5Dclose(dsetid);
593 HDF5CFUtil::close_fileid(fileid,pass_fileid);
594 ostringstream eherr;
595 eherr << "Cannot read the HDF5 dataset " << varname
596 << " with the type of H5T_NATIVE_UINT "<<endl;
597 throw InternalErr (__FILE__, __LINE__, eherr.str ());
598
599 }
600 set_value ((dods_uint32 *) val.data(), nelms);
601 }
602 break;
603
604 case H5FLOAT32:
605 {
606
607 vector<float>val;
608 val.resize(nelms);
609
610 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
611 if (read_ret < 0) {
612 H5Sclose(mspace);
613 H5Tclose(memtype);
614 H5Tclose(dtypeid);
615 H5Sclose(dspace);
616 H5Dclose(dsetid);
617 HDF5CFUtil::close_fileid(fileid,pass_fileid);
618 ostringstream eherr;
619 eherr << "Cannot read the HDF5 dataset " << varname
620 << " with the type of H5T_NATIVE_FLOAT "<<endl;
621 throw InternalErr (__FILE__, __LINE__, eherr.str ());
622
623 }
624 set_value ((dods_float32 *) val.data(), nelms);
625 }
626 break;
627
628
629 case H5FLOAT64:
630 {
631
632 vector<double>val;
633 val.resize(nelms);
634 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
635
636 if (read_ret < 0) {
637 H5Sclose(mspace);
638 H5Tclose(memtype);
639 H5Tclose(dtypeid);
640 H5Sclose(dspace);
641 H5Dclose(dsetid);
642 HDF5CFUtil::close_fileid(fileid,pass_fileid);
643 ostringstream eherr;
644 eherr << "Cannot read the HDF5 dataset " << varname
645 << " with the type of H5T_NATIVE_DOUBLE "<<endl;
646 throw InternalErr (__FILE__, __LINE__, eherr.str ());
647
648 }
649 set_value ((dods_float64 *) val.data(), nelms);
650 } // case H5FLOAT64
651 break;
652
653#endif
654
655 case H5FSTRING:
656 {
657 size_t ty_size = H5Tget_size(dtypeid);
658 if (ty_size == 0) {
659 H5Sclose(mspace);
660 H5Tclose(memtype);
661 H5Tclose(dtypeid);
662 H5Sclose(dspace);
663 H5Dclose(dsetid);
664 HDF5CFUtil::close_fileid(fileid,pass_fileid);
665 ostringstream eherr;
666 eherr << "Cannot obtain the size of the fixed size HDF5 string of the dataset "
667 << varname <<endl;
668 throw InternalErr (__FILE__, __LINE__, eherr.str ());
669 }
670
671 vector <char> strval;
672 strval.resize(nelms*ty_size);
673 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,(void*)strval.data());
674
675 if (read_ret < 0) {
676 H5Sclose(mspace);
677 H5Tclose(memtype);
678 H5Tclose(dtypeid);
679 H5Sclose(dspace);
680 H5Dclose(dsetid);
681 HDF5CFUtil::close_fileid(fileid,pass_fileid);
682 ostringstream eherr;
683 eherr << "Cannot read the HDF5 dataset " << varname
684 << " with the type of the fixed size HDF5 string "<<endl;
685 throw InternalErr (__FILE__, __LINE__, eherr.str ());
686 }
687
688 string total_string(strval.begin(),strval.end());
689 strval.clear(); // May not be necessary
690 vector <string> finstrval;
691 finstrval.resize(nelms);
692 for (int i = 0; i<nelms; i++)
693 finstrval[i] = total_string.substr(i*ty_size,ty_size);
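            // For example, with ty_size = 4 and nelms = 2, a buffer holding "ABCDEFGH"
            // is split into the two strings "ABCD" and "EFGH".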
694
 695 // Check if we should drop long strings.
 696 
 697 // If the size of an individual element exceeds the current netCDF-Java
 698 // string size limit and the "EnableDropLongString" key is turned on,
 699 // no string is generated.
700 if ((true == HDF5RequestHandler::get_drop_long_string()) &&
701 ty_size > NC_JAVA_STR_SIZE_LIMIT) {
702 for (int i = 0; i<nelms; i++)
703 finstrval[i] = "";
704 }
705 set_value(finstrval,nelms);
706 total_string.clear();
707 }
708 break;
709
710
711 case H5VSTRING:
712 {
713 size_t ty_size = H5Tget_size(memtype);
714 if (ty_size == 0) {
715 H5Sclose(mspace);
716 H5Tclose(memtype);
717 H5Tclose(dtypeid);
718 H5Sclose(dspace);
719 H5Dclose(dsetid);
720 HDF5CFUtil::close_fileid(fileid,pass_fileid);
721 ostringstream eherr;
722 eherr << "Cannot obtain the size of the fixed size HDF5 string of the dataset "
723 << varname <<endl;
724 throw InternalErr (__FILE__, __LINE__, eherr.str ());
725 }
726 vector <char> strval;
727 strval.resize(nelms*ty_size);
728 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,(void*)strval.data());
729
730 if (read_ret < 0) {
731 H5Sclose(mspace);
732 H5Tclose(memtype);
733 H5Tclose(dtypeid);
734 H5Sclose(dspace);
735 H5Dclose(dsetid);
736 HDF5CFUtil::close_fileid(fileid,pass_fileid);
737 ostringstream eherr;
738 eherr << "Cannot read the HDF5 dataset " << varname
739 << " with the type of the HDF5 variable length string "<<endl;
740 throw InternalErr (__FILE__, __LINE__, eherr.str ());
741 }
742
743 vector<string>finstrval;
744 finstrval.resize(nelms);
745 char*temp_bp = strval.data();
746 char*onestring = nullptr;
747 for (int i =0;i<nelms;i++) {
748 onestring = *(char**)temp_bp;
749 if(onestring!=nullptr )
750 finstrval[i] =string(onestring);
751
 752 else // Use an empty string if onestring is nullptr.
753 finstrval[i]="";
754 temp_bp +=ty_size;
755 }
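            // For variable-length strings, each ty_size-byte element of the read buffer
            // (ty_size being the size of a char* here) holds a pointer to a NUL-terminated
            // string allocated by the HDF5 library; that memory is released by
            // H5Dvlen_reclaim() below.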
756
757 if (false == strval.empty()) {
758 herr_t ret_vlen_claim;
759 ret_vlen_claim = H5Dvlen_reclaim(memtype,mspace,H5P_DEFAULT,(void*)strval.data());
760 if (ret_vlen_claim < 0){
761 H5Sclose(mspace);
762 H5Tclose(memtype);
763 H5Tclose(dtypeid);
764 H5Sclose(dspace);
765 H5Dclose(dsetid);
766 HDF5CFUtil::close_fileid(fileid,pass_fileid);
767 ostringstream eherr;
768 eherr << "Cannot reclaim the memory buffer of the HDF5 variable length string of the dataset "
769 << varname <<endl;
770 throw InternalErr (__FILE__, __LINE__, eherr.str ());
771
772 }
773 }
774
 775 // If the size of one string element exceeds the current netCDF-Java
 776 // string size limit and the "EnableDropLongString" key is turned on,
 777 // no string is generated.
778 if (true == HDF5RequestHandler::get_drop_long_string()) {
779 bool drop_long_str = false;
780 for (int i =0;i<nelms;i++) {
781 if(finstrval[i].size() >NC_JAVA_STR_SIZE_LIMIT){
782 drop_long_str = true;
783 break;
784 }
785 }
786 if (drop_long_str == true) {
787 for (int i =0;i<nelms;i++)
788 finstrval[i] = "";
789 }
790 }
791 set_value(finstrval,nelms);
792
793 }
794 break;
795
796 default:
797 {
798 H5Tclose(memtype);
799 H5Tclose(dtypeid);
800 H5Sclose(mspace);
801 H5Sclose(dspace);
802 H5Dclose(dsetid);
803 HDF5CFUtil::close_fileid(fileid,pass_fileid);
804 ostringstream eherr;
805 eherr << "Cannot read the HDF5 dataset " << varname
806 << " with the unsupported HDF5 datatype"<<endl;
807 throw InternalErr (__FILE__, __LINE__, eherr.str ());
808 }
809 }
810
811 H5Tclose(memtype);
812 H5Tclose(dtypeid);
813 H5Sclose(mspace);
814 H5Sclose(dspace);
815 H5Dclose(dsetid);
816 HDF5CFUtil::close_fileid(fileid,pass_fileid);
817
818 return;
819}
820
821bool HDF5CFArray::valid_disk_cache() {
822
823 bool ret_value = false;
824 if(true == HDF5RequestHandler::get_use_disk_cache()) {
825
826 BESDEBUG("h5","Coming to disk cache "<<endl);
827 // Check if this is a valid numeric datatype we want to support
828 if(dtype == H5CHAR || dtype ==H5UCHAR || dtype==H5INT16 || dtype ==H5UINT16 ||
829 dtype == H5INT32 || dtype ==H5UINT32 || dtype ==H5FLOAT32 || dtype==H5FLOAT64 ||
830 dtype == H5INT64 || dtype ==H5UINT64){
831
832 BESDEBUG("h5","Coming to disk cache datatype block"<<endl);
833
834 string diskcache_dir = HDF5RequestHandler::get_disk_cache_dir();
835 string diskcache_prefix = HDF5RequestHandler::get_disk_cachefile_prefix();
836 long diskcache_size = HDF5RequestHandler::get_disk_cache_size();
837
838 if(("" == diskcache_dir)||(""==diskcache_prefix)||(diskcache_size <=0))
 839 throw InternalErr (__FILE__, __LINE__, "Either the cache directory or the prefix is empty, or the cache size is not set.");
840 else {
841 struct stat sb;
842 if(stat(diskcache_dir.c_str(),&sb) !=0) {
843 string err_mesg="The cached directory " + diskcache_dir;
844 err_mesg = err_mesg + " doesn't exist. ";
845 throw InternalErr(__FILE__,__LINE__,err_mesg);
846 }
847 else {
848 if(true == S_ISDIR(sb.st_mode)) {
849 if(access(diskcache_dir.c_str(),R_OK|W_OK|X_OK) == -1) {
850 string err_mesg="The cached directory " + diskcache_dir;
 851 err_mesg = err_mesg + " cannot be read, written, or executed.";
852 throw InternalErr(__FILE__,__LINE__,err_mesg);
853 }
854 }
855 else {
856 string err_mesg="The cached directory " + diskcache_dir;
857 err_mesg = err_mesg + " is not a directory.";
858 throw InternalErr(__FILE__,__LINE__,err_mesg);
859 }
860 }
861 }
862
863 short dtype_size = HDF5CFUtil::H5_numeric_atomic_type_size(dtype);
 864 // Check if we only need to cache specific compressed data.
865 if(true == HDF5RequestHandler::get_disk_cache_comp_data()){
866 BESDEBUG("h5","Compression disk cache key is true"<<endl);
867 ret_value = valid_disk_cache_for_compressed_data(dtype_size);
868 BESDEBUG("h5","variable disk cache passes the compression parameter check"<<endl);
869 }
870 else {
871 BESDEBUG("h5","Compression disk cache key is NOT set, disk cache key is true."<<endl);
872 ret_value = true;
873 }
874
875 }
876
877 }
878 return ret_value;
879}
880
 881bool HDF5CFArray::valid_disk_cache_for_compressed_data(short dtype_size) const {
882
883 bool ret_value = false;
 884 // The compression ratio should be smaller than the threshold (hard to compress)
 885 // and the total variable size should be bigger than the defined minimum size.
886#if 0
887 size_t total_byte = total_elems*dtype_size;
888#endif
889 if((comp_ratio < HDF5RequestHandler::get_disk_comp_threshold())
890 && (total_elems*dtype_size >= HDF5RequestHandler::get_disk_var_size())) {
891 if( true == HDF5RequestHandler::get_disk_cache_float_only_comp()) {
892 if(dtype==H5FLOAT32 || dtype == H5FLOAT64)
893 ret_value = true;
894 }
895 else
896 ret_value = true;
897 }
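    // Illustrative example (the threshold and minimum size are hypothetical values,
    // not defaults): with a compression-ratio threshold of 0.2 and a minimum variable
    // size of 10 MB, an 80 MB float32 variable whose comp_ratio is 0.1 would qualify
    // for the disk cache, while a 1 MB variable or one with comp_ratio 0.5 would not.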
898 return ret_value;
899
900}
901
902bool HDF5CFArray::obtain_cached_data(HDF5DiskCache *disk_cache,const string & cache_fpath, int fd,vector<int> &cd_step, vector<int>&cd_count,size_t total_read,short dtype_size) {
903
904 ssize_t ret_read_val = -1;
905 vector<char>buf;
906
907 buf.resize(total_read);
908 ret_read_val = HDF5CFUtil::read_buffer_from_file(fd,(void*)buf.data(),total_read);
909 disk_cache->unlock_and_close(cache_fpath);
910 if((-1 == ret_read_val) || (ret_read_val != (ssize_t)total_read)) {
911 disk_cache->purge_file(cache_fpath);
912 return false;
913 }
914 else {
915 unsigned int nele_to_read = 1;
916 for(int i = 0; i<rank;i++)
917 nele_to_read *=cd_count[i];
918
919 if(nele_to_read == (total_read/dtype_size)) {
920 val2buf(buf.data());
921 set_read_p(true);
922 }
 923 else { // Need to re-assemble the buffer according to the datatype and the stride.
924
925 vector<int>cd_start(rank,0);
926 vector<size_t>cd_pos(rank,0);
927 int nelms_to_send = 1;
928 for(int i = 0; i <rank; i++)
929 nelms_to_send = nelms_to_send*cd_count[i];
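            // Illustrative example: for a rank-1 variable with dimension size 10 and a
            // request of start 0, step 2, count 3, the contiguous span read back from
            // the cache file holds elements 0..4, and subset<T>() below picks out the
            // three elements at indices 0, 2 and 4 (nelms_to_send = 3).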
930
931 switch (dtype) {
932
933 case H5CHAR:
934 {
935#if 0
936 vector<int>total_val;
937 total_val.resize(total_read/dtype_size);
938 memcpy(total_val.data(),(void*)buf.data(),total_read);
939
940 vector<int>final_val;
941 subset<int>(
942 total_val.data(),
943 rank,
944 dimsizes,
945 cd_start.data(),
946 cd_step.data(),
947 cd_count.data(),
948 &final_val,
949 cd_pos,
950 0
951 );
952
953#endif
954
955 if(is_dap4 == false) {
956 vector<short>final_val;
957 subset<short>(
958 buf.data(),
959 rank,
960 dimsizes,
961 cd_start.data(),
962 cd_step.data(),
963 cd_count.data(),
964 &final_val,
965 cd_pos,
966 0
967 );
968 set_value((dods_int16*)final_val.data(),nelms_to_send);
969 }
970 else {
971 vector<char>final_val;
972 subset<char>(
973 buf.data(),
974 rank,
975 dimsizes,
976 cd_start.data(),
977 cd_step.data(),
978 cd_count.data(),
979 &final_val,
980 cd_pos,
981 0
982 );
983 set_value((dods_int8*)final_val.data(),nelms_to_send);
984 }
985
986 }
987
988 break;
989 case H5UCHAR:
990 {
991#if 0
992 vector<unsigned char>total_val;
993 total_val.resize(total_read/dtype_size);
994 memcpy(total_val.data(),(void*)buf.data(),total_read);
995
996 vector<unsigned char>final_val;
997 subset<unsigned char>(
998 total_val.data(),
999 rank,
1000 dimsizes,
1001 cd_start.data(),
1002 cd_step.data(),
1003 cd_count.data(),
1004 &final_val,
1005 cd_pos,
1006 0
1007 );
1008
1009#endif
1010 vector<unsigned char>final_val;
1011 subset<unsigned char>(
1012 buf.data(),
1013 rank,
1014 dimsizes,
1015 cd_start.data(),
1016 cd_step.data(),
1017 cd_count.data(),
1018 &final_val,
1019 cd_pos,
1020 0
1021 );
1022
1023 set_value ((dods_byte *) final_val.data(), nelms_to_send);
1024 }
1025 break;
1026
1027 case H5INT16:
1028 {
1029#if 0
1030 vector<short>total_val;
1031 total_val.resize(total_read/dtype_size);
1032 memcpy(total_val.data(),(void*)buf.data(),total_read);
1033
1034 vector<short>final_val;
1035 subset<short>(
1036 total_val.data(),
1037 rank,
1038 dimsizes,
1039 cd_start.data(),
1040 cd_step.data(),
1041 cd_count.data(),
1042 &final_val,
1043 cd_pos,
1044 0
1045 );
1046#endif
1047
1048 vector<short>final_val;
1049 subset<short>(
1050 buf.data(),
1051 rank,
1052 dimsizes,
1053 cd_start.data(),
1054 cd_step.data(),
1055 cd_count.data(),
1056 &final_val,
1057 cd_pos,
1058 0
1059 );
1060
1061 set_value ((dods_int16 *) final_val.data(), nelms_to_send);
1062 }
1063 break;
1064
1065 case H5UINT16:
1066 {
1067#if 0
1068 vector<unsigned short>total_val;
1069 total_val.resize(total_read/dtype_size);
1070 memcpy(total_val.data(),(void*)buf.data(),total_read);
1071
1072 vector<unsigned short>final_val;
1073 subset<unsigned short>(
1074 total_val.data(),
1075 rank,
1076 dimsizes,
1077 cd_start.data(),
1078 cd_step.data(),
1079 cd_count.data(),
1080 &final_val,
1081 cd_pos,
1082 0
1083 );
1084#endif
1085
1086 vector<unsigned short>final_val;
1087 subset<unsigned short>(
1088 buf.data(),
1089 rank,
1090 dimsizes,
1091 cd_start.data(),
1092 cd_step.data(),
1093 cd_count.data(),
1094 &final_val,
1095 cd_pos,
1096 0
1097 );
1098
1099 set_value ((dods_uint16 *) final_val.data(), nelms_to_send);
1100 }
1101 break;
1102
1103 case H5INT32:
1104 {
1105#if 0
1106 vector<int>total_val;
1107 total_val.resize(total_read/dtype_size);
1108 memcpy(total_val.data(),(void*)buf.data(),total_read);
1109
1110 vector<int>final_val;
1111 subset<int>(
1112 total_val.data(),
1113 rank,
1114 dimsizes,
1115 cd_start.data(),
1116 cd_step.data(),
1117 cd_count.data(),
1118 &final_val,
1119 cd_pos,
1120 0
1121 );
1122
1123#endif
1124
1125 vector<int>final_val;
1126 subset<int>(
1127 buf.data(),
1128 rank,
1129 dimsizes,
1130 cd_start.data(),
1131 cd_step.data(),
1132 cd_count.data(),
1133 &final_val,
1134 cd_pos,
1135 0
1136 );
1137
1138
1139 set_value ((dods_int32 *) final_val.data(), nelms_to_send);
1140 }
1141 break;
1142
1143 case H5UINT32:
1144 {
1145#if 0
1146 vector<unsigned int>total_val;
1147 total_val.resize(total_read/dtype_size);
1148 memcpy(total_val.data(),(void*)buf.data(),total_read);
1149
1150 vector<unsigned int>final_val;
1151 subset<unsigned int>(
1152 total_val.data(),
1153 rank,
1154 dimsizes,
1155 cd_start.data(),
1156 cd_step.data(),
1157 cd_count.data(),
1158 &final_val,
1159 cd_pos,
1160 0
1161 );
1162#endif
1163
1164 vector<unsigned int>final_val;
1165 subset<unsigned int>(
1166 buf.data(),
1167 rank,
1168 dimsizes,
1169 cd_start.data(),
1170 cd_step.data(),
1171 cd_count.data(),
1172 &final_val,
1173 cd_pos,
1174 0
1175 );
1176
1177 set_value ((dods_uint32 *) final_val.data(), nelms_to_send);
1178 }
1179 break;
1180
1181 case H5INT64: // Only for DAP4 CF
1182 {
1183#if 0
1184 vector<unsigned int>total_val;
1185 total_val.resize(total_read/dtype_size);
1186 memcpy(total_val.data(),(void*)buf.data(),total_read);
1187
1188 vector<unsigned int>final_val;
1189 subset<unsigned int>(
1190 total_val.data(),
1191 rank,
1192 dimsizes,
1193 cd_start.data(),
1194 cd_step.data(),
1195 cd_count.data(),
1196 &final_val,
1197 cd_pos,
1198 0
1199 );
1200#endif
1201
1202 vector<long long >final_val;
1203 subset<long long >(
1204 buf.data(),
1205 rank,
1206 dimsizes,
1207 cd_start.data(),
1208 cd_step.data(),
1209 cd_count.data(),
1210 &final_val,
1211 cd_pos,
1212 0
1213 );
1214
1215 set_value ((dods_int64 *) final_val.data(), nelms_to_send);
1216 }
1217 break;
1218
1219
1220
1221 case H5UINT64: // Only for DAP4 CF
1222 {
1223#if 0
1224 vector<unsigned int>total_val;
1225 total_val.resize(total_read/dtype_size);
1226 memcpy(total_val.data(),(void*)buf.data(),total_read);
1227
1228 vector<unsigned int>final_val;
1229 subset<unsigned int>(
1230 total_val.data(),
1231 rank,
1232 dimsizes,
1233 cd_start.data(),
1234 cd_step.data(),
1235 cd_count.data(),
1236 &final_val,
1237 cd_pos,
1238 0
1239 );
1240#endif
1241
1242 vector<unsigned long long >final_val;
1243 subset<unsigned long long >(
1244 buf.data(),
1245 rank,
1246 dimsizes,
1247 cd_start.data(),
1248 cd_step.data(),
1249 cd_count.data(),
1250 &final_val,
1251 cd_pos,
1252 0
1253 );
1254
1255 set_value ((dods_uint64 *) final_val.data(), nelms_to_send);
1256 }
1257 break;
1258
1259
1260 case H5FLOAT32:
1261 {
1262#if 0
1263 vector<float>total_val;
1264 total_val.resize(total_read/dtype_size);
1265 memcpy(total_val.data(),(void*)buf.data(),total_read);
1266
1267 vector<float>final_val;
1268 subset<float>(
1269 total_val.data(),
1270 rank,
1271 dimsizes,
1272 cd_start.data(),
1273 cd_step.data(),
1274 cd_count.data(),
1275 &final_val,
1276 cd_pos,
1277 0
1278 );
1279#endif
1280
1281 vector<float>final_val;
1282 subset<float>(
1283 buf.data(),
1284 rank,
1285 dimsizes,
1286 cd_start.data(),
1287 cd_step.data(),
1288 cd_count.data(),
1289 &final_val,
1290 cd_pos,
1291 0
1292 );
1293
1294
1295 set_value ((dods_float32 *) final_val.data(), nelms_to_send);
1296 }
1297 break;
1298 case H5FLOAT64:
1299 {
1300#if 0
1301 vector<double>total_val;
1302 total_val.resize(total_read/dtype_size);
1303 memcpy(total_val.data(),(void*)buf.data(),total_read);
1304
1305 vector<double>final_val;
1306 subset<double>(
1307 total_val.data(),
1308 rank,
1309 dimsizes,
1310 cd_start.data(),
1311 cd_step.data(),
1312 cd_count.data(),
1313 &final_val,
1314 cd_pos,
1315 0
1316 );
1317#endif
1318 vector<double>final_val;
1319 subset<double>(
1320 buf.data(),
1321 rank,
1322 dimsizes,
1323 cd_start.data(),
1324 cd_step.data(),
1325 cd_count.data(),
1326 &final_val,
1327 cd_pos,
1328 0
1329 );
1330
1331 set_value ((dods_float64 *) final_val.data(), nelms_to_send);
1332 }
1333 break;
1334 default:
1335 throw InternalErr (__FILE__, __LINE__, "unsupported data type.");
1336
1337 }// "end switch(dtype)"
1338 }// "end else (stride is not 1)"
1339 return true;
1340 }// "end else(full_read = true)"
1341}
1342
1343
1344void
1345HDF5CFArray::write_data_to_cache(hid_t dset_id, hid_t /*dspace_id*/, hid_t /*mspace_id*/, hid_t memtype,
1346 const string& cache_fpath, short dtype_size, const vector<char> &buf, int nelms) {
1347
1348 unsigned long long disk_cache_size = HDF5RequestHandler::get_disk_cache_size();
1349 string disk_cache_dir = HDF5RequestHandler::get_disk_cache_dir();
1350 string disk_cache_prefix = HDF5RequestHandler::get_disk_cachefile_prefix();
1351 HDF5DiskCache *disk_cache = HDF5DiskCache::get_instance(disk_cache_size,disk_cache_dir,disk_cache_prefix);
1352 int total_nelem = 1;
1353 for(int i = 0; i <rank; i++)
1354 total_nelem = total_nelem*dimsizes[i];
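    // When the current request covers the whole variable (nelms == total_nelem), the
    // buffer passed in is written to the cache file directly; otherwise the complete
    // dataset is re-read with H5S_ALL so that the cache file always holds the full
    // variable. For the DAP2 H5CHAR case the values are widened to short first, which
    // matches the dtype_size of 2 assumed when the cache file is read back.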
1355
1356 vector<char>val;
1357
1358 if(H5CHAR == dtype && is_dap4 == false) {
1359
1360 vector<short>newval;
1361 newval.resize(total_nelem);
1362 if(total_nelem == nelms) {
1363 for (int i = 0; i < total_nelem;i++)
1364 newval[i] = (short)buf[i];
1365 disk_cache->write_cached_data2(cache_fpath,sizeof(short)*total_nelem,(const void*)newval.data());
1366 }
1367 else {
1368 vector<char>val2;
1369 val2.resize(total_nelem);
1370 if(H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL,H5P_DEFAULT, val2.data())<0)
1371 throw InternalErr (__FILE__, __LINE__, "Cannot read the whole HDF5 dataset for the disk cache.");
1372 for (int i = 0; i < total_nelem;i++)
1373 newval[i] = (short)val2[i];
1374 disk_cache->write_cached_data2(cache_fpath,sizeof(short)*total_nelem,(const void*)newval.data());
1375 }
1376 }
1377 else {
1378 if(total_nelem == nelms) {
1379 disk_cache->write_cached_data2(cache_fpath,dtype_size*total_nelem,(const void*)buf.data());
1380 }
1381 else {
1382 val.resize(dtype_size*total_nelem);
1383 if(H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL,H5P_DEFAULT, val.data())<0)
1384 throw InternalErr (__FILE__, __LINE__, "Cannot read the whole SDS for cache.");
1385
1386 disk_cache->write_cached_data2(cache_fpath,dtype_size*total_nelem,(const void*)val.data());
1387 }
1388 }
1389}
1390
1391#if 0
1392void HDF5CFArray::read_data_from_mem_cache(void*buf) {
1393
1394 vector<int>offset;
1395 vector<int>count;
1396 vector<int>step;
1397 int nelms = format_constraint (offset.data(), step.data(), count.data());
1398 // set the original position to the starting point
1399 vector<int>at_pos(at_ndims,0);
1400 for (int i = 0; i< rank; i++)
1401 at_pos[i] = at_offset[i];
1402
1403
1404 switch (dtype) {
1405
1406 case H5UCHAR:
1407
1408 {
1409 vector<unsigned char> val;
1410 val.resize(nelms);
1411 subset<unsigned char>(
1412 total_val.data(),
1413 rank,
1414 dimsizes,
1415 offset,
1416 step,
1417 count,
1418 &final_val,
1419 at_pos,
1420 0
1421 );
1422
1423
1424 set_value ((dods_byte *) val.data(), nelms);
1425 } // case H5UCHAR
1426 break;
1427
1428
1429 case H5CHAR:
1430 {
1431
1432 vector<char> val;
1433 val.resize(nelms);
1434
1435 if (0 == rank)
1436 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1437 else
1438 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1439
1440 if (read_ret < 0) {
1441
1442 if (rank > 0)
1443 H5Sclose(mspace);
1444 H5Tclose(memtype);
1445 H5Tclose(dtypeid);
1446 H5Sclose(dspace);
1447 H5Dclose(dsetid);
1448 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1449 ostringstream eherr;
1450 eherr << "Cannot read the HDF5 dataset " << varname
1451 << " with the type of H5T_NATIVE_CHAR "<<endl;
1452 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1453
1454 }
1455
1456 vector<short>newval;
1457 newval.resize(nelms);
1458
1459 for (int counter = 0; counter < nelms; counter++)
1460 newval[counter] = (short) (val[counter]);
1461
1462 set_value ((dods_int16 *) newval.data(), nelms);
1463 } // case H5CHAR
1464 break;
1465
1466
1467 case H5INT16:
1468 {
1469 vector<short>val;
1470 val.resize(nelms);
1471
1472 if (0 == rank)
1473 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1474 else
1475 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1476
1477 if (read_ret < 0) {
1478
1479 if (rank > 0)
1480 H5Sclose(mspace);
1481 H5Tclose(memtype);
1482 H5Tclose(dtypeid);
1483 H5Sclose(dspace);
1484 H5Dclose(dsetid);
1485 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1486 //H5Fclose(fileid);
1487 ostringstream eherr;
1488 eherr << "Cannot read the HDF5 dataset " << varname
1489 << " with the type of H5T_NATIVE_SHORT "<<endl;
1490 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1491
1492 }
1493 set_value ((dods_int16 *) val.data(), nelms);
1494 }// H5INT16
1495 break;
1496
1497
1498 case H5UINT16:
1499 {
1500 vector<unsigned short> val;
1501 val.resize(nelms);
1502 if (0 == rank)
1503 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1504 else
1505 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1506
1507 if (read_ret < 0) {
1508
1509 if (rank > 0) H5Sclose(mspace);
1510 H5Tclose(memtype);
1511 H5Tclose(dtypeid);
1512 H5Sclose(dspace);
1513 H5Dclose(dsetid);
1514 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1515 ostringstream eherr;
1516 eherr << "Cannot read the HDF5 dataset " << varname
1517 << " with the type of H5T_NATIVE_USHORT "<<endl;
1518 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1519
1520 }
1521 set_value ((dods_uint16 *) val.data(), nelms);
1522 } // H5UINT16
1523 break;
1524
1525
1526 case H5INT32:
1527 {
1528 vector<int>val;
1529 val.resize(nelms);
1530 if (0 == rank)
1531 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1532 else
1533 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1534
1535 if (read_ret < 0) {
1536 if (rank > 0)
1537 H5Sclose(mspace);
1538 H5Tclose(memtype);
1539 H5Tclose(dtypeid);
1540 H5Sclose(dspace);
1541 H5Dclose(dsetid);
1542 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1543 ostringstream eherr;
1544 eherr << "Cannot read the HDF5 dataset " << varname
1545 << " with the type of H5T_NATIVE_INT "<<endl;
1546 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1547
1548 }
1549 set_value ((dods_int32 *) val.data(), nelms);
1550 } // case H5INT32
1551 break;
1552
1553 case H5UINT32:
1554 {
1555 vector<unsigned int>val;
1556 val.resize(nelms);
1557 if (0 == rank)
1558 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1559 else
1560 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1561
1562 if (read_ret < 0) {
1563
1564 if (rank > 0)
1565 H5Sclose(mspace);
1566 H5Tclose(memtype);
1567 H5Tclose(dtypeid);
1568 H5Sclose(dspace);
1569 H5Dclose(dsetid);
1570 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1571 ostringstream eherr;
1572 eherr << "Cannot read the HDF5 dataset " << varname
1573 << " with the type of H5T_NATIVE_UINT "<<endl;
1574 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1575
1576 }
1577 set_value ((dods_uint32 *) val.data(), nelms);
1578 }
1579 break;
1580
1581 case H5FLOAT32:
1582 {
1583
1584 vector<float>val;
1585 val.resize(nelms);
1586
1587 if (0 == rank)
1588 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1589 else
1590 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1591
1592 if (read_ret < 0) {
1593 if (rank > 0)
1594 H5Sclose(mspace);
1595 H5Tclose(memtype);
1596 H5Tclose(dtypeid);
1597 H5Sclose(dspace);
1598 H5Dclose(dsetid);
1599 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1600 ostringstream eherr;
1601 eherr << "Cannot read the HDF5 dataset " << varname
1602 << " with the type of H5T_NATIVE_FLOAT "<<endl;
1603 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1604
1605 }
1606 set_value ((dods_float32 *) val.data(), nelms);
1607 }
1608 break;
1609
1610
1611 case H5FLOAT64:
1612 {
1613
1614 vector<double>val;
1615 val.resize(nelms);
1616 if (0 == rank)
1617 read_ret = H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,val.data());
1618 else
1619 read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
1620
1621 if (read_ret < 0) {
1622 if (rank > 0)
1623 H5Sclose(mspace);
1624 H5Tclose(memtype);
1625 H5Tclose(dtypeid);
1626 H5Sclose(dspace);
1627 H5Dclose(dsetid);
1628 HDF5CFUtil::close_fileid(fileid,pass_fileid);
1629 ostringstream eherr;
1630 eherr << "Cannot read the HDF5 dataset " << varname
1631 << " with the type of H5T_NATIVE_DOUBLE "<<endl;
1632 throw InternalErr (__FILE__, __LINE__, eherr.str ());
1633
1634 }
1635 set_value ((dods_float64 *) val.data(), nelms);
1636 } // case H5FLOAT64
1637 break;
1638
1639
1640
1641 // Just see if it works.
1642 val2buf(buf);
1643 set_read_p(true);
1644 return;
1645}
1646#endif
1647
1648#if 0
1649// We don't inherit libdap Array Class's transform_to_dap4 method since it also transforms attributes.
1650BaseType* HDF5CFArray::h5cfdims_transform_to_dap4(D4Group *grp) {
1651
1652 if(grp == nullptr)
1653 return nullptr;
1654 Array *dest = static_cast<HDF5CFArray*>(ptr_duplicate());
1655
1656 // If there is just a size, don't make
1657 // a D4Dimension (In DAP4 you cannot share a dimension unless it has
1658 // a name). jhrg 3/18/14
1659
1660 D4Dimensions *grp_dims = grp->dims();
1661 for (Array::Dim_iter dap2_dim = dest->dim_begin(), e = dest->dim_end(); dap2_dim != e; ++dap2_dim) {
1662 if (!(*dap2_dim).name.empty()) {
1663
1664 // If a D4Dimension with the name already exists, use it.
1665 D4Dimension *d4_dim = grp_dims->find_dim((*dap2_dim).name);
1666 if (!d4_dim) {
1667 d4_dim = new D4Dimension((*dap2_dim).name, (*dap2_dim).size);
1668 grp_dims->add_dim_nocopy(d4_dim);
1669 }
1670 // At this point d4_dim's name and size == those of (*d) so just set
1671 // the D4Dimension pointer so it matches the one in the D4Group.
1672 (*dap2_dim).dim = d4_dim;
1673 }
1674 }
1675
1676 return dest;
1677
1678}
1679#endif
1680
 1681// We don't inherit the libdap Array class's transform_to_dap4 method since the CF option is still using it.
 1682// This function is used to map 64-bit integers to DAP4 for the CF option. It is largely borrowed from
 1683// the DAP4 code.
1684BaseType* HDF5CFArray::h5cfdims_transform_to_dap4_int64(D4Group *grp) {
1685
1686 if(grp == nullptr)
1687 return nullptr;
1688 Array *dest = static_cast<HDF5CFArray*>(ptr_duplicate());
1689
1690 // If there is just a size, don't make
1691 // a D4Dimension (In DAP4 you cannot share a dimension unless it has
1692 // a name). jhrg 3/18/14
1693
1694 for (Array::Dim_iter d = dest->dim_begin(), e = dest->dim_end(); d != e; ++d) {
1695 if (false == (*d).name.empty()) {
1696
1697 D4Group *temp_grp = grp;
1698 D4Dimension *d4_dim = nullptr;
1699 while(temp_grp) {
1700
1701 D4Dimensions *temp_dims = temp_grp->dims();
1702
1703 // Check if the dimension is defined in this group
1704 d4_dim = temp_dims->find_dim((*d).name);
1705 if(d4_dim) {
1706 (*d).dim = d4_dim;
1707 break;
1708 }
1709
1710 if(temp_grp->get_parent())
1711 temp_grp = static_cast<D4Group*>(temp_grp->get_parent());
1712 else
1713 temp_grp = nullptr;
1714
1715 }
1716
 1717 // If this dimension is not found in any of the ancestor groups, add it to this group.
 1718 // The following block is fine, but to avoid the complaint from SonarCloud,
 1719 // use a bool.
 1720 bool d4_dim_null = (d4_dim == nullptr);
1721#if 0
1722 //if(d4_dim == nullptr) {
1723#endif
 1724 // If this dimension was not found in any ancestor group, add it to this group.
1725 if(d4_dim_null == true) {
1726
1727 d4_dim = new D4Dimension((*d).name, (*d).size);
1728 D4Dimensions * dims = grp->dims();
1729 dims->add_dim_nocopy(d4_dim);
1730 (*d).dim = d4_dim;
1731 }
1732 }
1733 }
1734
1735 dest->set_is_dap4(true);
1736
1737 return dest;
1738
1739}
1740#if 0
1741// parse constraint expr. and make hdf5 coordinate point location.
1742// return number of elements to read.
1743int
1744HDF5CFArray::format_constraint (int *offset, int *step, int *count)
1745{
1746
1747 long nels = 1;
1748 int id = 0;
1749
1750 Dim_iter p = dim_begin ();
1751
1752 while (p != dim_end ()) {
1753
1754 int start = dimension_start (p, true);
1755 int stride = dimension_stride (p, true);
1756 int stop = dimension_stop (p, true);
1757
1758 // Check for illegal constraint
1759 if (start > stop) {
1760 ostringstream oss;
1761
1762 oss << "Array/Grid hyperslab start point "<< start <<
1763 " is greater than stop point " << stop <<".";
1764 throw Error(malformed_expr, oss.str());
1765 }
1766
1767 offset[id] = start;
1768 step[id] = stride;
1769 count[id] = ((stop - start) / stride) + 1; // count of elements
1770 nels *= count[id]; // total number of values for variable
1771
1772 BESDEBUG ("h5",
1773 "=format_constraint():"
1774 << "id=" << id << " offset=" << offset[id]
1775 << " step=" << step[id]
1776 << " count=" << count[id]
1777 << endl);
1778
1779 id++;
1780 p++;
1781 }
1782
1783 return nels;
1784}
1785
1786#endif