// hc2dap.cc -- part of the BES hdf4 data handler (bes version 3.20.13)

// This file is part of the hdf4 data handler for the OPeNDAP data server.

// Copyright (c) 2008-2012 The HDF Group
// Author: Hyo-Kyung Lee <hyoklee@hdfgroup.org>
//
// Copyright (c) 2005 OPeNDAP, Inc.
// Author: James Gallagher <jgallagher@opendap.org>
//
// This is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License as published by the Free
// Software Foundation; either version 2.1 of the License, or (at your
// option) any later version.
//
// This software is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
// License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this software; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
// You can contact OPeNDAP, Inc. at PO Box 112, Saunderstown, RI. 02874-0112.

// Copyright 1996, by the California Institute of Technology.
// ALL RIGHTS RESERVED. United States Government Sponsorship
// acknowledged. Any commercial use must be negotiated with the
// Office of Technology Transfer at the California Institute of
// Technology. This software may be subject to U.S. export control
// laws and regulations. By accepting this software, the user
// agrees to comply with all applicable U.S. export laws and
// regulations. User has the responsibility to obtain export
// licenses, or other export authority as may be required before
// exporting such information to foreign countries or providing
// access to foreign persons.

// Author: Todd Karakashian, NASA/Jet Propulsion Laboratory
//         Todd.K.Karakashian@jpl.nasa.gov
//
#include "config_hdf.h"

// STL includes
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <iostream>

// BES / libdap debug facilities
#include <BESDebug.h>
#include <libdap/debug.h>

using namespace std;

// HDF and HDFClass includes
// Include this on linux to suppress an annoying warning about multiple
// definitions of MIN and MAX.
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include <mfhdf.h>
#include <hdfclass.h>
#include <hcstream.h>

// DODS/HDF includes
#include <libdap/escaping.h>
#include "HDFInt32.h"
#include "HDFInt16.h"
#include "HDFUInt32.h"
#include "HDFUInt16.h"
#include "HDFFloat64.h"
#include "HDFFloat32.h"
#include "HDFByte.h"
#include "HDFStr.h"
#include "HDFArray.h"
#include "HDFGrid.h"
#include "HDFSequence.h"
#include "HDFStructure.h"
#include "hdfutil.h"
#include "dhdferr.h"
#include "hdf-maps.h"

95// Undefine the following to send signed bytes using unsigned bytes. 1/13/98
96// jhrg.
97#define SIGNED_BYTE_TO_INT32 1
98
99BaseType *NewDAPVar(const string &varname,
100 const string &dataset,
101 int32 hdf_type);
102void LoadStructureFromField(HDFStructure * stru, hdf_field & f, int row);
103
104// STL predicate comparing equality of hdf_field objects based on their names
105class fieldeq {
106public:
107 explicit fieldeq(const string & s) {
108 _val = s;
109 }
110
111 bool operator() (const hdf_field & f) const {
112 return (f.name == _val);
113 }
114
115private:
116 string _val;
117};
118
119// Create a DAP HDFSequence from an hdf_vdata.
120HDFSequence *NewSequenceFromVdata(const hdf_vdata &vd, const string &dataset)
121{
122 // check to make sure hdf_vdata object is set up properly
123 // Vdata must have a name
124 if (!vd || vd.fields.size() == 0 || vd.name.empty())
125 return 0;
126
127 // construct HDFSequence
128 HDFSequence *seq = new HDFSequence(vd.name, dataset);
129
130 // step through each field and create a variable in the DAP Sequence
131 for (int i = 0; i < (int) vd.fields.size(); ++i) {
132 if (!vd.fields[i] || vd.fields[i].vals.size() < 1 ||
133 vd.fields[i].name.empty()) {
134 delete seq; // problem with the field
135 return 0;
136 }
137 HDFStructure *st = 0;
138 try {
139 st = new HDFStructure(vd.fields[i].name, dataset);
140
141 // for each subfield add the subfield to st
142 if (vd.fields[i].vals[0].number_type() == DFNT_CHAR8
143 || vd.fields[i].vals[0].number_type() == DFNT_UCHAR8) {
144
145 // collapse char subfields into one string
146 string subname = vd.fields[i].name + "__0";
147 BaseType *bt = new HDFStr(subname, dataset);
148 st->add_var(bt); // *st now manages *bt
149 delete bt;
150 }
151 else {
152 // create a DODS variable for each subfield
153 for (int j = 0; j < (int) vd.fields[i].vals.size(); ++j) {
154 ostringstream strm;
155 strm << vd.fields[i].name << "__" << j;
156 BaseType *bt =
157 NewDAPVar(strm.str(), dataset,
158 vd.fields[i].vals[j].number_type());
159 st->add_var(bt); // *st now manages *bt
160 delete bt;
161 }
162 }
163 seq->add_var(st); // *seq now manages *st
164 delete st;
165 }
166 catch (...) {
167 delete seq;
168 delete st;
169 throw;
170 }
171 }
172
173 return seq;
174}
175
// Create a DAP HDFStructure from an hdf_vgroup.
//
// Walks the Vgroup's (tag, ref) pairs and converts each member into the
// matching DAP object: Vdatas (DFTAG_VH) become Sequences; SDSes (DFTAG_NDG)
// become Grids when a dimension scale is present, otherwise Arrays; GR images
// become Arrays; nested Vgroups recurse into nested Structures. Returns 0
// when the Vgroup is unnamed, has no tagrefs, or yields no members. The
// caller owns the returned object.
HDFStructure *NewStructureFromVgroup(const hdf_vgroup &vg, vg_map &vgmap,
                                     sds_map &sdmap, vd_map &vdmap,
                                     gr_map &grmap, const string &dataset)
{
    // check to make sure hdf_vgroup object is set up properly
    if (vg.name.length() == 0) // Vgroup must have a name
        return 0;
    if (!vg) // Vgroup must have some tagrefs
        return 0;

    // construct HDFStructure
    HDFStructure *str = new HDFStructure(vg.name, dataset);
    bool nonempty = false;

    // I think coverity is unreasonable on this one. The code is sound. KY 2016-05-12
    // 'bt' is declared here (not inside the loop) so the catch block can
    // free it; it is nulled immediately after every delete to avoid a
    // double free from a later throw.
    BaseType *bt = 0;
    try {
        // step through each tagref and copy its contents to DAP
        // NOTE(review): vg.refs is assumed to parallel vg.tags element by
        // element -- confirm the hdf_vgroup builder establishes this.
        for (int i = 0; i < (int) vg.tags.size(); ++i) {
            int32 tag = vg.tags[i];
            int32 ref = vg.refs[i];

            switch (tag) {
            case DFTAG_VH:
                // Vdata -> DAP Sequence
                bt = NewSequenceFromVdata(vdmap[ref].vdata, dataset);
                break;
            case DFTAG_NDG:
                // SDS -> Grid (with dim scale) or plain Array. Note that
                // map operator[] default-inserts when 'ref' is absent.
                if (sdmap[ref].sds.has_scale()) {
                    bt = NewGridFromSDS(sdmap[ref].sds, dataset);
                } else {
                    bt = NewArrayFromSDS(sdmap[ref].sds, dataset);
                }
                break;
            case DFTAG_VG:
                // GR's are also stored as Vgroups
                if (grmap.find(ref) != grmap.end()){
                    bt = NewArrayFromGR(grmap[ref].gri, dataset);
                }
                else
                    bt = NewStructureFromVgroup(vgmap[ref].vgroup, vgmap,
                                                sdmap, vdmap, grmap, dataset);
                break;
            default:
                // other tags (annotations, attributes, ...) are skipped
                break;
            }
            if (bt) {
                str->add_var(bt); // *str now manages a copy of *bt
                delete bt;
                bt = 0; // See if coverity scan can pass this.
                nonempty = true;
            }
        }
    }
    catch(...) {
        delete str;
        delete bt;
        throw;
    }

    if (nonempty) {
        return str;
    } else {
        delete str;
        return 0;
    }
}
244// Create a DAP HDFArray out of the primary array in an hdf_sds
245HDFArray *NewArrayFromSDS(const hdf_sds & sds, const string &dataset)
246{
247 if (sds.name.length() == 0) // SDS must have a name
248 return 0;
249 if (sds.dims.size() == 0) // SDS must have rank > 0
250 return 0;
251
252 // construct HDFArray, assign data type
253 BaseType *bt = NewDAPVar(sds.name, dataset, sds.data.number_type());
254 if (bt == 0) { // something is not right with SDS number type?
255 return 0;
256 }
257 try {
258 HDFArray *ar = 0;
259 ar = new HDFArray(sds.name,dataset,bt);
260 delete bt;
261
262 // add dimension info to HDFArray
263 for (int i = 0; i < (int) sds.dims.size(); ++i)
264 ar->append_dim(sds.dims[i].count, sds.dims[i].name);
265
266 return ar;
267 }
268 catch (...) {
269 delete bt;
270 throw;
271 }
272}
273
274// Create a DAP HDFArray out of a general raster
275HDFArray *NewArrayFromGR(const hdf_gri & gr, const string &dataset)
276{
277 if (gr.name.length() == 0) // GR must have a name
278 return 0;
279
280 // construct HDFArray, assign data type
281 BaseType *bt = NewDAPVar(gr.name, dataset, gr.image.number_type());
282 if (bt == 0) { // something is not right with GR number type?
283 return 0;
284 }
285
286 try {
287 HDFArray *ar = 0;
288 ar = new HDFArray(gr.name, dataset, bt);
289
290 // Array duplicates the base type passed, so delete here
291 delete bt;
292
293 // add dimension info to HDFArray
294 if (gr.num_comp > 1)
295 ar->append_dim(gr.num_comp, gr.name + "__comps");
296 ar->append_dim(gr.dims[1], gr.name + "__Y");
297 ar->append_dim(gr.dims[0], gr.name + "__X");
298 return ar;
299 }
300 catch (...) {
301 delete bt;
302 throw;
303 }
304}
305
306// Create a DAP HDFGrid out of the primary array and dim scale in an hdf_sds
307HDFGrid *NewGridFromSDS(const hdf_sds & sds, const string &dataset)
308{
309 BESDEBUG("h4", "NewGridFromSDS" << endl);
310 if (!sds.has_scale()) // we need a dim scale to make a Grid
311 return 0;
312
313 // Create the HDFGrid and the primary array. Add the primary array to
314 // the HDFGrid.
315 HDFArray *ar = NewArrayFromSDS(sds, dataset);
316 if (ar == 0)
317 return 0;
318
319 HDFGrid *gr = 0;
320 HDFArray *dmar = 0;
321 BaseType *dsbt = 0;
322 try {
323 gr = new HDFGrid(sds.name, dataset);
324 gr->add_var(ar, libdap::array); // note: gr now manages ar
325 delete ar;
326
327 // create dimension scale HDFArrays (i.e., maps) and
328 // add them to the HDFGrid
329 string mapname;
330 for (int i = 0; i < (int) sds.dims.size(); ++i) {
331 if (sds.dims[i].name.length() == 0) { // the dim must be named
332 delete gr;
333 return 0;
334 }
335 mapname = sds.dims[i].name;
336 if ((dsbt = NewDAPVar(mapname, dataset,
337 sds.dims[i].scale.number_type())) == 0) {
338 delete gr; // note: ~HDFGrid() cleans up the attached ar
339 return 0;
340 }
341 dmar = new HDFArray(mapname, dataset, dsbt);
342 delete dsbt;
343 dmar->append_dim(sds.dims[i].count); // set dimension size
344 gr->add_var(dmar, maps); // add dimension map to grid;
345 delete dmar;
346 }
347 return gr;
348 }
349 catch (...) {
350 delete dmar;
351 delete dsbt;
352 delete gr;
353 delete ar;
354 throw;
355 }
356}
357
358// Return a ptr to DAP atomic data object corresponding to an HDF Type, or
359// return 0 if the HDF Type is invalid or not supported.
360BaseType *NewDAPVar(const string &varname,
361 const string &dataset,
362 int32 hdf_type)
363{
364 switch (hdf_type) {
365 case DFNT_FLOAT32:
366 return new HDFFloat32(varname, dataset);
367
368 case DFNT_FLOAT64:
369 return new HDFFloat64(varname, dataset);
370
371 case DFNT_INT16:
372 return new HDFInt16(varname, dataset);
373
374#ifdef SIGNED_BYTE_TO_INT32
375 case DFNT_INT8:
376#endif
377 case DFNT_INT32:
378 return new HDFInt32(varname, dataset);
379
380 case DFNT_UINT16:
381 return new HDFUInt16(varname, dataset);
382
383 case DFNT_UINT32:
384 return new HDFUInt32(varname, dataset);
385
386 // INT8 and UINT8 *should* be grouped under Int32 and UInt32, but
387 // that breaks too many programs. jhrg 12/30/97
388#ifndef SIGNED_BYTE_TO_INT32
389 case DFNT_INT8:
390#endif
391 case DFNT_UINT8:
392 case DFNT_UCHAR8:
393 case DFNT_CHAR8:
394 return new HDFByte(varname, dataset);
395
396 default:
397 return 0;
398 }
399}
400
401// Return the DAP type name that corresponds to an HDF data type
402string DAPTypeName(int32 hdf_type)
403{
404 switch (hdf_type) {
405 case DFNT_FLOAT32:
406 return string("Float32");
407
408 case DFNT_FLOAT64:
409 return string("Float64");
410
411 case DFNT_INT16:
412 return string("Int16");
413
414#ifdef SIGNED_BYTE_TO_INT32
415 case DFNT_INT8:
416#endif
417 case DFNT_INT32:
418 return string("Int32");
419
420 case DFNT_UINT16:
421 return string("UInt16");
422
423 case DFNT_UINT32:
424 return string("UInt32");
425
426 // See the note above about INT8 and UINT8. jhrg 12/30/97.
427#ifndef SIGNED_BYTE_TO_INT32
428 case DFNT_INT8:
429#endif
430 case DFNT_UINT8:
431 return string("Byte");
432
433 case DFNT_CHAR8:
434 case DFNT_UCHAR8:
435 // note: DFNT_CHAR8 is Byte in DDS but String in DAS
436 return string("String");
437
438 default:
439 return string("");
440 }
441}
442
443// load an HDFArray from an SDS
444void LoadArrayFromSDS(HDFArray * ar, const hdf_sds & sds)
445{
446#ifdef SIGNED_BYTE_TO_INT32
447 switch (sds.data.number_type()) {
448 case DFNT_INT8:{
449 char *data = static_cast < char *>(ExportDataForDODS(sds.data));
450 ar->val2buf(data);
451 delete[]data;
452 break;
453 }
454 default:
455 ar->val2buf(const_cast < char *>(sds.data.data()));
456 }
457#else
458 ar->val2buf(const_cast < char *>(sds.data.data()));
459#endif
460 return;
461}
462
463// load an HDFArray from a GR image
464void LoadArrayFromGR(HDFArray * ar, const hdf_gri & gr)
465{
466#ifdef SIGNED_BYTE_TO_INT32
467 switch (gr.image.number_type()) {
468 case DFNT_INT8:{
469 char *data = static_cast < char *>(ExportDataForDODS(gr.image));
470 ar->val2buf(data);
471 delete[]data;
472 break;
473 }
474
475 default:
476 ar->val2buf(const_cast < char *>(gr.image.data()));
477 }
478#else
479 ar->val2buf(const_cast < char *>(gr.image.data()));
480#endif
481 return;
482}
483
// load an HDFGrid from an SDS
// I modified Todd's code so that only the parts of a Grid that are marked as
// to be sent will be read. 1/29/2002 jhrg
//
// Loads the Grid's primary array from sds.data and each map from the
// matching dimension scale, but only for components whose send_p flag is
// set. Throws dhdferr_consist when the Grid's rank disagrees with the SDS.
void LoadGridFromSDS(HDFGrid * gr, const hdf_sds & sds)
{

    // load data into primary array -- skipped entirely unless the client
    // asked for it (send_p)
    HDFArray & primary_array = static_cast < HDFArray & >(*gr->array_var());
    if (primary_array.send_p()) {
        LoadArrayFromSDS(&primary_array, sds);
        primary_array.set_read_p(true);
    }
    // load data into maps
    if (primary_array.dimensions() != sds.dims.size())
        THROW(dhdferr_consist); // # of dims of SDS and HDFGrid should agree!

    // Walk the Grid maps and the SDS dims in lockstep; the check above
    // guarantees they have the same count.
    Grid::Map_iter p = gr->map_begin();
    for (unsigned int i = 0;
         i < sds.dims.size() && p != gr->map_end(); ++i, ++p) {
        if ((*p)->send_p()) {
#ifdef SIGNED_BYTE_TO_INT32
            // INT8 scales are widened by ExportDataForDODS(), which
            // allocates a buffer that must be freed here.
            switch (sds.dims[i].scale.number_type()) {
            case DFNT_INT8:{
                    char *data = static_cast < char *>(ExportDataForDODS(sds.dims[i].scale));
                    (*p)->val2buf(data);
                    delete[]data;
                    break;
                }
            default:
                (*p)->val2buf(const_cast < char *>
                              (sds.dims[i].scale.data()));
            }
#else
            (*p)->val2buf(const_cast < char *>(sds.dims[i].scale.data()));
#endif
            (*p)->set_read_p(true);
        }
    }
    return;
}
525// load an HDFSequence from a row of an hdf_vdata
526void LoadSequenceFromVdata(HDFSequence * seq, hdf_vdata & vd, int row)
527{
528 Constructor::Vars_iter p;
529 for (p = seq->var_begin(); p != seq->var_end(); ++p) {
530 HDFStructure & stru = static_cast < HDFStructure & >(**p);
531
532 // find corresponding field in vd
533 vector < hdf_field >::iterator vf =
534 find_if(vd.fields.begin(), vd.fields.end(),
535 fieldeq(stru.name()));
536 if (vf == vd.fields.end())
537 THROW(dhdferr_consist);
538
539 // for each field component of field, extract the proper data element
540 // for the current row being requested and load into the Structure
541 // variable
542 LoadStructureFromField(&stru, *vf, row);
543 stru.set_read_p(true);
544 }
545}
546
// Load an HDFStructure with the components of a row of an hdf_field. If the
// field is made of char8 components, collapse these into one String component
void LoadStructureFromField(HDFStructure * stru, hdf_field & f, int row)
{

    // NOTE(review): this range check admits row == f.vals[0].size(), which
    // looks like an off-by-one ('>=' seems intended) -- confirm against the
    // bounds behavior of elt_char8()/ExportDataForDODS() before changing.
    if (row < 0 || f.vals.size() <= 0 || row > (int) f.vals[0].size())
        THROW(dhdferr_conv);

    BaseType *firstp = *stru->var_begin();
    if (firstp->type() == dods_str_c) {
        // If the Structure contains a String, then that is all it will
        // contain. In that case, concatenate the different char8
        // components of the field and load the DODS String with the value.
        string str = "";
        for (unsigned int i = 0; i < f.vals.size(); ++i) {
            //DBG(cerr << i << ": " << f.vals[i].elt_char8(row) << endl);
            str += f.vals[i].elt_char8(row);
        }

        firstp->val2buf(static_cast < void *>(&str)); // data);
        firstp->set_read_p(true);
    } else {
        // for each component of the field, load the corresponding component
        // of the DODS Structure.
        int i = 0;
        Constructor::Vars_iter q;
        for (q = stru->var_begin(); q != stru->var_end(); ++q, ++i) {
            char *val = static_cast <char *>(ExportDataForDODS(f.vals[i], row));
            (*q)->val2buf(val);
#if 0
            delete[] val;
#endif
            // NOTE(review): scalar delete on a buffer from
            // ExportDataForDODS(); the disabled delete[] above suggests the
            // allocation form changed at some point -- verify which form
            // ExportDataForDODS(vec, row) actually uses.
            delete val;
            (*q)->set_read_p(true);
        }

    }
    return;
}
587// Load an HDFStructure with the contents of a vgroup.
588void LoadStructureFromVgroup(HDFStructure * str, const hdf_vgroup & vg,
589 const string & /*hdf_file //unused SBL 2/7/20 */)
590{
591 int i = 0;
592 int err = 0;
593 Constructor::Vars_iter q;
594 for (q = str->var_begin(); err == 0 && q != str->var_end(); ++q, ++i) {
595 BaseType *p = *q;
596 BESDEBUG("h4", "Reading within LoadStructureFromVgroup: " << p->name()
597 << ", send_p: " << p->send_p() << ", vg.names[" << i << "]: "
598 << vg.vnames[i] << endl);
599 if (p && p->send_p() && p->name() == vg.vnames[i]) {
600 (dynamic_cast < ReadTagRef & >(*p)).read_tagref(vg.tags[i],
601 vg.refs[i],
602 err);
603 }
604 }
605}