git_sitools_idoc / common / table_creation / edit_FITS_table.py @ master

#!/usr/bin/python

# ******************************************************************************
#    Copyright 2015 IAS - IDOC
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# ******************************************************************************
'''
This script updates the content of a FITS table, adding new columns
and/or rows (i.e. objects) to it, taking a user-defined ASCII table as input.

The new columns (rows) defined in the ASCII file are appended at the end
(bottom) of the FITS table.

IMPORTANT: The 1st line of the ASCII table must contain the names
of the columns, and must be UNCOMMENTED!

NOTE: RA and DEC must be in **decimal degrees**, both in the FITS and
ASCII tables.

The syntax is:

$ python edit_FITS_table.py <table>.fits <ascii_file>

@author: Alessandro NASTASI for IAS - IDOC
@date: 21/05/2015
'''
__author__ = "Alessandro Nastasi"
__credits__ = ["Alessandro Nastasi"]
__license__ = "GPL"
__version__ = "1.0"
__date__ = "21/05/2015"
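
#A minimal, hypothetical example of the expected ASCII input (default ',' delimiter;
#1st line uncommented, holding the column names; RA/DEC in decimal degrees;
#names and values below are invented for illustration only):
#
#  NAME,RA,DEC,REDSHIFT
#  SPT-CL J0000-0000,10.2040,-45.1330,0.35
#  ACT-CL J0102-4915,15.7340,-49.2720,0.87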

import numpy as np
import os, sys, re, time
import string
import asciidata
import pyfits
from datetime import date

#Use the provided astCoords.py file rather than the default module of astLib,
#since the calcAngSepDeg() of the latter works only for separations < 90 deg
#(tangent plane projection approximation)
import astCoords
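#(As done later in this script, multiplying by 3600. converts its output from
# degrees to arcsec: 3600. * astCoords.calcAngSepDeg(ra1, dec1, ra2, dec2).)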

class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'

#Dictionary containing the FORMAT and UNITS of all (or most of) the fields
_FIELDS_DICTIONARY = {
  'INDEX': { 'format': 'I', 'unit': 'None' },
  'COORD_SOURCE': { 'format': '5A', 'unit': 'None' },
  'x':{ 'format': 'E', 'unit': 'None' },
  'y':{ 'format': 'E', 'unit': 'None' },
  'z':{ 'format': 'E', 'unit': 'None' },

  # ****** ACT ******
  'ACT_INDEX': { 'format': 'I', 'unit': 'None' },
  'INDEX_ACT': { 'format': 'I', 'unit': 'None' },
  'CATALOG': { 'format': '7A', 'unit': 'None' },
  #'NAME': { 'format': '18A', 'unit': 'None' },
  #'GLON': { 'format': 'E', 'unit': 'degrees' },
  #'GLAT': { 'format': 'E', 'unit': 'degrees' },
  #'RA': { 'format': 'E', 'unit': 'degrees' },
  #'DEC': { 'format': 'E', 'unit': 'degrees' },
  'SNR': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'ERR_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'M500': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'ERR_M500': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'ERR_YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'THETA': { 'format': 'E', 'unit': 'arcmin' },
  #'PAPER': { 'format': '56A', 'unit': 'None' },      # Use PAPER in SPT as bigger format '59A'

  'ACT_CATALOG': { 'format': '7A', 'unit': 'None' },
  'ACT_NAME': { 'format': '18A', 'unit': 'None' },
  'ACT_GLON': { 'format': 'E', 'unit': 'degrees' },
  'ACT_GLAT': { 'format': 'E', 'unit': 'degrees' },
  'ACT_RA': { 'format': 'E', 'unit': 'degrees' },
  'ACT_DEC': { 'format': 'E', 'unit': 'degrees' },
  'ACT_SNR': { 'format': 'E', 'unit': 'None' },
  'ACT_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'ACT_ERR_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'ACT_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'ACT_REDSHIFT_REF': { 'format': '19A', 'unit': 'None' },
  'ACT_M500': { 'format': 'E', 'unit': '10^14 h^-1 solar mass' },
  'ACT_ERR_M500': { 'format': 'E', 'unit': '10^14 h^-1 solar mass' },
  'ACT_YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'ACT_ERR_YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'ACT_THETA': { 'format': 'E', 'unit': 'arcmin' },
  'ACT_PAPER': { 'format': '56A', 'unit': 'None' },

  # ****** AMI ******
  'INDEX_AMI': { 'format': 'I', 'unit': 'None' },
  'AMI_INDEX': { 'format': 'I', 'unit': 'None' },
  #'NAME': { 'format': '18A', 'unit': 'None' },
  #'RA': { 'format': 'E', 'unit': 'Degrees' },
  #'DEC': { 'format': 'E', 'unit': 'Degrees' },
  #'GLON': { 'format': 'E', 'unit': 'Degrees' },
  #'GLAT': { 'format': 'E', 'unit': 'Degrees' },
  #'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  #'REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  #'ALT_NAME': { 'format': '60A', 'unit': 'None' },
  #'COORD_SOURCE': { 'format': '5A', 'unit': 'None' },

  'AMI_NAME': { 'format': '18A', 'unit': 'None' },
  'AMI_RA': { 'format': 'E', 'unit': 'Degrees' },
  'AMI_DEC': { 'format': 'E', 'unit': 'Degrees' },
  'AMI_GLON': { 'format': 'E', 'unit': 'Degrees' },
  'AMI_GLAT': { 'format': 'E', 'unit': 'Degrees' },
  'AMI_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'AMI_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'AMI_REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  'AMI_ALT_NAME': { 'format': '60A', 'unit': 'None' },

  # ****** CARMA ******
  'INDEX_CARMA': { 'format': 'I', 'unit': 'None' },
  'CARMA_INDEX': { 'format': 'I', 'unit': 'None' },
  #'NAME': { 'format': '18A', 'unit': 'None' },
  #'RA': { 'format': 'E', 'unit': 'Degrees' },
  #'DEC': { 'format': 'E', 'unit': 'Degrees' },
  #'GLON': { 'format': 'E', 'unit': 'Degrees' },
  #'GLAT': { 'format': 'E', 'unit': 'Degrees' },
  #'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  #'REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  #'COORD_SOURCE': { 'format': '5A', 'unit': 'None' },

  'CARMA_NAME': { 'format': '18A', 'unit': 'None' },
  'CARMA_RA': { 'format': 'E', 'unit': 'Degrees' },
  'CARMA_DEC': { 'format': 'E', 'unit': 'Degrees' },
  'CARMA_GLON': { 'format': 'E', 'unit': 'Degrees' },
  'CARMA_GLAT': { 'format': 'E', 'unit': 'Degrees' },
  'CARMA_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'CARMA_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'CARMA_REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  'CARMA_M500': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'CARMA_ERR_M500': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },

  #****** PSZ1 ******
  'PSZ1_INDEX': { 'format': 'I', 'unit': 'None' },
  'INDEX_PSZ1': { 'format': 'I', 'unit': 'None' },
  'NAME': { 'format': '18A', 'unit': 'None' },
  'GLON': { 'format': 'D', 'unit': 'degrees' },
  'GLAT': { 'format': 'D', 'unit': 'degrees' },
  'RA': { 'format': 'D', 'unit': 'degrees' },
  'DEC': { 'format': 'D', 'unit': 'degrees' },
  'RA_MCXC': { 'format': 'E', 'unit': 'degrees' },
  'DEC_MCXC': { 'format': 'E', 'unit': 'degrees' },
  'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'REDSHIFT_SOURCE': { 'format': 'I', 'unit': 'None' },
  'REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  'ALT_NAME': { 'format': '66A', 'unit': 'None' },
  'YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'ERRP_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'ERRM_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'ERRP_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'ERRM_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  'ERR_S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  'Y_PSX_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'SN_PSX': { 'format': 'E', 'unit': 'None' },
  'PIPELINE': { 'format': 'I', 'unit': 'None' },
  'PIPE_DET': { 'format': 'I', 'unit': 'None' },
  'PCCS': { 'format': 'L', 'unit': 'None' },
  'VALIDATION': { 'format': 'I', 'unit': 'None' },
  'ID_EXT': { 'format': '25A', 'unit': 'None' },
  'POS_ERR': { 'format': 'E', 'unit': 'arcmin' },
  #'SNR': { 'format': 'E', 'unit': 'None' },
  'COSMO': { 'format': 'L', 'unit': 'None' },
  'COMMENT': { 'format': 'L', 'unit': 'None' },
  'QN': { 'format': 'E', 'unit': 'None' },

  'PSZ1_NAME': { 'format': '18A', 'unit': 'None' },
  'PSZ1_GLON': { 'format': 'D', 'unit': 'degrees' },
  'PSZ1_GLAT': { 'format': 'D', 'unit': 'degrees' },
  'PSZ1_RA': { 'format': 'D', 'unit': 'degrees' },
  'PSZ1_DEC': { 'format': 'D', 'unit': 'degrees' },
  'PSZ1_RA_MCXC': { 'format': 'E', 'unit': 'degrees' },
  'PSZ1_DEC_MCXC': { 'format': 'E', 'unit': 'degrees' },
  'PSZ1_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'PSZ1_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'PSZ1_REDSHIFT_SOURCE': { 'format': 'I', 'unit': 'None' },
  'PSZ1_REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  'PSZ1_ALT_NAME': { 'format': '66A', 'unit': 'None' },
  'PSZ1_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PSZ1_ERRP_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PSZ1_ERRM_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PSZ1_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'PSZ1_ERRP_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'PSZ1_ERRM_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'PSZ1_S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  'PSZ1_ERR_S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  'PSZ1_Y_PSX_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PSZ1_SN_PSX': { 'format': 'E', 'unit': 'None' },
  'PSZ1_PIPELINE': { 'format': 'I', 'unit': 'None' },
  'PSZ1_PIPE_DET': { 'format': 'I', 'unit': 'None' },
  'PSZ1_PCCS': { 'format': 'L', 'unit': 'None' },
  'PSZ1_VALIDATION': { 'format': 'I', 'unit': 'None' },
  'PSZ1_ID_EXT': { 'format': '25A', 'unit': 'None' },
  'PSZ1_POS_ERR': { 'format': 'E', 'unit': 'arcmin' },
  'PSZ1_SNR': { 'format': 'E', 'unit': 'None' },
  'PSZ1_COSMO': { 'format': 'L', 'unit': 'None' },
  'PSZ1_COMMENT': { 'format': 'L', 'unit': 'None' },
  'PSZ1_QN': { 'format': 'E', 'unit': 'None' },

  # ****** PSZ2 ******
  'PSZ2_INDEX': { 'format': 'I', 'unit': 'None' },
  'INDEX_PSZ2': { 'format': 'I', 'unit': 'None' },
  #'NAME': { 'format': '18A', 'unit': 'None' },
  #'GLON': { 'format': 'D', 'unit': 'degrees' },
  #'GLAT': { 'format': 'D', 'unit': 'degrees' },
  #'RA': { 'format': 'D', 'unit': 'degrees' },
  #'DEC': { 'format': 'D', 'unit': 'degrees' },
  #'POS_ERR': { 'format': 'E', 'unit': 'arcmin' },
  #'SNR': { 'format': 'E', 'unit': 'None' },
  #'PIPELINE': { 'format': 'I', 'unit': 'None' },
  #'PIPE_DET': { 'format': 'I', 'unit': 'None' },
  'PCCS2': { 'format': 'L', 'unit': 'None' },
  'PSZ': { 'format': 'I', 'unit': 'None' },
  'IR_FLAG': { 'format': 'I', 'unit': 'None' },
  'Q_NEURAL': { 'format': 'E', 'unit': 'None' },
  'Y5R500': { 'format': 'E', 'unit': '10^-3 arcmin^2' },
  'Y5R500_ERR': { 'format': 'E', 'unit': '10^-3 arcmin^2' },
  'PSZ2_VALIDATION': { 'format': 'I', 'unit': 'None' },
  'REDSHIFT_ID': { 'format': '25A', 'unit': 'None' },
  #'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'MSZ': { 'format': 'E', 'unit': '10^14 Msol' },
  'MSZ_ERR_UP': { 'format': 'E', 'unit': '10^14 Msol' },
  'MSZ_ERR_LOW': { 'format': 'E', 'unit': '10^14 Msol' },
  'MCXC': { 'format': '25A', 'unit': 'None' },
  'REDMAPPER': { 'format': '25A', 'unit': 'None' },
  'ACT': { 'format': '25A', 'unit': 'None' },
  'SPT': { 'format': '25A', 'unit': 'None' },
  'WISE_SIGNF': { 'format': 'E', 'unit': 'None' },
  'WISE_FLAG': { 'format': 'I', 'unit': 'None' },
  'AMI_EVIDENCE': { 'format': 'E', 'unit': 'None' },
  #'COSMO': { 'format': 'L', 'unit': 'None' },
  'PSZ2_COMMENT': { 'format': '128A', 'unit': 'None' },

  'PSZ2_NAME': { 'format': '18A', 'unit': 'None' },
  'PSZ2_GLON': { 'format': 'D', 'unit': 'degrees' },
  'PSZ2_GLAT': { 'format': 'D', 'unit': 'degrees' },
  'PSZ2_RA': { 'format': 'D', 'unit': 'degrees' },
  'PSZ2_DEC': { 'format': 'D', 'unit': 'degrees' },
  'PSZ2_POS_ERR': { 'format': 'E', 'unit': 'arcmin' },
  'PSZ2_SNR': { 'format': 'E', 'unit': 'None' },
  'PSZ2_PIPELINE': { 'format': 'I', 'unit': 'None' },
  'PSZ2_PIPE_DET': { 'format': 'I', 'unit': 'None' },
  'PSZ2_PCCS2': { 'format': 'L', 'unit': 'None' },
  'PSZ2_PSZ': { 'format': 'I', 'unit': 'None' },
  'PSZ2_IR_FLAG': { 'format': 'I', 'unit': 'None' },
  'PSZ2_Q_NEURAL': { 'format': 'E', 'unit': 'None' },
  'PSZ2_Y5R500': { 'format': 'E', 'unit': '10^-3 arcmin^2' },
  'PSZ2_Y5R500_ERR': { 'format': 'E', 'unit': '10^-3 arcmin^2' },
  #'PSZ2_VALIDATION': { 'format': 'I', 'unit': 'None' },
  'PSZ2_REDSHIFT_ID': { 'format': '25A', 'unit': 'None' },
  'PSZ2_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'PSZ2_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'PSZ2_MSZ': { 'format': 'E', 'unit': '10^14 Msol' },
  'PSZ2_MSZ_ERR_UP': { 'format': 'E', 'unit': '10^14 Msol' },
  'PSZ2_MSZ_ERR_LOW': { 'format': 'E', 'unit': '10^14 Msol' },
  'PSZ2_MCXC': { 'format': '25A', 'unit': 'None' },
  'PSZ2_REDMAPPER': { 'format': '25A', 'unit': 'None' },
  'PSZ2_ACT': { 'format': '25A', 'unit': 'None' },
  'PSZ2_SPT': { 'format': '25A', 'unit': 'None' },
  'PSZ2_WISE_SIGNF': { 'format': 'E', 'unit': 'None' },
  'PSZ2_WISE_FLAG': { 'format': 'I', 'unit': 'None' },
  'PSZ2_AMI_EVIDENCE': { 'format': 'E', 'unit': 'None' },
  'PSZ2_COSMO': { 'format': 'L', 'unit': 'None' },
  #'PSZ2_COMMENT': { 'format': '128A', 'unit': 'None' },

  # ****** PLCK ******
  'PLCK_INDEX': { 'format': 'I', 'unit': 'None' },
  'INDEX_PLCK': { 'format': 'I', 'unit': 'None' },

  #'NAME': { 'format': '18A', 'unit': 'None' },
  #'GLON': { 'format': 'D', 'unit': 'degrees' },
  #'GLAT': { 'format': 'D', 'unit': 'degrees' },
  #'RA': { 'format': 'D', 'unit': 'degrees' },
  #'DEC': { 'format': 'D', 'unit': 'degrees' },
  #'RA_MCXC': { 'format': 'E', 'unit': 'degrees' },
  #'DEC_MCXC': { 'format': 'E', 'unit': 'degrees' },
  #'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  #'REDSHIFT_SOURCE': { 'format': 'I', 'unit': 'None' },
  #'REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  #'ALT_NAME': { 'format': '66A', 'unit': 'None' },
  #'YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  #'ERRP_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  #'ERRM_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  #'M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  #'ERRP_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  #'ERRM_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  #'S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  #'ERR_S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  #'Y_PSX_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  #'SN_PSX': { 'format': 'E', 'unit': 'None' },
  #'PIPELINE': { 'format': 'I', 'unit': 'None' },
  #'PIPE_DET': { 'format': 'I', 'unit': 'None' },
  #'PCCS': { 'format': 'L', 'unit': 'None' },
  #'VALIDATION': { 'format': 'I', 'unit': 'None' },
  #'ID_EXT': { 'format': '25A', 'unit': 'None' },
  #'POS_ERR': { 'format': 'E', 'unit': 'arcmin' },
  #'SNR': { 'format': 'E', 'unit': 'None' },
  #'COSMO': { 'format': 'L', 'unit': 'None' },
  #'COMMENT': { 'format': 'L', 'unit': 'None' },
  #'QN': { 'format': 'E', 'unit': 'None' },

  'PLCK_NAME': { 'format': '18A', 'unit': 'None' },
  'PLCK_GLON': { 'format': 'D', 'unit': 'degrees' },
  'PLCK_GLAT': { 'format': 'D', 'unit': 'degrees' },
  'PLCK_RA': { 'format': 'D', 'unit': 'degrees' },
  'PLCK_DEC': { 'format': 'D', 'unit': 'degrees' },
  'PLCK_RA_MCXC': { 'format': 'E', 'unit': 'degrees' },
  'PLCK_DEC_MCXC': { 'format': 'E', 'unit': 'degrees' },
  'PLCK_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'PLCK_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'PLCK_REDSHIFT_SOURCE': { 'format': 'I', 'unit': 'None' },
  'PLCK_REDSHIFT_REF': { 'format': '36A', 'unit': 'None' },
  'PLCK_ALT_NAME': { 'format': '66A', 'unit': 'None' },
  'PLCK_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PLCK_ERRP_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PLCK_ERRM_YZ_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PLCK_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'PLCK_ERRP_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'PLCK_ERRM_M_YZ_500': { 'format': 'E', 'unit': '10^14 solar mass' },
  'PLCK_S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  'PLCK_ERR_S_X': { 'format': 'E', 'unit': 'erg/s/cm2' },
  'PLCK_Y_PSX_500': { 'format': 'E', 'unit': '10^-4 arcmin squared' },
  'PLCK_SN_PSX': { 'format': 'E', 'unit': 'None' },
  'PLCK_PIPELINE': { 'format': 'I', 'unit': 'None' },
  'PLCK_PIPE_DET': { 'format': 'I', 'unit': 'None' },
  'PLCK_PCCS': { 'format': 'L', 'unit': 'None' },
  'PLCK_VALIDATION': { 'format': 'I', 'unit': 'None' },
  'PLCK_ID_EXT': { 'format': '25A', 'unit': 'None' },
  'PLCK_POS_ERR': { 'format': 'E', 'unit': 'arcmin' },
  'PLCK_SNR': { 'format': 'E', 'unit': 'None' },
  'PLCK_COSMO': { 'format': 'L', 'unit': 'None' },
  'PLCK_COMMENT': { 'format': 'L', 'unit': 'None' },
  'PLCK_QN': { 'format': 'E', 'unit': 'None' },

  # ****** SPT ******
  'SPT_INDEX': { 'format': 'I', 'unit': 'None' },
  'INDEX_SPT': { 'format': 'I', 'unit': 'None' },
  #'CATALOG': { 'format': '7A', 'unit': 'None' },
  #'NAME': { 'format': '16A', 'unit': 'None' },
  #'GLON': { 'format': 'E', 'unit': 'degrees' },
  #'GLAT': { 'format': 'E', 'unit': 'degrees' },
  #'RA': { 'format': 'E', 'unit': 'degrees' },
  #'DEC': { 'format': 'E', 'unit': 'degrees' },
  #'SNR': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT': { 'format': 'E', 'unit': 'None' },
  #'ERR_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  #'REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'REDSHIFT_LIMIT': { 'format': 'E', 'unit': 'None' },

  'M500_fidCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'ERR_M500_fidCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'M500_PlanckCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'ERR_M500_PlanckCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'ERR_YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },

  'LX': { 'format': 'E', 'unit': '10^44 erg/s' },
  #'THETA': { 'format': 'E', 'unit': 'arcmin' },
  'PAPER': { 'format': '59A', 'unit': 'None' },
  'XRAY': { 'format': 'L', 'unit': 'None' },
  'STRONG_LENS': { 'format': 'L', 'unit': 'None' },

  'SPT_CATALOG': { 'format': '7A', 'unit': 'None' },
  'SPT_NAME': { 'format': '16A', 'unit': 'None' },
  'SPT_GLON': { 'format': 'E', 'unit': 'degrees' },
  'SPT_GLAT': { 'format': 'E', 'unit': 'degrees' },
  'SPT_RA': { 'format': 'E', 'unit': 'degrees' },
  'SPT_DEC': { 'format': 'E', 'unit': 'degrees' },
  'SPT_SNR': { 'format': 'E', 'unit': 'None' },
  'SPT_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'SPT_ERR_REDSHIFT': { 'format': 'E', 'unit': 'None' },
  'SPT_REDSHIFT_TYPE': { 'format': '5A', 'unit': 'None' },
  'SPT_REDSHIFT_REF': { 'format': '19A', 'unit': 'None' },

  'SPT_REDSHIFT_LIMIT': { 'format': 'E', 'unit': 'None' },
  'SPT_XRAY': { 'format': 'L', 'unit': 'None' },
  'SPT_STRONG_LENS': { 'format': 'L', 'unit': 'None' },

  'SPT_M500_fidCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'SPT_ERR_M500_fidCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'SPT_M500_PlanckCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },
  'SPT_ERR_M500_PlanckCosmo': { 'format': 'E', 'unit': '10^14 h70^-1 solar mass' },

  'SPT_LX': { 'format': 'E', 'unit': '10^44 erg/s' },
  'SPT_YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'SPT_ERR_YSZ': { 'format': 'E', 'unit': '10^-6 arcmin squared' },
  'SPT_THETA': { 'format': 'E', 'unit': 'arcmin' },
  'SPT_PAPER': { 'format': '59A', 'unit': 'None' }

  }

#Names of the fields (in FITS/ASCII) sometimes referenced individually in the script

#For Mass and Err_Mass, more than one label can be defined, as elements of these arrays
name_mass_key = ['M500']
name_errMass_key = ['ERR_M500']

name_ra_key = 'RA'
name_dec_key = 'DEC'
name_coordinates_keys = ['RA_MCXC', 'DEC_MCXC', name_ra_key, name_dec_key]

name_Name_key = 'NAME'
name_index_key = 'INDEX'
name_catalog_key = 'CATALOG'
name_redshift_key = 'REDSHIFT'
name_zLimit_key = 'REDSHIFT_LIMIT'
name_zErr_key = 'ERR_REDSHIFT'
name_zType_key = 'REDSHIFT_TYPE'
name_zRef_key = 'REDSHIFT_REF'
name_altName_key = 'ALT_NAME'
name_paper_key = 'PAPER'

#Undefined values for the various kinds of fields
_UNDEF_VALUES_ = {
  'FLOAT' : {np.nan},
  'INT' : {-1},
  'STRING' : {'NULL'},
  name_zType_key : {'undef'},
  'PIPELINE' : {0},
  'PIPE_DET' : {0}
  }

def remove_duplicated_names(string):
  '''
  This function removes duplicated names in a string, assuming they are separated by ';'.
  In addition, it strips 'NULL', 'NaN', 'False' (and similar placeholders) from the final, composite string.
  It is used for the creation of the ALT_NAME field.
  '''
  string = string.replace('; ',';')
  tmp = [item for item in string.split(';') if item.upper() not in ["-", "NULL", "NAN", "NONE", "FALSE"] and len(item)>0 ]
  # *** These lines preserve the original order of the names ***
  tmp_uniq = []
  set_tmp = set()
  for item in tmp:
    if item not in set_tmp:
      tmp_uniq.append(item)
      set_tmp.add(item)
  # ******************************************************************

  if len(tmp)==0: new_string = '-'
  else: new_string =  "; ".join(tmp_uniq)
  return new_string
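
#e.g. remove_duplicated_names('A2163; NULL; A2163; RXC J1234.5-0000')
#returns 'A2163; RXC J1234.5-0000' (order preserved, placeholders dropped;
#the names here are invented for illustration).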

def set_undef_values(fits_data):
  '''
  Set the proper 'undef' values according to the format/name of the field
  '''
  print "\n\t>> Checking/setting undefined values for the different fields ..."
  for i, name in enumerate(fits_data.names):
    sys.stdout.write('\t%i/%i > %s (format %s) : Done                                        \r' % (i+1, len(fits_data.names), name, fits_data.formats[i]))
    sys.stdout.flush()
    for j in range(fits_data.size):
      if name == name_index_key:
        #Normalize undefined INDEX values to -1
        if fits_data[name][j] <= 0: fits_data[j][i] = -1
      elif name == name_redshift_key and fits_data[name][j] == -1.0:
        fits_data[j][i] = np.nan
      elif name.find(name_zType_key) >= 0 and str(fits_data[name][j]) == 'Null':
        fits_data[j][i] = "undef"
      elif fits_data.formats[i] in 'EDJ':
        if str(fits_data[j][i]) in ['-1.6375e+30','-1.63750e+30', '-1.6375E+30', '-1.63750E+30', 'None', 'NULL']:
          fits_data[j][i] = np.nan
      elif fits_data.formats[i].find('A') >= 0:
        fits_data[j][i] = remove_duplicated_names(fits_data[j][i])
        if str(fits_data[j][i]).upper() in ["", "0.0", "NULL", "NAN", "NONE", "FALSE"]  or str(fits_data[j][i]) == 'False':
          fits_data[j][i] = "-"
      elif name in ['PIPELINE','PIPE_DET']:
        if fits_data[j][i] <= 0: fits_data[j][i] = 0
  print '\n'
  return fits_data
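
#Meant to be applied to the whole table data, e.g.:
#fits_data = set_undef_values(fits_data)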

def recreate_reformatted_column(hdulist, field_name, new_format, new_vector):
  '''
  Update the length (format) of a 'STRING' (format = 'xA') FIELD.
  The only way, though, is to re-create the column with the new format.
  It is used during the creation of NAME, ALT_NAME or REDSHIFT_REF.
  '''
  name_vec = []
  format_vec = []
  unit_vec = []

  fits_keywds = hdulist.data.names
  coldefs = pyfits.ColDefs(hdulist.columns)

  #Store attributes of the keywords after FIELD
  for j in range(fits_keywds.index(field_name)+1, len(fits_keywds)):
    name_vec.append(coldefs.names[j])
    format_vec.append(coldefs.formats[j])
    unit_vec.append(coldefs.units[j])

  #Delete the keywords after FIELD
  tmp = 0
  for j in range(fits_keywds.index(field_name)+1, len(fits_keywds)):
    coldefs.del_col(name_vec[tmp])
    tmp+=1

  #Delete FIELD
  coldefs.del_col(field_name)

  #Re-create FIELD with the new format
  col_tmp = pyfits.Column(name = field_name, format = new_format, unit = 'None', array = new_vector)
  coldefs.add_col(col_tmp)
  hdulist.columns = coldefs

  #Re-create all the keywords after FIELD, with their attributes
  tmp = 0
  data_vec_tmp = []
  for j in range(fits_keywds.index(field_name)+1, len(fits_keywds)):
    data_vec_tmp = hdulist.data[name_vec[tmp]]
    col_tmp = pyfits.Column(name = name_vec[tmp], format = format_vec[tmp], unit = unit_vec[tmp], array = data_vec_tmp)
    coldefs.add_col(col_tmp)
    tmp +=1
    data_vec_tmp = []

  hdulist = pyfits.new_table(coldefs)
  return hdulist
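
#Illustrative call (hypothetical sizes): widen NAME to 25 characters while
#keeping its current values:
#hdulist = recreate_reformatted_column(hdulist, 'NAME', '25A', hdulist.data['NAME'])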

'''
  *** >> START << ***
'''

if (len(sys.argv) > 2):
    fits_file = sys.argv[1]
    ascii_file = sys.argv[2]
else:
    print bcolors.WARNING +  "\n\tSyntax:\t$ python edit_FITS_table.py <fits_file> <ascii_file>\n" + bcolors.ENDC
    os._exit(0)

#Open the output file
file_report_name = 'summary_updates.tab'
file_report = open(file_report_name, 'w')

question = bcolors.OKBLUE+ "[Q]" + bcolors.ENDC
info = bcolors.WARNING+ "[I]" + bcolors.ENDC
error = bcolors.FAIL+ "[ERR]" + bcolors.ENDC

#The user can define the column delimiter of the ASCII table.
delim=raw_input("\n%s Please enter the column delimiter of the ASCII table (default is ','):\t" % question)
if not delim:
# Read the ascii table, with the structure: ascii_table[COLUMNS][ROWS]
  ascii_table=asciidata.open(ascii_file, 'r', delimiter=',')
else:
  ascii_table=asciidata.open(ascii_file, 'r', delimiter=delim)

Ncol_ascii = ascii_table.ncols
Nrows_ascii = (ascii_table.nrows) - 1 #1st row excluded because it is the header

print "\n\t\t **** ASCII table details ****"
print "\t\t Number of columns: %s" % (Ncol_ascii)
print "\t\t Number of rows: %s" % (Nrows_ascii)
print "\t\t **** **** **** **** **** ****"

ascii_keywds=[]
keys_form_unit = {}

for i in range(ascii_table.ncols):
  tmpKey = str(ascii_table[i][0]).strip()
  ascii_keywds.append(tmpKey)
  if tmpKey in _FIELDS_DICTIONARY:
    keys_form_unit[tmpKey] = {}
    keys_form_unit[tmpKey]['TFORM'] = _FIELDS_DICTIONARY[tmpKey]['format']
    keys_form_unit[tmpKey]['TUNIT'] = _FIELDS_DICTIONARY[tmpKey]['unit']
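
#e.g. an ASCII column named 'SNR' yields
#keys_form_unit['SNR'] = {'TFORM': 'E', 'TUNIT': 'None'}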

#Read the FITS table
hdulist = pyfits.open(fits_file)
fits_header = hdulist[1].header        # HEADER
fits_data = hdulist[1].data            # DATA

#Number of columns in FITS table
Ncol_fits = int(fits_header['TFIELDS'])

#Number of rows in FITS table
Nrows_fits = fits_header['NAXIS2']

print "\n\t\t **** FITS table details ****"
print "\t\t Number of columns: %s" % (Ncol_fits)
print "\t\t Number of rows: %s" % (Nrows_fits)
print "\t\t **** *** *** *** *** *** ***"

#FITS keywords read from the header
fits_keywds=[]
original_fits_keywds = []

for i in range(Ncol_fits):
  original_fits_keywds.append(fits_data.names[i])
  fits_keywds.append(fits_data.names[i])

#Find the keywords written in the ASCII file and the corresponding columns in the FITS table...
common_keywds=[]
commonKeywds_index=[]
keywds_to_update=[]
for j in range(Ncol_fits):
  if fits_keywds[j] in ascii_keywds:
    common_keywds.append(fits_keywds[j])
    commonKeywds_index.append(j+1)

    #Only the fields with new values will be updated. Also NAME, RA and DEC are allowed to change
    keywds_to_update.append(fits_keywds[j])

print "\n\t%s The following keyword(s) will be updated in the FITS table: " % info , keywds_to_update

#...also selecting the NEW keywords defined in the ASCII file...
keywds_to_add=[item for item in ascii_keywds if item not in fits_keywds]

print "\n\t%s The following new keyword(s) will be added to the FITS table: " % info , keywds_to_add

#To associate TFORM and TUNIT with each field, first look into _FIELDS_DICTIONARY.
#If nothing is found there, ask the user to enter them manually.
for i in range(len(keywds_to_add)):
  if keywds_to_add[i] not in _FIELDS_DICTIONARY:
    keys_form_unit[keywds_to_add[i]] = {}
    message = "\n%s Please enter the format (\'TFORM\') of the new field \"%s\" (e.g.: 5A, E, L, ...): " % (question, keywds_to_add[i])
    keys_form_unit[keywds_to_add[i]]['TFORM'] = raw_input(message)
    message = "\n%s Please enter the unit (\'TUNIT\') of the new field \"%s\" (e.g.: None, arcmin, ...): " % (question, keywds_to_add[i])
    keys_form_unit[keywds_to_add[i]]['TUNIT'] = raw_input(message)
  else:
    keys_form_unit[keywds_to_add[i]] = {}
    keys_form_unit[keywds_to_add[i]]['TFORM'] = _FIELDS_DICTIONARY[keywds_to_add[i]]['format']
    keys_form_unit[keywds_to_add[i]]['TUNIT'] = _FIELDS_DICTIONARY[keywds_to_add[i]]['unit']

  # ...to be appended into the 'fits_keywds' array
  fits_keywds.append(keywds_to_add[i])

'''
  *** Add the NEW COLUMNS to FITS table ***
'''

#Initialize new columns
a_tmp = []

coldefs = pyfits.ColDefs(hdulist[1].columns)
columns = []

for keys in keywds_to_add:
  if keys_form_unit[keys]['TFORM'] == 'E' or keys_form_unit[keys]['TFORM'] == 'D':
    a_tmp = [-1.6375E+30] * Nrows_fits # Initialize floats with the FITS undefined value
  elif keys_form_unit[keys]['TFORM'] == 'I':
    a_tmp = [-1] * Nrows_fits # Initialize integers with -1
  elif keys_form_unit[keys]['TFORM'] == 'L':
    a_tmp = [False] * Nrows_fits # Initialize logicals with False
  elif keys_form_unit[keys]['TFORM'].find('A') >= 0:
    a_tmp = ['Null'] * Nrows_fits # Initialize strings with 'Null'

  while True:
    #Check consistency between field format and values.
    try:
      col_tmp = pyfits.Column(name=keys, format=keys_form_unit[keys]['TFORM'], unit=keys_form_unit[keys]['TUNIT'], array=a_tmp)
      columns.append(col_tmp)
      break
    except ValueError:
      print bcolors.FAIL+ "\n\t\t*** FORMAT INCONSISTENT WITH DATA ***" + bcolors.ENDC
      keys_form_unit[keys]['TFORM'] = raw_input("\n%s Please, enter again the format (\'TFORM\') of the new field \"%s\": " % (question, keys))

'''
  *** 1st data UPDATE: new fields added as new columns ***
'''

#For PyFITS > 2.3 (note: new_table() is deprecated in recent PyFITS/astropy)
for i in columns: coldefs.add_col(i)
hdulist = pyfits.new_table(coldefs)

#Newer PyFITS/astropy alternative:
#hdulist = pyfits.BinTableHDU.from_columns(coldefs)

fits_data = hdulist.data

'''
  *** Object identification via POSITION matching, NAME or INDEX ***
'''
match_option = False
match_radius = 300.0 # default = 300 arcsec = 5 arcmin

name_index_fits = ''
name_index_ascii = ''

print '\n%s Which method do you want to use for the object matching: by POSITION (1), by NAME (2) or by INDEX (3)?' % question
while match_option == False:
  message = "\n\t-> Please enter 1, 2 or 3:   "
  method = raw_input(message)
  if method == '1':
    #Check if RA & DEC are actually in the FITS and ASCII tables
    if name_ra_key not in fits_data.names or name_dec_key not in fits_data.names or name_ra_key not in ascii_keywds or name_dec_key not in ascii_keywds:
      print bcolors.FAIL+ "\n\t>> NO %s and %s found in FITS and ASCII tables: POSITION matching not possible <<" % (name_ra_key, name_dec_key) + bcolors.ENDC
    else:
      match_option = method
      match_radius = float(raw_input('\n\t%s Please enter the match radius (in arcsec): ' % question))
  elif method == '2' : match_option = method
  elif method == '3' :
    check_name_index_fits = False
    while check_name_index_fits == False:
      name_index_fits = raw_input('\n\t-> Please enter the column name of the INDEX in the FITS file: ')
      if name_index_fits not in fits_keywds:
        print bcolors.FAIL+ "\n\t*** '%s' NOT in FITS Keywords ***" % name_index_fits+ bcolors.ENDC
      else:
        check_name_index_fits = True
        index_fits = np.array( fits_data[name_index_fits] )

    check_name_index_ascii = False
    while check_name_index_ascii == False:
      name_index_ascii = raw_input('\n\t-> Please enter the column name of the INDEX in the ASCII file: ')
      if name_index_ascii not in ascii_keywds:
        print bcolors.FAIL+ "\n\t*** '%s' NOT in ASCII Keywords ***" % name_index_ascii+ bcolors.ENDC
      else:
        check_name_index_ascii = True
        index_ascii = [ (ascii_table[k][j]) for k in range(ascii_table.ncols) if ascii_table[k][0] == name_index_ascii for j in range(1,ascii_table.nrows) ]

    match_option = method

  else: print bcolors.FAIL+ "\n\t*** Wrong option ***"+ bcolors.ENDC

name_fits = np.array(fits_data[name_Name_key])
ra_fits = np.array(fits_data[ name_ra_key ])
dec_fits = np.array(fits_data[ name_dec_key ])

name_ascii = []
ra_ascii = []
dec_ascii = []

for k in range(ascii_table.ncols):
  if ascii_keywds[k]==name_Name_key:
    for j in range(ascii_table.nrows -1): name_ascii.append((ascii_table[k][j+1]).strip())
  if ascii_keywds[k]==name_ra_key:
    for j in range(ascii_table.nrows -1): ra_ascii.append(float(ascii_table[k][j+1]))
  elif ascii_keywds[k]==name_dec_key:
    for j in range(ascii_table.nrows -1): dec_ascii.append(float(ascii_table[k][j+1]))

dist_asec = []

#Two arrays with the indexes of the matching objects
rowAscii_match = []        # ASCII rows
rowFits_match = []         # FITS rows

#Array with the indexes of the NEW objects found in the ASCII file (if any)
rowAscii_new = []

method_dict = {
  '1' : 'POSITION (dist < %.1f")' % match_radius,
  '2' : 'NAME',
  '3' : 'INDEX'
  }

print "\n\t>> Matching ASCII/FITS tables by %s ...\n" % method_dict[method]

num_tot_matches = 0
for j in range(Nrows_fits):
  num_multiple_matches = 0
  id_matches = []
  ra_dec_matches = []

  if match_option == '1':
    tmp_idxs_matches = []
    tmp_dist_matches = []

    for i in range(Nrows_ascii):
      dist_tmp = 3600. * astCoords.calcAngSepDeg(float(ra_fits[j]), float(dec_fits[j]), ra_ascii[i], dec_ascii[i])
      if dist_tmp <= match_radius:
        tmp_idxs_matches.append(i)
        tmp_dist_matches.append(round(dist_tmp,1))
        num_tot_matches += 1
        num_multiple_matches += 1

    idx_match = 0
    if len( tmp_idxs_matches ) > 1:
      print bcolors.WARNING+ "\n\t! WARNING ! %i objects found within %.1f arcsec from %s \n" % ( len(tmp_idxs_matches), match_radius, name_fits[j]) + bcolors.ENDC
      for idx in range( len(tmp_idxs_matches) ): print '\t%i: %s (dist = %s")' % ( idx+1, name_ascii[ tmp_idxs_matches[idx]], tmp_dist_matches[idx] )
      tmp_check = False
      while tmp_check == False:
        tmp_entry = int(raw_input('\t-> Please enter the number of the matching object: '))
        if tmp_entry in range(1, len(tmp_idxs_matches)+1 ):
          tmp_check = True
          idx_match = tmp_idxs_matches[ tmp_entry - 1 ]
        else:
          print bcolors.FAIL+ "\n\t*** Wrong option ***\n"+ bcolors.ENDC

      id_matches.append((name_ascii[idx_match]).strip())
      ra_dec_matches.append(ra_ascii[idx_match])
      ra_dec_matches.append(dec_ascii[idx_match])

      # NOTE: When the ascii_table is called, the corresponding index is (rowAscii_match + 1) because of the additional line for the HEADER
      rowAscii_match.append(idx_match)
      rowFits_match.append(j)

    elif len( tmp_idxs_matches ) == 1:
      idx_match = tmp_idxs_matches[0]

      id_matches.append((name_ascii[idx_match]).strip())
      ra_dec_matches.append(ra_ascii[idx_match])
      ra_dec_matches.append(dec_ascii[idx_match])

      # NOTE: When the ascii_table is called, the corresponding index is (rowAscii_match + 1) because of the additional line for the HEADER
      rowAscii_match.append(idx_match)
      rowFits_match.append(j)

  elif match_option == '2':
    for i in range(Nrows_ascii):
      if (name_fits[j]).strip() == (name_ascii[i]).strip():
        num_multiple_matches += 1
        num_tot_matches += 1
        if num_multiple_matches > 1:

          print '%s Found %i objects with the same name : %s\nAborted.\n' % (error, num_multiple_matches, name_fits[j]); os._exit(0)

        # NOTE: When the ascii_table is called, the corresponding index is (rowAscii_match + 1) because of the additional line for the HEADER
        rowAscii_match.append(i)
        rowFits_match.append(j)

  elif match_option == '3':
    for i in range(Nrows_ascii):
      if int(index_fits[j]) == int(index_ascii[i]) and (int(index_fits[j]) >= 0 and int(index_ascii[i]) >= 0):
        num_tot_matches += 1

        # NOTE: When the ascii_table is called, the corresponding index is (rowAscii_match + 1) because of the additional line for the HEADER
        rowAscii_match.append(i)
        rowFits_match.append(j)
        break

for i in range(Nrows_ascii):
  if i not in rowAscii_match: rowAscii_new.append(i) # Row numbers of the NEW clusters, in the ASCII file

print "\n\t%s Found %s matching clusters between FITS/ASCII table to be UPDATED in the FITS table" % (info, len(rowAscii_match))

print "\n\t%s Found %s NEW clusters in the ASCII table to be ADDED to the FITS table" % (info, len(rowAscii_new))

#Store the names of the common/new clusters
idx_name = fits_keywds.index(name_Name_key)
clName_fits=[]

for k in range(Nrows_fits):
  clName_fits.append(fits_data[k][idx_name])

common_clNames=[]
new_clNames=[]

for i, idx in enumerate(rowAscii_match):
  common_clNames.append(clName_fits[rowFits_match[i]])

for idx in rowAscii_new:
  idx_name = ascii_keywds.index(name_Name_key)
  new_clNames.append( ascii_table[idx_name][idx+1] )

#Define the MASS conversion factor, only if a mass field is found in the ASCII table:
h_factor = 1.0
tmp_check = False

mass_in_ascii = set(name_mass_key) & set(ascii_keywds)
if mass_in_ascii:
  print "\n%s Concerning %s, do you want to:\n\t1) Convert from h70^-1 -> h100^-1\n\t2) Convert from h100^-1 -> h70^-1\n\t3) Keep the original values of the ASCII table" % (question, mass_in_ascii.pop())
  while tmp_check == False:
    message = "\n\t-> Please enter 1, 2 or 3:   "
    h_opt = raw_input(message)
    if h_opt == '1': h_factor = 0.7; tmp_check = True
    elif h_opt == '2': h_factor = 1./0.7; tmp_check = True
    elif h_opt == '3': h_factor = 1.; tmp_check = True
    else: print bcolors.FAIL+ "\n\t*** Wrong option ***"+ bcolors.ENDC
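
#(Masses scale as h^-1, so converting from h70^-1 to h100^-1 units multiplies
# the values by 70/100 = 0.7; the inverse conversion multiplies by 1/0.7.)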

newRow_num = Nrows_fits + len(rowAscii_new)

'''
  *** 2nd data UPDATE: add the new clusters as new (initially empty) rows ***
'''
hdulist = pyfits.new_table(hdulist, nrows=newRow_num)

#Add 'CATALOG' to the new clusters (if any)
if name_catalog_key in fits_keywds and name_catalog_key not in ascii_keywds and len(rowAscii_new) > 0:
  new_catalog = raw_input("\n%s Please enter the value of %s for the new cluster(s): " % (question, name_catalog_key))


'''
  *** Update the PAPER column ***
'''
paper_flag = False
updated_paper_vec = []
max_length_paper = 0
cnt = 0

#If 'PAPER' is defined in the ASCII table but not in the FITS one, and there are NO NEW objects, the FITS table is updated with the ASCII values
if name_paper_key in ascii_keywds and name_paper_key not in fits_keywds and len(rowAscii_new) == 0:
  for j in range(Nrows_fits):

    #Update only those clusters specified in the ASCII table
    if j in rowFits_match:
      paper_tmp = ascii_table[ascii_keywds.index(name_paper_key)][rowAscii_match[cnt]+1]
      cnt += 1
    else:
      paper_tmp = "Null"

    paper_tmp = remove_duplicated_names(paper_tmp)
    updated_paper_vec.append(paper_tmp)
    if len(paper_tmp) > max_length_paper: max_length_paper = len(paper_tmp)

#If 'PAPER' is defined in the FITS table, it is updated for the common clusters (and created for the new clusters) with the one defined in the ASCII table.
#If no 'PAPER' is found in the ASCII table, the user is asked to enter it manually.
elif name_paper_key in fits_keywds:
  new_paper_vec = []
  col_paper_fits = fits_keywds.index(name_paper_key)

  if name_paper_key in ascii_keywds:
    paper_flag = True
    for i in range(Nrows_ascii):
      new_paper_vec.append( ascii_table[ascii_keywds.index(name_paper_key)][i+1].strip() )
  else:
    #The new reference is asked to be entered manually only if new clusters are found
    if len(new_clNames)>0:
      tmp_new_paper = raw_input("\n%s Please insert the new reference to add: " % question)
      new_paper_vec=[tmp_new_paper for x in range( Nrows_ascii ) ]
      paper_flag = True
    else:
      new_paper_vec=['' for x in range( Nrows_ascii ) ]

  #Update those clusters in common between the ASCII and FITS tables
  for j in range(Nrows_fits):
    paper_old = (fits_data[j][col_paper_fits]).strip()
    if j in rowFits_match:
      if paper_old == "Null":
        paper_tmp = new_paper_vec[ rowAscii_match[cnt] ]        #No '+1' correction here, since new_paper_vec[] was built without the header line
        cnt+=1
      else:
        paper_tmp = paper_old+"; "+new_paper_vec[ rowAscii_match[cnt] ]        #No '+1' correction here, since new_paper_vec[] was built without the header line
        cnt += 1
    else:
      paper_tmp = paper_old

    paper_tmp = remove_duplicated_names(paper_tmp)
    updated_paper_vec.append(paper_tmp)
    if len(paper_tmp) > max_length_paper: max_length_paper = len(paper_tmp)

#Delete the old 'PAPER' column and update it with a new one defined according to the above case.
if name_paper_key in fits_keywds and paper_flag:
  hdulist.columns.del_col(name_paper_key)

  #Add the new PAPER field, as last column
  col_tmp = pyfits.Column(name=name_paper_key, format=str(max_length_paper)+'A', unit = 'None', array=updated_paper_vec)
  paper_flag = True

if paper_flag:
  coldefs = pyfits.ColDefs(hdulist.columns)
  coldefs.add_col(col_tmp)
  hdulist = pyfits.new_table(coldefs)

#Update ALT_NAME for the common clusters
len_ALT_NAME = []
new_altName_vec = []
old_altName_vec = []
new_altName = ""
cnt = 0

altName_flag = False
name_in_altName = False
replace_altName = False

#Handle the NAME/ALT_NAME update in case of position/index matching:
if len(common_clNames) > 0:

  if name_altName_key not in ascii_keywds and name_altName_key in fits_keywds:
    if name_Name_key in fits_keywds and name_Name_key in ascii_keywds:
      answer_check = False
      tmp = raw_input("\n\t%s Do you want to add the old clusters' %s listed in FITS table to %s? [y/n]: " % (question, name_Name_key, name_altName_key) )
      while answer_check == False:
        if tmp in 'yesYES1' and tmp != '':
          name_in_altName = True
          answer_check = True
        elif tmp in 'nN' and tmp != '': answer_check = True
        else: tmp = raw_input(bcolors.FAIL+ "\n\t\t*** Please enter a valid answer ***" + bcolors.ENDC + ' [y/n] : ')

    col_altName_fits = fits_keywds.index( name_altName_key )

    for j in range(Nrows_fits):

      oldVal_fits = (fits_data[j][col_altName_fits]).strip()
      old_altName_vec.append(oldVal_fits)
      if j in rowFits_match:

        #Add the NAME to ALT_NAME, if it is not already there
        if name_in_altName:
          altName_flag = True

          name_fits = np.array(fits_data[name_Name_key])
          new_altName = oldVal_fits+"; "+name_fits[j]

        else: new_altName = oldVal_fits

        new_altName = remove_duplicated_names(new_altName)
        new_altName_vec.append(new_altName)
        len_ALT_NAME.append(len(new_altName))

        cnt += 1

      else:
        new_altName = remove_duplicated_names(oldVal_fits)
        new_altName_vec.append(oldVal_fits)

  elif name_altName_key in ascii_keywds and name_altName_key in fits_keywds:

    answer_check = False
    tmp = raw_input("\n\t%s %s is both in ASCII/FITS tables. Do you want the ASCII values to REPLACE or to be APPENDED to the FITS ones? [r/a]: " % (question, name_altName_key) )
    while answer_check == False:
      if tmp in 'rR' and tmp != '':
        replace_altName = True
        answer_check = True
      elif tmp in 'aA' and tmp != '': answer_check = True
      else: tmp = raw_input(bcolors.FAIL+ "\n\t\t*** Please enter a valid answer ***" + bcolors.ENDC + ' [r/a] : ')

    if name_Name_key in fits_keywds and name_Name_key in ascii_keywds:
      answer_check = False
      tmp = raw_input("\n\t%s Do you want to add the old clusters' %s listed in FITS table to %s? [y/n]: " % (question, name_Name_key, name_altName_key) )
      while answer_check == False:
        if tmp in 'yesYES1' and tmp != '':
          name_in_altName = True
          answer_check = True
        elif tmp in 'nN' and tmp != '': answer_check = True
        else: tmp = raw_input(bcolors.FAIL+ "\n\t\t*** Please enter a valid answer ***" + bcolors.ENDC + ' [y/n] : ')

    col_altName_ascii = ascii_keywds.index( name_altName_key )

    if replace_altName:
      altName_flag = True
      print "\n\t%s %s replaced" % (info, name_altName_key)

      if name_in_altName:
        names_fits = fits_data[name_Name_key].strip()
        for j in range(Nrows_fits):
          oldVal_fits = (fits_data[ name_altName_key ][j]).strip()
          old_altName_vec.append(oldVal_fits)
          if j in rowFits_match:
            new_altName = ascii_table[col_altName_ascii][rowAscii_match[cnt]+1]+"; "+name_fits[j]
            cnt+=1
          else: new_altName = oldVal_fits

          new_altName = remove_duplicated_names(new_altName)
          new_altName_vec.append(new_altName)
      else:
        for j in range(Nrows_fits):
          oldVal_fits = (fits_data[ name_altName_key ][j]).strip()
          old_altName_vec.append(oldVal_fits)
          if j in rowFits_match:
            new_altName = ascii_table[col_altName_ascii][rowAscii_match[cnt]+1]
            cnt+=1
          else: new_altName = oldVal_fits

          new_altName = remove_duplicated_names(new_altName)
          new_altName_vec.append(new_altName)

    elif not replace_altName:
      if name_in_altName:
        altName_flag = True
        print '\n\t%s %s appended & %s added' % (info, name_altName_key, name_Name_key)
        names_fits = fits_data[name_Name_key].strip()
        for j in range(Nrows_fits):
          oldVal_fits = (fits_data[ name_altName_key ][j]).strip()
          old_altName_vec.append(oldVal_fits)
          if j in rowFits_match:
            new_altName = "; ".join([ oldVal_fits, ascii_table[col_altName_ascii][rowAscii_match[cnt]+1], name_fits[j] ])
            cnt+=1
          else:
            new_altName = oldVal_fits

          new_altName = remove_duplicated_names(new_altName)
          new_altName_vec.append(new_altName)
      else:
        for j in range(Nrows_fits):
          oldVal_fits = (fits_data[ name_altName_key ][j]).strip()
          old_altName_vec.append(oldVal_fits)
          if j in rowFits_match:
            if oldVal_fits in [np.nan, "NULL", "NaN", "False"]: new_altName = ascii_table[col_altName_ascii][rowAscii_match[cnt]+1]
            else:
              new_altName = "%s; %s" % (oldVal_fits, ascii_table[col_altName_ascii][rowAscii_match[cnt]+1])
            cnt+=1
          else: new_altName = oldVal_fits

          new_altName = remove_duplicated_names(new_altName)
          new_altName_vec.append(new_altName)

    #Compute the max length of ALT_NAME in the FITS and ASCII tables
    maxLength_altName_fits = max([len(item) for item in fits_data[ name_altName_key ]])
    maxLength_altName_ascii = max([len(item) for item in ascii_table[col_altName_ascii]])

    maxLength_altName_new = max([len(item) for item in new_altName_vec])

    len_ALT_NAME = [maxLength_altName_fits, maxLength_altName_ascii, maxLength_altName_new]

'''
  *** Update the length of ALT_NAME. The only way, though, is to re-create the column with the new format ***
'''
if altName_flag:

  #Delete the old 'ALT_NAME' column
  name_vec = []
  format_vec = []
  unit_vec = []

  #Store attributes of the keywords after ALT_NAME
  for j in range(fits_keywds.index(name_altName_key)+1, len(fits_keywds)):
    name_vec.append(coldefs.names[j])
    format_vec.append(coldefs.formats[j])
    unit_vec.append(coldefs.units[j])

  #Delete the keywords after ALT_NAME
  tmp = 0
  for j in range(fits_keywds.index(name_altName_key)+1, len(fits_keywds)):
    coldefs.del_col(name_vec[tmp])
    tmp+=1

  #Delete ALT_NAME
  coldefs.del_col(name_altName_key)

  #Re-create ALT_NAME with the new format
  col_tmp = pyfits.Column(name=name_altName_key, format=str(max(len_ALT_NAME))+'A', unit = 'None', array=new_altName_vec)
  coldefs.add_col(col_tmp)
  hdulist.columns = coldefs

  #Re-create all the keywords after ALT_NAME, with their attributes
  tmp = 0
  data_vec_tmp = []

  for j in range(fits_keywds.index(name_altName_key)+1, len(fits_keywds)):
    data_vec_tmp = hdulist.data[name_vec[tmp]]
    col_tmp = pyfits.Column(name=name_vec[tmp], format=format_vec[tmp], unit = unit_vec[tmp], array=data_vec_tmp)
    coldefs.add_col(col_tmp)
    tmp +=1
    data_vec_tmp = []

  hdulist = pyfits.new_table(coldefs)
'''
1157
  *** Write summary report for matching/new clusters ***
1158
  
1159
and also
1160
    
1161
  *** 3rd data UPDATE: the columns of new clusters are filled in with the correct values ***
1162
'''
1163
1164
#Write summary for common clusters (if any)
1165
if len(rowAscii_match) > 0:
1166
  length_new_field = []
1167
  index_ascii_field_vec = []
1168
    
1169
  tmp_lenght = ''
1170
  for fields in ascii_keywds:
1171
    index_ascii_field = ascii_keywds.index(fields)
1172
    index_ascii_field_vec.append(index_ascii_field)
1173
    index_fits_field = coldefs.names.index(fields)
1174
    if coldefs.formats[index_fits_field].find('A') >= 0:
1175
      tmp_lenght = coldefs.formats[index_fits_field].split('A')[0]
1176
    elif coldefs.formats[index_fits_field].find('E') >= 0 or coldefs.formats[index_fits_field].find('D') >= 0:
1177
      tmp_lenght = '15' #For float and double, string size fixed to 15
1178
    elif coldefs.formats[index_fits_field].find('I') >= 0:
1179
      max_len_int =  max(len(str(elem).strip()) for elem in ascii_table[index_ascii_field])
1180
      tmp_lenght = str(max_len_int + 3)
1181
    elif coldefs.formats[index_fits_field].find('L') >= 0:
1182
      tmp_lenght = '6'
1183
    
1184
    length_new_field.append( max( int(tmp_lenght), len(fields)+3 ) )
1185
      
1186
  file_report.write("\n# >>>> CLUSTERS PROPERTIES ** UPDATED ** IN THE FITS TABLE <<<<\n\n")
1187
  to_write = ""
1188
1189
  #Write/format the header of each column
1190
  for tmp, fields in enumerate(ascii_keywds):
1191
    
1192
    max_len_new = length_new_field[tmp]
1193
    
1194
    if fields in fits_keywds:
1195
      max_len_old = max(len(str(elem).strip()) for elem in fits_data[fields])
1196
    else:
1197
      max_len_old = max_len_new
1198
1199
    if fields in [name_Name_key, name_zRef_key]:
1200
      
1201
      #Update the length of NAME or REDSHIFT_REF (increase its TFORM) if necessary
1202
      #by comparing the max length of its values in old (fits) and new (ascii) file
1203
      maxLength_fits = max([len(item) for item in fits_data[fields]])
1204
      maxLength_ascii = max([len(item) for item in ascii_table[index_ascii_field_vec[tmp]] ])
1205
      
1206
      #If ascii names are longer than in fits, NAME is deleted and re-created with a bigger format, but keeping (for the moment) the old values
1207
      if maxLength_ascii > maxLength_fits:
1208
        print '\n\t%s New %ss are longer than ones in fits: recreating the column with larger size (%sA -> %sA)' % (info, fields, maxLength_fits, maxLength_ascii)
1209
        
1210
        new_format = '%sA' % maxLength_ascii
1211
1212
        hdulist = recreate_reformatted_column(hdulist, fields, new_format, hdulist.data[fields] ) #ascii_table[index_ascii_field_vec[tmp]])
1213
1214
    #Define lengths for ALT_NAME
1215
    elif fields == name_altName_key and altName_flag:
1216
      max_len_old = max([len(item) for item in old_altName_vec])
1217
      max_len_new = max([len(item) for item in new_altName_vec])
1218
      
1219
    #Define lengths for PAPER
1220
    elif fields == name_paper_key and paper_flag:
1221
      max_len_new = max_length_paper
1222
1223
    label_tot_length = str(int(max_len_old) + int(max_len_new) +3) #+3 because of  ' | '
1224
    formatting = '{0:^%ss}' % (label_tot_length)
1225
  
1226
    to_write += formatting.format( fields )
1227
    
1228
  file_report.write(to_write+"\n")
1229
1230
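  #Header formatting example (illustrative): '{0:^20s}'.format('REDSHIFT')
  #centers the label in a 20-character field, matching the 'old | new'
  #value pairs written for each cluster below.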
  #Write/format the values of each column
  for r, idx in enumerate(rowAscii_match):
    
    #Row numbers in FITS and ASCII of the common clusters
    clRow_fits = rowFits_match[r]
    clRow_ascii = idx #rowAscii_match[r]
    
    to_write = "\n"

    for tmp, fields in enumerate(ascii_keywds):
      
      kwCol_fits = hdulist.data.names.index(fields)
      kwCol_ascii = ascii_keywds.index(fields)
        
      oldVal_fits = hdulist.data[clRow_fits][kwCol_fits]
      newVal_ascii = ascii_table[kwCol_ascii][clRow_ascii+1] #the '+1' correction is needed because of the additional HEADER line
          
      #Update the values...

      #Set undefined values to '-' or -1.6375e+30
      if str(newVal_ascii).strip() in ['', '-', '-1.6375E+30', '-1.6375e+30']:
        #String
        if keys_form_unit[fields]['TFORM'].find('A') >= 0: newVal_ascii = '-'
        #Not string
        else: newVal_ascii = -1.6375e+30
      
      if (fields in name_mass_key or fields in name_errMass_key) and newVal_ascii != -1.6375e+30:
        newVal_ascii = h_factor * float(newVal_ascii)
      
      max_len_new = length_new_field[tmp]
      
      if fields in fits_keywds:
        max_len_old = max(len(str(elem).strip()) for elem in fits_data[fields])
      else:
        max_len_old = max_len_new
      
      #If ALT_NAME has changed, write it in the report even if it is not an ASCII field
      if fields == name_altName_key and altName_flag:
        max_len_old = max([len(item) for item in old_altName_vec])
        max_len_new = max([len(item) for item in new_altName_vec])
        
        oldVal_fits = old_altName_vec[clRow_fits]
        newVal_ascii = new_altName_vec[clRow_fits]
          
      elif fields == name_paper_key and paper_flag:
        max_len_new = max_length_paper
        
        oldVal_fits = fits_data[name_paper_key][clRow_fits]
        newVal_ascii = new_paper_vec[clRow_ascii]
      
      formatting = ' {0:>%ss} | {1:<%ss} ' % (max_len_old, max_len_new)
      to_write += formatting.format(str(oldVal_fits), str(newVal_ascii))
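      #Example (hypothetical input): for an 'L' (logical) column an ASCII
      #value of 'YES' is stored as True, while 'NO', '' or 'NULL' become False.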
      #Update values for Boolean fields...
      if keys_form_unit[fields]['TFORM'] == 'L':
        if str(newVal_ascii).upper() in ["TRUE", "YES", "1.0"]: hdulist.data[clRow_fits][kwCol_fits] = True
        elif str(newVal_ascii).upper() in ["FALSE", "NO", "0.0", "", "NONE", "NULL", "[]", "{}"]: hdulist.data[clRow_fits][kwCol_fits] = False
      else:
        try:
          hdulist.data[clRow_fits][kwCol_fits] = newVal_ascii
        except:
          if str(newVal_ascii) == 'nan': hdulist.data[clRow_fits][kwCol_fits] = np.nan

    file_report.write(to_write)

#Write summary for NEW clusters (if any)
fits_keywds = []
length_label_vec = []
Ncol_fits = int(hdulist.header['TFIELDS'])
for i in range(Ncol_fits):
  fits_keywds.append(hdulist.data.names[i])
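#NOTE: fits_keywds now lists every column of the updated table; TFIELDS is
#the standard FITS keyword holding the number of columns of a binary table.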
if len(rowAscii_new) > 0:
  file_report.write("\n\n# >>>> NEW CLUSTERS ** ADDED ** TO THE FITS TABLE <<<<\n\n")
  to_write = ""
  tmp = 0
  for fields in fits_keywds:
    
    format_tmp = coldefs.formats[tmp]
    tmp_length = ''
    
    if format_tmp.find('A') >= 0:
      tmp_length = format_tmp.split('A')[0]
    elif format_tmp.find('E') >= 0 or format_tmp.find('D') >= 0:
      tmp_length = '15' #For float and double, string size fixed to 15
    elif format_tmp.find('I') >= 0:
      index_fits_field = fits_keywds.index(fields)
      max_len_int = len(str(fits_data[-1][index_fits_field]))
      tmp_length = str(max_len_int + 3)
    elif format_tmp.find('L') >= 0:
      tmp_length = '6' #For boolean, string size fixed to 6
      
    length_label_vec.append( max(int(tmp_length), len(fields)+3) )
    
    formatting = '{0:^%ss}' % (length_label_vec[-1])
    
    to_write += formatting.format(fields)
    tmp += 1
    
  file_report.write(to_write+"\n")
  j = 0

  for name in new_clNames:
    
    to_write = "\n"
    for k, field in enumerate(fits_keywds):
      index_field = coldefs.names.index(field)
      format_field = coldefs.formats[index_field]

      kwCol_fits = fits_keywds.index(field)
      oldVal_fits = hdulist.data[Nrows_fits+j][kwCol_fits] #Add rows after the last one, filling the empty ones...
      
      if field in ascii_keywds:
        kwCol_ascii = ascii_keywds.index(field)
        
        newVal_ascii = ascii_table[kwCol_ascii][rowAscii_new[j]+1]
        
        if format_field.find('A') >= 0 and (newVal_ascii.strip()).upper() in ['', '-', "NULL", "NAN", "NONE", "FALSE"]: newVal_ascii = '-'
        elif str(newVal_ascii).strip() in ['-1.6375E+30', '-1.6375e+30']: newVal_ascii = -1.6375e+30
        if (field in name_mass_key or field in name_errMass_key) and newVal_ascii != -1.6375e+30:
          newVal_ascii = h_factor * float(newVal_ascii)
      else:
        oldVal_fits = hdulist.data[Nrows_fits+j][kwCol_fits]
        
        if field == name_index_key:
          #The INDEX associated with each new cluster is created by adding +1 to the previous value
          newVal_ascii = 1 + hdulist.data[Nrows_fits+j-1][kwCol_fits]
          
        elif field == name_catalog_key:
          newVal_ascii = str(new_catalog)
          
        #Galactic coordinates are computed from RA, DEC (if any)
        elif field == 'GLON':
          if len(ra_ascii) > 0 and len(dec_ascii) > 0: newVal_ascii = round(astCoords.convertCoords('J2000', 'GALACTIC', ra_ascii[rowAscii_new[j]], dec_ascii[rowAscii_new[j]], 2000)[0], 5)
        elif field == 'GLAT':
          if len(ra_ascii) > 0 and len(dec_ascii) > 0: newVal_ascii = round(astCoords.convertCoords('J2000', 'GALACTIC', ra_ascii[rowAscii_new[j]], dec_ascii[rowAscii_new[j]], 2000)[1], 5)

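        #convertCoords example (illustrative values): astCoords.convertCoords(
        #'J2000', 'GALACTIC', 150.0, 2.2, 2000) returns [GLON, GLAT] in decimal
        #degrees, hence the [0]/[1] indexing above.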
        elif field == name_zErr_key:
          newVal_ascii = np.nan
        elif field == name_zLimit_key:
          newVal_ascii = np.nan
        elif field == name_paper_key:
          newVal_ascii = tmp_new_paper
        elif format_field == 'L':
          newVal_ascii = False
        elif field in name_coordinates_keys:
          newVal_ascii = np.nan
        elif format_field == 'I':
          newVal_ascii = -1
        elif format_field == 'E': #FLOAT
          newVal_ascii = -1.6375E+30
        elif format_field.find("A") >= 0: #STRING
          newVal_ascii = 'Null'

      #Change values...
      if format_field == 'L':
        if str(newVal_ascii).upper() in ["TRUE", "YES", "1.0"]: hdulist.data[Nrows_fits+j][kwCol_fits] = True
        elif str(newVal_ascii).upper() in ["FALSE", "NO", "0.0", "", "NONE", "NULL", "[]", "{}"]: hdulist.data[Nrows_fits+j][kwCol_fits] = False
      else:
        try:
          hdulist.data[Nrows_fits+j][kwCol_fits] = newVal_ascii
        except:
          if str(newVal_ascii) == 'nan': hdulist.data[Nrows_fits+j][kwCol_fits] = np.nan
          else: print '%s A problem occurred for cluster Name = %s : field = %s , value = %s \nAborted.\n' % (error, name, field, newVal_ascii); os._exit(0)
      
      formatting = '{0:^%ss}' % (length_label_vec[k])
      to_write += formatting.format(str(newVal_ascii))
    j += 1
    file_report.write(to_write)

'''
  *** 4th data UPDATE: update the fits HEADER with the Version number and the creation date ***
'''
hdulist.header.add_comment("", before="TTYPE1")
1408
version = raw_input("\n%s Please enter the Version number of the new table: " % question)
1409
1410
version_check = False
1411
while version_check == False:
1412
  try:
1413
    if float(version): version_check = True
1414
  except  ValueError:
1415
    print bcolors.FAIL+ "\n\t\t*** Version number not valid ***" + bcolors.ENDC
1416
    version = raw_input("\n\t-> Please enter a valid version number: ")
1417
1418
hdulist.header.add_comment("*** Version " +str(version)+" ***", before="TTYPE1")
1419
1420
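#Version-check example (hypothetical input): '2.1' and '0' pass the float()
#test above, while 'v2' raises a ValueError and triggers a new prompt.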
today = date.today().strftime("%A %d. %B %Y")
comment = "*** Compiled at IDOC/IAS on %s ***" % (today)
hdulist.header.add_comment(comment, before="TTYPE1")

hdulist.header.add_comment("", before="TTYPE1")
extname = raw_input("\n%s Please enter the name of the new FITS table (without extension): " % question)
hdulist.header.update('EXTNAME', extname, before='TTYPE1')

#The new fits table is written to a temporary file, which will be deleted after the proper 'undef' values are set
hdulist.writeto('new_table.fits')

file_report.close()

#Reopen the temporary fits table to change the undefined values (e.g., -1.6375E+30) to 'NULL' or NaN
hdulist = pyfits.open('new_table.fits')
fits_header = hdulist[1].header
fits_data = hdulist[1].data

#Delete the temporary file
os.remove('new_table.fits')

Ncol_fits = int(fits_header['TFIELDS'])
Nrows_fits = fits_header['NAXIS2']

#Dictionary defining the 'undef' values for the different kinds of fields
_UNDEF_VALUES_ = {
  'FLOAT' : {np.nan},
  'INT' : {-1},
  'STRING' : {'NULL'},
  name_zType_key : {'undef'},
  'PIPELINE' : {0},
  'PIPE_DET' : {0}
  }

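#NOTE: each entry above is a one-element set literal (e.g. {np.nan}); the
#set_undef_values() function defined earlier in this script is assumed to
#extract the single value when replacing a field.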
'''
  *** 5th data UPDATE: set the proper 'undef' values for the different fields ***
'''

hdulist[1].data = set_undef_values(fits_data)

file_output = extname+'.fits'
print "\n\t>> New updated file:" + bcolors.OKGREEN + " %s " % (file_output) + bcolors.ENDC
print "\t>> Details of the applied updates are reported in:" + bcolors.OKGREEN + " %s " % (file_report_name) + bcolors.ENDC + "\n"
hdulist.writeto(file_output)