-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathparser.py
282 lines (225 loc) · 9.68 KB
/
parser.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
# metaphlan_tools/parser.py
import os
import pandas as pd
import numpy as np
import re
def parse_metaphlan_file(file_path, taxonomic_level='species'):
    """
    Parse a MetaPhlAn output file and extract abundances at a specific taxonomic level.

    This is a robust parser that handles various MetaPhlAn file formats and edge cases
    (comment headers, blank lines, 2-column and 3+-column layouts).

    Parameters:
    -----------
    file_path : str
        Path to the MetaPhlAn output file
    taxonomic_level : str, optional
        Taxonomic level to extract (default: 'species')

    Returns:
    --------
    pandas.DataFrame
        DataFrame with taxon names as index and a single 'abundance' column

    Raises:
    -------
    ValueError
        If `taxonomic_level` is not a recognized level, or if no taxa at the
        requested level are found in the file.
    """
    # Define taxonomic prefixes for filtering
    level_prefixes = {
        'kingdom': 'k__',
        'phylum': 'p__',
        'class': 'c__',
        'order': 'o__',
        'family': 'f__',
        'genus': 'g__',
        'species': 's__'
    }
    if taxonomic_level not in level_prefixes:
        raise ValueError(f"Invalid taxonomic level: {taxonomic_level}. "
                         f"Must be one of {list(level_prefixes.keys())}")
    target_prefix = level_prefixes[taxonomic_level]
    # Matches a plain non-negative decimal number, optionally in scientific
    # notation (e.g. "12.5", "1.2e-05"). Compiled once and reused for every line.
    number_re = re.compile(r'^\d+(?:\.\d+)?(?:[eE][+-]?\d+)?$')
    abundance_data = {}
    # Manually parse the file line by line for maximum robustness
    with open(file_path, 'r') as f:
        for line_num, line in enumerate(f, 1):
            # Skip comment lines except header comment
            if line.startswith('#') and not line.startswith('#clade_name'):
                continue
            # Skip empty lines
            if not line.strip():
                continue
            # Split by tabs
            parts = line.strip().split('\t')
            # Need at least 2 parts for parsing (taxonomy and abundance)
            if len(parts) < 2:
                print(f"Warning: Line {line_num} in {file_path} has fewer than 2 columns, skipping")
                continue
            # First column is taxonomy; abundance position varies by format
            taxonomy = parts[0]
            try:
                if len(parts) >= 3 and number_re.match(parts[2]):
                    # Standard format: taxonomy, NCBI IDs, abundance
                    abundance = float(parts[2])
                elif number_re.match(parts[1]):
                    # Simplified format: taxonomy, abundance
                    abundance = float(parts[1])
                else:
                    # Last chance: try last column
                    abundance = float(parts[-1])
            except (ValueError, IndexError):
                print(f"Warning: Could not parse abundance in line {line_num} of {file_path}, skipping")
                continue
            # Only record lines whose DEEPEST clade is at the target level.
            # A plain substring test (`target_prefix in taxonomy`) would also
            # match deeper lines (e.g. strain lines `...|s__X|t__Y` contain
            # "s__") and overwrite species abundances with strain abundances.
            deepest_clade = taxonomy.split('|')[-1]
            if deepest_clade.startswith(target_prefix):
                taxon_name = deepest_clade[len(target_prefix):]
                abundance_data[taxon_name] = abundance
    if not abundance_data:
        raise ValueError(f"No {taxonomic_level}-level taxa found in {file_path}")
    # Convert to DataFrame with the taxon name as index
    df = pd.DataFrame(list(abundance_data.items()), columns=['Taxon', 'abundance'])
    df.set_index('Taxon', inplace=True)
    return df
def load_metadata(metadata_file, sample_id_col='SampleID'):
    """
    Load sample metadata from a CSV or Excel file.

    Parameters:
    -----------
    metadata_file : str or pathlib.Path
        Path to the metadata file (.csv, .xlsx, or .xls)
    sample_id_col : str, optional
        Name of the column containing sample IDs (default: 'SampleID')

    Returns:
    --------
    pandas.DataFrame
        DataFrame containing metadata, indexed by sample ID

    Raises:
    -------
    ValueError
        If the file extension is unsupported or the sample ID column is missing.
    """
    # Path objects are fine; normalize to str only for the extension checks
    path_str = str(metadata_file)
    # Pick the pandas reader from the file extension
    if path_str.endswith('.csv'):
        table = pd.read_csv(metadata_file)
    elif path_str.endswith(('.xlsx', '.xls')):
        table = pd.read_excel(metadata_file)
    else:
        raise ValueError(f"Metadata file must be CSV or Excel format, got: {path_str}")
    # Guard first, then index by the sample ID column
    if sample_id_col not in table.columns:
        raise ValueError(f"Sample ID column '{sample_id_col}' not found in metadata")
    return table.set_index(sample_id_col)
def combine_samples(files, sample_ids=None, taxonomic_level='species'):
    """
    Combine multiple MetaPhlAn output files into a single abundance table.

    Handles duplicate taxon indices within a file and skips files that fail
    to parse, reporting diagnostics when nothing could be combined.

    Parameters:
    -----------
    files : list
        List of file paths to MetaPhlAn output files
    sample_ids : list, optional
        List of sample IDs corresponding to each file; defaults to the file
        basenames with their extension stripped
    taxonomic_level : str, optional
        Taxonomic level to extract (default: 'species')

    Returns:
    --------
    pandas.DataFrame
        Combined abundance table with taxa as rows and samples as columns
        (missing taxa filled with 0)

    Raises:
    -------
    ValueError
        If file/ID counts differ, or if no file could be parsed.
    """
    frames = []
    parsed_files = []
    parsed_ids = []
    if sample_ids is None:
        # Derive sample IDs from the file names
        sample_ids = [os.path.basename(f).split('.')[0] for f in files]
    if len(files) != len(sample_ids):
        raise ValueError("Number of files must match number of sample IDs")
    total = len(files)
    for position, (file_path, sample_id) in enumerate(zip(files, sample_ids), start=1):
        try:
            # Parse with the robust single-file parser
            print(f"Processing file {position}/{total}: {os.path.basename(file_path)}")
            sample_frame = parse_metaphlan_file(file_path, taxonomic_level)
            # Label the single abundance column with this sample's ID
            sample_frame.columns = [sample_id]
            # Collapse duplicate taxa, keeping the first occurrence
            if sample_frame.index.duplicated().any():
                print(f"Warning: Found duplicate taxa in {sample_id}, keeping first occurrence")
                sample_frame = sample_frame[~sample_frame.index.duplicated(keep='first')]
            frames.append(sample_frame)
            parsed_files.append(file_path)
            parsed_ids.append(sample_id)
            print(f"Successfully processed {os.path.basename(file_path)}")
        except Exception as e:
            # A bad file should not abort the whole run
            print(f"Error processing {file_path}: {str(e)}")
            continue
    if not frames:
        # Nothing parsed: print diagnostics before failing
        print("\nDiagnostic information:")
        print(f"Total files attempted: {len(files)}")
        print(f"Files that failed: {len(files)}")
        if files:
            try:
                print(f"\nFirst few lines of {files[0]}:")
                with open(files[0], 'r') as handle:
                    for line_no, text in enumerate(handle):
                        if line_no >= 5:  # Print first 5 lines only
                            break
                        print(f"Line {line_no+1}: {text.strip()}")
            except Exception as e:
                print(f"Could not read file for diagnostics: {str(e)}")
        raise ValueError("No valid data frames to combine. Check file formats and error messages above.")
    print(f"\nSuccessfully processed {len(frames)}/{total} files")
    # One column per sample; taxa absent from a sample become 0
    return pd.concat(frames, axis=1).fillna(0)
def join_abundance_with_metadata(abundance_df, metadata_df):
    """
    Join the abundance table with metadata.

    Parameters:
    -----------
    abundance_df : pandas.DataFrame
        Abundance DataFrame with taxa as rows and samples as columns
    metadata_df : pandas.DataFrame
        Metadata DataFrame indexed by sample ID

    Returns:
    --------
    pandas.DataFrame
        Transposed abundance table (samples as rows) with metadata columns
        appended; only samples present in both inputs are kept.

    Raises:
    -------
    ValueError
        If the two inputs share no sample IDs.
    """
    # Samples become rows so they can align with the metadata index
    samples_as_rows = abundance_df.T
    # Fail loudly if the two tables have nothing in common
    shared = samples_as_rows.index.intersection(metadata_df.index)
    if len(shared) == 0:
        raise ValueError("No common sample IDs found between abundance data and metadata")
    # Inner join keeps only samples present in both tables
    return samples_as_rows.join(metadata_df, how='inner')
def diagnostic_file_check(file_path):
    """
    Perform a diagnostic check on a MetaPhlAn file to understand its structure.

    Prints the total line count, the column layout of the first five lines,
    and the number of lines containing species entries.

    Returns:
    --------
    bool
        True if the file was read and inspected; False if anything failed.
    """
    try:
        print(f"\nDiagnostic check of {file_path}:")
        # Read everything up front; these files are small enough
        with open(file_path, 'r') as handle:
            all_lines = handle.readlines()
        print(f"Total lines in file: {len(all_lines)}")
        # Show the column structure of the first 5 lines, truncated to 80 chars
        for line_no, raw in enumerate(all_lines[:5], start=1):
            stripped = raw.strip()
            column_count = len(stripped.split('\t'))
            shown = stripped[:80] + ('...' if len(stripped) > 80 else '')
            print(f"Line {line_no}: {column_count} columns, Content: {shown}")
        # Count species-level entries by their clade prefix
        species_total = sum(1 for raw in all_lines if 's__' in raw)
        print(f"Lines containing species entries (s__): {species_total}")
        return True
    except Exception as e:
        print(f"Diagnostic check failed: {str(e)}")
        return False