I have many .csv files of NYC taxi data from nyc.gov; each .csv holds one year-month. I take about 15 of these csvs and make HDF5 files from them:
import h5py
import pandas as pd
import os
import glob
import numpy as np
import vaex
from tqdm import tqdm_notebook as tqdm
#hdf = pd.HDFStore('c:/Projekty/H5Edu/NYCTaxi/NYCTaxi.hp')
#df1 = pd.read_csv('path to some csv')
#hdf.put('DF1', df1, format = 'table', data_columns = True)
csv_list = np.sort(np.array(glob.glob('G:\\NYCTaxi\\*.csv')))[::-1]
csv_list = csv_list[20:39]
output_dir = 'c:\\Datasety\\YelowTaxi\\DataH5\\'
for file in tqdm(csv_list, leave=False, desc='Converting to hdf5...'):
    # Setting up the files and directories
    #zip_file = ZipFile(file)
    output_file = file.split('\\')[-1][:-3] + 'hdf5'
    output = output_dir + output_file
    #output = output_file

    # Check if a converted file already exists: if it does, skip it; otherwise read in the raw csv and convert it
    if os.path.exists(output) and os.path.isfile(output):
        pass
    else:
        # Importing the data into pandas
        #pandas_df = pd.read_csv(file, index_col=None, header=0)
        pandas_df = pd.read_csv(file, index_col=None, header=0, low_memory=False)
        # Rename some columns to match the more well known dataset from
        # http://stat-computing.org/dataexpo/2009/the-data.html
        # Importing the data from pandas to vaex
        vaex_df = vaex.from_pandas(pandas_df, copy_index=False)
        # Export the data with vaex to hdf5
        vaex_df.export_hdf5(path=output, progress=False)
Next, I merge them into one big HDF5 file:
import re
import glob
import vaex
import numpy as np
def tryint(s):
    try:
        return int(s)
    except ValueError:
        return s

def alphanum_key(s):
    """ Turn a string into a list of string and number chunks.
        "z23a" -> ["z", 23, "a"]
    """
    return [tryint(c) for c in re.split('([0-9]+)', s)]
hdf5_list = glob.glob('c:\\Datasety\\YelowTaxi\\DataH5\\*.hdf5')
hdf5_list.sort(key=alphanum_key)
hdf5_list = np.array(hdf5_list)
#assert len(hdf5_list) == 3, "Incorrect number of files"
# This is an important step
master_df = vaex.open_many(hdf5_list)
# exporting
#master_df.export_hdf5(path='c:\\Datasety\\YelowTaxi\\DataH5\\Spojene.hd5', progress=True)
master_df.export_hdf5(path='c:\\Datasety\\YelowTaxi\\DataH5\\Spojene.hdf5', progress=True)
So far everything is OK and I can open the output file Spojene.hdf5.
Next, I append new .csv files to Spojene.hdf5:
for file in csv_list:
    #file = csv_list[0]
    df2 = pd.read_csv(file, index_col=None, header=0, low_memory=False)
    filename = 'c:\\Datasety\\YelowTaxi\\DataH5\\Spojene.hdf5'
    df2.to_hdf(filename, 'data', append=True)
But after I append the new .csv files to Spojene.hdf5, I can't open it any more:
df = vaex.open('c:\\Datasety\\YelowTaxi\\DataH5\\Spojene.hdf5')
ValueError: First columns has length 289184484, while column table has length 60107988
Please, what can I do?
I think this is linked to how pandas creates HDF5 files. According to vaex's documentation, you can't open an HDF5 file with vaex if it has been created via the pandas to_hdf method. I assume the same applies when you append to an existing HDF5 file.
To avoid this error you can reuse your existing logic: convert each pandas dataframe to a vaex dataframe, export it to HDF5, and then combine everything with open_many. Something like this should work:
main_hdf5_file_path = "c:\\Datasety\\YelowTaxi\\DataH5\\Spojene.hdf5"

hdf5_files_created = []
for file in csv_list:
    hdf5_file = file.replace(".csv", ".hdf5")
    # from_csv can take additional parameters to forward to pd.read_csv
    # You can also use convert=True to convert it automatically to hdf5 without the export_hdf5
    # Refer to https://vaex.readthedocs.io/en/docs/api.html#vaex.from_csv
    df = vaex.from_csv(file)
    df.export_hdf5(hdf5_file)
    hdf5_files_created.append(hdf5_file)

hdf5_to_read = hdf5_files_created + [main_hdf5_file_path]

final_df = vaex.open_many(hdf5_to_read)

# Export to a new file: Spojene.hdf5 is still memory-mapped by open_many at this
# point, so it should not be overwritten while it is one of the open sources.
final_df.export_hdf5(path="c:\\Datasety\\YelowTaxi\\DataH5\\Spojene_new.hdf5", progress=True)