Evr2 to nuscribe
In [ ]:
import pandas as pd
import numpy as np
import datetime
import time
In [ ]:
filename = r'c:\Users\OZCAR_GEOPHY\Documents\Data\MontLozere\timestamp\OB_20241107100109.txt'
In [ ]:
OB = pd.read_csv(filename,sep='\t')
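The rest of the notebook relies on the columns 'Gps time(us)', 'UTC Date', 'UTC Time', 'Point', 'Line', 'FFID', 'Latitude' and 'Longitude' being present in the file. As an optional sketch (not part of the original workflow), a quick inspection can confirm this before going further:

# Optional sanity check: list the parsed columns and preview the first rows
print(OB.columns.tolist())
print(OB.head())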
In [ ]:
# Convert GPS time to UTC (for checking purposes)
# utc = 1980-01-06 UTC + (gps - (leap_count(2017) - leap_count(1980)))
gps = OB['Gps time(us)']
utc = datetime.datetime(1980, 1, 6) + datetime.timedelta(microseconds=int(gps[0]) - (37e6 - 19e6))

def unix_time_seconds(dt):
    # Convert a UTC datetime to Unix time (seconds since the 1970-01-01 epoch)
    # epoch = datetime.datetime.utcfromtimestamp(0)
    epoch = datetime.datetime(1970, 1, 1)
    return (dt - epoch).total_seconds()
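The cell above converts only the first GPS sample as a check. As a hedged sketch (assuming 'Gps time(us)' holds microseconds elapsed since the GPS epoch of 1980-01-06 and the same fixed 18 s offset, i.e. 37 - 19 leap seconds), the whole column can be converted in one vectorized step:

# Optional vectorized version of the same GPS -> UTC check, applied to every row
utc_all = pd.Timestamp('1980-01-06') + pd.to_timedelta(OB['Gps time(us)'] - (37e6 - 19e6), unit='us')
print(utc_all.head())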
In [ ]:
# Merge UTC date and time
OB['UTC'] = OB['UTC Date'].astype(str) + ' ' + OB["UTC Time"]
OB['UTC'] = pd.to_datetime(OB['UTC'], format='%Y-%m-%d %H:%M:%S.%f')
# Change the date format
OB['DATE_OK'] = OB['UTC'].dt.strftime('%Y:%m:%d')
# Convert UTC to Unix time and add it as a column
OB['UNIX'] = OB['UTC'].apply(unix_time_seconds)
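A minimal spot check of unix_time_seconds, using a fixed, hypothetical timestamp: one day after the Unix epoch must map to exactly 86400 seconds.

# Hypothetical spot check: 1970-01-02 00:00:00 is exactly one day (86400 s) after the epoch
assert unix_time_seconds(datetime.datetime(1970, 1, 2)) == 86400.0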
In [ ]:
station = OB['Point'].astype(int)
tb_utc_date = OB['DATE_OK']
tb_utc_time = OB['UTC Time']
tb_utc = OB['UTC']
tb_unix = OB['UNIX']
tb_gps = OB['Gps time(us)']
line = OB['Line'].astype(int)
ffid = OB['FFID'].astype(int)
latitude_sexagesimal = OB['Latitude']
longitude_sexagesimal = OB['Longitude']
# Replace the 0 values with mean lat and lon
latitude_sexagesimal = latitude_sexagesimal.replace(0, latitude_sexagesimal[latitude_sexagesimal != 0].mean())
longitude_sexagesimal = longitude_sexagesimal.replace(0, longitude_sexagesimal[longitude_sexagesimal != 0].mean())
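Since zero latitudes and longitudes are overwritten with the mean of the non-zero values, it can be worth knowing how many rows were affected (an optional check, not part of the original workflow):

# Optional check: count the zero coordinates that were replaced by the mean
print((OB['Latitude'] == 0).sum(), 'zero latitude values replaced')
print((OB['Longitude'] == 0).sum(), 'zero longitude values replaced')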
In [ ]:
def sexagesimal_to_decimal_degrees(sexagesimal):
    # Convert the coordinate to a string
    coord_str = sexagesimal.astype(str)
    # Find the position of the decimal separator
    decimal_position = coord_str.str.find('.')
    df_coord = pd.DataFrame({'coord_str': coord_str, 'decimal_position': decimal_position})
    # Split the degrees, minutes, and seconds
    degrees = df_coord.apply(lambda row: row['coord_str'][:row['decimal_position']-4], axis=1).astype(int)
    minutes = df_coord.apply(lambda row: row['coord_str'][row['decimal_position']-4:row['decimal_position']-2], axis=1).astype(int)
    seconds = df_coord.apply(lambda row: row['coord_str'][row['decimal_position']-2:], axis=1).astype(float)
    # Convert the minutes and seconds to decimal degrees
    decimal_minutes = minutes / 60
    decimal_seconds = seconds / 3600
    # Add the decimal minutes and seconds to the degrees
    decimal = degrees + decimal_minutes + decimal_seconds
    return decimal
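A hedged worked example with a made-up value in the DDMMSS.ss layout the function assumes: 442113.50 splits into 44 deg, 21 min, 13.50 s, i.e. 44 + 21/60 + 13.5/3600, about 44.35375.

# Made-up coordinate for illustration only: 44 deg 21 min 13.50 s
example = pd.Series([442113.50])
print(sexagesimal_to_decimal_degrees(example))  # expected ~44.35375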
In [ ]:
latitude_decimal = sexagesimal_to_decimal_degrees(latitude_sexagesimal)
longitude_decimal = sexagesimal_to_decimal_degrees(longitude_sexagesimal)
In [ ]:
def add_cardinal_direction(coordinate, is_latitude=True):
    # Check whether the coordinate is positive or negative
    if coordinate >= 0:
        # Positive coordinates are north latitudes or east longitudes
        cardinal_direction = 'N' if is_latitude else 'E'
    else:
        # Negative coordinates are south latitudes or west longitudes
        cardinal_direction = 'S' if is_latitude else 'W'
    # Return the coordinate as a fixed-width string with the cardinal direction appended
    if is_latitude:
        return f"{abs(coordinate*100):08.0f}{cardinal_direction}"
    else:
        return f"{abs(coordinate*100):09.0f}{cardinal_direction}"

# Add the cardinal direction to the latitude and longitude
latitude = latitude_sexagesimal.apply(add_cardinal_direction)
longitude = longitude_sexagesimal.apply(lambda coord: add_cardinal_direction(coord, is_latitude=False))
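A hedged spot check with made-up DDMMSS.ss values: the helper multiplies by 100, zero-pads to 8 digits for latitude and 9 for longitude, and appends the hemisphere letter.

# Illustration only, with hypothetical coordinates
print(add_cardinal_direction(442113.50))                    # '44211350N'
print(add_cardinal_direction(34405.25, is_latitude=False))  # '003440525E'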
In [ ]:
# Note: the dict keys must be distinct; duplicated placeholder keys would collapse
# into a single column, so distinct whitespace keys keep all three '[OK]' columns
d = {'STATION': station, 'TB UNIX': tb_unix, 'TB UTC': tb_utc_time, 'DATE': tb_utc_date,
     ' ': '0 [OK]', '  ': '0 [OK]', '   ': '0.0 [OK]', 'LINE': line, 'FFID': ffid}
df = pd.DataFrame(data=d)
pd.set_option('display.float_format', '{:.6f}'.format)
pd.set_option('display.max_rows', df.shape[0] + 1)
In [ ]:
sbs_ffid_file = filename[:-3]+'sbs_ffid'
df.to_csv(sbs_ffid_file,sep='\t',index=False,header=False,float_format='{:.6f}'.format)
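An optional read-back (a sketch) to confirm the tab-separated .sbs_ffid file was written with one row per FFID and the expected number of columns:

# Optional read-back check of the file written above
check = pd.read_csv(sbs_ffid_file, sep='\t', header=None)
print(check.shape)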
In [ ]:
df2 = pd.DataFrame({
    'Line': line,
    'Station': station,
    'Median Latitude WGS84': latitude,
    'Median Longitude WGS84': longitude,
    'Serial Number': 123456789,
    'Median Altitude_m': 0,
})
# Keep only unique pairs of Line and Station
df2 = df2.drop_duplicates(subset=['Line', 'Station'])
# Define the dictionary with the desired column widths
col_widths = {'Line': 17, 'Station': 7, 'Median Latitude WGS84': 8,
              'Median Longitude WGS84': 8, 'Serial Number': 26,
              'Median Altitude_m': 4}
# Define a dictionary to indicate which columns should have spaces around them
# True means add spaces around the column, False means do not add spaces
add_spaces = {'Line': True, 'Station': True, 'Median Latitude WGS84': False,
              'Median Longitude WGS84': False, 'Serial Number': True,
              'Median Altitude_m': False}
# Convert each column to a string and fix its width
for col in df2.columns:
    if add_spaces[col]:
        # Add spaces around the column content
        df2[col] = df2[col].astype(str).apply(lambda x: f" {x} ".rjust(col_widths[col] + 1, ' '))
    else:
        # Do not add spaces around the column content
        df2[col] = df2[col].astype(str).apply(lambda x: x.rjust(col_widths[col], ' '))

# Concatenate the columns to form the DataFrame string, selectively adding spaces
def concatenate_row(row):
    row_string = ''
    for col in df2.columns:
        if add_spaces[col]:
            # Add the column with spaces
            row_string += row[col]
        else:
            # Add the column without extra spaces
            row_string += row[col].strip()
    return row_string

df2_string = df2.apply(concatenate_row, axis=1).str.cat(sep='\n')
# Add a newline character to the end of the string
df2_string += '\n'

sp1_file = filename[:-3] + 'sp1'
# Open the .sp1 file in write mode and write the formatted string
with open(sp1_file, 'w') as f:
    f.write(df2_string)
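Finally, an optional read-back (a sketch) to eyeball the fixed-width layout of the first line of the .sp1 file:

# Optional read-back check of the fixed-width .sp1 file written above
with open(sp1_file) as f:
    print(repr(f.readline()))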