{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## __Data Pipelines__ \n",
"Loading data from OpenStreetMap using the Overpass API."
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"import pandas as pd\n",
"import re\n",
"import math\n",
"from typing import Tuple, List, Dict"
]
},
{
"cell_type": "code",
"execution_count": 97,
"metadata": {},
"outputs": [],
"source": [
"def fetch_osm_data(lat: float, lon: float, radius: int) -> List[Dict]:\n",
"    \"\"\"Fetch named OSM nodes, ways and relations within `radius` metres of (lat, lon).\n",
"\n",
"    Returns the raw Overpass `elements` list (one dict per OSM object);\n",
"    an empty list when the response carries no elements.\n",
"    Raises requests.HTTPError on a non-2xx response instead of failing\n",
"    later with a confusing KeyError on the JSON payload.\n",
"    \"\"\"\n",
"    overpass_url = \"https://overpass-api.de/api/interpreter\"  # TLS endpoint\n",
"    overpass_query = f\"\"\"\n",
"    [out:json];\n",
"    (\n",
"      node[\"name\"](around:{radius},{lat},{lon});\n",
"      way[\"name\"](around:{radius},{lat},{lon});\n",
"      relation[\"name\"](around:{radius},{lat},{lon});\n",
"    );\n",
"    out center;\n",
"    \"\"\"\n",
"\n",
"    # Overpass can be slow under load; never hang the notebook indefinitely.\n",
"    response = requests.get(overpass_url, params={'data': overpass_query}, timeout=60)\n",
"    response.raise_for_status()  # surface 429 / 504 errors explicitly\n",
"    data = response.json()\n",
"    return data.get('elements', [])\n",
"\n",
"def determine_location_type(tags: Dict[str, str]) -> str:\n",
" # Residential\n",
" if 'building' in tags and tags['building'] in ['residential', 'house', 'apartments', 'detached', 'terrace', 'dormitory', 'bungalow']:\n",
" return 'Residential'\n",
" \n",
" # Commercial\n",
" if any(key in tags for key in ['shop', 'office', 'craft']):\n",
" return 'Commercial'\n",
" if 'building' in tags and tags['building'] in ['commercial', 'office', 'retail', 'supermarket', 'kiosk']:\n",
" return 'Commercial'\n",
" \n",
" # Industrial\n",
" if 'building' in tags and tags['building'] in ['industrial', 'warehouse', 'factory', 'manufacture']:\n",
" return 'Industrial'\n",
" if 'industrial' in tags or 'industry' in tags:\n",
" return 'Industrial'\n",
" \n",
" # Educational\n",
" if 'amenity' in tags and tags['amenity'] in ['school', 'university', 'college', 'library', 'kindergarten', 'language_school']:\n",
" return 'Educational'\n",
" \n",
" # Healthcare\n",
" if 'amenity' in tags and tags['amenity'] in ['hospital', 'clinic', 'doctors', 'dentist', 'pharmacy', 'veterinary']:\n",
" return 'Healthcare'\n",
" \n",
" # Food & Drink\n",
" if 'amenity' in tags and tags['amenity'] in ['restaurant', 'cafe', 'bar', 'fast_food', 'pub', 'food_court']:\n",
" return 'Food & Drink'\n",
" \n",
" # Leisure & Entertainment\n",
" if 'leisure' in tags or 'tourism' in tags:\n",
" return 'Leisure & Entertainment'\n",
" if 'amenity' in tags and tags['amenity'] in ['theatre', 'cinema', 'nightclub', 'arts_centre', 'community_centre']:\n",
" return 'Leisure & Entertainment'\n",
" \n",
" # Transportation\n",
" if 'amenity' in tags and tags['amenity'] in ['parking', 'bicycle_parking', 'bus_station', 'ferry_terminal']:\n",
" return 'Transportation'\n",
" if 'highway' in tags or 'railway' in tags or 'aeroway' in tags:\n",
" return 'Transportation'\n",
" \n",
" # Religious\n",
" if 'amenity' in tags and tags['amenity'] in ['place_of_worship', 'monastery']:\n",
" return 'Religious'\n",
" \n",
" # Government & Public Services\n",
" if 'amenity' in tags and tags['amenity'] in ['townhall', 'courthouse', 'police', 'fire_station', 'post_office']:\n",
" return 'Government & Public Services'\n",
" \n",
" # Parks & Recreation\n",
" if 'leisure' in tags and tags['leisure'] in ['park', 'playground', 'sports_centre', 'stadium', 'garden']:\n",
" return 'Parks & Recreation'\n",
" \n",
" # Natural\n",
" if 'natural' in tags:\n",
" return 'Natural'\n",
" \n",
" # Landuse\n",
" if 'landuse' in tags:\n",
" landuse = tags['landuse'].capitalize()\n",
" if landuse in ['Residential', 'Commercial', 'Industrial', 'Retail']:\n",
" return landuse\n",
" else:\n",
" return f'Landuse: {landuse}'\n",
" \n",
" # If no specific category is found, return 'Other'\n",
" return 'Other'\n",
"\n",
"def parse_osm_data(elements: List[Dict]) -> pd.DataFrame:\n",
"    \"\"\"Flatten raw Overpass elements into a DataFrame of ID / name / category.\"\"\"\n",
"    records = [\n",
"        {\n",
"            'ID': f\"{el['type']}_{el['id']}\",\n",
"            'Location Name': el.get('tags', {}).get('name', ''),\n",
"            'Location Type': determine_location_type(el.get('tags', {})),\n",
"        }\n",
"        for el in elements\n",
"    ]\n",
"    # An empty frame still needs the expected columns for downstream filters.\n",
"    if not records:\n",
"        return pd.DataFrame(columns=['ID', 'Location Name', 'Location Type'])\n",
"    return pd.DataFrame(records)\n",
"\n",
"def get_osm_data(lat: float, lon: float, radius: int) -> pd.DataFrame:\n",
"    \"\"\"Convenience wrapper: fetch OSM elements around a point and parse them.\"\"\"\n",
"    return parse_osm_data(fetch_osm_data(lat, lon, radius))\n",
"\n",
"def dms_to_decimal(coord_str):\n",
" # Regular expression to match the coordinate format\n",
" pattern = r'(\\d+)°(\\d+)\\'([\\d.]+)\"([NS])\\s*(\\d+)°(\\d+)\\'([\\d.]+)\"([EW])'\n",
" \n",
" match = re.match(pattern, coord_str)\n",
" if not match:\n",
" raise ValueError(\"Invalid coordinate format. Expected format: 19°03'08.6\\\"N 72°54'06.0\\\"E\")\n",
"\n",
" lat_deg, lat_min, lat_sec, lat_dir, lon_deg, lon_min, lon_sec, lon_dir = match.groups()\n",
"\n",
" # Convert to decimal degrees\n",
" lat = float(lat_deg) + float(lat_min)/60 + float(lat_sec)/3600\n",
" lon = float(lon_deg) + float(lon_min)/60 + float(lon_sec)/3600\n",
"\n",
" # Adjust sign based on direction\n",
" if lat_dir == 'S':\n",
" lat = -lat\n",
" if lon_dir == 'W':\n",
" lon = -lon\n",
"\n",
" return lat, lon"
]
},
{
"cell_type": "code",
"execution_count": 91,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Latitude: 19.015805555555556\n",
"Longitude: 72.89944444444446\n"
]
}
],
"source": [
"coord_str = '19°00\\'56.9\"N 72°53\\'58.0\"E'\n",
"radius_meters = 1000\n",
"\n",
"# Decode the DMS string; report either the decimal coordinates or the parse error.\n",
"try:\n",
"    latitude, longitude = dms_to_decimal(coord_str)\n",
"except ValueError as e:\n",
"    print(f\"Error: {e}\")\n",
"else:\n",
"    print(f\"Latitude: {latitude}\")\n",
"    print(f\"Longitude: {longitude}\")"
]
},
{
"cell_type": "code",
"execution_count": 92,
"metadata": {},
"outputs": [],
"source": [
"result_df = get_osm_data(latitude, longitude, radius_meters)"
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" ID | \n",
" Location Name | \n",
" Location Type | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" node_622002639 | \n",
" Mahul | \n",
" Other | \n",
"
\n",
" \n",
" 1 | \n",
" node_622005407 | \n",
" Gowanpada | \n",
" Other | \n",
"
\n",
" \n",
" 2 | \n",
" node_1646222635 | \n",
" gadakary bus stop | \n",
" Transportation | \n",
"
\n",
" \n",
" 3 | \n",
" node_1646222681 | \n",
" vishnu nagar bus stop | \n",
" Other | \n",
"
\n",
" \n",
" 4 | \n",
" node_2932495033 | \n",
" Sree Dutta mandir | \n",
" Religious | \n",
"
\n",
" \n",
" 5 | \n",
" node_11954176622 | \n",
" Gavhanpada | \n",
" Other | \n",
"
\n",
" \n",
" 6 | \n",
" way_25587616 | \n",
" Bhikaji Damaji Patil Marg | \n",
" Transportation | \n",
"
\n",
" \n",
" 7 | \n",
" way_122289587 | \n",
" Mulund - Trombay 220 KV line | \n",
" Other | \n",
"
\n",
" \n",
" 8 | \n",
" way_151783563 | \n",
" Laxman Umaji Gadkari Marg | \n",
" Transportation | \n",
"
\n",
" \n",
" 9 | \n",
" way_151783570 | \n",
" Vishnu Nagar Road | \n",
" Transportation | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" ID Location Name Location Type\n",
"0 node_622002639 Mahul Other\n",
"1 node_622005407 Gowanpada Other\n",
"2 node_1646222635 gadakary bus stop Transportation\n",
"3 node_1646222681 vishnu nagar bus stop Other\n",
"4 node_2932495033 Sree Dutta mandir Religious\n",
"5 node_11954176622 Gavhanpada Other\n",
"6 way_25587616 Bhikaji Damaji Patil Marg Transportation\n",
"7 way_122289587 Mulund - Trombay 220 KV line Other\n",
"8 way_151783563 Laxman Umaji Gadkari Marg Transportation\n",
"9 way_151783570 Vishnu Nagar Road Transportation"
]
},
"execution_count": 93,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"result_df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": 94,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" ID | \n",
" Location Name | \n",
" Location Type | \n",
"
\n",
" \n",
" \n",
" \n",
" 11 | \n",
" way_430012316 | \n",
" track | \n",
" Residential | \n",
"
\n",
" \n",
" 12 | \n",
" way_430012318 | \n",
" Mumbai Refinery Mahul | \n",
" Industrial | \n",
"
\n",
" \n",
" 13 | \n",
" way_430012320 | \n",
" Mumbai Refinery | \n",
" Industrial | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" ID Location Name Location Type\n",
"11 way_430012316 track Residential\n",
"12 way_430012318 Mumbai Refinery Mahul Industrial\n",
"13 way_430012320 Mumbai Refinery Industrial"
]
},
"execution_count": 94,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Drop categories that carry little signal for the dataset\n",
"# (equivalent to three chained `!=` filters).\n",
"labelled_df = result_df[~result_df['Location Type'].isin(['Other', 'Religious', 'Transportation'])]\n",
"labelled_df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": 95,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Location Name | \n",
" Location Type | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" track | \n",
" Residential | \n",
"
\n",
" \n",
" 1 | \n",
" Mumbai Refinery Mahul | \n",
" Industrial | \n",
"
\n",
" \n",
" 2 | \n",
" Mumbai Refinery | \n",
" Industrial | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Location Name Location Type\n",
"0 track Residential\n",
"1 Mumbai Refinery Mahul Industrial\n",
"2 Mumbai Refinery Industrial"
]
},
"execution_count": 95,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"## removing duplicates\n",
"\n",
"# drop_duplicates keeps the first occurrence of each (name, type) pair,\n",
"# matching the order of the manual seen-list approach.\n",
"labelled_df = (\n",
"    labelled_df[['Location Name', 'Location Type']]\n",
"    .drop_duplicates()\n",
"    .reset_index(drop=True)\n",
")\n",
"labelled_df.head(20)"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [],
"source": [
"row_of_dataset = ''\n",
"\n",
"# Concatenate every location into one 'name is a type, ' annotated string.\n",
"for _, record in labelled_df.iterrows():\n",
"    row_of_dataset += record['Location Name'] + ' is a ' + record['Location Type'] + ', '"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Oswal Company Trees is a Natural, Newspaper stall is a Commercial, Shiv Polyclinic and Nursing Home is a Healthcare, राजपूत मेडिकल is a Healthcare, Bhabha Atomic Research Centre - BARC is a Industrial, BPCL Sports Club is a Leisure & Entertainment, New Bharat Nagar, Banjara tanda, Hasina Nagar is a Residential, Old Bharat Nagar is a Residential, Rashtriya Chemicals & Fertilizers is a Industrial, Koyna Colony is a Residential, D is a Residential, A-2 is a Residential, flip card is a Commercial, track is a Residential, Mumbai Refinery Mahul is a Industrial, Mumbai Refinery is a Industrial, Trombay Thermal Power Station is a Industrial, Vitta Sanchay Society is a Residential, E is a Residential, Acharya Sharad Narayan Udyan is a Leisure & Entertainment, bmc park is a Leisure & Entertainment, Mysore Colony Central Garden is a Leisure & Entertainment, BMC owned trees is a Natural, BMC PARK is a Leisure & Entertainment, Mysore colony eastern park is a Leisure & Entertainment, Trees owned by RCF is a Natural, Mysore Colony trees is a Natural, NAVAL KG School, TS MAHUL is a Educational, '"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"row_of_dataset"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is one row of the dataset. Next, we write a function to extract all such rows from a given larger map area."
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {},
"outputs": [],
"source": [
"## input point is at the bottom left of the map\n",
"\n",
"def calculate_distant_points(lat: float, lon: float, distance: float) -> tuple:\n",
" # Earth's radius in meters\n",
" R = 6371000\n",
"\n",
" # Convert latitude and longitude to radians\n",
" lat_rad = math.radians(lat)\n",
" lon_rad = math.radians(lon)\n",
"\n",
" # Calculate the point with the same latitude (moving east-west)\n",
" delta_lon = distance / (R * math.cos(lat_rad))\n",
" lon1 = lon + math.degrees(delta_lon)\n",
" \n",
" # Calculate the point with the same longitude (moving north-south)\n",
" delta_lat = distance / R\n",
" lat2 = lat + math.degrees(delta_lat)\n",
"\n",
" return ((lat, lon1), (lat2, lon))"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Original point: (40.7128, -74.006)\n",
"Point 1000m east: (40.712800, -73.709386)\n",
"Point 1000m north: (40.937630, -74.006000)\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
"    latitude = 40.7128  # New York City latitude\n",
"    longitude = -74.0060  # New York City longitude\n",
"    distance = 1000 * 25  # 25 km (the old comment wrongly said 1000 meters)\n",
"\n",
"    result = calculate_distant_points(latitude, longitude, distance)\n",
"    print(f\"Original point: ({latitude}, {longitude})\")\n",
"    # Label the offsets with the actual distance instead of a hard-coded '1000m'.\n",
"    print(f\"Point {distance}m east: ({result[0][0]:.6f}, {result[0][1]:.6f})\")\n",
"    print(f\"Point {distance}m north: ({result[1][0]:.6f}, {result[1][1]:.6f})\")"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Bottom Left: (40.7128, -74.006)\n",
"Top Left: (40.93763040147969, -74.006)\n",
"Bottom Right: (40.7128, -73.7093855252233)\n",
"Top Right: (40.93763040147969, -73.7093855252233)\n"
]
}
],
"source": [
"bottom_left_latitude = 40.7128\n",
"bottom_left_longitude = -74.0060\n",
"\n",
"result = calculate_distant_points(bottom_left_latitude, bottom_left_longitude, 1000*25)\n",
"\n",
"top_left_latitude = result[1][0]\n",
"top_left_longitude = result[1][1]\n",
"\n",
"bottom_right_latitude = result[0][0]\n",
"bottom_right_longitude = result[0][1]\n",
"\n",
"top_right_latitude = top_left_latitude\n",
"top_right_longitude = bottom_right_longitude\n",
"\n",
"print(f\"Bottom Left: ({bottom_left_latitude}, {bottom_left_longitude})\")\n",
"print(f\"Top Left: ({top_left_latitude}, {top_left_longitude})\")\n",
"print(f\"Bottom Right: ({bottom_right_latitude}, {bottom_right_longitude})\")\n",
"print(f\"Top Right: ({top_right_latitude}, {top_right_longitude})\")"
]
},
{
"cell_type": "code",
"execution_count": 71,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(0.008993216059187433, 0.01186457899106813)"
]
},
"execution_count": 71,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"latitude_shift = top_left_latitude - bottom_left_latitude\n",
"longitude_shift = bottom_right_longitude - bottom_left_longitude\n",
"\n",
"latitude_unit = latitude_shift / 25\n",
"longitude_unit = longitude_shift / 25\n",
"\n",
"latitude_unit, longitude_unit"
]
},
{
"cell_type": "code",
"execution_count": 73,
"metadata": {},
"outputs": [],
"source": [
"## 2d map grid (0,0) --> bottom left\n",
"\n",
"def create_map_grid(bottom_left: Tuple[float, float], top_right: Tuple[float, float], rows: int, cols: int) -> List[List[Tuple[float, float]]]:\n",
" grid = []\n",
" lat_unit = (top_right[0] - bottom_left[0]) / rows\n",
" lon_unit = (top_right[1] - bottom_left[1]) / cols\n",
" \n",
" for i in range(rows):\n",
" row = []\n",
" for j in range(cols):\n",
" lat = bottom_left[0] + i * lat_unit\n",
" lon = bottom_left[1] + j * lon_unit\n",
" lat = lat + lat_unit / 2\n",
" lon = lon + lon_unit / 2\n",
" row.append((lat, lon))\n",
" grid.append(row)\n",
" \n",
" return grid"
]
},
{
"cell_type": "code",
"execution_count": 79,
"metadata": {},
"outputs": [],
"source": [
"grid = create_map_grid((bottom_left_latitude, bottom_left_longitude), (top_right_latitude, top_right_longitude), 25, 25)"
]
},
{
"cell_type": "code",
"execution_count": 108,
"metadata": {},
"outputs": [],
"source": [
"# Flatten the 2-D grid of centres into one record per cell.\n",
"grid_dataset = [\n",
"    {\"row\": i, \"col\": j, \"latitude\": point[0], \"longitude\": point[1]}\n",
"    for i, grid_row in enumerate(grid)\n",
"    for j, point in enumerate(grid_row)\n",
"]\n",
"\n",
"grid_df = pd.DataFrame(grid_dataset)"
]
},
{
"cell_type": "code",
"execution_count": 83,
"metadata": {},
"outputs": [],
"source": [
"left_lat = 18.889833\n",
"left_lon = 72.779844"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [],
"source": [
"res1 = calculate_distant_points(left_lat, left_lon, 1000*35)\n",
"\n",
"right_lat = res1[1][0]\n",
"right_lon = res1[0][1]"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [],
"source": [
"grid = create_map_grid((left_lat, left_lon), (right_lat, right_lon), 35, 35)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Flatten the 2-D grid of centres into one record per cell and preview it.\n",
"grid_dataset = [\n",
"    {\"row\": i, \"col\": j, \"latitude\": point[0], \"longitude\": point[1]}\n",
"    for i, grid_row in enumerate(grid)\n",
"    for j, point in enumerate(grid_row)\n",
"]\n",
"\n",
"grid_df = pd.DataFrame(grid_dataset)\n",
"grid_df.head(25)"
]
},
{
"cell_type": "code",
"execution_count": 106,
"metadata": {},
"outputs": [],
"source": [
"## entire pipeline\n",
"\n",
"left_lat = 18.889833\n",
"left_lon = 72.779844\n",
"dist = 35\n",
"\n",
"res1 = calculate_distant_points(left_lat, left_lon, 1000 * dist)\n",
"\n",
"right_lat = res1[1][0]\n",
"right_lon = res1[0][1]\n",
"grid = create_map_grid((left_lat, left_lon), (right_lat, right_lon), dist, dist)\n",
"\n",
"# Categories that carry little signal for the downstream task.\n",
"EXCLUDED_TYPES = ('Other', 'Religious', 'Transportation')\n",
"\n",
"grid_dataset = []\n",
"for i, grid_row_points in enumerate(grid):\n",
"    for j, point in enumerate(grid_row_points):\n",
"        # All named OSM features within 710 m of this cell's centre.\n",
"        cell_df = get_osm_data(point[0], point[1], 710)\n",
"        for excluded in EXCLUDED_TYPES:\n",
"            cell_df = cell_df[cell_df['Location Type'] != excluded]\n",
"\n",
"        # Deduplicate (name, type) pairs while keeping first-seen order.\n",
"        seen = []\n",
"        for _, record in cell_df.iterrows():\n",
"            pair = (record['Location Name'], record['Location Type'])\n",
"            if pair not in seen:\n",
"                seen.append(pair)\n",
"\n",
"        # Render the cell as one text row; strip commas so the CSV stays clean.\n",
"        cell_text = ''.join(f'{name} is a {loc_type}; ' for name, loc_type in seen)\n",
"        cell_text = cell_text.replace(',', ' ')\n",
"\n",
"        grid_dataset.append({\"row\": i, \"col\": j, \"latitude\": point[0], \"longitude\": point[1], \"Map Data\": cell_text})\n",
"\n",
"grid_df = pd.DataFrame(grid_dataset)\n",
"grid_df.to_csv('MMR_DATASET.csv', index=False)"
]
},
{
"cell_type": "code",
"execution_count": 107,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" row | \n",
" col | \n",
" latitude | \n",
" longitude | \n",
" Map Data | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 18.894330 | \n",
" 72.784597 | \n",
" | \n",
"
\n",
" \n",
" 1 | \n",
" 0 | \n",
" 1 | \n",
" 18.894330 | \n",
" 72.794102 | \n",
" Prongs Reef is a Natural, | \n",
"
\n",
" \n",
" 2 | \n",
" 0 | \n",
" 2 | \n",
" 18.894330 | \n",
" 72.803607 | \n",
" United Services Club Golf Course is a Leisure ... | \n",
"
\n",
" \n",
" 3 | \n",
" 0 | \n",
" 3 | \n",
" 18.894330 | \n",
" 72.813112 | \n",
" Indian Meterological Department is a Commercia... | \n",
"
\n",
" \n",
" 4 | \n",
" 1 | \n",
" 0 | \n",
" 18.903323 | \n",
" 72.784597 | \n",
" | \n",
"
\n",
" \n",
" 5 | \n",
" 1 | \n",
" 1 | \n",
" 18.903323 | \n",
" 72.794102 | \n",
" | \n",
"
\n",
" \n",
" 6 | \n",
" 1 | \n",
" 2 | \n",
" 18.903323 | \n",
" 72.803607 | \n",
" Jagadish Canteen is a Food & Drink, Maratha St... | \n",
"
\n",
" \n",
" 7 | \n",
" 1 | \n",
" 3 | \n",
" 18.903323 | \n",
" 72.813112 | \n",
" Indian Meterological Department is a Commercia... | \n",
"
\n",
" \n",
" 8 | \n",
" 2 | \n",
" 0 | \n",
" 18.912316 | \n",
" 72.784597 | \n",
" | \n",
"
\n",
" \n",
" 9 | \n",
" 2 | \n",
" 1 | \n",
" 18.912316 | \n",
" 72.794102 | \n",
" | \n",
"
\n",
" \n",
" 10 | \n",
" 2 | \n",
" 2 | \n",
" 18.912316 | \n",
" 72.803607 | \n",
" Jagadish Canteen is a Food & Drink, Maratha St... | \n",
"
\n",
" \n",
" 11 | \n",
" 2 | \n",
" 3 | \n",
" 18.912316 | \n",
" 72.813112 | \n",
" Cafe Coffee Day is a Food & Drink, King Plaza ... | \n",
"
\n",
" \n",
" 12 | \n",
" 3 | \n",
" 0 | \n",
" 18.921309 | \n",
" 72.784597 | \n",
" | \n",
"
\n",
" \n",
" 13 | \n",
" 3 | \n",
" 1 | \n",
" 18.921309 | \n",
" 72.794102 | \n",
" | \n",
"
\n",
" \n",
" 14 | \n",
" 3 | \n",
" 2 | \n",
" 18.921309 | \n",
" 72.803607 | \n",
" | \n",
"
\n",
" \n",
" 15 | \n",
" 3 | \n",
" 3 | \n",
" 18.921309 | \n",
" 72.813112 | \n",
" Cafe Coffee Day is a Food & Drink, King Plaza ... | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" row col latitude longitude \\\n",
"0 0 0 18.894330 72.784597 \n",
"1 0 1 18.894330 72.794102 \n",
"2 0 2 18.894330 72.803607 \n",
"3 0 3 18.894330 72.813112 \n",
"4 1 0 18.903323 72.784597 \n",
"5 1 1 18.903323 72.794102 \n",
"6 1 2 18.903323 72.803607 \n",
"7 1 3 18.903323 72.813112 \n",
"8 2 0 18.912316 72.784597 \n",
"9 2 1 18.912316 72.794102 \n",
"10 2 2 18.912316 72.803607 \n",
"11 2 3 18.912316 72.813112 \n",
"12 3 0 18.921309 72.784597 \n",
"13 3 1 18.921309 72.794102 \n",
"14 3 2 18.921309 72.803607 \n",
"15 3 3 18.921309 72.813112 \n",
"\n",
" Map Data \n",
"0 \n",
"1 Prongs Reef is a Natural, \n",
"2 United Services Club Golf Course is a Leisure ... \n",
"3 Indian Meterological Department is a Commercia... \n",
"4 \n",
"5 \n",
"6 Jagadish Canteen is a Food & Drink, Maratha St... \n",
"7 Indian Meterological Department is a Commercia... \n",
"8 \n",
"9 \n",
"10 Jagadish Canteen is a Food & Drink, Maratha St... \n",
"11 Cafe Coffee Day is a Food & Drink, King Plaza ... \n",
"12 \n",
"13 \n",
"14 \n",
"15 Cafe Coffee Day is a Food & Drink, King Plaza ... "
]
},
"execution_count": 107,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"grid_df.head(20)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}