Hello fellows! I am new to the Sentinel data space ecosystem and I am using the Sentinel Hub API to request and download images in TIFF format. I have made a basic website where I select a polygon geometry, a date range, and a cloud cover threshold, and then search for Sentinel-2 L2A images. I want NDVI as a georeferenced GeoTIFF that can be opened in GIS software. When I download the image and load it in GIS software, it says the image is 'out of place'. This is probably because the image does not, I think, have a defined coordinate system.
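To confirm that the georeference really is missing, this is a minimal sketch of how I inspect one of the downloaded TIFFs (it assumes rasterio is installed; the file path is just a placeholder for one of the files saved under my data folder):

import rasterio

# Placeholder path: one of the TIFFs that sentinelhub saved under my data folder.
tiff_path = "data/<request-folder>/response.tiff"

with rasterio.open(tiff_path) as src:
    # If crs is None and the transform is just the identity, the file carries
    # no georeference, so GIS software cannot place it correctly.
    print("CRS:", src.crs)
    print("Transform:", src.transform)
    print("Bounds:", src.bounds)

Here is my code: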
import datetime as dt

from sentinelhub import (
    CRS,
    DataCollection,
    Geometry,
    MimeType,
    SentinelHubCatalog,
    SentinelHubDownloadClient,
    SentinelHubRequest,
    filter_times,
)

def get_sentinel_images(field_polygon, start_date, end_date, cloud_cover, selected_area):
    area_in_meters = selected_area
    # configuration (get_config() is my own helper that returns an SHConfig with my credentials)
    config = get_config()
    # search the catalogue
    geometry = Geometry(geometry=field_polygon, crs=CRS.WGS84)
    time_interval = start_date, end_date
    catalog = SentinelHubCatalog(config=config)
    search_iterator = catalog.search(
        DataCollection.SENTINEL2_L2A,
        time=time_interval,
        filter="eo:cloud_cover < {}".format(cloud_cover),
        fields={"include": ["id", "properties.datetime", "properties.eo:cloud_cover"], "exclude": []},
        geometry=geometry,
    )
    results = list(search_iterator)
    print("Total number of results:", len(results))
    # separate the unique acquisitions
    time_difference = dt.timedelta(hours=0)
    all_timestamps = search_iterator.get_timestamps()
    unique_acquisitions = filter_times(all_timestamps, time_difference)
    print("Total number of unique acquisitions:", len(unique_acquisitions))
    # make a request to the Process API for these acquisitions
    true_color_evalscript = """
    //VERSION=3
    function setup() {
        return {
            input: ["B02", "B03", "B04"],
            output: {
                bands: 3,
                sampleType: "AUTO", // default value - scales the output values from [0,1] to [0,255].
            },
        }
    }

    function evaluatePixel(sample) {
        return [2.5 * sample.B04, 2.5 * sample.B03, 2.5 * sample.B02]
    }
    """
    nvdi_evalscript = """
    //VERSION=3
    function setup() {
        return {
            input: ["B04", "B08", "SCL", "dataMask"],
            output: [
                { id: "default", bands: 4 },
                { id: "index", bands: 1, sampleType: "FLOAT32" },
                { id: "eobrowserStats", bands: 2, sampleType: "FLOAT32" },
                { id: "dataMask", bands: 1 },
            ],
        };
    }

    function evaluatePixel(samples) {
        let val = index(samples.B08, samples.B04); // built-in index(a, b) = (a - b) / (a + b), i.e. NDVI
        let imgVals = null;
        // The library for tiffs works well only if there is only one channel returned.
        // So we encode the "no data" as NaN here and ignore NaNs on frontend.
        const indexVal = samples.dataMask === 1 ? val : NaN;

        if (val < -0.5) imgVals = [0.05, 0.05, 0.05, samples.dataMask];
        else if (val < -0.2) imgVals = [0.75, 0.75, 0.75, samples.dataMask];
        else if (val < -0.1) imgVals = [0.86, 0.86, 0.86, samples.dataMask];
        else if (val < 0) imgVals = [0.92, 0.92, 0.92, samples.dataMask];
        else if (val < 0.025) imgVals = [1, 0.98, 0.8, samples.dataMask];
        else if (val < 0.05) imgVals = [0.93, 0.91, 0.71, samples.dataMask];
        else if (val < 0.075) imgVals = [0.87, 0.85, 0.61, samples.dataMask];
        else if (val < 0.1) imgVals = [0.8, 0.78, 0.51, samples.dataMask];
        else if (val < 0.125) imgVals = [0.74, 0.72, 0.42, samples.dataMask];
        else if (val < 0.15) imgVals = [0.69, 0.76, 0.38, samples.dataMask];
        else if (val < 0.175) imgVals = [0.64, 0.8, 0.35, samples.dataMask];
        else if (val < 0.2) imgVals = [0.57, 0.75, 0.32, samples.dataMask];
        else if (val < 0.25) imgVals = [0.5, 0.7, 0.28, samples.dataMask];
        else if (val < 0.3) imgVals = [0.44, 0.64, 0.25, samples.dataMask];
        else if (val < 0.35) imgVals = [0.38, 0.59, 0.21, samples.dataMask];
        else if (val < 0.4) imgVals = [0.31, 0.54, 0.18, samples.dataMask];
        else if (val < 0.45) imgVals = [0.25, 0.49, 0.14, samples.dataMask];
        else if (val < 0.5) imgVals = [0.19, 0.43, 0.11, samples.dataMask];
        else if (val < 0.55) imgVals = [0.13, 0.38, 0.07, samples.dataMask];
        else if (val < 0.6) imgVals = [0.06, 0.33, 0.04, samples.dataMask];
        else imgVals = [0, 0.27, 0, samples.dataMask];

        return {
            default: imgVals,
            index: [indexVal],
            eobrowserStats: [val, isCloud(samples.SCL) ? 1 : 0],
            dataMask: [samples.dataMask],
        };
    }

    function isCloud(scl) {
        if (scl == 3) {
            // SC_CLOUD_SHADOW
            return false;
        } else if (scl == 9) {
            // SC_CLOUD_HIGH_PROBA
            return true;
        } else if (scl == 8) {
            // SC_CLOUD_MEDIUM_PROBA
            return true;
        } else if (scl == 7) {
            // SC_CLOUD_LOW_PROBA
            return false;
        } else if (scl == 10) {
            // SC_THIN_CIRRUS
            return true;
        } else if (scl == 11) {
            // SC_SNOW_ICE
            return false;
        } else if (scl == 1) {
            // SC_SATURATED_DEFECTIVE
            return false;
        } else if (scl == 2) {
            // SC_DARK_FEATURE_SHADOW
            return false;
        }
        return false;
    }
    """
    nvdi_process_requests = []
    process_requests = []
    desired_resolution = 1500
    # Calculate the size in pixels
    size_in_pixels = int(area_in_meters / desired_resolution)
    max_dimension = 2500
    if size_in_pixels > max_dimension:
        size_in_pixels = max_dimension

    for timestamp in unique_acquisitions:
        request = SentinelHubRequest(
            evalscript=true_color_evalscript,
            input_data=[
                SentinelHubRequest.input_data(
                    data_collection=DataCollection.SENTINEL2_L2A.define_from("s2l2a", service_url=config.sh_base_url),
                    time_interval=(timestamp - time_difference, timestamp + time_difference),
                )
            ],
            responses=[SentinelHubRequest.output_response("default", MimeType.PNG)],
            geometry=geometry,
            size=(size_in_pixels, size_in_pixels),
            config=config,
        )
        process_requests.append(request)

    print("requesting nvdi images ...")
    # repeating the request but for nvdi images
    for timestamp in unique_acquisitions:
        request = SentinelHubRequest(
            data_folder='data',
            evalscript=nvdi_evalscript,
            input_data=[
                SentinelHubRequest.input_data(
                    data_collection=DataCollection.SENTINEL2_L2A.define_from("s2l2a", service_url=config.sh_base_url),
                    time_interval=(timestamp - time_difference, timestamp + time_difference),
                )
            ],
            responses=[SentinelHubRequest.output_response("default", MimeType.TIFF)],
            geometry=geometry,
            size=(size_in_pixels, size_in_pixels),
            config=config,
        )
        nvdi_process_requests.append(request)
        request.get_data(save_data=True)  # download and save this NDVI request's TIFF under data_folder
    # download the data
    client = SentinelHubDownloadClient(config=config)
    download_requests = [request.download_list[0] for request in process_requests]
    data = client.download(download_requests)
    images = convert_ndarray_to_base64(data)
    return images, nvdi_process_requests
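As a side note on the size calculation above (area_in_meters / desired_resolution): I am not sure it is the right way to derive the output size. Below is a minimal sketch of an alternative that derives the dimensions from the polygon's bounding box with bbox_to_dimensions from sentinelhub (this is an assumption on my part that it fits my case; the 10 m resolution is only an example value):

from sentinelhub import CRS, Geometry, bbox_to_dimensions

def compute_size_from_bbox(field_polygon, resolution=10):
    # Derive (width, height) in pixels from the polygon's bounding box at a
    # given resolution in metres, instead of from the selected area.
    geometry = Geometry(geometry=field_polygon, crs=CRS.WGS84)
    return bbox_to_dimensions(geometry.bbox, resolution=resolution)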
from io import BytesIO
from PIL import Image
import base64

def convert_ndarray_to_base64(ndarray_images):
    base64_images = []
    for img_array in ndarray_images:
        img = Image.fromarray(img_array)
        with BytesIO() as buffer:
            img.save(buffer, format="PNG")
            img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
            base64_images.append(img_str)
    return base64_images
# the user clicks the 'get nvdi' button
# the API returns the index of the image from the list of images
# from that index we retrieve its request and download the image
def get_image_from_index(index, download_requests):
    config = get_config()
    print("download_requests ..........", download_requests)
    print('index ..................', index)
    if len(download_requests) == 0:
        raise IndexError("No download requests")
    download_request = download_requests[index]
    print("download_request ..........", download_request)
    client = SentinelHubDownloadClient(config=config)
    data = client.download([download_request.download_list[0]])
    image_base64 = convert_ndarray_to_base64(data)
    return image_base64
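For context, this is roughly how the two functions are wired together in my backend (a simplified sketch; the polygon coordinates, dates, cloud cover, and area are placeholder values):

# Placeholder inputs, for illustration only.
field_polygon = {
    "type": "Polygon",
    "coordinates": [[
        [14.50, 46.05], [14.51, 46.05], [14.51, 46.06], [14.50, 46.06], [14.50, 46.05]
    ]],
}

images, nvdi_requests = get_sentinel_images(
    field_polygon, "2023-06-01", "2023-06-30", cloud_cover=20, selected_area=1_500_000
)

# Later, when the user clicks the 'get nvdi' button for, say, the first image:
nvdi_image_base64 = get_image_from_index(0, nvdi_requests)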
If you can help me figure out where I am going wrong, I would be very thankful. I have read the docs and searched the questions on this forum, but I could not find a solution that properly fits my case.