Parse and step through a large JSON

I have a large JSON document that I want to step through, entry by validTime, to find the min and max values for the temperature parameter t.
Is this possible in a rule?

Part of the JSON (truncated):

{"approvedTime":"2017-02-28T22:11:03Z","referenceTime":"2017-02-28T20:00:00Z","geometry":{"type":"Point","coordinates":[[17.122390,60.638639]]},"timeSeries":[{"validTime":"2017-02-28T21:00:00Z","parameters":[{"name":"msl","levelType":"hmsl","level":0,"unit":"hPa","values":[986]},{"name":"t","levelType":"hl","level":2,"unit":"Cel","values":[2.3]},{"name":"vis","levelType":"hl","level":2,"unit":"km","values":[3.0]},{"name":"wd","levelType":"hl","level":10,"unit":"degree","values":[174]},{"name":"ws","levelType":"hl","level":10,"unit":"m/s","values":[2.0]},{"name":"r","levelType":"hl","level":2,"unit":"percent","values":[97]},{"name":"tstm","levelType":"hl","level":0,"unit":"percent","values":[3]},{"name":"tcc_mean","levelType":"hl","level":0,"unit":"octas","values":[8]},{"name":"lcc_mean","levelType":"hl","level":0,"unit":"octas","values":[7]},{"name":"mcc_mean","levelType":"hl","level":0,"unit":"octas","values":[0]},{"name":"hcc_mean","levelType":"hl","level":0,"unit":"octas","values":[7]},{"name":"gust","levelType":"hl","level":10,"unit":"m/s","values":[2.8]},{"name":"pmin","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"pmax","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"spp","levelType":"hl","level":0,"unit":"percent","values":[-9]},{"name":"pcat","levelType":"hl","level":0,"unit":"category","values":[0]},{"name":"pmean","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"pmedian","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"Wsymb","levelType":"hl","level":0,"unit":"category","values":[6]}]},{"validTime":"2017-02-28T22:00:00Z","parameters":[{"name":"msl","levelType":"hmsl","level":0,"unit":"hPa","values":[986]},{"name":"t","levelType":"hl","level":2,"unit":"Cel","values":[0.6]},{"name":"vis","levelType":"hl","level":2,"unit":"km","values":[2.0]},{"name":"wd","levelType":"hl","level":10,"unit":"degree","values":[201]},{"name":"ws","levelType":"hl","level":10,"unit":"m/s","values":[2.1]},{"name":"r","levelType":"hl","level":2,"unit":"percent","values":[98]},{"name":"tstm","levelType":"hl","level":0,"unit":"percent","values":[2]},{"name":"tcc_mean","levelType":"hl","level":0,"unit":"octas","values":[3]},{"name":"lcc_mean","levelType":"hl","level":0,"unit":"octas","values":[4]},{"name":"mcc_mean","levelType":"hl","level":0,"unit":"octas","values":[0]},{"name":"hcc_mean","levelType":"hl","level":0,"unit":"octas","values":[2]},{"name":"gust","levelType":"hl","level":10,"unit":"m/s","values":[4.2]},{"name":"pmin","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"pmax","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"spp","levelType":"hl","level":0,"unit":"percent","values":[-9]},{"name":"pcat","levelType":"hl","level":0,"unit":"category","values":[0]},{"name":"pmean","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"pmedian","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"Wsymb","levelType":"hl","level":0,"unit":"category","values":[2]}]},{"validTime":"2017-02-28T23:00:00Z","parameters":[{"name":"msl","levelType":"hmsl","level":0,"unit":"hPa","values":[986]},{"name":"t","levelType":"hl","level":2,"unit":"Cel","values":[-0.8]},{"name":"vis","levelType":"hl","level":2,"unit":"km","values":[2.0]},{"name":"wd","levelType":"hl","level":10,"unit":"degree","values":[189]},{"name":"ws","levelType":"hl","level":10,"unit":"m/s","values":[2.5]},{"name":"r","levelType":"hl","level":2,"unit":"percent","values":[98]},{"name":"tstm","levelType":"hl"
,"level":0,"unit":"percent","values":[3]},{"name":"tcc_mean","levelType":"hl","level":0,"unit":"octas","values":[4]},{"name":"lcc_mean","levelType":"hl","level":0,"unit":"octas","values":[3]},{"name":"mcc_mean","levelType":"hl","level":0,"unit":"octas","values":[3]},{"name":"hcc_mean","levelType":"hl","level":0,"unit":"octas","values":[4]},{"name":"gust","levelType":"hl","level":10,"unit":"m/s","values":[4.6]},{"name":"pmin","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"pmax","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"spp","levelType":"hl","level":0,"unit":"percent","values":[-9]},{"name":"pcat","levelType":"hl","level":0,"unit":"category","values":[0]},{"name":"pmean","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"pmedian","levelType":"hl","level":0,"unit":"kg/m2/h","values":[0.0]},{"name":"Wsymb","levelType":"hl","level":0,"unit":"category","values":[3]}]},

/Mike

I solved it in a Python script.

Is it possible to do this directly in a rule?

import sys
from urllib2 import urlopen  # Python 2
import json
import datetime

# Today and the next two days as 'YYYY-MM-DD' strings,
# matched against the start of each validTime below
date = str(datetime.datetime.now())[:10]
date1 = str(datetime.datetime.now() + datetime.timedelta(days=1))[:10]
date2 = str(datetime.datetime.now() + datetime.timedelta(days=2))[:10]

url = 'http://opendata-download-metfcst.smhi.se/api/category/pmp2g/version/2/geotype/point/lon/17.11934/lat/60.65622/data.json'
response = urlopen(url)
#response = sys.argv[1]
json_obj = json.load(response)

# Sentinel start values, safely outside any realistic temperature
tmax = tmax1 = tmax2 = -40
tmin = tmin1 = tmin2 = 40

for ts in json_obj['timeSeries']:
    # parameters[1] is the 2 m temperature 't' (Cel) in this feed;
    # note this relies on the parameter order staying fixed
    t = ts['parameters'][1]['values'][0]

    if date in ts['validTime']:
        if t > tmax:
            tmax = t
        if t < tmin:
            tmin = t

    if date1 in ts['validTime']:
        if t > tmax1:
            tmax1 = t
        if t < tmin1:
            tmin1 = t

    if date2 in ts['validTime']:
        if t > tmax2:
            tmax2 = t
        if t < tmin2:
            tmin2 = t

print '{"tmax": "' + str(tmax) + '","tmin": "' + str(tmin) + '","tmax1": "' + str(tmax1) + '","tmin1": "' + str(tmin1) + '","tmax2": "' + str(tmax2) + '","tmin2": "' + str(tmin2) + '"}'

/Mike