/**
 * url.class - URL parsing, retrieval and caching functions
 *
 * Example of a raw URL handled here: HTTP://falcon:joshua@www.Relog.CH.:80/default.asp
 */
class it_url
{
    var $url;           /* E.g. http://www.relog.ch/ (canonicalized) */
    var $protocol;      /* E.g. http */
    var $hostname;      /* E.g. relog.ch (leading www. stripped) */
    var $realhostname;  /* E.g. www.relog.ch */
    var $port;          /* E.g. 80 */
    var $explicitport;  /* E.g. :80, explicitly set in rawurl */
    var $path;          /* E.g. / (includes query string, see constructor) */
    var $rawurl;        /* E.g. HTTP://falcon:joshua@www.Relog.CH.:80/default.asp */
    var $user;          /* E.g. falcon */
    var $pass;          /* E.g. joshua */
    var $cookies;       /* key => values of cookies from server */
    var $headers;       /* Headers of page fetched by get() */
    var $data;          /* Data part, even if return code is not 200 */
    var $result;        /* Return code of get() */
    var $redir = 0;     /* Redirect count */
    var $header;        /* http header */
    var $errstr;        /* request error string */

    /**
     * PHP5+ constructor: delegates to the legacy PHP4-style constructor below.
     * (PHP4-style constructors are no longer treated as constructors since PHP 7;
     * explicit $this->it_url(...) calls elsewhere in this class keep working.)
     */
    function __construct($url = null)
    {
        $this->it_url($url);
    }

    /**
     * Constructor: canonicalize an URL
     * @param $url URL this object represents
     */
    function it_url($url = null)
    {
        $this->rawurl = $url;
        $comp = parse_url($url);
        $this->protocol = strtolower($comp['scheme']) ?: "http";
        $protoport = $this->protocol == 'https' ? 443 : 80; # port according to protocol
        $this->port = intval($comp['port'] ?: $protoport); # this is set even in default case
        $this->explicitport = $comp['port'] ? ':' . $comp['port'] : ''; # only set if explicitly specified in url, contains leading :
        $this->user = $comp['user'];
        $this->pass = $comp['pass'];
        $this->realhostname = strtolower($comp['host']);
        $this->hostname = preg_replace('/^www\./', '', $this->realhostname);
        $this->path = ltrim($comp['path'] . ($comp['query'] ? '?' . $comp['query'] : ''), '/'); # $this->path is named poorly, it includes path and query
        $this->url = "$this->protocol://$this->realhostname" . ($this->port != $protoport ? $this->explicitport : '') . "/$this->path";
        $this->realhostname = idn_to_ascii($this->realhostname) ?: $this->realhostname; # punycode or original
    }

    /**
     * Check if a given url (currently http:port80-only) can be fetched
     * Note: Redirects are treated as succesful
     * @param $timeout Timeout for connection in seconds
     * @return true if url could be fetched
     */
    function is_reachable($timeout = 5)
    {
        $result = false;

        if ($fp = @fsockopen($this->realhostname, $this->port, $dummy_errno, $dummy_errstr, $timeout))
        {
            fputs($fp, "GET /$this->path HTTP/1.0\r\nHost: $this->realhostname\r\nUser-Agent: ITools\r\n\r\n");
            $line = fgets($fp, 128);
            fclose($fp);
            #debug("it_url::is_reachable($this->rawurl: $line");
            $result = preg_match("#^$this->protocol/[^ ]+ +[23]#i", $line); # 2xx/3xx status means reachable
        }

        return $result;
    }

    /**
     * Get simple URL with timeout and one retry. Can be called statically. Times out, calls it::error for all errs
     *
     * If the protocol is not http, only features of get_multi are supported.
     *
     * @param $p parameter array with the following keys
     * @param $p['url'] url to get, defaults to constructor URL
     * @param $p['headers'] optional associative array of HTTP headers to send
     * @param $p['safety'] DEPRECATED. 0 = ignore errors, 1 = errors, 2 = fatals
     * @param $p['it_error'] extra arguments for it_error or false to ignore errors
     * @param $p['timeout'] timeout per read in seconds, defaults to 5. fractions allowed. silent, see $p['safety']
     * @param $p['totaltimeout'] timeout for the whole function call
     * @param $p['maxlength'] maximum length of response
     * @param $p['filemtime'] Add HTTP header to only fetch when newer than this, otherwise return true instead of data
     * @param $p['data'] POST data array with key-value pairs
     * @param $p['files'] [fieldname => filename] of files to upload
     * @param $p['retries'] Number of retries if download fails, default 1
     * @param $p['retrysleep'] Number of seconds to wait before retry, fractions ok
     * @param $p['compression'] use compression (uses curl to do that)
     * @return contents of resulting page, considering redirects, excluding headers, or false on error
     */
    function get($p = null, $timeout = 5)
    {
        if (!is_int($timeout))
            it::error("Wrong value for second argument of it_url::get()!");
        if (!is_array($p))
            $p = array('url' => $p, 'timeout' => $timeout);
        $p += array('retries' => 1);

        if (($filter = EDC('req')) && ($filter == 1 || strstr($p['url'], "/$filter.")))
            ED($p['url']);

        if (isset($this) && $this instanceof it_url)
        {
            $url = $this;
            if ($p['url'])
                $this->it_url($p['url']); # re-canonicalize for the new url
        }
        else # called statically
            $url = new it_url($p['url']);

        $result = $url->request($p + ['followlocation' => true]);

        # Retry unless we got a definite client/server error (4xx/5xx)
        if (!$result && $p['retries'] > 0 && $url->result < 400)
        {
            usleep($p['retrysleep'] * 1000000);
            $result = $url->get(array('retries' => $p['retries'] - 1) + $p);
        }

        if (($filter = EDC('res')) && strstr($p['url'], it::replace(array('1' => ":"), $filter)))
            ED($result);

        return $result;
    }

    /**
     * Parse a raw HTTP response header block into $this->headers, $this->result and $this->cookies
     * @param $header raw header block (CRLF or LF separated lines)
     */
    function parse_http_header($header)
    {
        foreach (explode("\n", trim($header)) as $line)
        {
            $line = trim($line);

            if (preg_match('#^(HTTP)\S+\s(\d+)#', $line, $parts)) # Parse result code
                $this->headers[$parts[1]] = $this->result = $parts[2];
            elseif (preg_match('#^([^:]+): (.*)$#', $line, $parts))
                $this->headers[$parts[1]] = $parts[2];

            if ($parts[1] == 'Set-Cookie' && preg_match('/^([^=]+)=([^;]*)/', $parts[2], $cookie))
                $this->cookies[$cookie[1]] = $cookie[2];
        }
    }

    /**
     * Default request headers sent by request()/get_multi()
     * @param $url it_url object the request goes to
     * @param $p parameter array as in get()
     * @return associative array of header name => value
     */
    static function _default_headers($url, $p)
    {
        $headers = [
            'Host' => $url->realhostname . $url->explicitport,
            'User-Agent' => "Mozilla/5.0 (compatible; MSIE 9.0; ITools)",
            'Accept-Language' => $p['headers']['Accept-Language'] ?: T_lang(), # can prevent loading of it_text
            'Referer' => it::match('([-\w]+\.\w+)$', $url->hostname) == it::match('([-\w]+\.\w+)$', $_SERVER['HTTP_HOST']) ? it_url::absolute(U($_GET)) : null,
            'X-Ultra-Https' => $_SERVER['HTTPS'],
        ];

        if (is_int($p['filemtime']))
            $headers['If-Modified-Since'] = date("r", $p['filemtime']);

        return $headers;
    }

    /**
     * Build the curl option array for a request
     * @param $p parameter array as in get(), must contain 'headers'
     * @return array of CURLOPT_* => value suitable for curl_setopt_array()
     */
    static function curl_opts($p = array())
    {
        $p += array('totaltimeout' => "999999", 'timeout' => 5, 'followlocation' => true);
        $add = [];

        $headers = [];
        foreach ($p['headers'] as $header => $value)
            $headers[] = "$header: $value";

        if ($p['maxlength'])
        {
            # Abort transfer via progress callback once maxlength bytes arrived (non-zero return aborts)
            $maxlength = $p['maxlength'];
            $add += [
                #CURLOPT_BUFFERSIZE => 1024 * 1024 * 10,
                CURLOPT_NOPROGRESS => false,
                CURLOPT_PROGRESSFUNCTION => function ($dummy0, $dummy1, $size, $dummy2, $dummy3) use ($maxlength) { return $size < $maxlength ? 0 : 1; },
            ];
        }

        # file upload
        foreach ((array)$p['files'] as $field => $filename)
            $p['data'][$field] = new CURLFile($filename, mime_content_type($filename));

        if ($p['data'])
            $add += [CURLOPT_POSTFIELDS => $p['data']];

        if ($p['compression'])
            $add += [CURLOPT_ENCODING => ""];

        if ($p['pass'] || $p['user'])
            $add += [CURLOPT_HTTPAUTH => CURLAUTH_BASIC, CURLOPT_USERPWD => $p['user'] . ':' . $p['pass']];

        if ($p['verbose'] || EDC('curlverbose'))
            $add += [CURLOPT_VERBOSE => true];

        return $add + [
            CURLOPT_HEADER => false,
            CURLOPT_RETURNTRANSFER => true,
            CURLOPT_TIMEOUT => $p['totaltimeout'],
            CURLOPT_LOW_SPEED_LIMIT => 5,
            CURLOPT_LOW_SPEED_TIME => $p['timeout'],
            CURLOPT_FOLLOWLOCATION => $p['followlocation'],
            CURLOPT_HTTPHEADER => $headers,
            CURLOPT_CUSTOMREQUEST => $p['method'] ?: null,
            CURLOPT_NOBODY => $p['method'] == 'HEAD',
            CURLOPT_SAFE_UPLOAD => true, # disable special meaning of @value in POST forms (security)
            CURLOPT_ACCEPT_ENCODING => "", # set Header to accept any supported encoding and enable automatic decompression
            CURLOPT_CAPATH => '/etc/ssl/certs/',
            CURLOPT_SSL_VERIFYPEER => !$p['allow_insecure_ssl'],
            CURLOPT_SSL_VERIFYHOST => $p['allow_insecure_ssl'] ? 0 : 2,
        ];
    }

    /*
     * drop in replacement for request using curl
     *
     * @param $p['data'] POST data array with key-value pairs
     * @param $p['files'] [fieldname => filename] of files to upload
     * @param $p['method'] different HTTP method
     */
    function request($p = array())
    {
        $url = $this;
        if ($p['url'])
            $this->it_url($p['url']);
        $url->headers = array();
        $p['headers'] = (array)$p['headers'] + self::_default_headers($url, $p);

        # Header parsing is done here, so curl must not follow redirects itself
        $opts = array(CURLOPT_HEADER => 1) + self::curl_opts($p + array('user' => $this->user, 'pass' => $this->pass, 'followlocation' => false));
        $curl = curl_init($url->url);
        curl_setopt_array($curl, $opts);
        $got = curl_exec($curl);
        EDC('curlinfo', curl_getinfo($curl));

        if ($got)
        {
            $headersize = curl_getinfo($curl, CURLINFO_HEADER_SIZE);
            # Keep only the last header block (after redirects/100-continue responses)
            $url->header = array_slice(explode("\r\n\r\n", trim(substr($got, 0, $headersize))), -1)[0] . "\r\n\r\n";
            $url->data = substr($got, $headersize);
            $url->parse_http_header($url->header);

            if ($p['maxlength'] && (strlen($this->data) > $p['maxlength']))
            {
                $result = $this->result = false;
                $this->errstr = "maxlength reached";
            }
            else if ($p['filemtime'] && ($url->result == 304))
                $result = true; # Not modified, success but no data
            else
                $result =& $url->data;
        }
        else
        {
            $result = $this->result = false;
            $this->errstr = curl_error($curl);
            $this->curlinfo = curl_getinfo($curl);
        }

        # Only report after the last retry attempt has failed
        if ($got === false && $p['retries'] <= 0)
            it::error((array)$p['it_error'] + ['title' => "problem getting $url->url with curl: " . curl_error($curl), 'body' => curl_getinfo($curl)]);

        return $result;
    }

    /**
     * Get multiple URL in parallel with timeout. Needs to be called statically
     * @param $p parameter array with the following keys (same as it_url::get)
     * @param $p['urls']: array of urls to get
     * @param $p['timeout']: timeout per read in seconds, defaults to 5. (TODO: fractions allowed?)
     * @param $p['totaltimeout']: timeout for the whole function call
     * @param $p['headers']: optional array of HTTP headers to send
     * @return array of contents (or false for errors like timeouts) of resulting page using same
     *         keys as the urls input array, considering redirects, excluding headers
     */
    function get_multi($p = null)
    {
        $p += array('retries' => 1);
        $url = new it_url;
        $p['headers'] = (array)$p['headers'] + array_diff_key(self::_default_headers($url, $p), ['Host' => null]);
        $opts = self::curl_opts($p);
        $mh = curl_multi_init();
        $urls = array();
        foreach ($p['urls'] as $key => $url)
            $urls[$key] = is_array($url) ? $url : array('url' => $url);

        $keys = $handles = $retries = [];
        $results_unordered = [];
        $abort = false;

        $addhandle = function ($key) use (&$keys, &$handles, $urls, $opts, $mh)
        {
            $handle = curl_init();
            curl_setopt($handle, CURLOPT_URL, it::replace(['^//' => "http://"], $urls[$key]['url']));
            curl_setopt_array($handle, $opts);
            curl_multi_add_handle($mh, $handle);
            $keys[$handle] = $key;
            $handles[$key] = $handle;
        };
        $closehandle = function ($key) use (&$keys, &$handles, $mh)
        {
            curl_multi_remove_handle($mh, $handles[$key]);
            curl_close($handles[$key]);
            unset($keys[$handles[$key]]);
            unset($handles[$key]);
        };

        foreach ($urls as $key => $dummy)
        {
            $addhandle($key);
            $retries[$key] = 0;
        }

        $start = microtime(true);

        # curl_multi loop copied from example at http://php.net/manual/en/function.curl-multi-exec.php
        $active = null;
        do {
            $mrc = curl_multi_exec($mh, $active);
        } while ($mrc == CURLM_CALL_MULTI_PERFORM);

        $timeout = 0.001; # Very short timeout to work around problem with first select call on cURL 7.25.0

        while (!$abort && $active && $mrc == CURLM_OK)
        {
            if (curl_multi_select($mh, $timeout) == -1)
                usleep($timeout * 1000000);

            do {
                $mrc = curl_multi_exec($mh, $active);

                while (($info = curl_multi_info_read($mh)) !== false)
                {
                    if ($info['msg'] == CURLMSG_DONE)
                    {
                        $key = $keys[$info['handle']];
                        $content = curl_multi_getcontent($info['handle']);
                        $closehandle($key);
                        EDC('reqtimings', $key, $info['result'], (microtime(true) - $start) * 1000);

                        if ($info['result'] == CURLE_OK)
                            $results_unordered[$key] = $content;
                        else if ($retries[$key]++ < $p['retries'])
                            $addhandle($key);
                        else
                            $results_unordered[$key] = false;

                        # NOTE: must use saved $key here: $closehandle() above already removed
                        # $keys[$info['handle']], so looking it up again would always yield null
                        # and the per-url handler would never run
                        if (($handler = $urls[$key]['handler']))
                            $abort = $handler($info['result'], $results_unordered[$key]);
                    }
                }
            } while ($mrc == CURLM_CALL_MULTI_PERFORM);

            $timeout = 0.1; # Longer delay to avoid busy loop but shorter than default of 1s in case we still hit cURL 7.25.0 problem
        }

        foreach ($handles as $key => $dummy)
            $closehandle($key);
        curl_multi_close($mh);

        return it::filter_keys($results_unordered, array_keys($urls), ['reorder' => true]);
    }

    /**
     * Construct a local directory name to cache an URL. Named args:
     * @param $p['cachedir'] directory to store cache files in, defaults to $ULTRAHOME/var/urlcache
     * @param $p['id'] If you need more than one type of cache (e.g. different maxage) you can specify an id
     */
    static function get_cache_dir($p)
    {
        $p += array('cachedir' => $GLOBALS['ULTRAHOME'] . "/var/urlcache", 'id' => "default");
        return rtrim($p['cachedir'] . "/" . $p['id'], "/");
    }

    /**
     * Construct a local file name to cache an URL. Takes language into account. Named args:
     * @param $p['url'] remote url to get
     * @param $p['cachedir'] directory to store cache files in, @see get_cache_dir
     * @param $p['cachefilename'] Use this filename instead of calculating your own if this is given
     * @param $p['data'] POST data array with key-value pairs
     * @param $p['id'] If you need more than one type of cache (e.g. different maxage) you can specify an id
     */
    static function get_cache_filename($p)
    {
        if (!is_array($p))
            $p = array('url' => $p);
        $p['cachedir'] = it_url::get_cache_dir($p);
        $filename = $p['cachefilename'] ?: md5(T_lang() . $p['url'] . ($p['headers'] ? serialize($p['headers']) : "") . ($p['data'] ? serialize($p['data']) : ""));
        return $p['cachedir'] . "/" . substr($filename, 0, 2) . "/$filename"; # two-char subdir spreads files over 256 dirs
    }

    /**
     * Store contents of url in a file and return file name. Threadsafe: Provides locking. Called statically.
     * Requires webserver writeable directory in $p['cachedir']. Params in associative array p:
     * @param $p['id'] RECOMMENDED: If you need more than one type of cache (e.g. different maxage) you should specify an id
     * @param $p['url'] url to get
     * @param $p['headers'] optional array of HTTP headers to send
     * @param $p['cachedir'] directory to store cache files in, @see get_cache_dir
     * @param $p['timeout'] timeout in seconds, default 10. fractions allowed
     * @param $p['maxage'] maximum age of cache entries in seconds, default 86400
     * @param $p['cleanbefore'] maximum daytime when attempting cleanup, default 7200
     * @param $p['preprocess'] callback function (or array for methods) to change received file or array('function' => ..., 'in' => $src, 'out' => $dst, ...) with callback function plus args
     * @param $p['safety'] DEPRECATED. see $p['it_error']
     * @param $p['it_error'] parameters for it::error(), false means ignore errors, anything else gets passed to it::error() if errors occur
     * @param $p['keepfailed'] keep old versions of files if download fails (sending alerts conservatively)
     * @param $p['returnheaders'] Return array($path, $headers) instead of simply $path
     * @return Cache filename or false if fetch failed
     */
    function get_cache($p = array())
    {
        $p += ['timeout' => 10, 'maxage' => 86400, 'cleanbefore' => 7200, 'it_error' => $p['safety'] == 0 ? false : ($p['safety'] == 2 ? ['fatal' => true] : [])];
        $p['totaltimeout'] = $p['timeout'];
        $path = it_url::get_cache_filename($p); # Must be before changing cachedir below
        $p['cachedir'] = it_url::get_cache_dir($p);
        @mkdir($p['cachedir']);
        @mkdir(dirname($path));
        if (!is_writable(dirname($path)))
            it::error("parent dir not writable: " . trim(it::exec('ls -ld {dir} 2>&1', ['dir' => dirname($path)])));

        if ($filemtime = it_url::_expired($path, $p['maxage'])) # Outdated(non-zero int) or non-existant(true)?
        {
            $fileexists = $filemtime !== true;
            if ($lock = it_url::_lock($path))
            {
                # Touch existing file to prevent locking other getters while refreshing
                if ($fileexists)
                    touch($path);
                EDC('getcache', "new", $filemtime, $p['url'], $path);
                $url = new it_url;
                if ($result = $url->get(array('it_error' => false) + $p + array('filemtime' => EDC('nocache') ? null : $filemtime))) # => true means not modified (no new data fetched)
                {
                    $newfile = it_url::_atomicwrite($path, $result);
                    # Store response headers as includable php file, read back via include() below
                    if ($p['returnheaders'])
                        it::file_put_contents("$path.headers", "<?php return " . var_export($url->headers, true) . ";\n");
                }
                else if ($p['keepfailed'])
                    $result = $fileexists;
                else
                    @unlink($path); # Expired and failed to get

                it_url::_unlock($path, $lock);
            }
            else # Wait for file currently being transferred
            {
                EDC('getcache', "wait", $p['url'], $path);
                $result = it_url::_waitforlockedfile($path, $p);

                # If file could not be fetched by other thread but exists and we are in keepfailed mode then return old file
                if (!$result && $p['keepfailed'])
                    $result = $fileexists;
            }
        }
        else # Get file from cache
        {
            EDC('getcache', "cached", $p['url'], $path);
            $result = true; # Up to date
        }

        # Read headers before $path is modified for preprocessing
        if ($p['returnheaders'])
            $headers = @include("$path.headers");

        if ($result && $p['preprocess'])
        {
            $srcpath = $path;
            $path .= substr(md5(serialize($p['preprocess'])), 0, 2); # separate cache entry per preprocessor
            if ($filemtime = $newfile ? true : it_url::_expired($path, $p['maxage'])) # Outdated(non-zero int) or non-existant(true)?
            {
                if ($lock = it_url::_lock($path))
                {
                    # Touch existing file to prevent locking other getters while refreshing
                    if ($filemtime !== true)
                        touch($path);
                    EDC('getcache', "processnew", $p['url'], $path);
                    $dstpath = "$path.preprocesstmp";

                    if (is_array($p['preprocess']) && $p['preprocess']['function']) # Needs is_array as it can be a string where dereferencing gives first character!
                        call_user_func($p['preprocess']['function'], array('in' => $srcpath, 'out' => $dstpath) + $p['preprocess']);
                    else
                        call_user_func($p['preprocess'], $srcpath, $dstpath);

                    if (!($result = @filesize($dstpath) && @rename($dstpath, $path)))
                    {
                        @unlink($dstpath);
                        if (!$p['keepfailed'])
                            @unlink($path);
                        $result = file_exists($path);
                    }
                    $cachemiss = 1;
                    it_url::_unlock($path, $lock);
                }
                else # Wait for file currently being processed
                {
                    EDC('getcache', "processwait", $p['url'], $path);
                    $result = it_url::_waitforlockedfile($path, $p);
                }
            }
        }

        # cache cleanup at night
        if (date('H') >= 1 && (date('H')*3600 + date('i')*60 < $p['cleanbefore']) && (time() - @filemtime($p['cachedir'] . "/cleaned") > 80000))
        {
            it::file_put($p['cachedir'] . "/cleaned", ""); # touch could have permission problems
            $maxagemin = intval($p['maxage']/60);
            exec("nohup bash -c 'cd {$p['cachedir']} && for i in ??; do sleep 20; ionice -c 3 find \$i -mmin +$maxagemin -delete; done' > /dev/null 2>&1 &");
        }

        if (EDC('getcachelog'))
            it::log('debug', 'getcachelog', "miss=" . intval($cachemiss), $p['url']);

        ### EDC('getcache', $result, $path); # too verbose
        return $result ? ($p['returnheaders'] ? array($path, $headers) : $path) : false;
    }

    /**
     * Fetch a file, cache it and return contents
     * @param see it_url::get_cache()
     */
    function get_cache_contents($p)
    {
        return ($fn = self::get_cache($p)) ? it::file_get_contents($fn) : it::error((array)$p['it_error'] + ['title' => $p['safety'] == 0 ? false : "failed getting " . it_url::absolute($p['url']), 'body' => $p]);
    }

    /**
     * Check whether file at given path is older than maxage
     * @param $path File to check
     * @param $maxage Maximum age of file in seconds
     * @return Not expired: false | Non-existant file: true | Timestamp of expired file
     */
    static function _expired($path, $maxage)
    {
        if ($result = EDC('nocache') ? false : @filemtime($path))
        {
            if (time() - $result > $maxage)
                EDC('getcache', "expired", $maxage, $path);
            else
                $result = false;
        }
        else # File does not exist yet
            $result = true;

        return $result;
    }

    /**
     * Acquire lock for a given file
     * @param $path File to lock
     * @return Lock handle if successfully locked file
     */
    static function _lock($path)
    {
        $force = EDC('nocache') || (($mtime = @filemtime("$path.lock")) && (time() - $mtime > 30)); # expire forgotten locks
        return @it::fopen("$path.lock", $force ? "w" : "x"); # mode "x" fails if lock file already exists
    }

    /**
     * Release lock on a file
     * @param $path File to unlock
     * @param $lock Handle to lock acquired by _lock
     */
    static function _unlock($path, $lock)
    {
        fclose($lock);
        @unlink("$path.lock");
    }

    /**
     * Wait for file which is currently locked
     * @param $path File to wait for
     * @param $p Wait parameters, see @get_cache
     * @return Whether lock was released within timeout and file is still there
     */
    static function _waitforlockedfile($path, $p)
    {
        $sleeptime = 0.1; # seconds to wait per pass

        # wait until cache is ready, then read from cache
        for ($maxpasses = $p['timeout'] / $sleeptime, $passes = 0; ($lockedbyother = file_exists("$path.lock")) && ($passes < $maxpasses); ++$passes)
        {
            usleep($sleeptime * 1000000);
            clearstatcache();
        }

        if ($lockedbyother)
            it::error((array)$p['it_error'] + ['title' => ($passes < $maxpasses ? "error getting url" : "timeout") . " in it_url::get_cache(): url={$p['url']}, passes=$passes, maxpasses=$maxpasses, path={$p['path']}"]);

        return !$lockedbyother && file_exists($path);
    }

    /**
     * Write data to tmp file and atomically rename it to destination
     * @param $path Destination file to write data to
     * @param $data Data to write | true to just touch file
     * @return True if data was written to file
     */
    static function _atomicwrite($path, $data)
    {
        $result = false;

        if ($data === true) # Not modified, no new data, just update timestamp
            touch($path);
        else if ($data !== false)
        {
            $tmpname = tempnam(dirname($path), "writetmp");
            fputs($cachetmp = it::fopen($tmpname, "w"), $data);
            fclose($cachetmp);
            chmod($tmpname, 0664); # tempnam creates with 0600, make group readable like normal files
            $result = rename($tmpname, $path);
        }
        else
            @unlink($path);

        return $result;
    }

    /**
     * Make an URL absolute by using host and protocol from current Apache request (but not port number)
     * @param $url Optional URL ( foo.html, /foo.html, //host/bar.html, http://host/bar.html ), default self
     * @param $proto_force Optional protocol to enforce, default protocol of current request or http if in script context
     * @return absolute version of URL ( http[s]://host/bar.html )
     */
    static function absolute($url = null, $proto_force = null)
    {
        if (!isset($url))
            $url = $_SERVER['PHP_SELF'];

        if (list($proto_url, $urltmp) = it::match('^(\w+):(.*)$', $url))
        {
            $url = $urltmp;
            $proto = $proto_force ?: $proto_url;
        }
        else
            $proto = $proto_force ?: (isset($_SERVER['HTTPS']) ? 'https' : 'http');

        if (!preg_match('#^//#', $url))
        {
            $dir = preg_replace('#/[^/]*$#', '/', $_SERVER['PHP_SELF']);
            $url = preg_match('#^/#', $url) ? $url : "$dir$url"; # relative url: prepend current dir
            $url = "//" . $_SERVER['HTTP_HOST'] . $url;
        }

        return "$proto:$url";
    }

    /**
     * Craft a valid redirect URL, send Location: header and terminate execution
     * @param $url Optional URL ( foo.html, /foo.html, //host/bar.html, http://host/bar.html ), default self
     * @param $type Type of redirect, "temporary" or "permanent", default temporary
     * @return This method never returns.
     */
    function redirect($url = null, $type = "temporary")
    {
        $codes = array('permanent' => 301, 'temporary' => 303); # NOTE: HTTP 303 is called "See Other", rather than Temporary (which would be HTTP 307), but is the behaviour one usually wants for temporary redirects

        if (!($code = $codes[$type]) || !$url)
            it::fatal("invalid redirect type or missing redirect url");

        $url = preg_replace("/[\r\n].*/", '', it_url::absolute($url)); # Security: cut after CR/LF

        if (EDC('noredir'))
        {
            if (!function_exists('a'))
                new it_html();
            echo a(array('href' => $url), Q($url)) . Q(" (HTTP/1.1 $code, $type redirect)") . br() . Q("Trace: " . it_debug::backtrace());
        }
        else
            header('Location: ' . it_untaint($url, TC_SELF), true, $code);

        exit;
    }

    /**
     * Urlencode but leave some chars
     */
    static function encode($str)
    {
        return strtr(urlencode($str), array("%2C" => ",", "%28" => "(", "%29" => ")"));
    }

    /**
     * Create GET request from params, optionally only using given fields
     * @param $params Array to take values from, usually $_GET. Values of zero length are ignored.
     * @param $keys Keys to use; default: all
     */
    static function params($params, $keys = null)
    {
        return join("&", it_url::_params($params, $keys));
    }

    /**
     * Internal helper for params(): build list of key=value strings, recursing into array values
     * @param $finalize encode remaining square brackets on the outermost recursion level only
     */
    static function _params($params, $keys = null, $finalize = true)
    {
        $result = array();
        if (!isset($keys))
            $keys = array_keys($params);

        foreach ($keys as $key)
        {
            if (is_array($params[$key]))
            {
                foreach (it_url::_params($params[$key], null, false) as $value)
                {
                    if (strlen($value))
                        $result[] = it::replace(array('^([^=\[]*)' => urlencode($key) . '[$1]'), $value);
                }
            }
            else if (strlen($params[$key]))
                $result[] = urlencode($key) . "=" . it_url::encode($params[$key]);
        }

        if ($finalize)
            $result = preg_replace(['#\[#', '#\]#'], ['%5B', '%5D'], $result);

        return $result;
    }

    /**
     * Similar to php's parse_str() but leaves . and space in arg names intact
     */
    static function parse_str($query)
    {
        foreach (explode('&', $query) as $arg)
        {
            list($key, $value) = explode('=', $arg, 2);
            $result[urldecode($key)] = urldecode($value);
        }

        return (array)$result;
    }

    /**
     * Convert url into array with base url in $result[0] and GET params
     */
    static function parse($url)
    {
        list($path, $query) = explode("?", $url, 2);
        parse_str((string)$query, $params); # php builtin, not self::parse_str
        return (array)$path + (array)$params;
    }
}